diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 89d894403ff1a..1b9b30625eb83 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v4 - name: Get changed files id: changed-files-specific - uses: tj-actions/changed-files@v44 + uses: tj-actions/changed-files@v45 with: files_ignore: | release-notes/*.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c854b6b96480..e8117ea05f80c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix for hasInitiatedFetching to fix allocation explain and manual reroute APIs (([#14972](https://github.com/opensearch-project/OpenSearch/pull/14972)) - [Workload Management] Add queryGroupId to Task ([14708](https://github.com/opensearch-project/OpenSearch/pull/14708)) - Add setting to ignore throttling nodes for allocation of unassigned primaries in remote restore ([#14991](https://github.com/opensearch-project/OpenSearch/pull/14991)) +- [Workload Management] Add Delete QueryGroup API Logic ([#14735](https://github.com/opensearch-project/OpenSearch/pull/14735)) - [Streaming Indexing] Enhance RestClient with a new streaming API support ([#14437](https://github.com/opensearch-project/OpenSearch/pull/14437)) - Add basic aggregation support for derived fields ([#14618](https://github.com/opensearch-project/OpenSearch/pull/14618)) - [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680))- [Workload Management] Add Create QueryGroup API Logic ([#14680](https://github.com/opensearch-project/OpenSearch/pull/14680)) @@ -18,28 +19,43 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add `rangeQuery` and `regexpQuery` for `constant_keyword` field type ([#14711](https://github.com/opensearch-project/OpenSearch/pull/14711)) - Add took time to request nodes stats ([#15054](https://github.com/opensearch-project/OpenSearch/pull/15054)) - [Workload Management] Add Get QueryGroup API Logic ([14709](https://github.com/opensearch-project/OpenSearch/pull/14709)) +- [Workload Management] Add Settings for Workload Management feature ([#15028](https://github.com/opensearch-project/OpenSearch/pull/15028)) - [Workload Management] QueryGroup resource tracking framework changes ([#13897](https://github.com/opensearch-project/OpenSearch/pull/13897)) +- Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774)) - Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153)) +- Add allowlist setting for ingest-geoip and ingest-useragent ([#15325](https://github.com/opensearch-project/OpenSearch/pull/15325)) +- Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895)) +- Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336)) ### Dependencies - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) - Bump `org.apache.commons:commons-lang3` from 3.14.0 to 3.16.0 ([#14861](https://github.com/opensearch-project/OpenSearch/pull/14861), 
[#15205](https://github.com/opensearch-project/OpenSearch/pull/15205)) - OpenJDK Update (July 2024 Patch releases) ([#14998](https://github.com/opensearch-project/OpenSearch/pull/14998)) -- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.16.2 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995)) +- Bump `com.microsoft.azure:msal4j` from 1.16.1 to 1.17.0 ([#14995](https://github.com/opensearch-project/OpenSearch/pull/14995), [#15420](https://github.com/opensearch-project/OpenSearch/pull/15420)) - Bump `actions/github-script` from 6 to 7 ([#14997](https://github.com/opensearch-project/OpenSearch/pull/14997)) - Bump `org.tukaani:xz` from 1.9 to 1.10 ([#15110](https://github.com/opensearch-project/OpenSearch/pull/15110)) - Bump `actions/setup-java` from 1 to 4 ([#15104](https://github.com/opensearch-project/OpenSearch/pull/15104)) - Bump `org.apache.avro:avro` from 1.11.3 to 1.12.0 in /plugins/repository-hdfs ([#15119](https://github.com/opensearch-project/OpenSearch/pull/15119)) -- Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.8 and `org.bouncycastle:bc-fips` from 1.0.2.5 to 2.0.0 in /distribution/tools/plugin-cli ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103)) +- Bump `org.bouncycastle:bcpg-fips` from 1.0.7.1 to 2.0.9 ([#15103](https://github.com/opensearch-project/OpenSearch/pull/15103), [#15299](https://github.com/opensearch-project/OpenSearch/pull/15299)) - Bump `com.azure:azure-core` from 1.49.1 to 1.51.0 ([#15111](https://github.com/opensearch-project/OpenSearch/pull/15111)) - Bump `org.xerial.snappy:snappy-java` from 1.1.10.5 to 1.1.10.6 ([#15207](https://github.com/opensearch-project/OpenSearch/pull/15207)) - Bump `com.azure:azure-xml` from 1.0.0 to 1.1.0 ([#15206](https://github.com/opensearch-project/OpenSearch/pull/15206)) - Bump `reactor` from 3.5.19 to 3.5.20 ([#15262](https://github.com/opensearch-project/OpenSearch/pull/15262)) - Bump `reactor-netty` from 1.1.21 to 1.1.22 ([#15262](https://github.com/opensearch-project/OpenSearch/pull/15262)) - Bump `org.apache.kerby:kerb-admin` from 2.0.3 to 2.1.0 ([#15301](https://github.com/opensearch-project/OpenSearch/pull/15301)) +- Bump `com.azure:azure-core-http-netty` from 1.15.1 to 1.15.3 ([#15300](https://github.com/opensearch-project/OpenSearch/pull/15300)) +- Bump `com.gradle.develocity` from 3.17.6 to 3.18 ([#15297](https://github.com/opensearch-project/OpenSearch/pull/15297)) +- Bump `commons-cli:commons-cli` from 1.8.0 to 1.9.0 ([#15298](https://github.com/opensearch-project/OpenSearch/pull/15298)) +- Bump `opentelemetry` from 1.40.0 to 1.41.0 ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `opentelemetry-semconv` from 1.26.0-alpha to 1.27.0-alpha ([#15361](https://github.com/opensearch-project/OpenSearch/pull/15361)) +- Bump `tj-actions/changed-files` from 44 to 45 ([#15422](https://github.com/opensearch-project/OpenSearch/pull/15422)) +- Bump `dnsjava:dnsjava` from 3.6.0 to 3.6.1 ([#15418](https://github.com/opensearch-project/OpenSearch/pull/15418)) +- Bump `com.netflix.nebula.ospackage-base` from 11.9.1 to 11.10.0 ([#15419](https://github.com/opensearch-project/OpenSearch/pull/15419)) +- Bump `org.roaringbitmap:RoaringBitmap` from 1.1.0 to 1.2.1 ([#15423](https://github.com/opensearch-project/OpenSearch/pull/15423)) ### Changed - Add lower limit for primary and replica batch allocators timeout ([#14979](https://github.com/opensearch-project/OpenSearch/pull/14979)) +- Optimize regexp-based include/exclude on aggregations when pattern matches 
prefixes ([#14371](https://github.com/opensearch-project/OpenSearch/pull/14371)) - Replace and block usages of org.apache.logging.log4j.util.Strings ([#15238](https://github.com/opensearch-project/OpenSearch/pull/15238)) ### Deprecated @@ -55,6 +71,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fixed array field name omission in flat_object function for nested JSON ([#13620](https://github.com/opensearch-project/OpenSearch/pull/13620)) - Fix range aggregation optimization ignoring top level queries ([#15194](https://github.com/opensearch-project/OpenSearch/pull/15194)) - Fix incorrect parameter names in MinHash token filter configuration handling ([#15233](https://github.com/opensearch-project/OpenSearch/pull/15233)) +- Fix indexing error when flat_object field is explicitly null ([#15375](https://github.com/opensearch-project/OpenSearch/pull/15375)) +- Fix split response processor not included in allowlist ([#15393](https://github.com/opensearch-project/OpenSearch/pull/15393)) +- Fix unchecked cast in dynamic action map getter ([#15394](https://github.com/opensearch-project/OpenSearch/pull/15394)) ### Security diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 296c2c30aca49..4a8aa9305df74 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -22,7 +22,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Varun Bansal | [linuxpi](https://github.com/linuxpi) | Amazon | | Marc Handalian | [mch2](https://github.com/mch2) | Amazon | | Michael Froh | [msfroh](https://github.com/msfroh) | Amazon | -| Nick Knize | [nknize](https://github.com/nknize) | Amazon | +| Nick Knize | [nknize](https://github.com/nknize) | Lucenia | | Owais Kazi | [owaiskazi19](https://github.com/owaiskazi19) | Amazon | | Peter Nied | [peternied](https://github.com/peternied) | Amazon | | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon | diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9d7bbf6f8f769..ccec8e2891a65 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.40.0 -opentelemetrysemconv = 1.26.0-alpha +opentelemetry = 1.41.0 +opentelemetrysemconv = 1.27.0-alpha diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 621620eef9d71..25af649bb4aed 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.9.1" + id "com.netflix.nebula.ospackage-base" version "11.10.0" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index a619ba1acf6a7..784cdc457a1a9 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -37,7 +37,7 @@ base { dependencies { compileOnly project(":server") compileOnly project(":libs:opensearch-cli") - api "org.bouncycastle:bcpg-fips:2.0.8" + api "org.bouncycastle:bcpg-fips:2.0.9" api "org.bouncycastle:bc-fips:2.0.0" testImplementation project(":test:framework") testImplementation 'com.google.jimfs:jimfs:1.3.0' diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 deleted file mode 100644 index 758ee2fdf9de6..0000000000000 --- 
a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51c2f633e0c32d10de1ebab4c86f93310ff820f8 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 new file mode 100644 index 0000000000000..20cdbf6dc8aa8 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/bcpg-fips-2.0.9.jar.sha1 @@ -0,0 +1 @@ +f69719ef8dbf34d5f906ce480496446b2fd2ae27 \ No newline at end of file diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java index 8d29a347caeb8..181ebe5500ee5 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/HyphenationCompoundWordTokenFilterFactory.java @@ -54,11 +54,16 @@ */ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundWordTokenFilterFactory { + private final boolean noSubMatches; + private final boolean noOverlappingMatches; private final HyphenationTree hyphenationTree; HyphenationCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); + noSubMatches = settings.getAsBoolean("no_sub_matches", false); + noOverlappingMatches = settings.getAsBoolean("no_overlapping_matches", false); + String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null); if (hyphenationPatternsPath == null) { throw new IllegalArgumentException("hyphenation_patterns_path is a required setting."); @@ -85,7 +90,9 @@ public TokenStream create(TokenStream tokenStream) { minWordSize, minSubwordSize, maxSubwordSize, - onlyLongestMatch + onlyLongestMatch, + noSubMatches, + noOverlappingMatches ); } } diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java index a681d9a104ecf..bfe30da50c055 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/CompoundAnalysisTests.java @@ -50,8 +50,12 @@ import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.hamcrest.MatcherAssert; +import org.junit.Before; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -63,17 +67,27 @@ import static org.hamcrest.Matchers.instanceOf; public class CompoundAnalysisTests extends OpenSearchTestCase { + + Settings[] settingsArr; + + @Before + public void initialize() throws IOException { + final Path home = createTempDir(); + copyHyphenationPatternsFile(home); + this.settingsArr = new Settings[] { getJsonSettings(home), getYamlSettings(home) }; + } + public void testDefaultsCompoundAnalysis() throws Exception { - Settings settings = getJsonSettings(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); - AnalysisModule analysisModule = createAnalysisModule(settings); - TokenFilterFactory filterFactory = 
analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); - MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); + for (Settings settings : this.settingsArr) { + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); + AnalysisModule analysisModule = createAnalysisModule(settings); + TokenFilterFactory filterFactory = analysisModule.getAnalysisRegistry().buildTokenFilterFactories(idxSettings).get("dict_dec"); + MatcherAssert.assertThat(filterFactory, instanceOf(DictionaryCompoundWordTokenFilterFactory.class)); + } } public void testDictionaryDecompounder() throws Exception { - Settings[] settingsArr = new Settings[] { getJsonSettings(), getYamlSettings() }; - for (Settings settings : settingsArr) { + for (Settings settings : this.settingsArr) { List terms = analyze(settings, "decompoundingAnalyzer", "donaudampfschiff spargelcremesuppe"); MatcherAssert.assertThat(terms.size(), equalTo(8)); MatcherAssert.assertThat( @@ -83,6 +97,26 @@ public void testDictionaryDecompounder() throws Exception { } } + // Hyphenation Decompounder tests mimic the behavior of lucene tests + // lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java + public void testHyphenationDecompounder() throws Exception { + for (Settings settings : this.settingsArr) { + List terms = analyze(settings, "hyphenationAnalyzer", "min veninde som er lidt af en læsehest"); + MatcherAssert.assertThat(terms.size(), equalTo(10)); + MatcherAssert.assertThat(terms, hasItems("min", "veninde", "som", "er", "lidt", "af", "en", "læsehest", "læse", "hest")); + } + } + + // Hyphenation Decompounder tests mimic the behavior of lucene tests + // lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java + public void testHyphenationDecompounderNoSubMatches() throws Exception { + for (Settings settings : this.settingsArr) { + List terms = analyze(settings, "hyphenationAnalyzerNoSubMatches", "basketballkurv"); + MatcherAssert.assertThat(terms.size(), equalTo(3)); + MatcherAssert.assertThat(terms, hasItems("basketballkurv", "basketball", "kurv")); + } + } + private List analyze(Settings settings, String analyzerName, String text) throws IOException { IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings); AnalysisModule analysisModule = createAnalysisModule(settings); @@ -111,21 +145,28 @@ public Map> getTokenFilters() { })); } - private Settings getJsonSettings() throws IOException { + private void copyHyphenationPatternsFile(Path home) throws IOException { + InputStream hyphenation_patterns_path = getClass().getResourceAsStream("da_UTF8.xml"); + Path config = home.resolve("config"); + Files.createDirectory(config); + Files.copy(hyphenation_patterns_path, config.resolve("da_UTF8.xml")); + } + + private Settings getJsonSettings(Path home) throws IOException { String json = "/org/opensearch/analysis/common/test1.json"; return Settings.builder() .loadFromStream(json, getClass().getResourceAsStream(json), false) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } - private Settings getYamlSettings() throws IOException { + private Settings getYamlSettings(Path home) throws IOException { String yaml = 
"/org/opensearch/analysis/common/test1.yml"; return Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml), false) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), home.toString()) .build(); } } diff --git a/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml new file mode 100644 index 0000000000000..2c8d203be6881 --- /dev/null +++ b/modules/analysis-common/src/test/resources/org/opensearch/analysis/common/da_UTF8.xml @@ -0,0 +1,1208 @@ + + + + + + + + + + +aA +bB +cC +dD +eE +fF +gG +hH +iI +jJ +kK +lL +mM +nN +oO +pP +qQ +rR +sS +tT +uU +vV +wW +xX +yY +zZ +æÆ +øØ +åÅ + + + +.ae3 +.an3k +.an1s +.be5la +.be1t +.bi4tr +.der3i +.diagno5 +.her3 +.hoved3 +.ne4t5 +.om1 +.ove4 +.po1 +.til3 +.yd5r +ab5le +3abst +a3c +ade5la +5adg +a1e +5afg +5a4f1l +af3r +af4ri +5afs +a4gef +a4gi +ag5in +ag5si +3agti +a4gy +a3h +ais5t +a3j +a5ka +a3ke +a5kr +aku5 +a3la +a1le +a1li +al3k +4alkv +a1lo +al5si +a3lu +a1ly +am4pa +3analy +an4k5r +a3nu +3anv +a5o +a5pe +a3pi +a5po +a1ra +ar5af +1arb +a1re +5arg +a1ri +a3ro +a3sa +a3sc +a1si +a3sk +a3so +3a3sp +a3ste +a3sti +a1ta1 +a1te +a1ti +a4t5in +a1to +ato5v +a5tr +a1tu +a5va +a1ve +a5z +1ba +ba4ti +4bd +1be +be1k +be3ro +be5ru +be1s4 +be1tr +1bi +bi5sk +b1j +4b1n +1bo +bo4gr +bo3ra +bo5re +1br4 +4bs +bs5k +b3so +b1st +b5t +3bu +bu4s5tr +b5w +1by +by5s +4c1c +1ce +ce5ro +3ch +4ch. +ci4o +ck3 +5cy +3da +4d3af +d5anta +da4s +d1b +d1d4 +1de +de5d +4de4lem +der5eri +de4rig +de5sk +d1f +d1g +d3h +1di +di1e +di5l +d3j +d1k +d1l +d1m +4d1n +3do +4dop +d5ov +d1p +4drett +5d4reve +3drif +3driv +d5ros +d5ru +ds5an +ds5in +d1ski +d4sm +d4su +dsu5l +ds5vi +d3ta +d1te +dt5o +d5tr +dt5u +1du +dub5 +d1v +3dy +e5ad +e3af +e5ag +e3ak +e1al +ea4la +e3an +e5ap +e3at +e3bl +ebs3 +e1ci +ed5ar +edde4 +eddel5 +e4do +ed5ra +ed3re +ed3rin +ed4str +e3e +3eff +e3fr +3eft +e3gu +e1h +e3in +ei5s +e3je +e4j5el +e1ka +e3ke +e3kl +4e1ko +e5kr +ek5sa +3eksem +3eksp +e3ku +e1kv +e5ky +e3lad +el3ak +el3ar +e1las +e3le +e4lek +3elem +e1li +5elim +e3lo +el5sa +e5lu +e3ly +e4mad +em4p5le +em1s +en5ak +e4nan +4enn +e4no +en3so +e5nu +e5ol +e3op +e1or +e3ov +epi3 +e1pr +e3ra +er3af +e4rag +e4rak +e1re +e4ref +er5ege +5erhv +e1ri +e4rib +er1k +ero5d +er5ov +er3s +er5tr +e3rum +er5un +e5ry +e1ta +e1te +etek4s +e1ti +e3tj +e1to +e3tr +e3tu +e1ty +e3um +e3un +3eur +e1va +e3ve +e4v3erf +e1vi +e5x +1fa +fa4ce +fags3 +f1b +f1d +1fe +fej4 +fejl1 +f1f +f1g +f1h +1fi +f1k +3fl +1fo +for1en +fo4ri +f1p +f1s4 +4ft +f3ta +f1te +f1ti +f5to +f5tvi +1fu +f1v +3fy +1ga +g3art +g1b +g1d +1ge +4g5enden +ger3in +ge3s +g3f +g1g +g1h +1gi +gi4b +gi3st +5gj +g3k +g1l +g1m +3go +4g5om +g5ov +g3p +1gr +gs1a +gsde4len +g4se +gsha4 +g5sla +gs3or +gs1p +g5s4tide +g4str +gs1v +g3ta +g1te +g1ti +g5to +g3tr +gt4s +g3ud +gun5 +g3v +1gy +g5yd +4ha. +heds3 +he5s +4het +hi4e +hi4n5 +hi3s +ho5ko +ho5ve +4h3t +hun4 +hund3 +hvo4 +i1a +i3b +i4ble +i1c +i3dr +ids5k +i1el +i1en +i3er +i3et. +if3r +i3gu +i3h +i5i +i5j +i1ka +i1ke +ik1l +i5ko +ik3re +ik5ri +iks5t +ik4tu +i3ku +ik3v +i3lag +il3eg +il5ej +il5el +i3li +i4l5id +il3k +i1lo +il5u +i3mu +ind3t +5inf +ings1 +in3s +in4sv +inter1 +i3nu +i3od +i3og +i5ok +i3ol +ion4 +ions1 +i5o5r +i3ot +i5pi +i3pli +i5pr +i3re +i3ri +ir5t +i3sc +i3si +i4sm +is3p +i1ster +i3sti +i5sua +i1ta +i1te +i1ti +i3to +i3tr +it5re. 
+i1tu +i3ty +i1u +i1va +i1ve +i1vi +j3ag +jde4rer +jds1 +jek4to +4j5en. +j5k +j3le +j3li +jlmeld5 +jlmel4di +j3r +jre5 +ju3s +5kap +k5au +5kav +k5b +kel5s +ke3sk +ke5st +ke4t5a +k3h +ki3e +ki3st +k1k +k5lak +k1le +3klu +k4ny +5kod +1kon +ko3ra +3kort +ko3v +1kra +5kry +ks3an +k1si +ks3k +ks1p +k3ste +k5stu +ks5v +k1t +k4tar +k4terh +kti4e +kt5re +kt5s +3kur +1kus +3kut +k4vo +k4vu +5lab +lad3r +5lagd +la4g3r +5lam +1lat +l1b +ldiagnos5 +l3dr +ld3st +1le. +5led +4lele +le4mo +3len +1ler +1les +4leu +l1f +lfin4 +lfind5 +l1go1 +l3h +li4ga +4l5ins +4l3int +li5o +l3j +l1ke +l1ko +l3ky +l1l +l5mu +lo4du +l3op +4l5or +3lov +4l3p +l4ps +l3r +4ls +lses1 +ls5in +l5sj +l1ta +l4taf +l1te +l4t5erf +l3ti +lt3o +l3tr +l3tu +lu5l +l3ve +l3vi +1ma +m1b +m3d +1me +4m5ej +m3f +m1g +m3h +1mi +mi3k +m5ing +mi4o +mi5sty +m3k +m1l +m1m +mmen5 +m1n +3mo +mo4da +4mop +4m5ov +m1pe +m3pi +m3pl +m1po +m3pr +m1r +mse5s +ms5in +m5sk +ms3p +m3ste +ms5v +m3ta +m3te +m3ti +m3tr +m1ud +1mul +mu1li +3my +3na +4nak +1nal +n1b +n1c +4nd +n3dr +nd5si +nd5sk +nd5sp +1ne +ne5a +ne4da +nemen4 +nement5e +neo4 +n3erk +n5erl +ne5sl +ne5st +n1f +n4go +4n1h +1ni +4nim +ni5o +ni3st +n1ke +n1ko +n3kr +n3ku +n5kv +4n1l +n1m +n1n +1no +n3ord +n5p +n3r +4ns +n3si +n1sku +ns3po +n1sta +n5sti +n1ta +nta4le +n1te +n1ti +ntiali4 +n3to +n1tr +nt4s5t +nt4su +n3tu +n3ty +4n1v +3ny +n3z +o3a +o4as +ob3li +o1c +o4din +od5ri +od5s +od5un +o1e +of5r +o4gek +o4gel +o4g5o +og5re +og5sk +o5h +o5in +oi6s5e +o1j +o3ka +o1ke +o3ku +o3la +o3le +o1li +o1lo +o3lu +o5ly +1omr +on3k +ook5 +o3or +o5ov +o3pi +op3l +op3r +op3s +3opta +4or. +or1an +3ordn +ord5s +o3re. +o3reg +o3rek +o3rer +o3re3s +o3ret +o3ri +3orient +or5im +o4r5in +or3k +or5o +or3sl +or3st +o3si +o3so +o3t +o1te +o5un +ov4s +3pa +pa5gh +p5anl +p3d +4pec +3pen +1per +pe1ra +pe5s +pe3u +p3f +4p5h +1pla +p4lan +4ple. +4pler +4ples +p3m +p3n +5pok +4po3re +3pot +4p5p4 +p4ro +1proc +p3sk +p5so +ps4p +p3st +p1t +1pu +pu5b +p5ule +p5v +5py3 +qu4 +4raf +ra5is +4rarb +r1b +r4d5ar +r3dr +rd4s3 +4reks +1rel +re5la +r5enss +5rese +re5spo +4ress +re3st +re5s4u +5rett +r1f +r1gu +r1h +ri1e +ri5la +4rimo +r4ing +ringse4 +ringso4r +4rinp +4rint +r3ka +r1ke +r1ki +rk3so +r3ku +r1l +rmo4 +r5mu +r1n +ro1b +ro3p +r3or +r3p +r1r +rre5s +rro4n5 +r1sa +r1si +r5skr +r4sk5v +rs4n +r3sp +r5stu +r5su +r3sv +r5tal +r1te +r4teli +r1ti +r3to +r4t5or +rt5rat +rt3re +r5tri +r5tro +rt3s +r5ty +r3ud +run4da +5rut +r3va +r1ve +r3vi +ry4s +s3af +1sam +sa4ma +s3ap +s1ar +1sat +4s1b +s1d +sdy4 +1se +s4ed +5s4er +se4se +s1f +4s1g4 +4s3h +si4bl +1sig +s5int +5sis +5sit +5siu +s5ju +4sk. +1skab +1ske +s3kl +sk5s4 +5sky +s1le +s1li +slo3 +5slu +s5ly +s1m +s4my +4snin +s4nit +so5k +5sol +5som. +3somm +s5oms +5somt +3son +4s1op +sp4 +3spec +4sper +3s4pi +s1pl +3sprog. +s5r4 +s1s4 +4st. +5s4tam +1stan +st5as +3stat +1stav +1ste. +1sted +3stel +5stemo +1sten +5step +3ster. +3stes +5stet +5stj +3sto +st5om +1str +s1ud +3sul +s3un +3sur +s3ve +3s4y +1sy1s +5ta. 
+1tag +tands3 +4tanv +4tb +tede4l +teds5 +3teg +5tekn +teo1 +5term +te5ro +4t1f +6t3g +t1h +tialis5t +3tid +ti4en +ti3st +4t3k +4t1l +tli4s5 +t1m +t1n +to5ra +to1re +to1ri +tor4m +4t3p +t4ra +4tres +tro5v +1try +4ts +t3si +ts4pa +ts5pr +t3st +ts5ul +4t1t +t5uds +5tur +t5ve +1typ +u1a +5udl +ud5r +ud3s +3udv +u1e +ue4t5 +uge4ri +ugs3 +u5gu +u3i +u5kl +uk4ta +uk4tr +u1la +u1le +u5ly +u5pe +up5l +u5q +u3ra +u3re +u4r3eg +u1rer +u3ro +us5a +u3si +u5ska +u5so +us5v +u1te +u1ti +u1to +ut5r +ut5s4 +5u5v +va5d +3varm +1ved +ve4l5e +ve4reg +ve3s +5vet +v5h +vi4l3in +1vis +v5j +v5k +vl4 +v3le +v5li +vls1 +1vo +4v5om +v5p +v5re +v3st +v5su +v5t +3vu +y3a +y5dr +y3e +y3ke +y5ki +yk3li +y3ko +yk4s5 +y3kv +y5li +y5lo +y5mu +yns5 +y5o +y1pe +y3pi +y3re +yr3ek +y3ri +y3si +y3ti +y5t3r +y5ve +zi5o + +.så3 +.ær5i +.øv3r +a3tø +a5væ +brød3 +5bæ +5drøv +dstå4 +3dæ +3dø +e3læ +e3lø +e3rø +er5øn +e5tæ +e5tø +e1væ +e3æ +e5å +3fæ +3fø +fø4r5en +giø4 +g4sø +g5så +3gæ +3gø1 +3gå +i5tæ +i3ø +3kø +3kå +lingeniø4 +l3væ +5løs +m5tå +1mæ +3mø +3må +n3kæ +n5tæ +3næ +4n5æb +5nø +o5læ +or3ø +o5å +5præ +5pæd +på3 +r5kæ +r5tæ +r5tø +r3væ +r5æl +4røn +5rør +3råd +r5år +s4kå +3slå +s4næ +5stø +1stå +1sæ +4s5æn +1sø +s5øk +så4r5 +ti4ø +3træk. +t4sø +t5så +t3væ +u3læ +3værd +1værk +5vå +y5væ +æb3l +æ3c +æ3e +æg5a +æ4gek +æ4g5r +ægs5 +æ5i +æ5kv +ælle4 +æn1dr +æ5o +æ1re +ær4g5r +æ3ri +ær4ma +ær4mo +ær5s +æ5si +æ3so +æ3ste +æ3ve +øde5 +ø3e +ø1je +ø3ke +ø3le +øms5 +øn3st +øn4t3 +ø1re +ø3ri +ørne3 +ør5o +ø1ve +å1d +å1e +å5h +å3l +å3re +års5t +å5sk +å3t + + diff --git a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java index 524d1719c37c0..742b923f05199 100644 --- a/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java +++ b/modules/ingest-geoip/src/main/java/org/opensearch/ingest/geoip/IngestGeoIpModulePlugin.java @@ -44,6 +44,7 @@ import org.opensearch.common.cache.CacheBuilder; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; @@ -62,10 +63,18 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Closeable { + static final Setting> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.geoip.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); public static final Setting CACHE_SIZE = Setting.longSetting("ingest.geoip.cache_size", 1000, 0, Setting.Property.NodeScope); static String[] DEFAULT_DATABASE_FILENAMES = new String[] { "GeoLite2-ASN.mmdb", "GeoLite2-City.mmdb", "GeoLite2-Country.mmdb" }; @@ -74,7 +83,7 @@ public class IngestGeoIpModulePlugin extends Plugin implements IngestPlugin, Clo @Override public List> getSettings() { - return Arrays.asList(CACHE_SIZE); + return Arrays.asList(CACHE_SIZE, PROCESSORS_ALLOWLIST_SETTING); } @Override @@ -90,7 +99,10 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return Collections.singletonMap(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new 
GeoIpCache(cacheSize))); + return filterForAllowlistSetting( + parameters.env.settings(), + Map.of(GeoIpProcessor.TYPE, new GeoIpProcessor.Factory(databaseReaders, new GeoIpCache(cacheSize))) + ); } /* @@ -175,6 +187,30 @@ public void close() throws IOException { } } + private Map filterForAllowlistSetting(Settings settings, Map map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); + } + /** * The in-memory cache for the geoip data. There should only be 1 instance of this class.. * This cache differs from the maxmind's {@link NodeCache} such that this cache stores the deserialized Json objects to avoid the diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index f6d42d1d65670..9446ec1228532 100644 --- a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -35,8 +35,20 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; import org.opensearch.ingest.geoip.IngestGeoIpModulePlugin.GeoIpCache; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.StreamsUtils; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; import static org.mockito.Mockito.mock; @@ -77,4 +89,87 @@ public void testInvalidInit() { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new GeoIpCache(-1)); assertEquals("geoip max cache size must be 0 or greater", ex.getMessage()); } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("geoip")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + Settings.Builder settingsBuilder = Settings.builder() + .putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), invalidAllowList); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settingsBuilder.build())) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + 
} + + public void testAllowListNotSpecified() throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.remove(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + createDb(settingsBuilder); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + final Set expected = Set.of("geoip"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + createDb(settingsBuilder); + final Settings settings = settingsBuilder.putList(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList).build(); + try (IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private void createDb(Settings.Builder settingsBuilder) throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-geoip"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder.put("ingest.geoip.database_path", configDir).put("path.home", configDir); + try { + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-City.mmdb")), + configDir.resolve("GeoLite2-City.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-Country.mmdb")), + configDir.resolve("GeoLite2-Country.mmdb") + ); + Files.copy( + new ByteArrayInputStream(StreamsUtils.copyToBytesFromClasspath("/GeoLite2-ASN.mmdb")), + configDir.resolve("GeoLite2-ASN.mmdb") + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } } diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index cd96fcf2347ef..bac90d20b44e1 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -33,6 +33,7 @@ package org.opensearch.ingest.useragent; import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; import org.opensearch.ingest.Processor; import org.opensearch.plugins.IngestPlugin; import org.opensearch.plugins.Plugin; @@ -47,10 +48,19 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; import java.util.stream.Stream; public class IngestUserAgentModulePlugin extends Plugin implements IngestPlugin { + static final Setting> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting( + "ingest.useragent.processors.allowed", + List.of(), + Function.identity(), + Setting.Property.NodeScope + ); private final Setting CACHE_SIZE_SETTING = Setting.longSetting( "ingest.user_agent.cache_size", 1000, @@ -77,7 +87,34 @@ public Map getProcessors(Processor.Parameters paramet } catch (IOException e) { throw new RuntimeException(e); } - return 
Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)); + return filterForAllowlistSetting( + parameters.env.settings(), + Collections.singletonMap(UserAgentProcessor.TYPE, new UserAgentProcessor.Factory(userAgentParsers)) + ); + } + + private Map filterForAllowlistSetting(Settings settings, Map map) { + if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) { + return Map.copyOf(map); + } + final Set allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings)); + // Assert that no unknown processors are defined in the allowlist + final Set unknownAllowlistProcessors = allowlist.stream() + .filter(p -> map.containsKey(p) == false) + .collect(Collectors.toUnmodifiableSet()); + if (unknownAllowlistProcessors.isEmpty() == false) { + throw new IllegalArgumentException( + "Processor(s) " + + unknownAllowlistProcessors + + " were defined in [" + + PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist" + ); + } + return map.entrySet() + .stream() + .filter(e -> allowlist.contains(e.getKey())) + .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue)); } static Map createUserAgentParsers(Path userAgentConfigDirectory, UserAgentCache cache) throws IOException { diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java new file mode 100644 index 0000000000000..31fdafff1188a --- /dev/null +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.useragent; + +import org.opensearch.common.settings.Settings; +import org.opensearch.env.TestEnvironment; +import org.opensearch.ingest.Processor; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +public class IngestUserAgentModulePluginTests extends OpenSearchTestCase { + private Settings.Builder settingsBuilder; + + @Before + public void setup() throws IOException { + Path configDir = createTempDir(); + Path userAgentConfigDir = configDir.resolve("ingest-user-agent"); + Files.createDirectories(userAgentConfigDir); + settingsBuilder = Settings.builder().put("ingest-user-agent", configDir).put("path.home", configDir); + + // Copy file, leaving out the device parsers at the end + String regexWithoutDevicesFilename = "regexes_without_devices.yml"; + try ( + BufferedReader reader = new BufferedReader( + new InputStreamReader(UserAgentProcessor.class.getResourceAsStream("/regexes.yml"), StandardCharsets.UTF_8) + ); + BufferedWriter writer = Files.newBufferedWriter(userAgentConfigDir.resolve(regexWithoutDevicesFilename)); + ) { + String line; + while ((line = reader.readLine()) != null) { + if (line.startsWith("device_parsers:")) { + break; + } + + writer.write(line); + writer.newLine(); + } + } + } + + public void testAllowList() throws IOException { + runAllowListTest(List.of()); + runAllowListTest(List.of("user_agent")); + } + + public void testInvalidAllowList() throws IOException { + List invalidAllowList = List.of("set"); + final Settings settings = settingsBuilder.putList( + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), + invalidAllowList + ).build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> plugin.getProcessors(createParameters(settings)) + ); + assertEquals( + "Processor(s) " + + invalidAllowList + + " were defined in [" + + IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey() + + "] but do not exist", + e.getMessage() + ); + } + } + + public void testAllowListNotSpecified() throws IOException { + settingsBuilder.remove(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey()); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + final Set expected = Set.of("user_agent"); + assertEquals(expected, plugin.getProcessors(createParameters(settingsBuilder.build())).keySet()); + } + } + + private void runAllowListTest(List allowList) throws IOException { + final Settings settings = settingsBuilder.putList(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList) + .build(); + try (IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin()) { + assertEquals(Set.copyOf(allowList), plugin.getProcessors(createParameters(settings)).keySet()); + } + } + + private static Processor.Parameters createParameters(Settings settings) { + return new Processor.Parameters( + TestEnvironment.newEnvironment(settings), + null, + null, + null, + () -> 0L, + (a, b) -> null, + null, + null, + $ -> {}, + null + ); + } +} diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java 
b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index 2a2de9debb9d9..488b9e632aa2a 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -97,6 +97,8 @@ public Map> getResponseProces new TruncateHitsResponseProcessor.Factory(), CollapseResponseProcessor.TYPE, new CollapseResponseProcessor.Factory(), + SplitResponseProcessor.TYPE, + new SplitResponseProcessor.Factory(), SortResponseProcessor.TYPE, new SortResponseProcessor.Factory() ) diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java index 404842742629c..e10f06da29ba0 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java @@ -47,6 +47,7 @@ public void testResponseProcessorAllowlist() throws IOException { List.of("rename_field", "truncate_hits", "collapse"), SearchPipelineCommonModulePlugin::getResponseProcessors ); + runAllowlistTest(key, List.of("split", "sort"), SearchPipelineCommonModulePlugin::getResponseProcessors); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, @@ -82,7 +83,7 @@ public void testAllowlistNotSpecified() throws IOException { try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) { assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet()); assertEquals( - Set.of("rename_field", "truncate_hits", "collapse", "sort"), + Set.of("rename_field", "truncate_hits", "collapse", "split", "sort"), plugin.getResponseProcessors(createParameters(settings)).keySet() ); assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet()); diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 6b63311cb3125..e76556f24cc23 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -48,7 +48,7 @@ dependencies { api 'com.azure:azure-json:1.1.0' api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.25.1' - api 'com.azure:azure-core-http-netty:1.15.1' + api 'com.azure:azure-core-http-netty:1.15.3' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -61,7 +61,7 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.16.2' + api 'com.microsoft.azure:msal4j:1.17.0' api 'com.nimbusds:oauth2-oidc-sdk:11.9.1' api 'com.nimbusds:nimbus-jose-jwt:9.40' api 'com.nimbusds:content-type:2.3' diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 deleted file mode 100644 index 3a0747a0daacb..0000000000000 --- 
a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -036f7466a521aa99c79a491a9cf20444667df78b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 new file mode 100644 index 0000000000000..3cea52ba67ce5 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 @@ -0,0 +1 @@ +03b5bd5f5c16eea71f130119dbfb1fe5239f806a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 deleted file mode 100644 index 1363e5a0793d2..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.16.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b43ec4dd657f8ed5922bc0a8ccbe49000968bd15 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 new file mode 100644 index 0000000000000..34101c989eecd --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.17.0.jar.sha1 @@ -0,0 +1 @@ +7d37157da92b719f250b0023234ac9dda922a2a5 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index e1e5f422f3e07..eeb5e2cd88317 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -70,7 +70,7 @@ dependencies { api 'com.google.code.gson:gson:2.11.0' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.8.0' + api 'commons-cli:commons-cli:1.9.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 deleted file mode 100644 index 65102052409ea..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -41a4bff12057eecb6daaf9c7f36c237815be3da1 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 new file mode 100644 index 0000000000000..9a97a11dbe8d5 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.9.0.jar.sha1 @@ -0,0 +1 @@ +e1cdfa8bf40ccbb7440b2d1232f9f45bb20a1844 \ No newline at end of file diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 66d172e3dc7f3..872d928aa093f 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -86,7 +86,9 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'kotlin.io.path.PathsKt', 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', - 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener' + 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 deleted file mode 100644 
index 04ec81edf969c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ead8fb235fa12 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 @@ -0,0 +1 @@ +ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 deleted file mode 100644 index bcd7c886b5f6c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..b601a4fb5246f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 @@ -0,0 +1 @@ +fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 deleted file mode 100644 index 9716ec518c886..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..74b7cb25cdfe5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 @@ -0,0 +1 @@ +3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 deleted file mode 100644 index c0e79b05aa675..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..d8d8f75850cb6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 deleted file mode 100644 index 1df0ad183c475..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..3e1212943f894 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 @@ -0,0 +1 @@ +8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 deleted file mode 100644 index ebeb639a8459c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..21a29cc8445e5 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 deleted file mode 100644 index b630c808d4763..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..ae522ac698aa8 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 deleted file mode 100644 index eda90dc825e6f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..a741d0a167d60 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 @@ -0,0 +1 @@ +368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 deleted file mode 100644 index cdd7dc6551b33..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..972e7de1c74be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 @@ -0,0 +1 @@ 
+c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 deleted file mode 100644 index 668291498bbae..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..c56ca0b9e8169 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 @@ -0,0 +1 @@ +b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 deleted file mode 100644 index 74f0786e21954..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..39db6cb73727f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 @@ -0,0 +1 @@ +f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 deleted file mode 100644 index 23ef1bf6e6b2c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..6dcd496e033d3 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 @@ -0,0 +1 @@ +9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 deleted file mode 100644 index aea753f0df18b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 new file mode 100644 index 0000000000000..161e400f87077 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 @@ -0,0 +1 @@ +d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 deleted file mode 100644 index 7124dcb31da3f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e986b4b53388e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 @@ -0,0 +1 @@ +906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java index 6b4496af76dc3..64f510fa1db67 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPlugin.java @@ -11,6 +11,7 @@ import org.opensearch.action.ActionRequest; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.inject.Module; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; @@ -18,10 +19,13 @@ import org.opensearch.common.settings.SettingsFilter; import org.opensearch.core.action.ActionResponse; import org.opensearch.plugin.wlm.action.CreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.GetQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportCreateQueryGroupAction; +import org.opensearch.plugin.wlm.action.TransportDeleteQueryGroupAction; import org.opensearch.plugin.wlm.action.TransportGetQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestCreateQueryGroupAction; +import org.opensearch.plugin.wlm.rest.RestDeleteQueryGroupAction; import org.opensearch.plugin.wlm.rest.RestGetQueryGroupAction; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.plugins.ActionPlugin; @@ -29,6 +33,7 @@ import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; +import java.util.Collection; import java.util.List; import java.util.function.Supplier; @@ -46,7 +51,8 @@ public WorkloadManagementPlugin() {} public List> getActions() { return List.of( new ActionPlugin.ActionHandler<>(CreateQueryGroupAction.INSTANCE, TransportCreateQueryGroupAction.class), - new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class) + new ActionPlugin.ActionHandler<>(GetQueryGroupAction.INSTANCE, TransportGetQueryGroupAction.class), + new ActionPlugin.ActionHandler<>(DeleteQueryGroupAction.INSTANCE, TransportDeleteQueryGroupAction.class) ); } @@ -60,11 +66,16 @@ public List getRestHandlers( IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster ) { - return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction()); + return List.of(new RestCreateQueryGroupAction(), new RestGetQueryGroupAction(), new RestDeleteQueryGroupAction()); } @Override public List> getSettings() { return List.of(QueryGroupPersistenceService.MAX_QUERY_GROUP_COUNT); } + + @Override + public Collection createGuiceModules() { + return List.of(new WorkloadManagementPluginModule()); + } } diff --git 
a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java new file mode 100644 index 0000000000000..b7c7805639eb2 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/WorkloadManagementPluginModule.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm; + +import org.opensearch.common.inject.AbstractModule; +import org.opensearch.common.inject.Singleton; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; + +/** + * Guice Module to manage WorkloadManagement related objects + */ +public class WorkloadManagementPluginModule extends AbstractModule { + + /** + * Constructor for WorkloadManagementPluginModule + */ + public WorkloadManagementPluginModule() {} + + @Override + protected void configure() { + // Bind QueryGroupPersistenceService as a singleton to ensure a single instance is used, + // preventing multiple throttling key registrations in the constructor. + bind(QueryGroupPersistenceService.class).in(Singleton.class); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java new file mode 100644 index 0000000000000..c78952a2f89ad --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.master.AcknowledgedResponse; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupAction extends ActionType { + + /** + * An instance of DeleteQueryGroupAction + */ + public static final DeleteQueryGroupAction INSTANCE = new DeleteQueryGroupAction(); + + /** + * Name for DeleteQueryGroupAction + */ + public static final String NAME = "cluster:admin/opensearch/wlm/query_group/_delete"; + + /** + * Default constructor + */ + private DeleteQueryGroupAction() { + super(NAME, AcknowledgedResponse::new); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java new file mode 100644 index 0000000000000..e514943c2c7e9 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for delete QueryGroup + * + * @opensearch.experimental + */ +public class DeleteQueryGroupRequest extends AcknowledgedRequest { + private final String name; + + /** + * Default constructor for DeleteQueryGroupRequest + * @param name - name for the QueryGroup to delete + */ + public DeleteQueryGroupRequest(String name) { + this.name = name; + } + + /** + * Constructor for DeleteQueryGroupRequest + * @param in - A {@link StreamInput} object + */ + public DeleteQueryGroupRequest(StreamInput in) throws IOException { + super(in); + name = in.readOptionalString(); + } + + @Override + public ActionRequestValidationException validate() { + if (name == null) { + ActionRequestValidationException actionRequestValidationException = new ActionRequestValidationException(); + actionRequestValidationException.addValidationError("QueryGroup name is missing"); + return actionRequestValidationException; + } + return null; + } + + /** + * Name getter + */ + public String getName() { + return name; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalString(name); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java index 01aa8cfb5e610..190ff17261bb4 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java @@ -14,7 +14,6 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; import org.opensearch.tasks.Task; -import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; /** @@ -24,7 +23,6 @@ */ public class TransportCreateQueryGroupAction extends HandledTransportAction { - private final ThreadPool threadPool; private final QueryGroupPersistenceService queryGroupPersistenceService; /** @@ -33,7 +31,6 @@ public class TransportCreateQueryGroupAction extends HandledTransportAction listener) { - threadPool.executor(ThreadPool.Names.SAME) - .execute(() -> queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener)); + queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener); } } diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..e4d3908d4a208 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +/** + * Transport action for delete QueryGroup + * + * @opensearch.experimental + */ +public class TransportDeleteQueryGroupAction extends TransportClusterManagerNodeAction { + + private final QueryGroupPersistenceService queryGroupPersistenceService; + + /** + * Constructor for TransportDeleteQueryGroupAction + * + * @param clusterService - a {@link ClusterService} object + * @param transportService - a {@link TransportService} object + * @param actionFilters - a {@link ActionFilters} object + * @param threadPool - a {@link ThreadPool} object + * @param indexNameExpressionResolver - a {@link IndexNameExpressionResolver} object + * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object + */ + @Inject + public TransportDeleteQueryGroupAction( + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + ThreadPool threadPool, + IndexNameExpressionResolver indexNameExpressionResolver, + QueryGroupPersistenceService queryGroupPersistenceService + ) { + super( + DeleteQueryGroupAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + DeleteQueryGroupRequest::new, + indexNameExpressionResolver + ); + this.queryGroupPersistenceService = queryGroupPersistenceService; + } + + @Override + protected void clusterManagerOperation( + DeleteQueryGroupRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(DeleteQueryGroupRequest request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java new file mode 100644 index 0000000000000..8ad621cf8a1e4 --- /dev/null +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action to delete a QueryGroup + * + * @opensearch.experimental + */ +public class RestDeleteQueryGroupAction extends BaseRestHandler { + + /** + * Constructor for RestDeleteQueryGroupAction + */ + public RestDeleteQueryGroupAction() {} + + @Override + public String getName() { + return "delete_query_group"; + } + + /** + * The list of {@link Route}s that this RestHandler is responsible for handling. + */ + @Override + public List routes() { + return List.of(new Route(DELETE, "_wlm/query_group/{name}")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteQueryGroupRequest deleteQueryGroupRequest = new DeleteQueryGroupRequest(request.param("name")); + deleteQueryGroupRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", deleteQueryGroupRequest.clusterManagerNodeTimeout()) + ); + deleteQueryGroupRequest.timeout(request.paramAsTime("timeout", deleteQueryGroupRequest.timeout())); + return channel -> client.execute(DeleteQueryGroupAction.INSTANCE, deleteQueryGroupRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java index fe7080da78bbe..ba5161a2c855e 100644 --- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java +++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java @@ -10,10 +10,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterManagerTaskThrottler.ThrottlingKey; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -24,6 +28,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.rest.RestStatus; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import java.util.Collection; @@ -38,6 +43,7 @@ public class QueryGroupPersistenceService { static final String SOURCE = "query-group-persistence-service"; private static final String CREATE_QUERY_GROUP_THROTTLING_KEY = "create-query-group"; + private static final String DELETE_QUERY_GROUP_THROTTLING_KEY = "delete-query-group"; private static final Logger logger = 
LogManager.getLogger(QueryGroupPersistenceService.class); /** * max QueryGroup count setting name @@ -65,6 +71,7 @@ public class QueryGroupPersistenceService { private final ClusterService clusterService; private volatile int maxQueryGroupCount; final ThrottlingKey createQueryGroupThrottlingKey; + final ThrottlingKey deleteQueryGroupThrottlingKey; /** * Constructor for QueryGroupPersistenceService @@ -81,6 +88,7 @@ public QueryGroupPersistenceService( ) { this.clusterService = clusterService; this.createQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(CREATE_QUERY_GROUP_THROTTLING_KEY, true); + this.deleteQueryGroupThrottlingKey = clusterService.registerClusterManagerTask(DELETE_QUERY_GROUP_THROTTLING_KEY, true); setMaxQueryGroupCount(MAX_QUERY_GROUP_COUNT.get(settings)); clusterSettings.addSettingsUpdateConsumer(MAX_QUERY_GROUP_COUNT, this::setMaxQueryGroupCount); } @@ -212,6 +220,50 @@ public static Collection getFromClusterStateMetadata(String name, Cl .collect(Collectors.toList()); } + /** + * Modify cluster state to delete the QueryGroup + * @param deleteQueryGroupRequest - request to delete a QueryGroup + * @param listener - ActionListener for AcknowledgedResponse + */ + public void deleteInClusterStateMetadata( + DeleteQueryGroupRequest deleteQueryGroupRequest, + ActionListener listener + ) { + clusterService.submitStateUpdateTask(SOURCE, new AckedClusterStateUpdateTask<>(deleteQueryGroupRequest, listener) { + @Override + public ClusterState execute(ClusterState currentState) { + return deleteQueryGroupInClusterState(deleteQueryGroupRequest.getName(), currentState); + } + + @Override + public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey() { + return deleteQueryGroupThrottlingKey; + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + }); + } + + /** + * Modify cluster state to delete the QueryGroup, and return the new cluster state + * @param name - the name for QueryGroup to be deleted + * @param currentClusterState - current cluster state + */ + ClusterState deleteQueryGroupInClusterState(final String name, final ClusterState currentClusterState) { + final Metadata metadata = currentClusterState.metadata(); + final QueryGroup queryGroupToRemove = metadata.queryGroups() + .values() + .stream() + .filter(queryGroup -> queryGroup.getName().equals(name)) + .findAny() + .orElseThrow(() -> new ResourceNotFoundException("No QueryGroup exists with the provided name: " + name)); + + return ClusterState.builder(currentClusterState).metadata(Metadata.builder(metadata).remove(queryGroupToRemove).build()).build(); + } + /** * maxQueryGroupCount getter */ diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java index 038f015713c5b..ecb9a6b2dc0d2 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/CreateQueryGroupResponseTests.java @@ -27,7 +27,7 @@ public class CreateQueryGroupResponseTests extends OpenSearchTestCase { /** - * Test case to verify the serialization and deserialization of CreateQueryGroupResponse. + * Test case to verify serialization and deserialization of CreateQueryGroupResponse. 
*/ public void testSerialization() throws IOException { CreateQueryGroupResponse response = new CreateQueryGroupResponse(QueryGroupTestUtils.queryGroupOne, RestStatus.OK); @@ -46,7 +46,7 @@ public void testSerialization() throws IOException { } /** - * Test case to verify the toXContent method of CreateQueryGroupResponse. + * Test case to validate the toXContent method of CreateQueryGroupResponse. */ public void testToXContentCreateQueryGroup() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java new file mode 100644 index 0000000000000..bc2e4f0faca4c --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequestTests.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.plugin.wlm.QueryGroupTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class DeleteQueryGroupRequestTests extends OpenSearchTestCase { + + /** + * Test case to verify the serialization and deserialization of DeleteQueryGroupRequest. + */ + public void testSerialization() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(QueryGroupTestUtils.NAME_ONE); + assertEquals(QueryGroupTestUtils.NAME_ONE, request.getName()); + BytesStreamOutput out = new BytesStreamOutput(); + request.writeTo(out); + StreamInput streamInput = out.bytes().streamInput(); + DeleteQueryGroupRequest otherRequest = new DeleteQueryGroupRequest(streamInput); + assertEquals(request.getName(), otherRequest.getName()); + } + + /** + * Test case to validate a DeleteQueryGroupRequest. + */ + public void testSerializationWithNull() throws IOException { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest((String) null); + ActionRequestValidationException actionRequestValidationException = request.validate(); + assertFalse(actionRequestValidationException.getMessage().isEmpty()); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..253d65f8da80f --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.action; + +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.core.action.ActionListener; +import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class TransportDeleteQueryGroupActionTests extends OpenSearchTestCase { + + ClusterService clusterService = mock(ClusterService.class); + TransportService transportService = mock(TransportService.class); + ActionFilters actionFilters = mock(ActionFilters.class); + ThreadPool threadPool = mock(ThreadPool.class); + IndexNameExpressionResolver indexNameExpressionResolver = mock(IndexNameExpressionResolver.class); + QueryGroupPersistenceService queryGroupPersistenceService = mock(QueryGroupPersistenceService.class); + + TransportDeleteQueryGroupAction action = new TransportDeleteQueryGroupAction( + clusterService, + transportService, + actionFilters, + threadPool, + indexNameExpressionResolver, + queryGroupPersistenceService + ); + + /** + * Test case to validate the construction for TransportDeleteQueryGroupAction + */ + public void testConstruction() { + assertNotNull(action); + assertEquals(ThreadPool.Names.SAME, action.executor()); + } + + /** + * Test case to validate the clusterManagerOperation function in TransportDeleteQueryGroupAction + */ + public void testClusterManagerOperation() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest("testGroup"); + @SuppressWarnings("unchecked") + ActionListener listener = mock(ActionListener.class); + ClusterState clusterState = mock(ClusterState.class); + action.clusterManagerOperation(request, clusterState, listener); + verify(queryGroupPersistenceService).deleteInClusterStateMetadata(eq(request), eq(listener)); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java new file mode 100644 index 0000000000000..72191e076bb87 --- /dev/null +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugin.wlm.rest; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupAction; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.List; + +import org.mockito.ArgumentCaptor; + +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.rest.RestRequest.Method.DELETE; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +public class RestDeleteQueryGroupActionTests extends OpenSearchTestCase { + /** + * Test case to validate the construction for RestDeleteQueryGroupAction + */ + public void testConstruction() { + RestDeleteQueryGroupAction action = new RestDeleteQueryGroupAction(); + assertNotNull(action); + assertEquals("delete_query_group", action.getName()); + List routes = action.routes(); + assertEquals(1, routes.size()); + RestHandler.Route route = routes.get(0); + assertEquals(DELETE, route.getMethod()); + assertEquals("_wlm/query_group/{name}", route.getPath()); + } + + /** + * Test case to validate the prepareRequest logic for RestDeleteQueryGroupAction + */ + @SuppressWarnings("unchecked") + public void testPrepareRequest() throws Exception { + RestDeleteQueryGroupAction restDeleteQueryGroupAction = new RestDeleteQueryGroupAction(); + NodeClient nodeClient = mock(NodeClient.class); + RestRequest realRequest = new FakeRestRequest(); + realRequest.params().put("name", NAME_ONE); + ; + RestRequest spyRequest = spy(realRequest); + + doReturn(TimeValue.timeValueSeconds(30)).when(spyRequest).paramAsTime(eq("cluster_manager_timeout"), any(TimeValue.class)); + doReturn(TimeValue.timeValueSeconds(60)).when(spyRequest).paramAsTime(eq("timeout"), any(TimeValue.class)); + + CheckedConsumer consumer = restDeleteQueryGroupAction.prepareRequest(spyRequest, nodeClient); + assertNotNull(consumer); + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(DeleteQueryGroupRequest.class); + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(RestToXContentListener.class); + doNothing().when(nodeClient).execute(eq(DeleteQueryGroupAction.INSTANCE), requestCaptor.capture(), listenerCaptor.capture()); + + consumer.accept(mock(RestChannel.class)); + DeleteQueryGroupRequest capturedRequest = requestCaptor.getValue(); + assertEquals(NAME_ONE, capturedRequest.getName()); + assertEquals(TimeValue.timeValueSeconds(30), capturedRequest.clusterManagerNodeTimeout()); + assertEquals(TimeValue.timeValueSeconds(60), capturedRequest.timeout()); + verify(nodeClient).execute( + eq(DeleteQueryGroupAction.INSTANCE), + any(DeleteQueryGroupRequest.class), + any(RestToXContentListener.class) + ); + } +} diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java 
b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java index 2aa3b9e168852..a516ffdde839e 100644 --- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java +++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java @@ -8,6 +8,9 @@ package org.opensearch.plugin.wlm.service; +import org.opensearch.ResourceNotFoundException; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -20,6 +23,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.plugin.wlm.QueryGroupTestUtils; import org.opensearch.plugin.wlm.action.CreateQueryGroupResponse; +import org.opensearch.plugin.wlm.action.DeleteQueryGroupRequest; import org.opensearch.search.ResourceType; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; @@ -39,6 +43,7 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.MONITOR_STRING; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_NONE_EXISTED; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_ONE; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.NAME_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_ONE; import static org.opensearch.plugin.wlm.QueryGroupTestUtils._ID_TWO; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.assertEqualQueryGroups; @@ -48,12 +53,14 @@ import static org.opensearch.plugin.wlm.QueryGroupTestUtils.preparePersistenceServiceSetup; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupList; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupOne; +import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupPersistenceService; import static org.opensearch.plugin.wlm.QueryGroupTestUtils.queryGroupTwo; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.QUERY_GROUP_COUNT_SETTING_NAME; import static org.opensearch.plugin.wlm.service.QueryGroupPersistenceService.SOURCE; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -298,4 +305,55 @@ public void testMaxQueryGroupCount() { queryGroupPersistenceService.setMaxQueryGroupCount(50); assertEquals(50, queryGroupPersistenceService.getMaxQueryGroupCount()); } + + /** + * Tests delete a single QueryGroup + */ + public void testDeleteSingleQueryGroup() { + ClusterState newClusterState = queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_TWO, clusterState()); + Map afterDeletionGroups = newClusterState.getMetadata().queryGroups(); + assertFalse(afterDeletionGroups.containsKey(_ID_TWO)); + assertEquals(1, afterDeletionGroups.size()); + List oldQueryGroups = new ArrayList<>(); + oldQueryGroups.add(queryGroupOne); + assertEqualQueryGroups(new ArrayList<>(afterDeletionGroups.values()), oldQueryGroups); + } + + /** + * Tests delete a QueryGroup with invalid name + */ + public void testDeleteNonExistedQueryGroup() { + assertThrows( + ResourceNotFoundException.class, + 
() -> queryGroupPersistenceService().deleteQueryGroupInClusterState(NAME_NONE_EXISTED, clusterState()) + ); + } + + /** + * Tests DeleteInClusterStateMetadata function + */ + @SuppressWarnings("unchecked") + public void testDeleteInClusterStateMetadata() throws Exception { + DeleteQueryGroupRequest request = new DeleteQueryGroupRequest(NAME_ONE); + ClusterService clusterService = mock(ClusterService.class); + + ActionListener listener = mock(ActionListener.class); + QueryGroupPersistenceService queryGroupPersistenceService = new QueryGroupPersistenceService( + clusterService, + QueryGroupTestUtils.settings(), + clusterSettings() + ); + doAnswer(invocation -> { + AckedClusterStateUpdateTask task = invocation.getArgument(1); + ClusterState initialState = clusterState(); + ClusterState newState = task.execute(initialState); + assertNotNull(newState); + assertEquals(queryGroupPersistenceService.deleteQueryGroupThrottlingKey, task.getClusterManagerThrottlingKey()); + task.onAllNodesAcked(null); + verify(listener).onResponse(argThat(response -> response.isAcknowledged())); + return null; + }).when(clusterService).submitStateUpdateTask(anyString(), any()); + queryGroupPersistenceService.deleteInClusterStateMetadata(request, listener); + verify(clusterService).submitStateUpdateTask(eq(SOURCE), any(AckedClusterStateUpdateTask.class)); + } } diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json new file mode 100644 index 0000000000000..16930427fc2fe --- /dev/null +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/api/delete_query_group_context.json @@ -0,0 +1,22 @@ +{ + "delete_query_group_context": { + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_wlm/query_group/{name}", + "methods": [ + "DELETE" + ], + "parts": { + "name": { + "type": "string", + "description": "QueryGroup name" + } + } + } + ] + }, + "params":{} + } +} diff --git a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml index a22dfa2f4477e..a00314986a5cf 100644 --- a/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml +++ b/plugins/workload-management/src/yamlRestTest/resources/rest-api-spec/test/wlm/10_query_group.yml @@ -106,3 +106,9 @@ - match: { query_groups.0.resiliency_mode: "monitor" } - match: { query_groups.0.resource_limits.cpu: 0.35 } - match: { query_groups.0.resource_limits.memory: 0.25 } + + - do: + delete_query_group_context: + name: "analytics2" + + - match: { acknowledged: true } diff --git a/release-notes/opensearch.release-notes-1.3.19.md b/release-notes/opensearch.release-notes-1.3.19.md new file mode 100644 index 0000000000000..fe62624fc6362 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.19.md @@ -0,0 +1,5 @@ +## 2024-08-22 Version 1.3.19 Release Notes + +### Upgrades +- OpenJDK Update (July 2024 Patch releases) ([#15002](https://github.com/opensearch-project/OpenSearch/pull/15002)) +- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml index 
91e4127da9c32..e1bc86f1c9f3d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml @@ -88,7 +88,16 @@ setup: } ] } } - + - do: + index: + index: test_partial_flat_object + id: 4 + body: { + "issue": { + "number": 999, + "labels": null + } + } - do: indices.refresh: index: test_partial_flat_object @@ -135,7 +144,7 @@ teardown: } } - - length: { hits.hits: 3 } + - length: { hits.hits: 4 } # Match Query with exact dot path. - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml new file mode 100644 index 0000000000000..d728070adb188 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml @@ -0,0 +1,184 @@ +--- +setup: + - skip: + version: " - 2.99.99" + reason: The bitmap filtering feature is available in 2.17 and later. + - do: + indices.create: + index: students + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + student_id: + type: integer + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "students", "_id": "1" } } + - { "name": "Jane Doe", "student_id": 111 } + - { "index": { "_index": "students", "_id": "2" } } + - { "name": "Mary Major", "student_id": 222 } + - { "index": { "_index": "students", "_id": "3" } } + - { "name": "John Doe", "student_id": 333 } + - do: + indices.create: + index: classes + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + enrolled: + type: binary + store: true + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "classes", "_id": "101" } } + - { "enrolled": "OjAAAAEAAAAAAAEAEAAAAG8A3gA=" } # 111,222 + - { "index": { "_index": "classes", "_id": "102" } } + - { "enrolled": "OjAAAAEAAAAAAAAAEAAAAG8A" } # 111 + - { "index": { "_index": "classes", "_id": "103" } } + - { "enrolled": "OjAAAAEAAAAAAAAAEAAAAE0B" } # 333 + - { "index": { "_index": "classes", "_id": "104" } } + - { "enrolled": "OjAAAAEAAAAAAAEAEAAAAN4ATQE=" } # 222,333 + - do: + cluster.health: + wait_for_status: green + +--- +"Terms lookup on a binary field with bitmap": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + - match: { hits.hits.1._source.name: Mary Major } + - match: { hits.hits.1._source.student_id: 222 } + +--- +"Terms query accepting bitmap as value": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "terms": { + "student_id": ["OjAAAAEAAAAAAAEAEAAAAG8A3gA="], + "value_type": "bitmap" + } + } + } + - match: { hits.total: 2 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + - match: { hits.hits.1._source.name: Mary Major } + - match: { hits.hits.1._source.student_id: 222 } + +--- +"Boolean must bitmap filtering": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "bool": { + "must": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": 
"bitmap" + } + } + ], + "must_not": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "104", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ] + } + } + } + - match: { hits.total: 1 } + - match: { hits.hits.0._source.name: Jane Doe } + - match: { hits.hits.0._source.student_id: 111 } + +--- +"Boolean should bitmap filtering": + - do: + search: + rest_total_hits_as_int: true + index: students + body: { + "query": { + "bool": { + "should": [ + { + "terms": { + "student_id": { + "index": "classes", + "id": "101", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + }, + { + "terms": { + "student_id": { + "index": "classes", + "id": "104", + "path": "enrolled", + "store": true + }, + "value_type": "bitmap" + } + } + ] + } + } + } + - match: { hits.total: 3 } + - match: { hits.hits.0._source.name: Mary Major } + - match: { hits.hits.0._source.student_id: 222 } + - match: { hits.hits.1._source.name: Jane Doe } + - match: { hits.hits.1._source.student_id: 111 } + - match: { hits.hits.2._source.name: John Doe } + - match: { hits.hits.2._source.student_id: 333 } diff --git a/server/build.gradle b/server/build.gradle index 5facc73dff968..0cc42ad690eab 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -126,6 +126,9 @@ dependencies { api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" + // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap + implementation 'org.roaringbitmap:RoaringBitmap:1.2.1' + testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' diff --git a/server/licenses/RoaringBitmap-1.2.1.jar.sha1 b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 new file mode 100644 index 0000000000000..ef8cd48c7a388 --- /dev/null +++ b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 @@ -0,0 +1 @@ +828eb489b5e8c8762f2471010e9c7f20c7de596d \ No newline at end of file diff --git a/server/licenses/RoaringBitmap-LICENSE.txt b/server/licenses/RoaringBitmap-LICENSE.txt new file mode 100644 index 0000000000000..3bdd0036295a6 --- /dev/null +++ b/server/licenses/RoaringBitmap-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013-2016 the RoaringBitmap authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
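The new 370_bitmap_filtering.yml test above stores opaque base64 strings (for example OjAAAAEAAAAAAAEAEAAAAG8A3gA= for students 111 and 222) in the binary "enrolled" field. Those values are consistent with a serialized RoaringBitmap that has been Base64-encoded, which is what the RoaringBitmap dependency added in server/build.gradle supports. A minimal sketch of producing such a value, assuming the library's standard serialize/Base64 round trip (the class name and exact output below are illustrative, not taken from this patch):

import java.nio.ByteBuffer;
import java.util.Base64;

import org.roaringbitmap.RoaringBitmap;

public class EnrolledBitmapExample {
    public static void main(String[] args) {
        // Students 111 and 222 are enrolled in class 101 in the test fixtures.
        RoaringBitmap enrolled = RoaringBitmap.bitmapOf(111, 222);

        // Serialize the bitmap into a heap buffer using its portable format.
        ByteBuffer buffer = ByteBuffer.allocate(enrolled.serializedSizeInBytes());
        enrolled.serialize(buffer);

        // Base64-encode the serialized bytes so they can be indexed into the
        // binary field or passed inline to a terms query with "value_type": "bitmap".
        String encoded = Base64.getEncoder().encodeToString(buffer.array());
        System.out.println(encoded);
    }
}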
diff --git a/server/licenses/RoaringBitmap-NOTICE.txt b/server/licenses/RoaringBitmap-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java index 07d6e1379ced8..6a2e7ce4957ae 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -50,15 +50,22 @@ public class RemoteStatePublicationIT extends RemoteStoreBaseIntegTestCase { private static String INDEX_NAME = "test-index"; + private boolean isRemoteStateEnabled = true; + private String isRemotePublicationEnabled = "true"; @Before public void setup() { asyncUploadMockFsRepo = false; + isRemoteStateEnabled = true; + isRemotePublicationEnabled = "true"; } @Override protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, "true").build(); + return Settings.builder() + .put(super.featureFlagSettings()) + .put(FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL, isRemotePublicationEnabled) + .build(); } @Override @@ -76,7 +83,7 @@ protected Settings nodeSettings(int nodeOrdinal) { ); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), isRemoteStateEnabled) .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, routingTableRepoName) .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) @@ -136,6 +143,18 @@ public void testPublication() throws Exception { } } + public void testRemotePublicationDisableIfRemoteStateDisabled() { + // only disable remote state + isRemoteStateEnabled = false; + // create cluster with multi node with in-consistent settings + prepareCluster(3, 2, INDEX_NAME, 1, 2); + // assert cluster is stable, ensuring publication falls back to legacy transport with inconsistent settings + ensureStableCluster(5); + ensureGreen(INDEX_NAME); + + assertNull(internalCluster().getCurrentClusterManagerNodeInstance(RemoteClusterStateService.class)); + } + private Map getMetadataFiles(BlobStoreRepository repository, String subDirectory) throws IOException { BlobPath metadataPath = repository.basePath() .add( @@ -151,5 +170,4 @@ private Map getMetadataFiles(BlobStoreRepository repository, St return fileName.split(DELIMITER)[0]; }).collect(Collectors.toMap(Function.identity(), key -> 1, Integer::sum)); } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 6f5b4bba481dd..52c6c6801a3c2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -264,15 +264,16 @@ public void testValidCompositeIndex() { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(2, starTreeFieldType.getMetrics().size()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - 
List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + assertEquals("_doc_count", starTreeFieldType.getMetrics().get(1).getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), starTreeFieldType.getMetrics().get(1).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, @@ -349,13 +350,9 @@ public void testUpdateIndexWhenMappingIsSame() { assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + + // Assert default metrics + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals( diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 09d5c208a8756..108ef14f0fcb4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -34,6 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.Weight; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -56,7 +62,10 @@ import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.request.RequestCacheStats; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.bucket.histogram.Histogram; @@ -65,6 +74,7 @@ import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; import org.opensearch.test.hamcrest.OpenSearchAssertions; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.time.ZoneId; @@ -768,6 +778,59 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception { assertTrue(stats.getMemorySizeInBytes() == 0); } + public void testTimedOutQuery() throws Exception { + // A timed out query should be cached and then invalidated + Client client = client(); + String index = 
"index"; + assertAcked( + client.admin() + .indices() + .prepareCreate(index) + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + // Disable index refreshing to avoid cache being invalidated mid-test + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1)) + ) + .get() + ); + indexRandom(true, client.prepareIndex(index).setSource("k", "hello")); + ensureSearchable(index); + // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache + forceMerge(client, index); + + QueryBuilder timeoutQueryBuilder = new TermQueryBuilder("k", "hello") { + @Override + protected Query doToQuery(QueryShardContext context) { + return new TermQuery(new Term("k", "hello")) { + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + // Create the weight before sleeping. Otherwise, TermStates.build() (in the call to super.createWeight()) will + // sometimes throw an exception on timeout, rather than timing out gracefully. + Weight result = super.createWeight(searcher, scoreMode, boost); + try { + Thread.sleep(500); + } catch (InterruptedException ignored) {} + return result; + } + }; + } + }; + + SearchResponse resp = client.prepareSearch(index) + .setRequestCache(true) + .setQuery(timeoutQueryBuilder) + .setTimeout(TimeValue.ZERO) + .get(); + assertTrue(resp.isTimedOut()); + RequestCacheStats requestCacheStats = getRequestCacheStats(client, index); + // The cache should be empty as the timed-out query was invalidated + assertEquals(0, requestCacheStats.getMemorySizeInBytes()); + } + private Path[] shardDirectory(String server, Index index, int shard) { NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server); final Path[] paths = env.availableShardPaths(new ShardId(index, shard)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 194dce5f4a57a..a327b683874f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -217,10 +217,15 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { } else { // As delete is async its possible that the file gets created before the deletion or after // deletion. - MatcherAssert.assertThat( - actualFileCount, - is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) - ); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= lastNMetadataFilesToKeep); + } else { + MatcherAssert.assertThat( + actualFileCount, + is(oneOf(lastNMetadataFilesToKeep - 1, lastNMetadataFilesToKeep, lastNMetadataFilesToKeep + 1)) + ); + } } }, 30, TimeUnit.SECONDS); } @@ -249,7 +254,12 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. 
- MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + // With pinned timestamp, we also keep md files since last successful fetch + assertTrue(actualFileCount >= 4); + } else { + assertEquals(4, actualFileCount); + } } public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java index 0bb53309f7a78..cb91c63e17245 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -9,8 +9,10 @@ package org.opensearch.remotestore; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.test.OpenSearchIntegTestCase; @@ -20,6 +22,14 @@ public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + } + ActionListener noOpActionListener = new ActionListener<>() { @Override public void onResponse(Void unused) {} @@ -54,7 +64,7 @@ public void testTimestampPinUnpin() throws Exception { remoteStorePinnedTimestampService.pinTimestamp(timestamp2, "ss3", noOpActionListener); remoteStorePinnedTimestampService.pinTimestamp(timestamp3, "ss4", noOpActionListener); - remoteStorePinnedTimestampService.setPinnedTimestampsSchedulerInterval(TimeValue.timeValueSeconds(1)); + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); assertBusy(() -> { Tuple> pinnedTimestampWithFetchTimestamp_2 = RemoteStorePinnedTimestampService.getPinnedTimestamps(); @@ -63,7 +73,7 @@ public void testTimestampPinUnpin() throws Exception { assertEquals(Set.of(timestamp1, timestamp2, timestamp3), pinnedTimestampWithFetchTimestamp_2.v2()); }); - remoteStorePinnedTimestampService.setPinnedTimestampsSchedulerInterval(TimeValue.timeValueMinutes(3)); + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); // This should be a no-op as pinning entity is different remoteStorePinnedTimestampService.unpinTimestamp(timestamp1, "no-snapshot", noOpActionListener); @@ -72,7 +82,7 @@ public void testTimestampPinUnpin() throws Exception { // Adding different entity to already pinned timestamp remoteStorePinnedTimestampService.pinTimestamp(timestamp3, "ss5", noOpActionListener); - remoteStorePinnedTimestampService.setPinnedTimestampsSchedulerInterval(TimeValue.timeValueSeconds(1)); + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); assertBusy(() -> { Tuple> pinnedTimestampWithFetchTimestamp_3 = RemoteStorePinnedTimestampService.getPinnedTimestamps(); @@ -81,6 +91,6 @@ public void testTimestampPinUnpin() throws Exception { assertEquals(Set.of(timestamp1, timestamp3), 
pinnedTimestampWithFetchTimestamp_3.v2()); }); - remoteStorePinnedTimestampService.setPinnedTimestampsSchedulerInterval(TimeValue.timeValueMinutes(3)); + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index 01ad06757640c..3cf63e2f19a16 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -52,6 +52,7 @@ import org.opensearch.common.time.DateFormatter; import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -66,6 +67,7 @@ import org.opensearch.index.query.QueryBuilders; import org.opensearch.index.query.RangeQueryBuilder; import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; import org.opensearch.index.query.WildcardQueryBuilder; import org.opensearch.index.query.WrapperQueryBuilder; import org.opensearch.index.query.functionscore.ScoreFunctionBuilders; @@ -84,6 +86,7 @@ import java.io.IOException; import java.io.Reader; +import java.nio.ByteBuffer; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; @@ -98,6 +101,8 @@ import java.util.concurrent.ExecutionException; import java.util.regex.Pattern; +import org.roaringbitmap.RoaringBitmap; + import static java.util.Collections.singletonMap; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -1157,6 +1162,41 @@ public void testTermsQuery() throws Exception { assertHitCount(searchResponse, 0L); } + public void testTermsQueryWithBitmapDocValuesQuery() throws Exception { + assertAcked( + prepareCreate("products").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("product") + .field("type", "integer") + .field("index", false) + .endObject() + .endObject() + .endObject() + ) + ); + indexRandom( + true, + client().prepareIndex("products").setId("1").setSource("product", 1), + client().prepareIndex("products").setId("2").setSource("product", 2), + client().prepareIndex("products").setId("3").setSource("product", new int[] { 1, 3 }), + client().prepareIndex("products").setId("4").setSource("product", 4) + ); + + RoaringBitmap r = new RoaringBitmap(); + r.add(1); + r.add(4); + byte[] array = new byte[r.serializedSizeInBytes()]; + r.serialize(ByteBuffer.wrap(array)); + BytesArray bitmap = new BytesArray(array); + // directly building the terms query builder, so pass in the bitmap value as BytesArray + SearchResponse searchResponse = client().prepareSearch("products") + .setQuery(constantScoreQuery(termsQuery("product", bitmap).valueType(TermsQueryBuilder.ValueType.BITMAP))) + .get(); + assertHitCount(searchResponse, 3L); + assertSearchHits(searchResponse, "1", "3", "4"); + } + public void testTermsLookupFilter() throws Exception { assertAcked(prepareCreate("lookup").setMapping("terms", "type=text", "other", "type=text")); indexRandomForConcurrentSearch("lookup"); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index e688a4491b1a7..2331d52c3a1bc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -19,6 +19,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -287,8 +288,14 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); - internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); + // Disabling pinned timestamp as this test is specifically for shallow snapshot. + settings = Settings.builder() + .put(settings) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), false) + .build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNode(settings); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java new file mode 100644 index 0000000000000..67ee45f4c9306 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesConsumerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is an abstraction of the {@link DocValuesConsumer} for the Star Tree index structure. + * It is responsible to consume various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. 
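A minimal usage sketch for the Lucene90DocValuesConsumerWrapper introduced by this new file (illustrative only, not part of the patch). It assumes a SegmentWriteState named state and codec/extension names supplied by the calling doc-values format; all of these are placeholders:

    // Sketch: obtain a DocValuesConsumer for writing star-tree doc values.
    // Closing the wrapper closes the underlying consumer.
    try (
        Lucene90DocValuesConsumerWrapper wrapper = new Lucene90DocValuesConsumerWrapper(
            state,
            dataCodec,
            dataExtension,
            metaCodec,
            metaExtension
        )
    ) {
        DocValuesConsumer consumer = wrapper.getLucene90DocValuesConsumer();
        // hand the consumer to the star-tree writer
    }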
+ * + * @opensearch.experimental + */ +public class Lucene90DocValuesConsumerWrapper implements Closeable { + + private final Lucene90DocValuesConsumer lucene90DocValuesConsumer; + + public Lucene90DocValuesConsumerWrapper( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesConsumer = new Lucene90DocValuesConsumer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public Lucene90DocValuesConsumer getLucene90DocValuesConsumer() { + return lucene90DocValuesConsumer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesConsumer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java new file mode 100644 index 0000000000000..a213852c59094 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/codecs/lucene90/Lucene90DocValuesProducerWrapper.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.codecs.lucene90; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.SegmentReadState; + +import java.io.Closeable; +import java.io.IOException; + +/** + * This class is a custom abstraction of the {@link DocValuesProducer} for the Star Tree index structure. + * It is responsible for providing access to various types of document values (numeric, binary, sorted, sorted numeric, + * and sorted set) for fields in the Star Tree index. + * + * @opensearch.experimental + */ +public class Lucene90DocValuesProducerWrapper implements Closeable { + + private final Lucene90DocValuesProducer lucene90DocValuesProducer; + + public Lucene90DocValuesProducerWrapper( + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + lucene90DocValuesProducer = new Lucene90DocValuesProducer(state, dataCodec, dataExtension, metaCodec, metaExtension); + } + + public DocValuesProducer getLucene90DocValuesProducer() { + return lucene90DocValuesProducer; + } + + @Override + public void close() throws IOException { + lucene90DocValuesProducer.close(); + } +} diff --git a/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java new file mode 100644 index 0000000000000..f7759fcced284 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/SortedNumericDocValuesWriterWrapper.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.index; + +import org.apache.lucene.util.Counter; + +/** + * A wrapper class for writing sorted numeric doc values. + *
+ * This class provides a convenient way to add sorted numeric doc values to a field + * and retrieve the corresponding {@link SortedNumericDocValues} instance. + * + * @opensearch.experimental + */ +public class SortedNumericDocValuesWriterWrapper { + + private final SortedNumericDocValuesWriter sortedNumericDocValuesWriter; + + /** + * Sole constructor. Constructs a new {@link SortedNumericDocValuesWriterWrapper} instance. + * + * @param fieldInfo the field information for the field being written + * @param counter a counter for tracking memory usage + */ + public SortedNumericDocValuesWriterWrapper(FieldInfo fieldInfo, Counter counter) { + sortedNumericDocValuesWriter = new SortedNumericDocValuesWriter(fieldInfo, counter); + } + + /** + * Adds a value to the sorted numeric doc values for the specified document. + * + * @param docID the document ID + * @param value the value to add + */ + public void addValue(int docID, long value) { + sortedNumericDocValuesWriter.addValue(docID, value); + } + + /** + * Returns the {@link SortedNumericDocValues} instance containing the sorted numeric doc values + * + * @return the {@link SortedNumericDocValues} instance + */ + public SortedNumericDocValues getDocValues() { + return sortedNumericDocValuesWriter.getDocValues(); + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 574b7029a6501..c86e6580122d5 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -1200,9 +1200,12 @@ public void unregisterDynamicRoute(NamedRoute route) { * @param route The {@link RestHandler.Route}. * @return the corresponding {@link RestSendToExtensionAction} if it is registered, null otherwise. 
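A minimal usage sketch for the SortedNumericDocValuesWriterWrapper introduced above (illustrative only, not part of the patch). It assumes a FieldInfo for a sorted-numeric field is already in hand; Counter.newCounter() is Lucene's standard memory-usage counter:

    // Sketch: collect values for two documents, then read them back as doc values.
    SortedNumericDocValuesWriterWrapper writer =
        new SortedNumericDocValuesWriterWrapper(fieldInfo, Counter.newCounter());
    writer.addValue(0, 42L);   // document 0
    writer.addValue(0, 7L);    // a document may hold several sorted numeric values
    writer.addValue(1, 100L);  // document 1 (values are added in docID order)
    SortedNumericDocValues docValues = writer.getDocValues();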
*/ - @SuppressWarnings("unchecked") public RestSendToExtensionAction get(RestHandler.Route route) { - return routeRegistry.get(route); + if (route instanceof NamedRoute) { + return routeRegistry.get((NamedRoute) route); + } + // Only NamedRoutes are map keys so any other route is not in the map + return null; } } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java index 7fa63ae8abc62..c7820c2c9a365 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/CoordinationState.java @@ -105,7 +105,8 @@ public CoordinationState( .getLastAcceptedConfiguration(); this.publishVotes = new VoteCollection(); this.isRemoteStateEnabled = isRemoteStoreClusterStateEnabled(settings); - this.isRemotePublicationEnabled = FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) + this.isRemotePublicationEnabled = isRemoteStateEnabled + && FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && localNode.isRemoteStatePublicationEnabled(); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index 4da6c68b40733..6163fd624c838 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -1397,7 +1397,14 @@ public Builder put(final QueryGroup queryGroup) { return queryGroups(existing); } - public Map getQueryGroups() { + public Builder remove(final QueryGroup queryGroup) { + Objects.requireNonNull(queryGroup, "queryGroup should not be null"); + Map existing = new HashMap<>(getQueryGroups()); + existing.remove(queryGroup.get_id()); + return queryGroups(existing); + } + + private Map getQueryGroups() { return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE)) .map(o -> (QueryGroupMetadata) o) .map(QueryGroupMetadata::queryGroups) @@ -1830,9 +1837,7 @@ static void validateDataStreams(SortedMap indicesLooku if (dsMetadata != null) { for (DataStream ds : dsMetadata.dataStreams().values()) { String prefix = DataStream.BACKING_INDEX_PREFIX + ds.getName() + "-"; - Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") // '.' 
is the - // char after - // '-' + Set conflicts = indicesLookup.subMap(prefix, DataStream.BACKING_INDEX_PREFIX + ds.getName() + ".") .keySet() .stream() .filter(s -> NUMBER_PATTERN.matcher(s.substring(prefix.length())).matches()) diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 7baae17dd77cd..8daf9125bb27e 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -142,7 +142,6 @@ import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.node.NodeRoleSettings; import org.opensearch.node.remotestore.RemoteStoreNodeService; -import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.node.resource.tracker.ResourceTrackerSettings; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; @@ -174,6 +173,7 @@ import org.opensearch.transport.SniffConnectionStrategy; import org.opensearch.transport.TransportSettings; import org.opensearch.watcher.ResourceWatcherService; +import org.opensearch.wlm.WorkloadManagementSettings; import java.util.Arrays; import java.util.Collections; @@ -759,14 +759,22 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_HASH_ALGORITHM_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_MAX_TRANSLOG_READERS, RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA, - SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL, + RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, - RemoteStorePinnedTimestampService.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL, + SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, // Composite index settings CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING, - SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED + SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED, + + // WorkloadManagement settings + WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD, + WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD, + WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD, + WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD ) ) ); diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index a4d60bc76127c..284eb43aa5509 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -238,6 +238,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { // Settings for concurrent segment search IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT, IndexSettings.ALLOW_DERIVED_FIELDS, // Settings for star tree index diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index a833d66fab5d9..9cab68d646b6e 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java 
+++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -76,6 +76,7 @@ import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE; /** * This class encapsulates all index level settings and handles settings updates. @@ -691,6 +692,14 @@ public static IndexMergePolicy fromString(String text) { Property.Dynamic ); + public static final Setting INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT = Setting.intSetting( + "index.search.concurrent.max_slice_count", + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, + Property.Dynamic, + Property.IndexScope + ); + public static final Setting INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting( "index.optimize_doc_id_lookup.fuzzy_set.enabled", false, diff --git a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java index 3acedc6a27d7f..99691d7061ac9 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/CompositeCodecFactory.java @@ -12,6 +12,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import java.util.HashMap; @@ -29,6 +30,10 @@ */ @ExperimentalApi public class CompositeCodecFactory { + + // we can use this to track the latest composite codec + public static final String COMPOSITE_CODEC = Composite99Codec.COMPOSITE_INDEX_CODEC_NAME; + public CompositeCodecFactory() {} public Map getCompositeIndexCodecs(MapperService mapperService, Logger logger) { diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java new file mode 100644 index 0000000000000..1ed672870337e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactory.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumerWrapper; +import org.apache.lucene.index.SegmentWriteState; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesConsumer} instances + * for the latest composite codec. + *
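The IndexSettings hunk above registers index.search.concurrent.max_slice_count as a dynamic, index-scoped setting. A hedged sketch of opting an index in at creation time (the index name and value are arbitrary examples, not taken from the patch):

    // Sketch: cap concurrent segment search at 4 slices for this index.
    client.admin()
        .indices()
        .prepareCreate("logs")
        .setSettings(Settings.builder().put("index.search.concurrent.max_slice_count", 4))
        .get();
    // Because the setting is dynamic, the same key can later be changed via the update settings API.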
+ * The segments are written using the latest composite codec. The codec + * internally manages calling the appropriate consumer factory for its abstractions. + *
+ * This design ensures forward compatibility for writing operations + * + * @opensearch.experimental + */ +public class LuceneDocValuesConsumerFactory { + + public static DocValuesConsumer getDocValuesConsumerForCompositeCodec( + SegmentWriteState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + try ( + Lucene90DocValuesConsumerWrapper lucene90DocValuesConsumerWrapper = new Lucene90DocValuesConsumerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ) + ) { + return lucene90DocValuesConsumerWrapper.getLucene90DocValuesConsumer(); + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java new file mode 100644 index 0000000000000..611a97ffeb834 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactory.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducerWrapper; +import org.apache.lucene.index.SegmentReadState; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; + +import java.io.IOException; + +/** + * A factory class that provides a factory method for creating {@link DocValuesProducer} instances + * based on the specified composite codec. + *
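Because segments are always written with the latest composite codec, the consumer factory above takes no codec argument. A usage sketch (illustrative only, not part of the patch; every argument is a placeholder supplied by the calling format):

    DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec(
        state,          // SegmentWriteState of the segment being flushed or merged
        dataCodec,
        dataExtension,
        metaCodec,
        metaExtension
    );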
+ * In producers, we want to ensure compatibility with older codec versions during the segment reads. + * This approach allows for writing with only the latest codec while maintaining + * the ability to read data encoded with any codec version present in the segment. + *
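On the read path, the codec name recorded for the segment selects the producer, which is what keeps segments written by older composite codecs readable. A sketch using the Composite99 codec name from this patch (the remaining arguments are placeholders; illustrative only, not part of the patch):

    DocValuesProducer producer = LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec(
        Composite99Codec.COMPOSITE_INDEX_CODEC_NAME,  // codec the segment was written with
        state,
        dataCodec,
        dataExtension,
        metaCodec,
        metaExtension
    );
    // Any unrecognised codec name is rejected with an IllegalStateException in the switch below.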
+ * This design ensures backward compatibility for reads across different codec versions. + * + * @opensearch.experimental + */ +public class LuceneDocValuesProducerFactory { + + public static DocValuesProducer getDocValuesProducerForCompositeCodec( + String compositeCodec, + SegmentReadState state, + String dataCodec, + String dataExtension, + String metaCodec, + String metaExtension + ) throws IOException { + + switch (compositeCodec) { + case Composite99Codec.COMPOSITE_INDEX_CODEC_NAME: + try ( + Lucene90DocValuesProducerWrapper lucene90DocValuesProducerWrapper = new Lucene90DocValuesProducerWrapper( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ) + ) { + return lucene90DocValuesProducerWrapper.getLucene90DocValuesProducer(); + } + default: + throw new IllegalStateException("Invalid composite codec " + "[" + compositeCodec + "]"); + } + + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java index de04944e67cd2..8422932e937c2 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99Codec.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99Codec.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java similarity index 97% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java index 216ed4f68f333..e8c69b11b7c88 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesFormat.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesFormat.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesFormat; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java similarity index 91% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java index df5008a7f294e..e3bfe01cfa2d5 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesReader.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.BinaryDocValues; @@ -17,6 +17,9 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java similarity index 82% rename from server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java rename to server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java index 6ed1a8c42e380..74ab7d423998e 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/Composite99DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/Composite99DocValuesWriter.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.index.codec.composite; +package org.opensearch.index.codec.composite.composite99; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; @@ -15,13 +15,18 @@ import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedNumericDocValues; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.CompositeIndexValues; import org.opensearch.index.codec.composite.datacube.startree.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -60,21 +65,29 @@ public Composite99DocValuesWriter(DocValuesConsumer delegate, SegmentWriteState this.compositeMappedFieldTypes = mapperService.getCompositeFieldTypes(); compositeFieldSet = new HashSet<>(); segmentFieldSet = new HashSet<>(); + // TODO : add integ test for this for (FieldInfo fi : segmentWriteState.fieldInfos) { if (DocValuesType.SORTED_NUMERIC.equals(fi.getDocValuesType())) { segmentFieldSet.add(fi.name); + } else if (fi.name.equals(DocCountFieldMapper.NAME)) { + segmentFieldSet.add(fi.name); } } for (CompositeMappedFieldType type : compositeMappedFieldTypes) { compositeFieldSet.addAll(type.fields()); } // check if there are any composite fields which are part of the segment + // TODO : add integ test where there are no composite fields in a segment, test both flush and merge cases segmentHasCompositeFields = Collections.disjoint(segmentFieldSet, compositeFieldSet) == false; } @Override public void 
addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { delegate.addNumericField(field, valuesProducer); + // Perform this only during flush flow + if (mergeState.get() == null && segmentHasCompositeFields) { + createCompositeIndicesIfPossible(valuesProducer, field); + } } @Override @@ -116,13 +129,7 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, if (segmentFieldSet.isEmpty()) { Set compositeFieldSetCopy = new HashSet<>(compositeFieldSet); for (String compositeField : compositeFieldSetCopy) { - fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { - @Override - public SortedNumericDocValues getSortedNumeric(FieldInfo field) { - return DocValues.emptySortedNumeric(); - } - }); - compositeFieldSet.remove(compositeField); + addDocValuesForEmptyField(compositeField); } } // we have all the required fields to build composite fields @@ -135,7 +142,28 @@ public SortedNumericDocValues getSortedNumeric(FieldInfo field) { } } } + } + /** + * Add empty doc values for fields not present in segment + */ + private void addDocValuesForEmptyField(String compositeField) { + if (compositeField.equals(DocCountFieldMapper.NAME)) { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public NumericDocValues getNumeric(FieldInfo field) { + return DocValues.emptyNumeric(); + } + }); + } else { + fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { + @Override + public SortedNumericDocValues getSortedNumeric(FieldInfo field) { + return DocValues.emptySortedNumeric(); + } + }); + } + compositeFieldSet.remove(compositeField); } @Override diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java new file mode 100644 index 0000000000000..3d6f130b9a7c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite99/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Responsible for handling all composite index codecs and operations associated with Composite99 codec + */ +package org.opensearch.index.codec.composite.composite99; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java index df3b2229d2c5b..1522078024b64 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/MetricStat.java @@ -10,6 +10,9 @@ import org.opensearch.common.annotation.ExperimentalApi; +import java.util.Arrays; +import java.util.List; + /** * Supported metric types for composite index * @@ -18,24 +21,56 @@ @ExperimentalApi public enum MetricStat { VALUE_COUNT("value_count"), - AVG("avg"), SUM("sum"), MIN("min"), - MAX("max"); + MAX("max"), + AVG("avg", VALUE_COUNT, SUM), + DOC_COUNT("doc_count", true); private final String typeName; + private final MetricStat[] baseMetrics; + + // System field stats cannot be used as input for user metric types + private final boolean isSystemFieldStat; MetricStat(String typeName) { + this(typeName, false); + } + + MetricStat(String typeName, MetricStat... 
baseMetrics) { + this(typeName, false, baseMetrics); + } + + MetricStat(String typeName, boolean isSystemFieldStat, MetricStat... baseMetrics) { this.typeName = typeName; + this.isSystemFieldStat = isSystemFieldStat; + this.baseMetrics = baseMetrics; } public String getTypeName() { return typeName; } + /** + * Return the list of metrics that this metric is derived from + * For example, AVG is derived from COUNT and SUM + */ + public List getBaseMetrics() { + return Arrays.asList(baseMetrics); + } + + /** + * Return true if this metric is derived from other metrics + * For example, AVG is derived from COUNT and SUM + */ + public boolean isDerivedMetric() { + return baseMetrics != null && baseMetrics.length > 0; + } + public static MetricStat fromTypeName(String typeName) { for (MetricStat metric : MetricStat.values()) { - if (metric.getTypeName().equalsIgnoreCase(typeName)) { + // prevent system fields to be entered as user input + if (metric.getTypeName().equalsIgnoreCase(typeName) && metric.isSystemFieldStat == false) { return metric; } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java index 6535f8ed11da3..ce389a99b3626 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java @@ -15,6 +15,7 @@ import java.util.Arrays; import java.util.List; +import java.util.function.Function; /** * Index settings for star tree fields. The settings are final as right now @@ -93,16 +94,10 @@ public class StarTreeIndexSettings { /** * Default metrics for metrics as part of star tree fields */ - public static final Setting> DEFAULT_METRICS_LIST = Setting.listSetting( + public static final Setting> DEFAULT_METRICS_LIST = Setting.listSetting( "index.composite_index.star_tree.field.default.metrics", - Arrays.asList( - MetricStat.AVG.toString(), - MetricStat.VALUE_COUNT.toString(), - MetricStat.SUM.toString(), - MetricStat.MAX.toString(), - MetricStat.MIN.toString() - ), - MetricStat::fromTypeName, + Arrays.asList(MetricStat.VALUE_COUNT.toString(), MetricStat.SUM.toString()), + Function.identity(), Setting.Property.IndexScope, Setting.Property.Final ); diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java index cbed46604681d..203bca3f1c292 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeValidator.java @@ -14,6 +14,7 @@ import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.StarTreeMapper; @@ -78,7 +79,7 @@ public static void validate(MapperService mapperService, CompositeIndexSettings String.format(Locale.ROOT, "unknown metric field [%s] as part of star tree field", metric.getField()) ); } - if (ft.isAggregatable() == false) { + if (ft.isAggregatable() == false 
&& ft instanceof DocCountFieldMapper.DocCountFieldType == false) { throw new IllegalArgumentException( String.format( Locale.ROOT, diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java index 81807cd174a10..e79abe0f170b3 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregator.java @@ -17,12 +17,9 @@ class CountValueAggregator implements ValueAggregator { public static final long DEFAULT_INITIAL_VALUE = 1L; - private final StarTreeNumericType starTreeNumericType; private static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; - public CountValueAggregator(StarTreeNumericType starTreeNumericType) { - this.starTreeNumericType = starTreeNumericType; - } + public CountValueAggregator() {} @Override public StarTreeNumericType getAggregatedValueType() { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java new file mode 100644 index 0000000000000..0896fa54e9f46 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregator.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Aggregator to handle '_doc_count' field + * + * @opensearch.experimental + */ +public class DocCountAggregator implements ValueAggregator { + + private static final StarTreeNumericType VALUE_AGGREGATOR_TYPE = StarTreeNumericType.LONG; + + public DocCountAggregator() {} + + @Override + public StarTreeNumericType getAggregatedValueType() { + return VALUE_AGGREGATOR_TYPE; + } + + /** + * If _doc_count field for a doc is missing, we increment the _doc_count by '1' for the associated doc + * otherwise take the actual value present in the field + */ + @Override + public Long getInitialAggregatedValueForSegmentDocValue(Long segmentDocValue) { + if (segmentDocValue == null) { + return getIdentityMetricValue(); + } + return segmentDocValue; + } + + @Override + public Long mergeAggregatedValueAndSegmentValue(Long value, Long segmentDocValue) { + assert value != null; + return mergeAggregatedValues(value, segmentDocValue); + } + + @Override + public Long mergeAggregatedValues(Long value, Long aggregatedValue) { + if (value == null) { + value = getIdentityMetricValue(); + } + if (aggregatedValue == null) { + aggregatedValue = getIdentityMetricValue(); + } + return value + aggregatedValue; + } + + @Override + public Long toAggregatedValueType(Long rawValue) { + return rawValue; + } + + /** + * If _doc_count field for a doc is missing, we increment the _doc_count by '1' for the associated doc + */ + @Override + public Long getIdentityMetricValue() { + return 1L; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java index ef5b773d81d27..bdc381110365d 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/ValueAggregatorFactory.java @@ -31,11 +31,13 @@ public static ValueAggregator getValueAggregator(MetricStat aggregationType, Sta case SUM: return new SumValueAggregator(starTreeNumericType); case VALUE_COUNT: - return new CountValueAggregator(starTreeNumericType); + return new CountValueAggregator(); case MIN: return new MinValueAggregator(starTreeNumericType); case MAX: return new MaxValueAggregator(starTreeNumericType); + case DOC_COUNT: + return new DocCountAggregator(); default: throw new IllegalStateException("Unsupported aggregation type: " + aggregationType); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index 90b2d0727d572..ddcf02cc6291a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.index.DocValues; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.FieldInfo; import 
org.apache.lucene.index.IndexOptions; @@ -28,6 +29,7 @@ import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.TreeNode; import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; @@ -117,7 +119,20 @@ protected BaseStarTreeBuilder(StarTreeField starTreeField, SegmentWriteState sta public List generateMetricAggregatorInfos(MapperService mapperService) { List metricAggregatorInfos = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { + if (metric.getField().equals(DocCountFieldMapper.NAME)) { + MetricAggregatorInfo metricAggregatorInfo = new MetricAggregatorInfo( + MetricStat.DOC_COUNT, + metric.getField(), + starTreeField.getName(), + IndexNumericFieldData.NumericType.LONG + ); + metricAggregatorInfos.add(metricAggregatorInfo); + continue; + } for (MetricStat metricStat : metric.getMetrics()) { + if (metricStat.isDerivedMetric()) { + continue; + } IndexNumericFieldData.NumericType numericType; Mapper fieldMapper = mapperService.documentMapper().mappers().getMapper(metric.getField()); if (fieldMapper instanceof NumberFieldMapper) { @@ -426,7 +441,7 @@ public void build(Map fieldProducerMap) throws IOExce String dimension = dimensionsSplitOrder.get(i).getField(); FieldInfo dimensionFieldInfo = state.fieldInfos.fieldInfo(dimension); if (dimensionFieldInfo == null) { - dimensionFieldInfo = getFieldInfo(dimension); + dimensionFieldInfo = getFieldInfo(dimension, DocValuesType.SORTED_NUMERIC); } dimensionReaders[i] = new SequentialDocValuesIterator( fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo) @@ -438,15 +453,15 @@ public void build(Map fieldProducerMap) throws IOExce logger.debug("Finished Building star-tree in ms : {}", (System.currentTimeMillis() - startTime)); } - private static FieldInfo getFieldInfo(String field) { + private static FieldInfo getFieldInfo(String field, DocValuesType docValuesType) { return new FieldInfo( field, - 1, + 1, // This is filled as part of doc values creation and is not used otherwise false, false, false, IndexOptions.NONE, - DocValuesType.SORTED_NUMERIC, + docValuesType, -1, Collections.emptyMap(), 0, @@ -470,20 +485,44 @@ public List getMetricReaders(SegmentWriteState stat List metricReaders = new ArrayList<>(); for (Metric metric : this.starTreeField.getMetrics()) { for (MetricStat metricStat : metric.getMetrics()) { + SequentialDocValuesIterator metricReader = null; FieldInfo metricFieldInfo = state.fieldInfos.fieldInfo(metric.getField()); - if (metricFieldInfo == null) { - metricFieldInfo = getFieldInfo(metric.getField()); + if (metricStat.equals(MetricStat.DOC_COUNT)) { + // _doc_count is numeric field , so we convert to sortedNumericDocValues and get iterator + metricReader = getIteratorForNumericField(fieldProducerMap, metricFieldInfo, DocCountFieldMapper.NAME); + } else { + if (metricFieldInfo == null) { + metricFieldInfo = getFieldInfo(metric.getField(), DocValuesType.SORTED_NUMERIC); + } + metricReader = new SequentialDocValuesIterator( + fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) + ); } - - SequentialDocValuesIterator metricReader = new SequentialDocValuesIterator( - fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) - ); 
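The derived-metric skip above pairs with the MetricStat changes earlier in this patch: a derived stat such as AVG only declares the base stats it is computed from, and the mapper expands those into the metric set. A small sketch of that relationship (illustrative only, not part of the patch):

    // AVG is never aggregated directly; it is reconstructed from its base metrics.
    boolean derived = MetricStat.AVG.isDerivedMetric();           // true
    List<MetricStat> baseStats = MetricStat.AVG.getBaseMetrics(); // [VALUE_COUNT, SUM]

    // DOC_COUNT is flagged as a system field stat, so MetricStat.fromTypeName("doc_count")
    // will not resolve it from user input; it is only added internally for the _doc_count metric.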
metricReaders.add(metricReader); } } return metricReaders; } + /** + * Converts numericDocValues to sortedNumericDocValues and returns SequentialDocValuesIterator + */ + private SequentialDocValuesIterator getIteratorForNumericField( + Map fieldProducerMap, + FieldInfo fieldInfo, + String name + ) throws IOException { + if (fieldInfo == null) { + fieldInfo = getFieldInfo(name, DocValuesType.NUMERIC); + } + SequentialDocValuesIterator sequentialDocValuesIterator; + assert fieldProducerMap.containsKey(fieldInfo.name); + sequentialDocValuesIterator = new SequentialDocValuesIterator( + DocValues.singleton(fieldProducerMap.get(fieldInfo.name).getNumeric(fieldInfo)) + ); + return sequentialDocValuesIterator; + } + /** * Builds the star tree using Star-Tree Document * diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index b82fa3999612a..bf8f83e1b95df 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -568,11 +568,7 @@ protected void parseCreateField(ParseContext context) throws IOException { if (context.externalValueSet()) { String value = context.externalValue().toString(); parseValueAddFields(context, value, fieldType().name()); - } else if (context.parser().currentToken() == XContentParser.Token.VALUE_NULL) { - context.parser().nextToken(); // This triggers an exception in DocumentParser. - // We could remove the above nextToken() call to skip the null value, but the existing - // behavior (since 2.7) throws the exception. - } else { + } else if (context.parser().currentToken() != XContentParser.Token.VALUE_NULL) { JsonToStringXContentParser jsonToStringParser = new JsonToStringXContentParser( NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index 27e62c3746a8e..9286b5c64b5f2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -46,8 +46,10 @@ import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PointInSetQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; @@ -58,6 +60,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.index.document.SortedUnsignedLongDocValuesRangeQuery; @@ -68,13 +71,16 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.DocValueFormat; import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.query.BitmapDocValuesQuery; import java.io.IOException; import java.math.BigInteger; +import java.nio.ByteBuffer; import java.time.ZoneId; 
import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -82,6 +88,8 @@ import java.util.function.Function; import java.util.function.Supplier; +import org.roaringbitmap.RoaringBitmap; + /** * A {@link FieldMapper} for numeric types: byte, short, int, long, float and double. * @@ -822,6 +830,24 @@ public Query termsQuery(String field, List values, boolean hasDocValues, return IntPoint.newSetQuery(field, v); } + @Override + public Query bitmapQuery(String field, BytesArray bitmapArray, boolean isSearchable, boolean hasDocValues) { + RoaringBitmap bitmap = new RoaringBitmap(); + try { + bitmap.deserialize(ByteBuffer.wrap(bitmapArray.array())); + } catch (Exception e) { + throw new IllegalArgumentException("Failed to deserialize the bitmap.", e); + } + + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery(bitmapIndexQuery(field, bitmap), new BitmapDocValuesQuery(field, bitmap)); + } + if (isSearchable) { + return bitmapIndexQuery(field, bitmap); + } + return new BitmapDocValuesQuery(field, bitmap); + } + @Override public Query rangeQuery( String field, @@ -1174,6 +1200,10 @@ public final TypeParser parser() { public abstract Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable); + public Query bitmapQuery(String field, BytesArray bitmap, boolean isSearchable, boolean hasDocValues) { + throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support bitmap queries"); + } + public abstract Query rangeQuery( String field, Object lowerTerm, @@ -1415,6 +1445,40 @@ public static Query unsignedLongRangeQuery( } return builder.apply(l, u); } + + static PointInSetQuery bitmapIndexQuery(String field, RoaringBitmap bitmap) { + final BytesRef encoded = new BytesRef(new byte[Integer.BYTES]); + return new PointInSetQuery(field, 1, Integer.BYTES, new PointInSetQuery.Stream() { + + final Iterator iterator = bitmap.iterator(); + + @Override + public BytesRef next() { + int value; + if (iterator.hasNext()) { + value = iterator.next(); + } else { + return null; + } + IntPoint.encodeDimension(value, encoded.bytes, 0); + return encoded; + } + }) { + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + if (bitmap.isEmpty()) { + return new MatchNoDocsQuery(); + } + return super.rewrite(indexSearcher); + } + + @Override + protected String toString(byte[] value) { + assert value.length == Integer.BYTES; + return Integer.toString(IntPoint.decodeDimension(value, 0)); + } + }; + } } /** @@ -1495,6 +1559,11 @@ public Query termsQuery(List values, QueryShardContext context) { return query; } + public Query bitmapQuery(BytesArray bitmap) { + failIfNotIndexedAndNoDocValues(); + return type.bitmapQuery(name(), bitmap, isSearchable(), hasDocValues()); + } + @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { failIfNotIndexedAndNoDocValues(); diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index d9539f9dc0c82..e52d6a621e4e8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -28,6 +28,7 @@ import java.util.Locale; import java.util.Map; import java.util.Optional; +import 
java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -225,6 +226,10 @@ private List buildMetrics(String fieldName, Map map, Map for (Object metric : metricsList) { Map metricMap = (Map) metric; String name = (String) XContentMapValues.extractValue(CompositeDataCubeFieldType.NAME, metricMap); + // Handle _doc_count metric separately at the end + if (name.equals(DocCountFieldMapper.NAME)) { + continue; + } metricMap.remove(CompositeDataCubeFieldType.NAME); if (objbuilder == null || objbuilder.mappersBuilders == null) { metrics.add(getMetric(name, metricMap, context)); @@ -249,7 +254,8 @@ private List buildMetrics(String fieldName, Map map, Map } else { throw new MapperParsingException(String.format(Locale.ROOT, "unable to parse metrics for star tree field [%s]", this.name)); } - + Metric docCountMetric = new Metric(DocCountFieldMapper.NAME, List.of(MetricStat.DOC_COUNT)); + metrics.add(docCountMetric); return metrics; } @@ -262,17 +268,50 @@ private Metric getMetric(String name, Map metric, Mapper.TypePar .collect(Collectors.toList()); metric.remove(STATS); if (metricStrings.isEmpty()) { - metricTypes = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); - } else { - Set metricSet = new LinkedHashSet<>(); - for (String metricString : metricStrings) { - metricSet.add(MetricStat.fromTypeName(metricString)); - } - metricTypes = new ArrayList<>(metricSet); + metricStrings = new ArrayList<>(StarTreeIndexSettings.DEFAULT_METRICS_LIST.get(context.getSettings())); + } + // Add all required metrics initially + Set metricSet = new LinkedHashSet<>(); + for (String metricString : metricStrings) { + MetricStat metricStat = MetricStat.fromTypeName(metricString); + metricSet.add(metricStat); + addBaseMetrics(metricStat, metricSet); } + addEligibleDerivedMetrics(metricSet); + metricTypes = new ArrayList<>(metricSet); return new Metric(name, metricTypes); } + /** + * Add base metrics of derived metric to metric set + */ + private void addBaseMetrics(MetricStat metricStat, Set metricSet) { + if (metricStat.isDerivedMetric()) { + Queue metricQueue = new LinkedList<>(metricStat.getBaseMetrics()); + while (metricQueue.isEmpty() == false) { + MetricStat metric = metricQueue.poll(); + if (metric.isDerivedMetric() && !metricSet.contains(metric)) { + metricQueue.addAll(metric.getBaseMetrics()); + } + metricSet.add(metric); + } + } + } + + /** + * Add derived metrics if all associated base metrics are present + */ + private void addEligibleDerivedMetrics(Set metricStats) { + for (MetricStat metric : MetricStat.values()) { + if (metric.isDerivedMetric() && !metricStats.contains(metric)) { + List sourceMetrics = metric.getBaseMetrics(); + if (metricStats.containsAll(sourceMetrics)) { + metricStats.add(metric); + } + } + } + } + @Override protected List> getParameters() { return List.of(config); diff --git a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java index ac0ca3919ea38..4b92d6a1f5460 100644 --- a/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/TermsQueryBuilder.java @@ -37,14 +37,17 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.Version; import org.opensearch.action.get.GetRequest; import org.opensearch.client.Client; import org.opensearch.common.SetOnce; import 
org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.core.ParseField; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -53,6 +56,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.ConstantFieldType; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.indices.TermsLookup; import java.io.IOException; @@ -60,6 +64,7 @@ import java.util.AbstractList; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -82,6 +87,39 @@ public class TermsQueryBuilder extends AbstractQueryBuilder { private final TermsLookup termsLookup; private final Supplier> supplier; + private static final ParseField VALUE_TYPE_FIELD = new ParseField("value_type"); + private ValueType valueType = ValueType.DEFAULT; + + /** + * Terms query may accept different types of value + *

+ * This flag is used to decide how to parse the value and build query upon later + */ + public enum ValueType { + DEFAULT("default"), + BITMAP("bitmap"); + + private final String type; + + ValueType(String type) { + this.type = type; + } + + static ValueType fromString(String type) { + for (ValueType valueType : ValueType.values()) { + if (valueType.type.equalsIgnoreCase(type)) { + return valueType; + } + } + throw new IllegalArgumentException(type + " is not valid " + VALUE_TYPE_FIELD); + } + } + + public TermsQueryBuilder valueType(ValueType valueType) { + this.valueType = valueType; + return this; + } + public TermsQueryBuilder(String fieldName, TermsLookup termsLookup) { this(fieldName, null, termsLookup); } @@ -187,6 +225,11 @@ public TermsQueryBuilder(String fieldName, Iterable values) { this.supplier = null; } + private TermsQueryBuilder(String fieldName, Iterable values, ValueType valueType) { + this(fieldName, values); + this.valueType = valueType; + } + private TermsQueryBuilder(String fieldName, Supplier> supplier) { this.fieldName = fieldName; this.values = null; @@ -194,6 +237,11 @@ private TermsQueryBuilder(String fieldName, Supplier> supplier) { this.supplier = supplier; } + private TermsQueryBuilder(String fieldName, Supplier> supplier, ValueType valueType) { + this(fieldName, supplier); + this.valueType = valueType; + } + /** * Read from a stream. */ @@ -203,6 +251,9 @@ public TermsQueryBuilder(StreamInput in) throws IOException { termsLookup = in.readOptionalWriteable(TermsLookup::new); values = (List) in.readGenericValue(); this.supplier = null; + if (in.getVersion().onOrAfter(Version.V_2_17_0)) { + valueType = in.readEnum(ValueType.class); + } } @Override @@ -213,6 +264,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeOptionalWriteable(termsLookup); out.writeGenericValue(values); + if (out.getVersion().onOrAfter(Version.V_2_17_0)) { + out.writeEnum(valueType); + } } public String fieldName() { @@ -360,6 +414,9 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.field(fieldName, convertBack(values)); } printBoostAndQueryName(builder); + if (valueType != ValueType.DEFAULT) { + builder.field(VALUE_TYPE_FIELD.getPreferredName(), valueType.type); + } builder.endObject(); } @@ -371,6 +428,8 @@ public static TermsQueryBuilder fromXContent(XContentParser parser) throws IOExc String queryName = null; float boost = AbstractQueryBuilder.DEFAULT_BOOST; + String valueTypeStr = ValueType.DEFAULT.type; + XContentParser.Token token; String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -406,6 +465,8 @@ public static TermsQueryBuilder fromXContent(XContentParser parser) throws IOExc boost = parser.floatValue(); } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { queryName = parser.text(); + } else if (VALUE_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + valueTypeStr = parser.text(); } else { throw new ParsingException( parser.getTokenLocation(), @@ -430,7 +491,18 @@ public static TermsQueryBuilder fromXContent(XContentParser parser) throws IOExc ); } - return new TermsQueryBuilder(fieldName, values, termsLookup).boost(boost).queryName(queryName); + ValueType valueType = ValueType.fromString(valueTypeStr); + if (valueType == ValueType.BITMAP) { + if (values != null && values.size() == 1 && values.get(0) instanceof BytesRef) { + values.set(0, new 
BytesArray(Base64.getDecoder().decode(((BytesRef) values.get(0)).utf8ToString()))); + } else if (termsLookup == null) { + throw new IllegalArgumentException( + "Invalid value for bitmap type: Expected a single-element array with a base64 encoded serialized bitmap." + ); + } + } + + return new TermsQueryBuilder(fieldName, values, termsLookup).boost(boost).queryName(queryName).valueType(valueType); } static List parseValues(XContentParser parser) throws IOException { @@ -473,17 +545,37 @@ protected Query doToQuery(QueryShardContext context) throws IOException { if (fieldType == null) { throw new IllegalStateException("Rewrite first"); } + if (valueType == ValueType.BITMAP) { + if (values.size() == 1 && values.get(0) instanceof BytesArray) { + if (fieldType instanceof NumberFieldMapper.NumberFieldType) { + return ((NumberFieldMapper.NumberFieldType) fieldType).bitmapQuery((BytesArray) values.get(0)); + } + } + } return fieldType.termsQuery(values, context); } private void fetch(TermsLookup termsLookup, Client client, ActionListener> actionListener) { GetRequest getRequest = new GetRequest(termsLookup.index(), termsLookup.id()); getRequest.preference("_local").routing(termsLookup.routing()); + if (termsLookup.store()) { + getRequest.storedFields(termsLookup.path()); + } client.get(getRequest, ActionListener.delegateFailure(actionListener, (delegatedListener, getResponse) -> { List terms = new ArrayList<>(); - if (getResponse.isSourceEmpty() == false) { // extract terms only if the doc source exists - List extractedValues = XContentMapValues.extractRawValues(termsLookup.path(), getResponse.getSourceAsMap()); - terms.addAll(extractedValues); + if (termsLookup.store()) { + List values = getResponse.getField(termsLookup.path()).getValues(); + if (values.size() != 1 && valueType == ValueType.BITMAP) { + throw new IllegalArgumentException( + "Invalid value for bitmap type: Expected a single base64 encoded serialized bitmap." 
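A rough usage sketch for the bitmap value_type handled in the hunks above; the index, document and field names are invented for illustration, and only RoaringBitmap, TermsQueryBuilder, TermsLookup and the ValueType enum come from this change.

// Illustration: serialize a RoaringBitmap of numeric term values for a bitmap terms query.
RoaringBitmap bitmap = RoaringBitmap.bitmapOf(1, 5, 42);
bitmap.runOptimize();
ByteBuffer buffer = ByteBuffer.allocate(bitmap.serializedSizeInBytes());
bitmap.serialize(buffer);
String base64 = Base64.getEncoder().encodeToString(buffer.array());
// JSON form accepted by fromXContent above:
//   { "terms": { "product_id": [ "<base64>" ], "value_type": "bitmap" } }

// Alternatively, reference a bitmap kept in a stored field via the new TermsLookup store flag:
TermsQueryBuilder lookupQuery = new TermsQueryBuilder(
    "product_id",
    new TermsLookup("customer_filter_index", "customer_1", "customer_filter").store(true)
).valueType(TermsQueryBuilder.ValueType.BITMAP);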
+ ); + } + terms.addAll(values); + } else { + if (getResponse.isSourceEmpty() == false) { // extract terms only if the doc source exists + List extractedValues = XContentMapValues.extractRawValues(termsLookup.path(), getResponse.getSourceAsMap()); + terms.addAll(extractedValues); + } } delegatedListener.onResponse(terms); })); @@ -491,7 +583,7 @@ private void fetch(TermsLookup termsLookup, Client client, ActionListener> supplier = new SetOnce<>(); queryRewriteContext.registerAsyncAction((client, listener) -> fetch(termsLookup, client, ActionListener.map(listener, list -> { supplier.set(list); return null; }))); - return new TermsQueryBuilder(this.fieldName, supplier::get); + return new TermsQueryBuilder(this.fieldName, supplier::get, valueType); } if (values == null || values.isEmpty()) { diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index a5e0c10f72301..b2bc8a0294a49 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -20,20 +20,27 @@ import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; import java.util.stream.Collectors; @@ -373,4 +380,173 @@ public static boolean isSwitchToStrictCompatibilityMode(ClusterUpdateSettingsReq incomingSettings ) == RemoteStoreNodeService.CompatibilityMode.STRICT; } + + /** + * Determines and returns a set of metadata files that match provided pinned timestamps. + * + * This method is an overloaded version of getPinnedTimestampLockedFiles and do not use cached entries to find + * the metadata file + * + * @param metadataFiles A list of metadata file names. Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + */ + public static Set getPinnedTimestampLockedFiles( + List metadataFiles, + Set pinnedTimestampSet, + Function getTimestampFunction, + Function> prefixFunction + ) { + return getPinnedTimestampLockedFiles(metadataFiles, pinnedTimestampSet, new HashMap<>(), getTimestampFunction, prefixFunction); + } + + /** + * Determines and returns a set of metadata files that match provided pinned timestamps. If pinned timestamp + * feature is not enabled, this function is a no-op. 
+ * + * This method identifies metadata files that are considered implicitly locked due to their timestamps + * matching or being the closest preceding timestamp to the pinned timestamps. It uses a caching mechanism + * to improve performance for previously processed timestamps. + * + * The method performs the following steps: + * 1. Validates input parameters. + * 2. Updates the cache (metadataFilePinnedTimestampMap) to remove outdated entries. + * 3. Processes cached entries and identifies new timestamps to process. + * 4. For new timestamps, iterates through metadata files to find matching or closest preceding files. + * 5. Updates the cache with newly processed timestamps and their corresponding metadata files. + * + * @param metadataFiles A list of metadata file names. Expected to be sorted in descending order of timestamp. + * @param pinnedTimestampSet A set of timestamps representing pinned points in time. + * @param metadataFilePinnedTimestampMap A map used for caching processed timestamps and their corresponding metadata files. + * @param getTimestampFunction A function that extracts the timestamp from a metadata file name. + * @param prefixFunction A function that extracts a tuple of prefix information from a metadata file name. + * @return A set of metadata file names that are implicitly locked based on the pinned timestamps. + * + */ + public static Set getPinnedTimestampLockedFiles( + List metadataFiles, + Set pinnedTimestampSet, + Map metadataFilePinnedTimestampMap, + Function getTimestampFunction, + Function> prefixFunction + ) { + Set implicitLockedFiles = new HashSet<>(); + + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return implicitLockedFiles; + } + + if (metadataFiles == null || metadataFiles.isEmpty() || pinnedTimestampSet == null) { + return implicitLockedFiles; + } + + // Remove entries for timestamps that are no longer pinned + metadataFilePinnedTimestampMap.keySet().retainAll(pinnedTimestampSet); + + // Add cached entries and collect new timestamps + Set newPinnedTimestamps = new TreeSet<>(Collections.reverseOrder()); + for (Long pinnedTimestamp : pinnedTimestampSet) { + String cachedFile = metadataFilePinnedTimestampMap.get(pinnedTimestamp); + if (cachedFile != null) { + implicitLockedFiles.add(cachedFile); + } else { + newPinnedTimestamps.add(pinnedTimestamp); + } + } + + if (newPinnedTimestamps.isEmpty()) { + return implicitLockedFiles; + } + + // Sort metadata files in descending order of timestamp + // ToDo: Do we really need this? Files fetched from remote store are already lexicographically sorted. + metadataFiles.sort(String::compareTo); + + // If we have metadata files from multiple writers, it can result in picking file generated by stale primary. + // To avoid this, we fail fast. + RemoteStoreUtils.verifyNoMultipleWriters(metadataFiles, prefixFunction); + + Iterator timestampIterator = newPinnedTimestamps.iterator(); + Long currentPinnedTimestamp = timestampIterator.next(); + long prevMdTimestamp = Long.MAX_VALUE; + for (String metadataFileName : metadataFiles) { + long currentMdTimestamp = getTimestampFunction.apply(metadataFileName); + // We always prefer md file with higher values of prefix like primary term, generation etc. 
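A toy walk-through of the matching rule implemented above, with made-up timestamps: each pinned timestamp implicitly locks the newest metadata file created at or before it.

// Toy values, not real filenames: metadata file timestamps in descending order.
long[] metadataTimestamps = { 100L, 80L, 60L, 40L };
long[] pinnedTimestamps = { 75L, 45L };
// pinned 75 -> locks the file with timestamp 60 (80 and 100 are newer than the pin)
// pinned 45 -> locks the file with timestamp 40
// Files locked this way are also added to the keep-set during stale segment deletion below.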
+ if (currentMdTimestamp > prevMdTimestamp) { + continue; + } + while (currentMdTimestamp <= currentPinnedTimestamp && prevMdTimestamp > currentPinnedTimestamp) { + implicitLockedFiles.add(metadataFileName); + // Do not cache entry for latest metadata file as the next metadata can also match the same pinned timestamp + if (prevMdTimestamp != Long.MAX_VALUE) { + metadataFilePinnedTimestampMap.put(currentPinnedTimestamp, metadataFileName); + } + if (timestampIterator.hasNext() == false) { + return implicitLockedFiles; + } + currentPinnedTimestamp = timestampIterator.next(); + } + prevMdTimestamp = currentMdTimestamp; + } + + return implicitLockedFiles; + } + + /** + * Filters out metadata files based on their age and pinned timestamps settings. + * + * This method filters a list of metadata files, keeping only those that are older + * than a certain threshold determined by the last successful fetch of pinned timestamps + * and a configured lookback interval. + * + * @param metadataFiles A list of metadata file names to be filtered. + * @param getTimestampFunction A function that extracts a timestamp from a metadata file name. + * @param lastSuccessfulFetchOfPinnedTimestamps The timestamp of the last successful fetch of pinned timestamps. + * @return A new list containing only the metadata files that meet the age criteria. + * If pinned timestamps are not enabled, returns a copy of the input list. + */ + public static List filterOutMetadataFilesBasedOnAge( + List metadataFiles, + Function getTimestampFunction, + long lastSuccessfulFetchOfPinnedTimestamps + ) { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return new ArrayList<>(metadataFiles); + } + long maximumAllowedTimestamp = lastSuccessfulFetchOfPinnedTimestamps - RemoteStoreSettings.getPinnedTimestampsLookbackInterval() + .getMillis(); + List metadataFilesWithMinAge = new ArrayList<>(); + for (String metadataFileName : metadataFiles) { + long metadataTimestamp = getTimestampFunction.apply(metadataFileName); + if (metadataTimestamp < maximumAllowedTimestamp) { + metadataFilesWithMinAge.add(metadataFileName); + } + } + return metadataFilesWithMinAge; + } + + /** + * Determines if the pinned timestamp state is stale. + * + * This method checks whether the last successful fetch of pinned timestamps + * is considered stale based on the current time and configured intervals. + * The state is considered stale if the last successful fetch occurred before + * a certain threshold, which is calculated as three times the scheduler interval + * plus the lookback interval. + * + * @return true if the pinned timestamp state is stale, false otherwise. + * Always returns false if pinned timestamps are not enabled. 
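A worked example of the staleness threshold described above, assuming the default intervals introduced in RemoteStoreSettings further down (scheduler_interval = 3m, lookback_interval = 1m):

// stale buffer = 3 * scheduler_interval + lookback_interval = 3 * 3m + 1m = 10 minutes
long staleBufferMillis = 3 * TimeValue.timeValueMinutes(3).millis() + TimeValue.timeValueMinutes(1).millis();
long lastSuccessfulFetch = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1();
boolean stale = lastSuccessfulFetch < System.currentTimeMillis() - staleBufferMillis;
// i.e. the state is treated as stale once the last successful fetch is more than ~10 minutes old.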
+ */ + public static boolean isPinnedTimestampStateStale() { + if (RemoteStoreSettings.isPinnedTimestampsEnabled() == false) { + return false; + } + long lastSuccessfulFetchTimestamp = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1(); + long staleBufferInMillis = (RemoteStoreSettings.getPinnedTimestampsSchedulerInterval().millis() * 3) + RemoteStoreSettings + .getPinnedTimestampsLookbackInterval() + .millis(); + return lastSuccessfulFetchTimestamp < (System.currentTimeMillis() - staleBufferInMillis); + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 8c0ecb4cc783a..26871429e41d6 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -40,6 +40,7 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; @@ -91,6 +92,8 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final RemoteStoreLockManager mdLockManager; + private final Map metadataFilePinnedTimestampMap; + private final ThreadPool threadPool; /** @@ -132,6 +135,7 @@ public RemoteSegmentStoreDirectory( this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; this.threadPool = threadPool; + this.metadataFilePinnedTimestampMap = new HashMap<>(); this.logger = Loggers.getLogger(getClass(), shardId); init(); } @@ -176,6 +180,42 @@ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long c return remoteSegmentMetadata; } + /** + * Initializes the remote segment metadata to a specific timestamp. + * + * @param timestamp The timestamp to initialize the remote segment metadata to. + * @return The RemoteSegmentMetadata object corresponding to the specified timestamp, or null if no metadata file is found for that timestamp. + * @throws IOException If an I/O error occurs while reading the metadata file. 
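A minimal usage sketch for the method documented above, assuming a RemoteSegmentStoreDirectory instance and a pinned timestamp are already in scope:

// Load the segment metadata captured by the newest metadata file at or before the pinned timestamp.
RemoteSegmentMetadata metadata = remoteSegmentStoreDirectory.initializeToSpecificTimestamp(pinnedTimestamp);
if (metadata == null) {
    // no metadata file was uploaded at or before this timestamp for the shard
}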
+ */ + public RemoteSegmentMetadata initializeToSpecificTimestamp(long timestamp) throws IOException { + List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ); + Set lockedMetadataFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + Set.of(timestamp), + MetadataFilenameUtils::getTimestamp, + MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + if (lockedMetadataFiles.isEmpty()) { + return null; + } + if (lockedMetadataFiles.size() > 1) { + throw new IOException( + "Expected exactly one metadata file matching timestamp: " + timestamp + " but got " + lockedMetadataFiles + ); + } + String metadataFile = lockedMetadataFiles.iterator().next(); + RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile); + if (remoteSegmentMetadata != null) { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); + } else { + this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(); + } + return remoteSegmentMetadata; + } + /** * Read the latest metadata file to get the list of segments uploaded to the remote segment store. * We upload a metadata file per refresh, but it is not unique per refresh. Refresh metadata file is unique for a given commit. @@ -324,7 +364,8 @@ public static String getMetadataFilename( long translogGeneration, long uploadCounter, int metadataVersion, - String nodeId + String nodeId, + long creationTimestamp ) { return String.join( SEPARATOR, @@ -334,11 +375,30 @@ public static String getMetadataFilename( RemoteStoreUtils.invertLong(translogGeneration), RemoteStoreUtils.invertLong(uploadCounter), String.valueOf(Objects.hash(nodeId)), - RemoteStoreUtils.invertLong(System.currentTimeMillis()), + RemoteStoreUtils.invertLong(creationTimestamp), String.valueOf(metadataVersion) ); } + public static String getMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion, + String nodeId + ) { + return getMetadataFilename( + primaryTerm, + generation, + translogGeneration, + uploadCounter, + metadataVersion, + nodeId, + System.currentTimeMillis() + ); + } + // Visible for testing static long getPrimaryTerm(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[1]); @@ -349,6 +409,11 @@ static long getGeneration(String[] filenameTokens) { return RemoteStoreUtils.invertLong(filenameTokens[2]); } + public static long getTimestamp(String filename) { + String[] filenameTokens = filename.split(SEPARATOR); + return RemoteStoreUtils.invertLong(filenameTokens[6]); + } + public static Tuple getNodeIdByPrimaryTermAndGen(String filename) { String[] tokens = filename.split(SEPARATOR); if (tokens.length < 8) { @@ -773,6 +838,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException ); return; } + List sortedMetadataFileList = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( MetadataFilenameUtils.METADATA_PREFIX, Integer.MAX_VALUE @@ -786,16 +852,44 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - List metadataFilesEligibleToDelete = new ArrayList<>( - sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) + // Check last fetch status of pinned timestamps. If stale, return. 
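For reference, a small sketch of the timestamp encoding that getTimestamp(...) above decodes; both invertLong overloads are already used elsewhere in this hunk:

// Timestamps are stored inverted so that lexicographic order of metadata filenames is
// newest-first, which the pinned-timestamp matching above relies on.
long creationTimestamp = System.currentTimeMillis();
String encoded = RemoteStoreUtils.invertLong(creationTimestamp); // 7th filename token
long decoded = RemoteStoreUtils.invertLong(encoded);             // == creationTimestamp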
+ if (RemoteStoreUtils.isPinnedTimestampStateStale()) { + logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + return; + } + + Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + sortedMetadataFileList, + pinnedTimestampsState.v2(), + metadataFilePinnedTimestampMap, + MetadataFilenameUtils::getTimestamp, + MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen ); - Set allLockFiles; + final Set allLockFiles = new HashSet<>(implicitLockedFiles); + try { - allLockFiles = ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX); + allLockFiles.addAll( + ((RemoteStoreMetadataLockManager) mdLockManager).fetchLockedMetadataFiles(MetadataFilenameUtils.METADATA_PREFIX) + ); } catch (Exception e) { logger.error("Exception while fetching segment metadata lock files, skipping deleteStaleSegments", e); return; } + + List metadataFilesEligibleToDelete = new ArrayList<>( + sortedMetadataFileList.subList(lastNMetadataFilesToKeep, sortedMetadataFileList.size()) + ); + + // Along with last N files, we need to keep files since last successful run of scheduler + long lastSuccessfulFetchOfPinnedTimestamps = pinnedTimestampsState.v1(); + metadataFilesEligibleToDelete = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFilesEligibleToDelete, + MetadataFilenameUtils::getTimestamp, + lastSuccessfulFetchOfPinnedTimestamps + ); + List metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) .collect(Collectors.toList()); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java index 134994c0ae32c..ad42b6d677b41 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteStoreFileDownloader.java @@ -149,6 +149,7 @@ private void copyOneFile( try { cancellableThreads.executeIO(() -> { destination.copyFrom(source, file, file, IOContext.DEFAULT); + logger.trace("Downloaded file {} of size {}", file, destination.fileLength(file)); onFileCompletion.run(); if (secondDestination != null) { secondDestination.copyFrom(destination, file, file, IOContext.DEFAULT); diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 93946fa11de13..71f8cf5a78ec5 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -310,12 +310,11 @@ BytesReference getOrCompute( * @param cacheKey the cache key to invalidate */ void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { - assert reader.getReaderCacheHelper() != null; - String readerCacheKeyId = null; - if (reader instanceof OpenSearchDirectoryReader) { - IndexReader.CacheHelper cacheHelper = ((OpenSearchDirectoryReader) reader).getDelegatingCacheHelper(); - readerCacheKeyId = ((OpenSearchDirectoryReader.DelegatingCacheHelper) cacheHelper).getDelegatingCacheKey().getId(); - } + assert reader.getReaderCacheHelper() instanceof OpenSearchDirectoryReader.DelegatingCacheHelper; + 
OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + IndexShard indexShard = (IndexShard) cacheEntity.getCacheIdentity(); cache.invalidate(getICacheKey(new Key(indexShard.shardId(), cacheKey, readerCacheKeyId, System.identityHashCode(indexShard)))); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 902ca95643625..a78328e24c588 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -68,6 +68,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; @@ -1754,8 +1755,7 @@ public boolean canCache(ShardSearchRequest request, SearchContext context) { if (context.getQueryShardContext().isCacheable() == false) { return false; } - return true; - + return context.searcher().getDirectoryReader().getReaderCacheHelper() instanceof DelegatingCacheHelper; } /** diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 8cb482c8d8681..55280ca5c96d6 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -134,6 +134,36 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * Controls pinned timestamp feature enablement + */ + public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED = Setting.boolSetting( + "cluster.remote_store.pinned_timestamps.enabled", + false, + Setting.Property.NodeScope + ); + + /** + * Controls pinned timestamp scheduler interval + */ + public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL = Setting.timeSetting( + "cluster.remote_store.pinned_timestamps.scheduler_interval", + TimeValue.timeValueMinutes(3), + TimeValue.timeValueMinutes(1), + Setting.Property.NodeScope + ); + + /** + * Controls allowed timestamp values to be pinned from past + */ + public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL = Setting.timeSetting( + "cluster.remote_store.pinned_timestamps.lookback_interval", + TimeValue.timeValueMinutes(1), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueMinutes(5), + Setting.Property.NodeScope + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; private volatile TimeValue clusterRemoteTranslogTransferTimeout; @@ -142,6 +172,9 @@ public class RemoteStoreSettings { private volatile RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm; private volatile int maxRemoteTranslogReaders; private volatile boolean isTranslogMetadataEnabled; + private static volatile boolean isPinnedTimestampsEnabled; + private static volatile TimeValue pinnedTimestampsSchedulerInterval; + private static volatile TimeValue pinnedTimestampsLookbackInterval; public RemoteStoreSettings(Settings 
settings, ClusterSettings clusterSettings) { clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); @@ -179,6 +212,10 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { CLUSTER_REMOTE_SEGMENT_TRANSFER_TIMEOUT_SETTING, this::setClusterRemoteSegmentTransferTimeout ); + + pinnedTimestampsSchedulerInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL.get(settings); + pinnedTimestampsLookbackInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL.get(settings); + isPinnedTimestampsEnabled = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings); } public TimeValue getClusterRemoteTranslogBufferInterval() { @@ -246,4 +283,16 @@ public int getMaxRemoteTranslogReaders() { private void setMaxRemoteTranslogReaders(int maxRemoteTranslogReaders) { this.maxRemoteTranslogReaders = maxRemoteTranslogReaders; } + + public static TimeValue getPinnedTimestampsSchedulerInterval() { + return pinnedTimestampsSchedulerInterval; + } + + public static TimeValue getPinnedTimestampsLookbackInterval() { + return pinnedTimestampsLookbackInterval; + } + + public static boolean isPinnedTimestampsEnabled() { + return isPinnedTimestampsEnabled; + } } diff --git a/server/src/main/java/org/opensearch/indices/TermsLookup.java b/server/src/main/java/org/opensearch/indices/TermsLookup.java index 37533c0809d7a..7337ed31ce41e 100644 --- a/server/src/main/java/org/opensearch/indices/TermsLookup.java +++ b/server/src/main/java/org/opensearch/indices/TermsLookup.java @@ -87,6 +87,9 @@ public TermsLookup(StreamInput in) throws IOException { path = in.readString(); index = in.readString(); routing = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_2_17_0)) { + store = in.readBoolean(); + } } @Override @@ -98,6 +101,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(path); out.writeString(index); out.writeOptionalString(routing); + if (out.getVersion().onOrAfter(Version.V_2_17_0)) { + out.writeBoolean(store); + } } public String index() { @@ -121,6 +127,17 @@ public TermsLookup routing(String routing) { return this; } + private boolean store; + + public boolean store() { + return store; + } + + public TermsLookup store(boolean store) { + this.store = store; + return this; + } + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("terms_lookup", args -> { String index = (String) args[0]; String id = (String) args[1]; @@ -132,6 +149,7 @@ public TermsLookup routing(String routing) { PARSER.declareString(constructorArg(), new ParseField("id")); PARSER.declareString(constructorArg(), new ParseField("path")); PARSER.declareString(TermsLookup::routing, new ParseField("routing")); + PARSER.declareBoolean(TermsLookup::store, new ParseField("store")); } public static TermsLookup parseTermsLookup(XContentParser parser) throws IOException { @@ -151,12 +169,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (routing != null) { builder.field("routing", routing); } + if (store) { + builder.field("store", true); + } return builder; } @Override public int hashCode() { - return Objects.hash(index, id, path, routing); + return Objects.hash(index, id, path, routing, store); } @Override @@ -171,6 +192,7 @@ public boolean equals(Object obj) { return Objects.equals(index, other.index) && Objects.equals(id, other.id) && Objects.equals(path, other.path) - && Objects.equals(routing, other.routing); + && Objects.equals(routing, 
other.routing) + && Objects.equals(store, other.store); } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 1a9b233b387b2..7e867d3966ff5 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -305,6 +305,7 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreClusterStateEnabled; @@ -812,7 +813,7 @@ protected Node( remoteClusterStateCleanupManager = null; } final RemoteStorePinnedTimestampService remoteStorePinnedTimestampService; - if (isRemoteStoreAttributePresent(settings)) { + if (isRemoteStoreAttributePresent(settings) && CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.get(settings)) { remoteStorePinnedTimestampService = new RemoteStorePinnedTimestampService( repositoriesServiceReference::get, settings, diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java index 35730a75a8142..f7b262664d147 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java @@ -15,7 +15,6 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.collect.Tuple; -import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractAsyncTask; @@ -24,6 +23,7 @@ import org.opensearch.gateway.remote.model.RemotePinnedTimestamps.PinnedTimestamps; import org.opensearch.gateway.remote.model.RemoteStorePinnedTimestampsBlobStore; import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.Node; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -61,30 +61,8 @@ public class RemoteStorePinnedTimestampService implements Closeable { private BlobStoreTransferService blobStoreTransferService; private RemoteStorePinnedTimestampsBlobStore pinnedTimestampsBlobStore; private AsyncUpdatePinnedTimestampTask asyncUpdatePinnedTimestampTask; - private volatile TimeValue pinnedTimestampsSchedulerInterval; private final Semaphore updateTimetampPinningSemaphore = new Semaphore(1); - /** - * Controls pinned timestamp scheduler interval - */ - public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL = Setting.timeSetting( - "cluster.remote_store.pinned_timestamps.scheduler_interval", - TimeValue.timeValueMinutes(3), - TimeValue.timeValueMinutes(1), - Setting.Property.NodeScope - ); - - /** - * Controls allowed timestamp values to be pinned from past - */ - public static final Setting CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL = Setting.timeSetting( - 
"cluster.remote_store.pinned_timestamps.lookback_interval", - TimeValue.timeValueMinutes(1), - TimeValue.timeValueMinutes(1), - TimeValue.timeValueMinutes(5), - Setting.Property.NodeScope - ); - public RemoteStorePinnedTimestampService( Supplier repositoriesService, Settings settings, @@ -95,8 +73,6 @@ public RemoteStorePinnedTimestampService( this.settings = settings; this.threadPool = threadPool; this.clusterService = clusterService; - - pinnedTimestampsSchedulerInterval = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL.get(settings); } /** @@ -107,7 +83,7 @@ public RemoteStorePinnedTimestampService( public void start() { validateRemoteStoreConfiguration(); initializeComponents(); - startAsyncUpdateTask(); + startAsyncUpdateTask(RemoteStoreSettings.getPinnedTimestampsSchedulerInterval()); } private void validateRemoteStoreConfiguration() { @@ -132,7 +108,7 @@ private void initializeComponents() { ); } - private void startAsyncUpdateTask() { + private void startAsyncUpdateTask(TimeValue pinnedTimestampsSchedulerInterval) { asyncUpdatePinnedTimestampTask = new AsyncUpdatePinnedTimestampTask(logger, threadPool, pinnedTimestampsSchedulerInterval, true); } @@ -147,8 +123,8 @@ private void startAsyncUpdateTask() { public void pinTimestamp(long timestamp, String pinningEntity, ActionListener listener) { // If a caller uses current system time to pin the timestamp, following check will almost always fail. // So, we allow pinning timestamp in the past upto some buffer - long lookbackIntervalInMills = CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL.get(settings).millis(); - if (timestamp < TimeUnit.NANOSECONDS.toMillis(System.nanoTime()) - lookbackIntervalInMills) { + long lookbackIntervalInMills = RemoteStoreSettings.getPinnedTimestampsLookbackInterval().millis(); + if (timestamp < (System.currentTimeMillis() - lookbackIntervalInMills)) { throw new IllegalArgumentException( "Timestamp to be pinned is less than current timestamp - value of cluster.remote_store.pinned_timestamps.lookback_interval" ); @@ -243,9 +219,9 @@ private ActionListener getListenerForWriteCallResponse( private PinnedTimestamps readExistingPinnedTimestamps(String blobFilename, RemotePinnedTimestamps remotePinnedTimestamps) { remotePinnedTimestamps.setBlobFileName(blobFilename); - remotePinnedTimestamps.setFullBlobName(pinnedTimestampsBlobStore.getBlobPathForUpload(remotePinnedTimestamps)); + remotePinnedTimestamps.setFullBlobName(pinnedTimestampsBlobStore().getBlobPathForUpload(remotePinnedTimestamps)); try { - return pinnedTimestampsBlobStore.read(remotePinnedTimestamps); + return pinnedTimestampsBlobStore().read(remotePinnedTimestamps); } catch (IOException e) { throw new RuntimeException("Failed to read existing pinned timestamps", e); } @@ -256,17 +232,12 @@ public void close() throws IOException { asyncUpdatePinnedTimestampTask.close(); } - // Visible for testing - public void setPinnedTimestampsSchedulerInterval(TimeValue pinnedTimestampsSchedulerInterval) { - this.pinnedTimestampsSchedulerInterval = pinnedTimestampsSchedulerInterval; - rescheduleAsyncUpdatePinnedTimestampTask(); - } - - private void rescheduleAsyncUpdatePinnedTimestampTask() { + // Used in integ tests + public void rescheduleAsyncUpdatePinnedTimestampTask(TimeValue pinnedTimestampsSchedulerInterval) { if (pinnedTimestampsSchedulerInterval != null) { pinnedTimestampsSet = new Tuple<>(-1L, Set.of()); asyncUpdatePinnedTimestampTask.close(); - startAsyncUpdateTask(); + startAsyncUpdateTask(pinnedTimestampsSchedulerInterval); } } @@ -274,6 
+245,14 @@ public static Tuple> getPinnedTimestamps() { return pinnedTimestampsSet; } + public RemoteStorePinnedTimestampsBlobStore pinnedTimestampsBlobStore() { + return pinnedTimestampsBlobStore; + } + + public BlobStoreTransferService blobStoreTransferService() { + return blobStoreTransferService; + } + /** * Inner class for asynchronously updating the pinned timestamp set. */ @@ -295,11 +274,12 @@ protected void runInternal() { clusterService.state().metadata().clusterUUID(), blobStoreRepository.getCompressor() ); - BlobPath path = pinnedTimestampsBlobStore.getBlobPathForUpload(remotePinnedTimestamps); - blobStoreTransferService.listAllInSortedOrder(path, remotePinnedTimestamps.getType(), 1, new ActionListener<>() { + BlobPath path = pinnedTimestampsBlobStore().getBlobPathForUpload(remotePinnedTimestamps); + blobStoreTransferService().listAllInSortedOrder(path, remotePinnedTimestamps.getType(), 1, new ActionListener<>() { @Override public void onResponse(List blobMetadata) { if (blobMetadata.isEmpty()) { + pinnedTimestampsSet = new Tuple<>(triggerTimestamp, Set.of()); return; } PinnedTimestamps pinnedTimestamps = readExistingPinnedTimestamps(blobMetadata.get(0).name(), remotePinnedTimestamps); diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index abb968c2de245..1401c6b89d38a 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -991,7 +991,14 @@ public int getTargetMaxSliceCount() { if (shouldUseConcurrentSearch() == false) { throw new IllegalStateException("Target slice count should not be used when concurrent search is disabled"); } - return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING); + + return indexService.getIndexSettings() + .getSettings() + .getAsInt( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(), + clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING) + ); + } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java index f8061bcaca50f..20f294bc7199b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -57,7 +57,11 @@ import org.opensearch.search.DocValueFormat; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Objects; import java.util.Set; import java.util.SortedSet; @@ -86,6 +90,10 @@ public class IncludeExclude implements Writeable, ToXContentFragment { * https://github.com/opensearch-project/OpenSearch/issues/2858 */ private static final int DEFAULT_MAX_REGEX_LENGTH = 1000; + /** + * The maximum number of prefixes to extract from a regex in tryCreatePrefixOrdinalsFilter + */ + private static final int MAX_PREFIXES = 1000; // for parsing purposes only // TODO: move all aggs to the same package so that this stuff could be pkg-private @@ -393,6 +401,92 @@ public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) thro } + /** + * An ordinals filter that includes/excludes all ordinals 
corresponding to terms starting with the given prefixes + */ + static class PrefixBackedOrdinalsFilter extends OrdinalsFilter { + + private final SortedSet includePrefixes, excludePrefixes; + + private PrefixBackedOrdinalsFilter(SortedSet includePrefixes, SortedSet excludePrefixes) { + this.includePrefixes = includePrefixes; + this.excludePrefixes = excludePrefixes; + } + + private static BytesRef nextBytesRef(BytesRef bytesRef) { + BytesRef next = BytesRef.deepCopyOf(bytesRef); + int pos = next.offset + next.length - 1; + while (pos >= next.offset && next.bytes[pos] == -1) { + next.bytes[pos] = 0; + pos--; + } + if (pos >= next.offset) { + next.bytes[pos]++; + } else { + // Every byte in our prefix had value 0xFF. We must match all subsequent ordinals. + return null; + } + return next; + } + + private interface LongBiConsumer { + void accept(long a, long b); + } + + private static void process(SortedSetDocValues globalOrdinals, long length, SortedSet prefixes, LongBiConsumer consumer) + throws IOException { + for (BytesRef prefix : prefixes) { + long startOrd = globalOrdinals.lookupTerm(prefix); + if (startOrd < 0) { + // The prefix is not an exact match in the ordinals (can skip equal length below) + startOrd = -1 - startOrd; + // Make sure that the term at startOrd starts with prefix + BytesRef startTerm = globalOrdinals.lookupOrd(startOrd); + if (startTerm.length <= prefix.length + || !Arrays.equals( + startTerm.bytes, + startTerm.offset, + startTerm.offset + prefix.length, + prefix.bytes, + prefix.offset, + prefix.offset + prefix.length + )) { + continue; + } + } + if (startOrd >= length) { + continue; + } + BytesRef next = nextBytesRef(prefix); + if (next == null) { + consumer.accept(startOrd, length); + } else { + long endOrd = globalOrdinals.lookupTerm(next); + if (endOrd < 0) { + endOrd = -1 - endOrd; + } + if (startOrd < endOrd) { + consumer.accept(startOrd, endOrd); + } + } + } + + } + + @Override + public LongBitSet acceptedGlobalOrdinals(SortedSetDocValues globalOrdinals) throws IOException { + LongBitSet accept = new LongBitSet(globalOrdinals.getValueCount()); + if (!includePrefixes.isEmpty()) { + process(globalOrdinals, accept.length(), includePrefixes, accept::set); + } else if (accept.length() > 0) { + // Exclude-only + accept.set(0, accept.length()); + } + process(globalOrdinals, accept.length(), excludePrefixes, accept::clear); + return accept; + } + } + private final String include, exclude; private final SortedSet includeValues, excludeValues; private final int incZeroBasedPartition; @@ -709,8 +803,13 @@ public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format) { } public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format, int maxRegexLength) { - if (isRegexBased()) { + if ((include == null || include.endsWith(".*")) && (exclude == null || exclude.endsWith(".*"))) { + PrefixBackedOrdinalsFilter prefixBackedOrdinalsFilter = tryCreatePrefixOrdinalsFilter(maxRegexLength); + if (prefixBackedOrdinalsFilter != null) { + return prefixBackedOrdinalsFilter; + } + } return new AutomatonBackedOrdinalsFilter(toAutomaton(maxRegexLength)); } if (isPartitionBased()) { @@ -720,6 +819,94 @@ public OrdinalsFilter convertToOrdinalsFilter(DocValueFormat format, int maxRege return new TermListBackedOrdinalsFilter(parseForDocValues(includeValues, format), parseForDocValues(excludeValues, format)); } + private static List expandRegexp(RegExp regExp, int maxPrefixes) { + switch (regExp.kind) { + case REGEXP_UNION: + List alternatives = new ArrayList<>(); + while 
(regExp.exp1.kind == RegExp.Kind.REGEXP_UNION) { + alternatives.add(regExp.exp2); + regExp = regExp.exp1; + } + alternatives.add(regExp.exp2); + alternatives.add(regExp.exp1); + List output = new ArrayList<>(); + for (RegExp leaf : alternatives) { + List leafExpansions = expandRegexp(leaf, maxPrefixes); + if (leafExpansions == null) { + return null; + } else { + if (output.size() + leafExpansions.size() > maxPrefixes) { + return null; + } + output.addAll(leafExpansions); + } + } + return output; + case REGEXP_CONCATENATION: + List prefixes = expandRegexp(regExp.exp1, maxPrefixes); + if (prefixes == null) { + return null; + } + List suffixes = expandRegexp(regExp.exp2, maxPrefixes); + if (suffixes == null) { + return null; + } + if (prefixes.size() * suffixes.size() > maxPrefixes) { + return null; + } + List out = new ArrayList<>(); + StringBuilder stringBuilder = new StringBuilder(); + for (String prefix : prefixes) { + for (String suffix : suffixes) { + stringBuilder.setLength(0); + stringBuilder.append(prefix).append(suffix); + out.add(stringBuilder.toString()); + } + } + return out; + case REGEXP_CHAR: + return List.of(Character.toString(regExp.c)); + case REGEXP_STRING: + return List.of(regExp.s); + default: + return null; + } + } + + static SortedSet extractPrefixes(String pattern, int maxRegexLength) { + if (pattern == null) { + return Collections.emptySortedSet(); + } + SortedSet prefixSet = null; + validateRegExpStringLength(pattern, maxRegexLength); + RegExp regExp = new RegExp(pattern); + if (regExp.kind == RegExp.Kind.REGEXP_CONCATENATION && regExp.exp2.kind == RegExp.Kind.REGEXP_REPEAT) { + RegExp tail = regExp.exp2.exp1; + if (tail.kind == RegExp.Kind.REGEXP_ANYCHAR || tail.kind == RegExp.Kind.REGEXP_ANYSTRING) { + List prefixes = expandRegexp(regExp.exp1, MAX_PREFIXES); + if (prefixes != null) { + prefixSet = new TreeSet<>(); + for (String prefix : prefixes) { + prefixSet.add(new BytesRef(prefix)); + } + } + } + } + return prefixSet; + } + + private PrefixBackedOrdinalsFilter tryCreatePrefixOrdinalsFilter(int maxRegexLength) { + SortedSet includeSet = extractPrefixes(include, maxRegexLength); + if (includeSet == null) { + return null; + } + SortedSet excludeSet = extractPrefixes(exclude, maxRegexLength); + if (excludeSet == null) { + return null; + } + return new PrefixBackedOrdinalsFilter(includeSet, excludeSet); + } + public LongFilter convertToLongFilter(DocValueFormat format) { if (isPartitionBased()) { diff --git a/server/src/main/java/org/opensearch/search/query/BitmapDocValuesQuery.java b/server/src/main/java/org/opensearch/search/query/BitmapDocValuesQuery.java new file mode 100644 index 0000000000000..dfa5fc4567f80 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/query/BitmapDocValuesQuery.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
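Two illustrative inputs for the prefix fast path added above (extractPrefixes is package-private and the patterns are made up; this only indicates the expected outcome, not a supported API):

// include = "(error|warn).*" -> the cheaper PrefixBackedOrdinalsFilter is used
SortedSet<BytesRef> prefixes = IncludeExclude.extractPrefixes("(error|warn).*", 1000); // { "error", "warn" }
// include = "error[0-9].*"   -> no usable prefix set can be extracted (the character class
// defeats the expansion), extractPrefixes returns null and the automaton-backed filter is kept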
+ */ + +package org.opensearch.search.query; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; + +import java.io.IOException; +import java.util.Objects; + +import org.roaringbitmap.RoaringBitmap; + +/** + * Filter with bitmap + *
<p>
+ * Similar to Lucene SortedNumericDocValuesSetQuery + */ +public class BitmapDocValuesQuery extends Query implements Accountable { + + final String field; + final RoaringBitmap bitmap; + final long min; + final long max; + + public BitmapDocValuesQuery(String field, RoaringBitmap bitmap) { + this.field = field; + this.bitmap = bitmap; + if (!bitmap.isEmpty()) { + min = bitmap.first(); + max = bitmap.last(); + } else { + min = 0; // final field + max = 0; + } + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new ConstantScoreWeight(this, boost) { + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field); + final NumericDocValues singleton = DocValues.unwrapSingleton(values); + final TwoPhaseIterator iterator; + if (singleton != null) { + iterator = new TwoPhaseIterator(singleton) { + @Override + public boolean matches() throws IOException { + long value = singleton.longValue(); + return value >= min && value <= max && bitmap.contains((int) value); + } + + @Override + public float matchCost() { + return 5; // 2 comparisons, possible lookup in the bitmap + } + }; + } else { + iterator = new TwoPhaseIterator(values) { + @Override + public boolean matches() throws IOException { + int count = values.docValueCount(); + for (int i = 0; i < count; i++) { + final long value = values.nextValue(); + if (value < min) { + continue; + } else if (value > max) { + return false; // values are sorted, terminate + } else if (bitmap.contains((int) value)) { + return true; + } + } + return false; + } + + @Override + public float matchCost() { + return 5; // 2 comparisons, possible lookup in the bitmap + } + }; + } + return new ConstantScoreScorer(this, score(), scoreMode, iterator); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return DocValues.isCacheable(ctx, field); + } + }; + } + + @Override + public String toString(String field) { + // bitmap may contain high cardinality, so choose to not show the actual values in it + return field + " cardinality: " + bitmap.getLongCardinality(); + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + if (bitmap.isEmpty()) { + return new MatchNoDocsQuery(); + } + return super.rewrite(indexSearcher); + } + + @Override + public boolean equals(Object other) { + if (sameClassAs(other) == false) { + return false; + } + BitmapDocValuesQuery that = (BitmapDocValuesQuery) other; + return field.equals(that.field) && bitmap.equals(that.bitmap); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), field, bitmap); + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.shallowSizeOfInstance(BitmapDocValuesQuery.class) + RamUsageEstimator.sizeOfObject(field) + + RamUsageEstimator.sizeOfObject(bitmap); + } + + @Override + public void visit(QueryVisitor visitor) { + if (visitor.acceptField(field)) { + visitor.visitLeaf(this); + } + } +} diff --git a/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java new file mode 100644 index 0000000000000..b104925df77b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/wlm/WorkloadManagementSettings.java @@ -0,0 +1,256 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be 
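BitmapDocValuesQuery above matches documents whose numeric doc value is contained in a pre-computed RoaringBitmap, in the spirit of Lucene's SortedNumericDocValuesSetQuery, which makes it a compact way to filter on a very large list of ids. A minimal usage sketch (the in-memory index and the `product_id` field are hypothetical; only the query construction mirrors the new class):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.opensearch.search.query.BitmapDocValuesQuery;
import org.roaringbitmap.RoaringBitmap;

class BitmapQuerySketch {
    static long countMatches() throws Exception {
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            for (int id : new int[] { 1, 5, 42, 100 }) {
                Document doc = new Document();
                doc.add(new SortedNumericDocValuesField("product_id", id));
                writer.addDocument(doc);
            }
            // A potentially huge id set encoded as a bitmap, e.g. shipped by a client.
            RoaringBitmap bitmap = RoaringBitmap.bitmapOf(5, 7, 42);
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                // Matches the documents with values 5 and 42, so this returns 2.
                return new IndexSearcher(reader).count(new BitmapDocValuesQuery("product_id", bitmap));
            }
        }
    }
}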
licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Main class to declare Workload Management related settings + */ +public class WorkloadManagementSettings { + private static final Double DEFAULT_NODE_LEVEL_MEMORY_REJECTION_THRESHOLD = 0.8; + private static final Double DEFAULT_NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD = 0.9; + private static final Double DEFAULT_NODE_LEVEL_CPU_REJECTION_THRESHOLD = 0.8; + private static final Double DEFAULT_NODE_LEVEL_CPU_CANCELLATION_THRESHOLD = 0.9; + public static final double NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD_MAX_VALUE = 0.95; + public static final double NODE_LEVEL_MEMORY_REJECTION_THRESHOLD_MAX_VALUE = 0.9; + public static final double NODE_LEVEL_CPU_CANCELLATION_THRESHOLD_MAX_VALUE = 0.95; + public static final double NODE_LEVEL_CPU_REJECTION_THRESHOLD_MAX_VALUE = 0.9; + + private Double nodeLevelMemoryCancellationThreshold; + private Double nodeLevelMemoryRejectionThreshold; + private Double nodeLevelCpuCancellationThreshold; + private Double nodeLevelCpuRejectionThreshold; + + /** + * Setting name for node level memory based rejection threshold for QueryGroup service + */ + public static final String NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.memory_rejection_threshold"; + /** + * Setting to control the memory based rejection threshold + */ + public static final Setting NODE_LEVEL_MEMORY_REJECTION_THRESHOLD = Setting.doubleSetting( + NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, + DEFAULT_NODE_LEVEL_MEMORY_REJECTION_THRESHOLD, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + /** + * Setting name for node level cpu based rejection threshold for QueryGroup service + */ + public static final String NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.cpu_rejection_threshold"; + /** + * Setting to control the cpu based rejection threshold + */ + public static final Setting NODE_LEVEL_CPU_REJECTION_THRESHOLD = Setting.doubleSetting( + NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, + DEFAULT_NODE_LEVEL_CPU_REJECTION_THRESHOLD, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + /** + * Setting name for node level memory based cancellation threshold for QueryGroup service + */ + public static final String NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.memory_cancellation_threshold"; + /** + * Setting to control the memory based cancellation threshold + */ + public static final Setting NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD = Setting.doubleSetting( + NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, + DEFAULT_NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + /** + * Setting name for node level cpu based cancellation threshold for QueryGroup service + */ + public static final String NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME = "wlm.query_group.node.cpu_cancellation_threshold"; + /** + * Setting to control the cpu based cancellation threshold + */ + public static final Setting NODE_LEVEL_CPU_CANCELLATION_THRESHOLD = Setting.doubleSetting( + NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, + DEFAULT_NODE_LEVEL_CPU_CANCELLATION_THRESHOLD, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * QueryGroup service settings constructor + * @param settings - QueryGroup 
service settings + * @param clusterSettings - QueryGroup cluster settings + */ + public WorkloadManagementSettings(Settings settings, ClusterSettings clusterSettings) { + nodeLevelMemoryCancellationThreshold = NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD.get(settings); + nodeLevelMemoryRejectionThreshold = NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.get(settings); + nodeLevelCpuCancellationThreshold = NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.get(settings); + nodeLevelCpuRejectionThreshold = NODE_LEVEL_CPU_REJECTION_THRESHOLD.get(settings); + + ensureRejectionThresholdIsLessThanCancellation( + nodeLevelMemoryRejectionThreshold, + nodeLevelMemoryCancellationThreshold, + NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, + NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME + ); + ensureRejectionThresholdIsLessThanCancellation( + nodeLevelCpuRejectionThreshold, + nodeLevelCpuCancellationThreshold, + NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, + NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME + ); + + clusterSettings.addSettingsUpdateConsumer(NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD, this::setNodeLevelMemoryCancellationThreshold); + clusterSettings.addSettingsUpdateConsumer(NODE_LEVEL_MEMORY_REJECTION_THRESHOLD, this::setNodeLevelMemoryRejectionThreshold); + clusterSettings.addSettingsUpdateConsumer(NODE_LEVEL_CPU_CANCELLATION_THRESHOLD, this::setNodeLevelCpuCancellationThreshold); + clusterSettings.addSettingsUpdateConsumer(NODE_LEVEL_CPU_REJECTION_THRESHOLD, this::setNodeLevelCpuRejectionThreshold); + } + + /** + * Method to get the node level memory based cancellation threshold + * @return current node level memory based cancellation threshold + */ + public Double getNodeLevelMemoryCancellationThreshold() { + return nodeLevelMemoryCancellationThreshold; + } + + /** + * Method to set the node level memory based cancellation threshold + * @param nodeLevelMemoryCancellationThreshold sets the new node level memory based cancellation threshold + * @throws IllegalArgumentException if the value is > 0.95 and cancellation < rejection threshold + */ + public void setNodeLevelMemoryCancellationThreshold(Double nodeLevelMemoryCancellationThreshold) { + if (Double.compare(nodeLevelMemoryCancellationThreshold, NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD_MAX_VALUE) > 0) { + throw new IllegalArgumentException( + NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME + " value cannot be greater than 0.95 as it can result in a node drop" + ); + } + + ensureRejectionThresholdIsLessThanCancellation( + nodeLevelMemoryRejectionThreshold, + nodeLevelMemoryCancellationThreshold, + NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, + NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME + ); + + this.nodeLevelMemoryCancellationThreshold = nodeLevelMemoryCancellationThreshold; + } + + /** + * Method to get the node level cpu based cancellation threshold + * @return current node level cpu based cancellation threshold + */ + public Double getNodeLevelCpuCancellationThreshold() { + return nodeLevelCpuCancellationThreshold; + } + + /** + * Method to set the node level cpu based cancellation threshold + * @param nodeLevelCpuCancellationThreshold sets the new node level cpu based cancellation threshold + * @throws IllegalArgumentException if the value is > 0.95 and cancellation < rejection threshold + */ + public void setNodeLevelCpuCancellationThreshold(Double nodeLevelCpuCancellationThreshold) { + if (Double.compare(nodeLevelCpuCancellationThreshold, NODE_LEVEL_CPU_CANCELLATION_THRESHOLD_MAX_VALUE) > 0) { + throw new IllegalArgumentException( + 
NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME + " value cannot be greater than 0.95 as it can result in a node drop" + ); + } + + ensureRejectionThresholdIsLessThanCancellation( + nodeLevelCpuRejectionThreshold, + nodeLevelCpuCancellationThreshold, + NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, + NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME + ); + + this.nodeLevelCpuCancellationThreshold = nodeLevelCpuCancellationThreshold; + } + + /** + * Method to get the memory based node level rejection threshold + * @return the current memory based node level rejection threshold + */ + public Double getNodeLevelMemoryRejectionThreshold() { + return nodeLevelMemoryRejectionThreshold; + } + + /** + * Method to set the node level memory based rejection threshold + * @param nodeLevelMemoryRejectionThreshold sets the new memory based rejection threshold + * @throws IllegalArgumentException if rejection > 0.90 and rejection < cancellation threshold + */ + public void setNodeLevelMemoryRejectionThreshold(Double nodeLevelMemoryRejectionThreshold) { + if (Double.compare(nodeLevelMemoryRejectionThreshold, NODE_LEVEL_MEMORY_REJECTION_THRESHOLD_MAX_VALUE) > 0) { + throw new IllegalArgumentException( + NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME + " value cannot be greater than 0.90 as it can result in a node drop" + ); + } + + ensureRejectionThresholdIsLessThanCancellation( + nodeLevelMemoryRejectionThreshold, + nodeLevelMemoryCancellationThreshold, + NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, + NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME + ); + + this.nodeLevelMemoryRejectionThreshold = nodeLevelMemoryRejectionThreshold; + } + + /** + * Method to get the cpu based node level rejection threshold + * @return the current cpu based node level rejection threshold + */ + public Double getNodeLevelCpuRejectionThreshold() { + return nodeLevelCpuRejectionThreshold; + } + + /** + * Method to set the node level cpu based rejection threshold + * @param nodeLevelCpuRejectionThreshold sets the new cpu based rejection threshold + * @throws IllegalArgumentException if rejection > 0.90 and rejection < cancellation threshold + */ + public void setNodeLevelCpuRejectionThreshold(Double nodeLevelCpuRejectionThreshold) { + if (Double.compare(nodeLevelCpuRejectionThreshold, NODE_LEVEL_CPU_REJECTION_THRESHOLD_MAX_VALUE) > 0) { + throw new IllegalArgumentException( + NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME + " value cannot be greater than 0.90 as it can result in a node drop" + ); + } + + ensureRejectionThresholdIsLessThanCancellation( + nodeLevelCpuRejectionThreshold, + nodeLevelCpuCancellationThreshold, + NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, + NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME + ); + + this.nodeLevelCpuRejectionThreshold = nodeLevelCpuRejectionThreshold; + } + + /** + * Method to validate that the cancellation threshold is greater than or equal to rejection threshold + * @param nodeLevelRejectionThreshold rejection threshold to be compared + * @param nodeLevelCancellationThreshold cancellation threshold to be compared + * @param rejectionThresholdSettingName name of the rejection threshold setting + * @param cancellationThresholdSettingName name of the cancellation threshold setting + * @throws IllegalArgumentException if cancellation threshold is less than rejection threshold + */ + private void ensureRejectionThresholdIsLessThanCancellation( + Double nodeLevelRejectionThreshold, + Double nodeLevelCancellationThreshold, + String rejectionThresholdSettingName, + String cancellationThresholdSettingName + ) { + 
if (Double.compare(nodeLevelCancellationThreshold, nodeLevelRejectionThreshold) < 0) { + throw new IllegalArgumentException( + cancellationThresholdSettingName + " value should not be less than " + rejectionThresholdSettingName + ); + } + } +} diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index e030a813373c1..f51452c57f975 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1 +1 @@ -org.opensearch.index.codec.composite.Composite99Codec +org.opensearch.index.codec.composite.composite99.Composite99Codec diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index 20c2b1f17124c..1e2b29287acb3 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -20,6 +20,7 @@ import org.opensearch.extensions.action.ExtensionTransportAction; import org.opensearch.extensions.rest.RestSendToExtensionAction; import org.opensearch.rest.NamedRoute; +import org.opensearch.rest.RestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskManager; @@ -85,13 +86,17 @@ public void testDynamicActionRegistryWithNamedRoutes() { RestSendToExtensionAction action2 = mock(RestSendToExtensionAction.class); NamedRoute r1 = new NamedRoute.Builder().method(RestRequest.Method.GET).path("/foo").uniqueName("foo").build(); NamedRoute r2 = new NamedRoute.Builder().method(RestRequest.Method.PUT).path("/bar").uniqueName("bar").build(); + RestHandler.Route r3 = new RestHandler.Route(RestRequest.Method.DELETE, "/foo"); DynamicActionRegistry registry = new DynamicActionRegistry(); registry.registerDynamicRoute(r1, action); registry.registerDynamicRoute(r2, action2); assertTrue(registry.isActionRegistered("foo")); + assertEquals(action, registry.get(r1)); assertTrue(registry.isActionRegistered("bar")); + assertEquals(action2, registry.get(r2)); + assertNull(registry.get(r3)); registry.unregisterDynamicRoute(r2); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index e74962dcbba1b..3ee2278aec546 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -66,6 +66,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -971,7 +973,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep .put("node.attr." 
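The WorkloadManagementSettings class above exposes four node-scoped, dynamic thresholds and validates on construction (and on every dynamic update) that a rejection threshold never exceeds its cancellation threshold; the 0.90/0.95 hard caps are enforced when the values are updated at runtime. A minimal wiring sketch follows; registering exactly these four settings in ClusterSettings is an assumption made for the illustration:

import java.util.Set;

import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.wlm.WorkloadManagementSettings;

class WlmSettingsSketch {
    static WorkloadManagementSettings demo() {
        Settings nodeSettings = Settings.builder()
            .put("wlm.query_group.node.memory_rejection_threshold", 0.7)
            .put("wlm.query_group.node.memory_cancellation_threshold", 0.85)
            .build();
        ClusterSettings clusterSettings = new ClusterSettings(
            nodeSettings,
            Set.<Setting<?>>of(
                WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD,
                WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD,
                WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD,
                WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD
            )
        );
        // Throws IllegalArgumentException if a rejection threshold is configured above its
        // cancellation threshold; the CPU thresholds fall back to the 0.8/0.9 defaults here.
        return new WorkloadManagementSettings(nodeSettings, clusterSettings);
    }
}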
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, randomRepoName) .put(stateRepoTypeAttributeKey, FsRepository.TYPE) .put(stateRepoSettingsAttributeKeyPrefix + "location", "randomRepoPath") - .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .build(); final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, settings); @@ -1002,6 +1004,16 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep ); } + public void testIsRemotePublicationEnabled_WithInconsistentSettings() { + // create settings with remote state disabled but publication enabled + Settings settings = Settings.builder() + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) + .put(REMOTE_PUBLICATION_EXPERIMENTAL, true) + .build(); + CoordinationState coordinationState = createCoordinationState(psr1, node1, settings); + assertFalse(coordinationState.isRemotePublicationEnabled()); + } + public static CoordinationState createCoordinationState( PersistedStateRegistry persistedStateRegistry, DiscoveryNode localNode, diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 7146b7dc51753..bbf98b5c32918 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -48,7 +48,7 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java new file mode 100644 index 0000000000000..7fb8fe7f68f45 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesConsumerFactoryTests.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +public class LuceneDocValuesConsumerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesConsumerForCompositeCodec() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesConsumer", consumer.getClass().getName()); + assertEquals(CompositeCodecFactory.COMPOSITE_CODEC, Composite99Codec.COMPOSITE_INDEX_CODEC_NAME); + consumer.close(); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java new file mode 100644 index 0000000000000..55d637dfb9cae --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/LuceneDocValuesProducerFactoryTests.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.codecs.DocValuesConsumer; +import org.apache.lucene.codecs.DocValuesProducer; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.SegmentInfo; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.InfoStream; +import org.apache.lucene.util.Version; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.UUID; + +import static org.mockito.Mockito.mock; + +public class LuceneDocValuesProducerFactoryTests extends OpenSearchTestCase { + + private Directory directory; + private final String dataCodec = "data_codec"; + private final String dataExtension = "data_extension"; + private final String metaCodec = "meta_codec"; + private final String metaExtension = "meta_extension"; + + @Before + public void setup() { + directory = newDirectory(); + } + + public void testGetDocValuesProducerForCompositeCodec99() throws IOException { + SegmentInfo segmentInfo = new SegmentInfo( + directory, + Version.LATEST, + Version.LUCENE_9_11_0, + "test_segment", + randomInt(), + false, + false, + new Lucene99Codec(), + new HashMap<>(), + UUID.randomUUID().toString().substring(0, 16).getBytes(StandardCharsets.UTF_8), + new HashMap<>(), + null + ); + + // open an consumer first in order for the producer to find the file + SegmentWriteState state = new SegmentWriteState( + InfoStream.getDefault(), + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + null, + newIOContext(random()) + ); + DocValuesConsumer consumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + state, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + consumer.close(); + + SegmentReadState segmentReadState = new SegmentReadState( + segmentInfo.dir, + segmentInfo, + new FieldInfos(new FieldInfo[0]), + newIOContext(random()) + ); + DocValuesProducer producer = LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + Composite99Codec.COMPOSITE_INDEX_CODEC_NAME, + segmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + + assertNotNull(producer); + assertEquals("org.apache.lucene.codecs.lucene90.Lucene90DocValuesProducer", producer.getClass().getName()); + producer.close(); + } + + public void testGetDocValuesProducerForCompositeCodec_InvalidCodec() { + SegmentReadState mockSegmentReadState = mock(SegmentReadState.class); + + IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { + LuceneDocValuesProducerFactory.getDocValuesProducerForCompositeCodec( + "invalid_codec", + mockSegmentReadState, + dataCodec, + dataExtension, + metaCodec, + metaExtension + ); + }); + + assertNotNull(exception); + assertTrue(exception.getMessage().contains("Invalid composite codec")); + } + + @After + public void teardown() throws Exception { + super.tearDown(); + directory.close(); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java 
b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java new file mode 100644 index 0000000000000..54eead20ef354 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/codec/composite/SortedNumericDocValuesWriterWrapperTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec.composite; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedNumericDocValuesWriterWrapper; +import org.apache.lucene.index.VectorEncoding; +import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.util.Counter; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class SortedNumericDocValuesWriterWrapperTests extends OpenSearchTestCase { + + private SortedNumericDocValuesWriterWrapper wrapper; + private FieldInfo fieldInfo; + private Counter counter; + + @Override + public void setUp() throws Exception { + super.setUp(); + fieldInfo = new FieldInfo( + "field", + 1, + false, + false, + true, + IndexOptions.NONE, + DocValuesType.NONE, + -1, + Collections.emptyMap(), + 0, + 0, + 0, + 0, + VectorEncoding.FLOAT32, + VectorSimilarityFunction.EUCLIDEAN, + false, + false + ); + counter = Counter.newCounter(); + wrapper = new SortedNumericDocValuesWriterWrapper(fieldInfo, counter); + } + + public void testAddValue() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(1, 20); + wrapper.addValue(2, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(1, docValues.nextDoc()); + assertEquals(20, docValues.nextValue()); + assertEquals(2, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + } + + public void testGetDocValues() { + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + } + + public void testMultipleValues() throws IOException { + wrapper.addValue(0, 10); + wrapper.addValue(0, 20); + wrapper.addValue(1, 30); + + SortedNumericDocValues docValues = wrapper.getDocValues(); + assertNotNull(docValues); + + assertEquals(0, docValues.nextDoc()); + assertEquals(10, docValues.nextValue()); + assertEquals(20, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + + assertEquals(1, docValues.nextDoc()); + assertEquals(30, docValues.nextValue()); + assertThrows(IllegalStateException.class, docValues::nextValue); + } +} diff --git a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java index 6fa88215cad48..54a9cc035d7a9 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite/datacube/startree/StarTreeDocValuesFormatTests.java @@ -29,7 +29,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; 
import org.opensearch.index.MapperTestUtils; -import org.opensearch.index.codec.composite.Composite99Codec; +import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.IndicesModule; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java index f5d3e197aa287..36f75834abba8 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/AbstractValueAggregatorTests.java @@ -48,11 +48,7 @@ public void testGetInitialAggregatedValueForSegmentDocNullValue() { } public void testMergeAggregatedNullValueAndSegmentNullValue() { - if (aggregator instanceof CountValueAggregator) { - assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); - } else { - assertEquals(aggregator.getIdentityMetricValue(), aggregator.mergeAggregatedValueAndSegmentValue(null, null)); - } + assertEquals(aggregator.getIdentityMetricValue(), aggregator.mergeAggregatedValueAndSegmentValue(null, null)); } public void testMergeAggregatedNullValues() { @@ -65,13 +61,6 @@ public void testGetInitialAggregatedNullValue() { public void testGetInitialAggregatedValueForSegmentDocValue() { long randomLong = randomLong(); - if (aggregator instanceof CountValueAggregator) { - assertEquals(CountValueAggregator.DEFAULT_INITIAL_VALUE, aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong())); - } else { - assertEquals( - starTreeNumericType.getDoubleValue(randomLong), - aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong) - ); - } + assertEquals(starTreeNumericType.getDoubleValue(randomLong), aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java index 550a4fea1174a..b270c1b1bc26c 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/CountValueAggregatorTests.java @@ -31,6 +31,11 @@ public void testMergeAggregatedValues() { assertEquals(randomLong2, aggregator.mergeAggregatedValues(null, randomLong2), 0.0); } + @Override + public void testMergeAggregatedNullValueAndSegmentNullValue() { + assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); + } + public void testGetInitialAggregatedValue() { long randomLong = randomLong(); assertEquals(randomLong, aggregator.getInitialAggregatedValue(randomLong), 0.0); @@ -48,8 +53,13 @@ public void testIdentityMetricValue() { @Override public ValueAggregator getValueAggregator(StarTreeNumericType starTreeNumericType) { - aggregator = new CountValueAggregator(starTreeNumericType); + aggregator = new CountValueAggregator(); return aggregator; } + @Override + public void testGetInitialAggregatedValueForSegmentDocValue() { + long randomLong = randomLong(); + 
assertEquals(CountValueAggregator.DEFAULT_INITIAL_VALUE, (long) aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); + } } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java new file mode 100644 index 0000000000000..2765629aa5950 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/aggregators/DocCountAggregatorTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.aggregators; + +import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; + +/** + * Unit tests for {@link DocCountAggregator}. + */ +public class DocCountAggregatorTests extends AbstractValueAggregatorTests { + + private DocCountAggregator aggregator; + + public DocCountAggregatorTests(StarTreeNumericType starTreeNumericType) { + super(starTreeNumericType); + } + + public void testMergeAggregatedValueAndSegmentValue() { + long randomLong = randomLong(); + assertEquals(randomLong + 3L, (long) aggregator.mergeAggregatedValueAndSegmentValue(randomLong, 3L)); + } + + public void testMergeAggregatedValues() { + long randomLong1 = randomLong(); + long randomLong2 = randomLong(); + assertEquals(randomLong1 + randomLong2, (long) aggregator.mergeAggregatedValues(randomLong1, randomLong2)); + assertEquals(randomLong1 + 1L, (long) aggregator.mergeAggregatedValues(randomLong1, null)); + assertEquals(randomLong2 + 1L, (long) aggregator.mergeAggregatedValues(null, randomLong2)); + } + + @Override + public void testMergeAggregatedNullValueAndSegmentNullValue() { + assertThrows(AssertionError.class, () -> aggregator.mergeAggregatedValueAndSegmentValue(null, null)); + } + + @Override + public void testMergeAggregatedNullValues() { + assertEquals( + (aggregator.getIdentityMetricValue() + aggregator.getIdentityMetricValue()), + (long) aggregator.mergeAggregatedValues(null, null) + ); + } + + public void testGetInitialAggregatedValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.getInitialAggregatedValue(randomLong)); + } + + public void testToStarTreeNumericTypeValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.toAggregatedValueType(randomLong)); + } + + public void testIdentityMetricValue() { + assertEquals(1L, (long) aggregator.getIdentityMetricValue()); + } + + @Override + public ValueAggregator getValueAggregator(StarTreeNumericType starTreeNumericType) { + aggregator = new DocCountAggregator(); + return aggregator; + } + + @Override + public void testGetInitialAggregatedValueForSegmentDocValue() { + long randomLong = randomLong(); + assertEquals(randomLong, (long) aggregator.getInitialAggregatedValueForSegmentDocValue(randomLong)); + } +} diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java index 389b6cb34f085..e77f184ac0243 100644 --- 
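DocCountAggregatorTests above pins down the contract that sets the doc-count metric apart from the other value aggregators: its identity value is 1, a missing aggregated or segment value is treated as an implicit count of one, and merging two nulls through mergeAggregatedValueAndSegmentValue is asserted to be a programming error rather than silently returning an identity. A small sketch of that contract (placed in the aggregators package like the tests, since the class may not be public):

package org.opensearch.index.compositeindex.datacube.startree.aggregators;

class DocCountSketch {
    void demo() {
        DocCountAggregator aggregator = new DocCountAggregator();
        long identity = aggregator.getIdentityMetricValue();                     // 1
        long merged = aggregator.mergeAggregatedValues(10L, null);               // 10 + implicit 1 = 11
        long combined = aggregator.mergeAggregatedValueAndSegmentValue(10L, 3L); // 13
    }
}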
a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java @@ -55,6 +55,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -94,7 +95,8 @@ public void setup() throws IOException { new Metric("field4", List.of(MetricStat.SUM)), new Metric("field6", List.of(MetricStat.VALUE_COUNT)), new Metric("field9", List.of(MetricStat.MIN)), - new Metric("field10", List.of(MetricStat.MAX)) + new Metric("field10", List.of(MetricStat.MAX)), + new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)) ); DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); @@ -187,11 +189,26 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0, 10L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, 10L } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0, 10L } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, null } + ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { @@ -200,14 +217,15 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new 
Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 11L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 21L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -233,6 +251,7 @@ public void test_sortAndAggregateStarTreeDocuments() throws IOException { assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); numOfAggregatedDocuments++; } @@ -280,15 +299,15 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 11.0, null, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 18.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -303,7 +322,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -326,6 +345,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetric() throws IOExcepti assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); 
assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -334,15 +354,30 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0, null } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, null, randomDouble(), 9.0, 12.0, 10L } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, null, randomDouble(), 8.0, 13.0, null } + ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 0.0, 2L, 8.0, 20.0, 11L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 0.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -355,9 +390,10 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = starTreeDocuments[i].metrics[5] != null ? 
(Long) starTreeDocuments[i].metrics[5] : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -380,6 +416,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullMetricField() throws IOEx assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -390,18 +427,18 @@ public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionFie // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, null, 3L, 4L }, - new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0 } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { null, 4L, 2L, 1L }, - new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0 } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, 4L, 2L, 1L }, - new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0 } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 9.0, null, randomDouble(), 9.0, 12.0 }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { -1L, 4L, 2L, 1L }, new Object[] { 11.0, null, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( new StarTreeDocument(new Long[] { 2L, null, 3L, 4L }, new Object[] { 21.0, 0.0, 2L }), @@ -443,6 +480,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullAndMinusOneInDimensionFie assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -452,14 +490,29 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; // Setting second metric iterator as empty sorted numeric , indicating a metric field is null - starTreeDocuments[0] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null 
}); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { null, null, null, null }, + new Object[] { null, null, null, null, null, null } + ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 0L, null, null }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 0.0, 0.0, 0L, null, null, 5L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -482,7 +535,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -505,6 +558,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndNullMetrics( assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -521,21 +575,21 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { null, null, randomDouble(), null, maxValue } + new Object[] { null, null, randomDouble(), null, maxValue } ); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null }); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { null, null, null, minValue, null } + new Object[] { null, null, null, minValue, null } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Double[] { null, null, null, null, null }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { null, null, null, null, null }); starTreeDocuments[4] = new StarTreeDocument( new Long[] { null, null, null, 
null }, - new Double[] { sumValue, null, randomDouble(), null, null } + new Object[] { sumValue, null, randomDouble(), null, null } ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { sumValue, 0.0, 2L, minValue, maxValue }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { sumValue, 0.0, 2L, minValue, maxValue, 5L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -558,7 +612,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri : null; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, null } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -581,6 +635,7 @@ public void test_sortAndAggregateStarTreeDocuments_nullDimensionsAndFewNullMetri assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } builder.build(segmentStarTreeDocumentIterator); validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -593,27 +648,27 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx // Setting second metric iterator as empty sorted numeric , indicating a metric field is null starTreeDocuments[0] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 12.0, null, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, null, randomDouble(), 8.0, 20.0, 10L } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 10.0, null, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, null, randomDouble(), 12.0, 10.0, 10L } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 14.0, null, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, null, randomDouble(), 6.0, 24.0, 10L } ); starTreeDocuments[3] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 9.0, null, randomDouble(), 9.0, 12.0 } + new Object[] { 9.0, null, randomDouble(), 9.0, 12.0, 10L } ); starTreeDocuments[4] = new StarTreeDocument( new Long[] { null, null, null, null }, - new Double[] { 11.0, null, randomDouble(), 8.0, 13.0 } + new Object[] { 11.0, null, randomDouble(), 8.0, 13.0, 10L } ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { null, null, null, null }, new Object[] { 56.0, 0.0, 5L, 6.0, 24.0, 50L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -626,9 +681,10 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx Long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); Long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); Long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new 
StarTreeDocument( starTreeDocuments[i].dimensions, - new Object[] { metric1, metric2, metric3, metric4, metric5 } + new Object[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -651,6 +707,7 @@ public void test_sortAndAggregateStarTreeDocuments_emptyDimensions() throws IOEx assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); } } @@ -661,28 +718,28 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() starTreeDocuments[0] = new StarTreeDocument( new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, - new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 } + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, - new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 } + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, - new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 } ); starTreeDocuments[3] = new StarTreeDocument( new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, - new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 } + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 } ); starTreeDocuments[4] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, - new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 } + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 } ); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { Long.MIN_VALUE, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, Long.MAX_VALUE }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -695,7 +752,7 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, null } ); } @@ -720,6 +777,7 @@ public void test_sortAndAggregateStarTreeDocument_longMaxAndLongMinDimensions() assertEquals(expectedStarTreeDocument.metrics[2], resultStarTreeDocument.metrics[2]); assertEquals(expectedStarTreeDocument.metrics[3], resultStarTreeDocument.metrics[3]); assertEquals(expectedStarTreeDocument.metrics[4], resultStarTreeDocument.metrics[4]); + assertEquals(expectedStarTreeDocument.metrics[5], resultStarTreeDocument.metrics[5]); numOfAggregatedDocuments++; } @@ -735,19 +793,28 @@ public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, 4L, 3L, 4L }, - new Double[] { Double.MAX_VALUE, 10.0, 
randomDouble(), 8.0, 20.0 } + new Object[] { Double.MAX_VALUE, 10.0, randomDouble(), 8.0, 20.0, 100L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, 100L } ); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 10.0 }); starTreeDocuments[2] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Double[] { 14.0, Double.MIN_VALUE, randomDouble(), 6.0, 24.0 } + new Object[] { 14.0, Double.MIN_VALUE, randomDouble(), 6.0, 24.0, 100L } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, 100L } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, 100L } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); List inorderStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { Double.MAX_VALUE + 9, 14.0, 2L, 8.0, 20.0, 200L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, Double.MIN_VALUE + 22, 3L, 6.0, 24.0, 300L }) ); Iterator expectedStarTreeDocumentIterator = inorderStarTreeDocuments.iterator(); @@ -758,9 +825,10 @@ public void test_sortAndAggregateStarTreeDocument_DoubleMaxAndDoubleMinMetrics() long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } @@ -892,7 +960,7 @@ public void test_build_halfFloatMetrics() throws IOException { ); segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, null } ); } @@ -943,20 +1011,23 @@ public void test_build_floatMetrics() throws IOException { starTreeDocuments[0] = new StarTreeDocument( new Long[] { 2L, 4L, 3L, 4L }, - new Float[] { 12.0F, 10.0F, randomFloat(), 8.0F, 20.0F } + new Object[] { 12.0F, 10.0F, randomFloat(), 8.0F, 20.0F, null } ); starTreeDocuments[1] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Float[] { 10.0F, 6.0F, randomFloat(), 12.0F, 10.0F } + new Object[] { 10.0F, 6.0F, randomFloat(), 12.0F, 10.0F, null } ); starTreeDocuments[2] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Float[] { 14.0F, 12.0F, randomFloat(), 6.0F, 24.0F } + new Object[] { 14.0F, 12.0F, randomFloat(), 6.0F, 24.0F, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0F, 4.0F, randomFloat(), 9.0F, 
12.0F, null } ); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Float[] { 9.0F, 4.0F, randomFloat(), 9.0F, 12.0F }); starTreeDocuments[4] = new StarTreeDocument( new Long[] { 3L, 4L, 2L, 1L }, - new Float[] { 11.0F, 16.0F, randomFloat(), 8.0F, 13.0F } + new Object[] { 11.0F, 16.0F, randomFloat(), 8.0F, 13.0F, null } ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; @@ -966,9 +1037,10 @@ public void test_build_floatMetrics() throws IOException { long metric3 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.floatToSortableInt((Float) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } @@ -1031,7 +1103,7 @@ public void test_build_longMetrics() throws IOException { long metric5 = (Long) starTreeDocuments[i].metrics[4]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, null } ); } @@ -1053,13 +1125,13 @@ public void test_build_longMetrics() throws IOException { private static Iterator getExpectedStarTreeDocumentIterator() { List expectedStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }), - new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }), - new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0 }), - new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0 }), - new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L, 6.0, 24.0 }) + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, 2L, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { null, 4L, null, 1L }, new Object[] { 35.0, 34.0, 3L, 6.0, 24.0, 3L }), + new StarTreeDocument(new Long[] { null, 4L, null, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, 20.0, 2L }), + new StarTreeDocument(new Long[] { null, 4L, null, null }, new Object[] { 56.0, 48.0, 5L, 6.0, 24.0, 5L }) ); return expectedStarTreeDocuments.iterator(); } @@ -1069,11 +1141,26 @@ public void test_build() throws IOException { int noOfStarTreeDocuments = 5; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 12.0, 10.0, randomDouble(), 8.0, 20.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 10.0, 6.0, randomDouble(), 12.0, 
10.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 14.0, 12.0, randomDouble(), 6.0, 24.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Double[] { 9.0, 4.0, randomDouble(), 9.0, 12.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Double[] { 11.0, 16.0, randomDouble(), 8.0, 13.0 }); + starTreeDocuments[0] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 12.0, 10.0, randomDouble(), 8.0, 20.0, 1L } + ); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 10.0, 6.0, randomDouble(), 12.0, 10.0, null } + ); + starTreeDocuments[2] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 14.0, 12.0, randomDouble(), 6.0, 24.0, null } + ); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Object[] { 9.0, 4.0, randomDouble(), 9.0, 12.0, null } + ); + starTreeDocuments[4] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { 11.0, 16.0, randomDouble(), 8.0, 13.0, null } + ); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { @@ -1082,9 +1169,10 @@ public void test_build() throws IOException { long metric3 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[2]); long metric4 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[3]); long metric5 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[4]); + Long metric6 = (Long) starTreeDocuments[i].metrics[5]; segmentStarTreeDocuments[i] = new StarTreeDocument( starTreeDocuments[i].dimensions, - new Long[] { metric1, metric2, metric3, metric4, metric5 } + new Long[] { metric1, metric2, metric3, metric4, metric5, metric6 } ); } @@ -1130,7 +1218,7 @@ public void test_build_starTreeDataset() throws IOException { fields = List.of("fieldC", "fieldB", "fieldL", "fieldI"); dimensionsOrder = List.of(new NumericDimension("fieldC"), new NumericDimension("fieldB"), new NumericDimension("fieldL")); - metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM))); + metrics = List.of(new Metric("fieldI", List.of(MetricStat.SUM)), new Metric("_doc_count", List.of(MetricStat.DOC_COUNT))); DocValuesProducer docValuesProducer = mock(DocValuesProducer.class); @@ -1199,18 +1287,18 @@ public void test_build_starTreeDataset() throws IOException { int noOfStarTreeDocuments = 7; StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; - starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Double[] { 400.0 }); - starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Double[] { 200.0 }); - starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Double[] { 300.0 }); - starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Double[] { 100.0 }); - starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Double[] { 600.0 }); - starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Double[] { 200.0 }); - starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Double[] { 400.0 }); + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0, null }); + starTreeDocuments[1] = new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0, null }); + 
starTreeDocuments[2] = new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0, null }); + starTreeDocuments[3] = new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0, null }); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0, null }); + starTreeDocuments[5] = new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0, null }); + starTreeDocuments[6] = new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0, null }); StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; for (int i = 0; i < noOfStarTreeDocuments; i++) { long metric1 = NumericUtils.doubleToSortableLong((Double) starTreeDocuments[i].metrics[0]); - segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1 }); + segmentStarTreeDocuments[i] = new StarTreeDocument(starTreeDocuments[i].dimensions, new Long[] { metric1, null }); } SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); @@ -1250,6 +1338,7 @@ public void test_build_starTreeDataset() throws IOException { assertEquals(expectedStarTreeDocument.dimensions[1], resultStarTreeDocument.dimensions[1]); assertEquals(expectedStarTreeDocument.dimensions[2], resultStarTreeDocument.dimensions[2]); assertEquals(expectedStarTreeDocument.metrics[0], resultStarTreeDocument.metrics[0]); + assertEquals(expectedStarTreeDocument.metrics[1], resultStarTreeDocument.metrics[1]); } validateStarTree(builder.getRootNode(), 3, 1, builder.getStarTreeDocuments()); } @@ -1278,33 +1367,33 @@ private static Map> getExpectedDimToValueMap() { private Iterator expectedStarTreeDocuments() { List expectedStarTreeDocuments = List.of( - new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0 }), - new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0 }), - new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0 }), - new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0 }), - new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0 }), - new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0 }), - new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0 }), - new StarTreeDocument(new Long[] { null, 12L, null }, new Object[] { 800.0 }), - new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0 }), - new StarTreeDocument(new Long[] { 2L, 13L, null 
}, new Object[] { 400.0 }), - new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0 }), - new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0 }), - new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0 }), - new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0 }) + new StarTreeDocument(new Long[] { 1L, 11L, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { 1L, 12L, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { 2L, 13L, 21L }, new Object[] { 100.0, 1L }), + new StarTreeDocument(new Long[] { 2L, 13L, 23L }, new Object[] { 300.0, 1L }), + new StarTreeDocument(new Long[] { 3L, 11L, 21L }, new Object[] { 600.0, 1L }), + new StarTreeDocument(new Long[] { 3L, 12L, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { 3L, 12L, 23L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, 11L, 21L }, new Object[] { 1000.0, 2L }), + new StarTreeDocument(new Long[] { null, 12L, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { null, 12L, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, 12L, 23L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, 13L, 21L }, new Object[] { 100.0, 1L }), + new StarTreeDocument(new Long[] { null, 13L, 23L }, new Object[] { 300.0, 1L }), + new StarTreeDocument(new Long[] { null, null, 21L }, new Object[] { 1500.0, 4L }), + new StarTreeDocument(new Long[] { null, null, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { null, null, 23L }, new Object[] { 500.0, 2L }), + new StarTreeDocument(new Long[] { null, null, null }, new Object[] { 2200.0, 7L }), + new StarTreeDocument(new Long[] { null, 12L, null }, new Object[] { 800.0, 3L }), + new StarTreeDocument(new Long[] { null, 13L, null }, new Object[] { 400.0, 2L }), + new StarTreeDocument(new Long[] { 1L, null, 21L }, new Object[] { 400.0, 1L }), + new StarTreeDocument(new Long[] { 1L, null, 22L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { 1L, null, null }, new Object[] { 600.0, 2L }), + new StarTreeDocument(new Long[] { 2L, 13L, null }, new Object[] { 400.0, 2L }), + new StarTreeDocument(new Long[] { 3L, null, 21L }, new Object[] { 1000.0, 2L }), + new StarTreeDocument(new Long[] { 3L, null, 23L }, new Object[] { 200.0, 1L }), + new StarTreeDocument(new Long[] { 3L, null, null }, new Object[] { 1200.0, 3L }), + new StarTreeDocument(new Long[] { 3L, 12L, null }, new Object[] { 600.0, 2L }) ); return expectedStarTreeDocuments.iterator(); @@ -2209,8 +2298,14 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { metricsList.add(getLongFromDouble(i * 10.0)); metricsWithField.add(i); } + List docCountMetricsList = new ArrayList<>(100); + List docCountMetricsWithField = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + docCountMetricsList.add(i * 10L); + docCountMetricsWithField.add(i); + } - StarTreeField sf = getStarTreeField(1); + StarTreeField sf = getStarTreeFieldWithDocCount(1, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2222,6 +2317,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); @@ -2236,6 +2333,8 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { docsWithField4, metricsList, metricsWithField, + 
docCountMetricsList, + docCountMetricsWithField, sf ); builder = getStarTreeBuilder(sf, writeState, mapperService); @@ -2246,23 +2345,26 @@ public void testMergeFlowWithDuplicateDimensionValues() throws IOException { double sum = 0; /** 401 docs get generated - [0, 0, 0, 0] | [200.0] - [1, 1, 1, 1] | [700.0] - [2, 2, 2, 2] | [1200.0] - [3, 3, 3, 3] | [1700.0] - [4, 4, 4, 4] | [2200.0] + [0, 0, 0, 0] | [200.0, 10] + [1, 1, 1, 1] | [700.0, 10] + [2, 2, 2, 2] | [1200.0, 10] + [3, 3, 3, 3] | [1700.0, 10] + [4, 4, 4, 4] | [2200.0, 10] ..... - [null, null, null, 99] | [49700.0] - [null, null, null, null] | [2495000.0] + [null, null, null, 99] | [49700.0, 10] + [null, null, null, null] | [2495000.0, 1000] */ for (StarTreeDocument starTreeDocument : starTreeDocuments) { if (starTreeDocument.dimensions[3] == null) { assertEquals(sum, starTreeDocument.metrics[0]); + assertEquals(2495000L, (long) starTreeDocument.metrics[1]); } else { if (starTreeDocument.dimensions[0] != null) { sum += (double) starTreeDocument.metrics[0]; } assertEquals(starTreeDocument.dimensions[3] * 500 + 200.0, starTreeDocument.metrics[0]); + assertEquals(starTreeDocument.dimensions[3] * 500 + 200L, (long) starTreeDocument.metrics[1]); + } count++; } @@ -2319,7 +2421,14 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { metricsWithField.add(i); } - StarTreeField sf = getStarTreeField(3); + List metricsList1 = new ArrayList<>(100); + List metricsWithField1 = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + metricsList1.add(1L); + metricsWithField1.add(i); + } + + StarTreeField sf = getStarTreeFieldWithDocCount(3, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2331,6 +2440,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); @@ -2345,6 +2456,8 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); @@ -2353,17 +2466,58 @@ public void testMergeFlowWithMaxLeafDocs() throws IOException { List starTreeDocuments = builder.getStarTreeDocuments(); /** 635 docs get generated - [0, 0, 0, 0] | [200.0] - [1, 1, 1, 1] | [700.0] - [2, 2, 2, 2] | [1200.0] - [3, 3, 3, 3] | [1700.0] - [4, 4, 4, 4] | [2200.0] + [0, 0, 0, 0] | [200.0, 10] + [0, 0, 1, 1] | [700.0, 10] + [0, 0, 2, 2] | [1200.0, 10] + [0, 0, 3, 3] | [1700.0, 10] + [1, 0, 4, 4] | [2200.0, 10] + [1, 0, 5, 5] | [2700.0, 10] + [1, 0, 6, 6] | [3200.0, 10] + [1, 0, 7, 7] | [3700.0, 10] + [2, 0, 8, 8] | [4200.0, 10] + [2, 0, 9, 9] | [4700.0, 10] + [2, 1, 10, 10] | [5200.0, 10] + [2, 1, 11, 11] | [5700.0, 10] ..... - [null, null, null, 99] | [49700.0] + [18, 7, null, null] | [147800.0, 40] + ... + [7, 2, null, null] | [28900.0, 20] + ... + [null, null, null, 99] | [49700.0, 10] ..... 
- [null, null, null, null] | [2495000.0] + [null, null, null, null] | [2495000.0, 1000] */ assertEquals(635, starTreeDocuments.size()); + for (StarTreeDocument starTreeDocument : starTreeDocuments) { + if (starTreeDocument.dimensions[0] != null + && starTreeDocument.dimensions[1] != null + && starTreeDocument.dimensions[2] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[1] != null + && starTreeDocument.dimensions[2] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null + && starTreeDocument.dimensions[2] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null + && starTreeDocument.dimensions[1] != null + && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null && starTreeDocument.dimensions[1] != null) { + assertTrue((long) starTreeDocument.metrics[1] == 20L || (long) starTreeDocument.metrics[1] == 40L); + } else if (starTreeDocument.dimensions[1] != null && starTreeDocument.dimensions[3] != null) { + assertEquals(10L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[1] != null) { + assertEquals(100L, starTreeDocument.metrics[1]); + } else if (starTreeDocument.dimensions[0] != null) { + assertEquals(40L, starTreeDocument.metrics[1]); + } + } validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } @@ -2378,6 +2532,8 @@ private StarTreeValues getStarTreeValues( List docsWithField4, List metricsList, List metricsWithField, + List metricsList1, + List metricsWithField1, StarTreeField sf ) { SortedNumericDocValues d1sndv = getSortedNumericMock(dimList1, docsWithField1); @@ -2385,8 +2541,11 @@ private StarTreeValues getStarTreeValues( SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3); SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4); SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList1, metricsWithField1); Map dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv); - Map metricDocIdSetIterators = Map.of("field2", m1sndv); + Map metricDocIdSetIterators = new LinkedHashMap<>(); + metricDocIdSetIterators.put("field2", m1sndv); + metricDocIdSetIterators.put("_doc_count", m2sndv); StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(500)); return starTreeValues; } @@ -2438,7 +2597,14 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE metricsWithField.add(i); } - StarTreeField sf = getStarTreeField(3); + List docCountMetricsList = new ArrayList<>(100); + List docCountMetricsWithField = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + metricsList.add(getLongFromDouble(i * 2)); + metricsWithField.add(i); + } + + StarTreeField sf = getStarTreeFieldWithDocCount(3, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2450,6 +2616,8 @@ public void 
testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); @@ -2464,6 +2632,8 @@ public void testMergeFlowWithDuplicateDimensionValueWithMaxLeafDocs() throws IOE docsWithField4, metricsList, metricsWithField, + docCountMetricsList, + docCountMetricsWithField, sf ); builder = getStarTreeBuilder(sf, writeState, mapperService); @@ -2536,8 +2706,13 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc metricsList.add(getLongFromDouble(10.0)); metricsWithField.add(i); } - - StarTreeField sf = getStarTreeField(10); + List metricsList1 = new ArrayList<>(100); + List metricsWithField1 = new ArrayList<>(100); + for (int i = 0; i < 500; i++) { + metricsList.add(1L); + metricsWithField.add(i); + } + StarTreeField sf = getStarTreeFieldWithDocCount(10, true); StarTreeValues starTreeValues = getStarTreeValues( dimList1, docsWithField1, @@ -2549,6 +2724,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); @@ -2563,6 +2740,8 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc docsWithField4, metricsList, metricsWithField, + metricsList1, + metricsWithField1, sf ); builder = getStarTreeBuilder(sf, writeState, mapperService); @@ -2584,14 +2763,18 @@ public void testMergeFlowWithMaxLeafDocsAndStarTreeNodesAssertion() throws IOExc validateStarTree(builder.getRootNode(), 4, sf.getStarTreeConfig().maxLeafDocs(), builder.getStarTreeDocuments()); } - private static StarTreeField getStarTreeField(int maxLeafDocs) { + private static StarTreeField getStarTreeFieldWithDocCount(int maxLeafDocs, boolean includeDocCountMetric) { Dimension d1 = new NumericDimension("field1"); Dimension d2 = new NumericDimension("field3"); Dimension d3 = new NumericDimension("field5"); Dimension d4 = new NumericDimension("field8"); List dims = List.of(d1, d2, d3, d4); Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); - List metrics = List.of(m1); + Metric m2 = null; + if (includeDocCountMetric) { + m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)); + } + List metrics = m2 == null ? 
List.of(m1) : List.of(m1, m2); StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( maxLeafDocs, new HashSet<>(), @@ -2684,8 +2867,9 @@ public void testMergeFlow() throws IOException { Dimension d4 = new NumericDimension("field8"); // Dimension d5 = new NumericDimension("field5"); Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + Metric m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)); List dims = List.of(d1, d2, d3, d4); - List metrics = List.of(m1); + List metrics = List.of(m1, m2); StarTreeFieldConfiguration c = new StarTreeFieldConfiguration( 1, new HashSet<>(), @@ -2697,8 +2881,9 @@ public void testMergeFlow() throws IOException { SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3); SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4); SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues m2sndv = DocValues.emptySortedNumeric(); Map dimDocIdSetIterators = Map.of("field1", d1sndv, "field3", d2sndv, "field5", d3sndv, "field8", d4sndv); - Map metricDocIdSetIterators = Map.of("field2", m1sndv); + Map metricDocIdSetIterators = Map.of("field2", m1sndv, "_doc_count", m2sndv); StarTreeValues starTreeValues = new StarTreeValues(sf, null, dimDocIdSetIterators, metricDocIdSetIterators, getAttributes(1000)); SortedNumericDocValues f2d1sndv = getSortedNumericMock(dimList1, docsWithField1); @@ -2706,6 +2891,7 @@ public void testMergeFlow() throws IOException { SortedNumericDocValues f2d3sndv = getSortedNumericMock(dimList3, docsWithField3); SortedNumericDocValues f2d4sndv = getSortedNumericMock(dimList4, docsWithField4); SortedNumericDocValues f2m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues f2m2sndv = DocValues.emptySortedNumeric(); Map f2dimDocIdSetIterators = Map.of( "field1", f2d1sndv, @@ -2716,7 +2902,7 @@ public void testMergeFlow() throws IOException { "field8", f2d4sndv ); - Map f2metricDocIdSetIterators = Map.of("field2", f2m1sndv); + Map f2metricDocIdSetIterators = Map.of("field2", f2m1sndv, "_doc_count", f2m2sndv); StarTreeValues starTreeValues2 = new StarTreeValues( sf, null, @@ -2728,17 +2914,18 @@ public void testMergeFlow() throws IOException { builder = getStarTreeBuilder(sf, writeState, mapperService); Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); /** - [0, 0, 0, 0] | [0.0] - [1, 1, 1, 1] | [20.0] - [2, 2, 2, 2] | [40.0] - [3, 3, 3, 3] | [60.0] - [4, 4, 4, 4] | [80.0] - [5, 5, 5, 5] | [100.0] + [0, 0, 0, 0] | [0.0, 2] + [1, 1, 1, 1] | [20.0, 2] + [2, 2, 2, 2] | [40.0, 2] + [3, 3, 3, 3] | [60.0, 2] + [4, 4, 4, 4] | [80.0, 2] + [5, 5, 5, 5] | [100.0, 2] ... 
[999, 999, 999, 999] | [19980.0] */ for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]); + assertEquals(2L, starTreeDocument.metrics[1]); } builder.build(starTreeDocumentIterator); @@ -2934,13 +3121,6 @@ private static StarTreeField getStarTreeField(MetricStat count) { return new StarTreeField("sf", dims, metrics, c); } - private Long getLongFromDouble(Double num) { - if (num == null) { - return null; - } - return NumericUtils.doubleToSortableLong(num); - } - SortedNumericDocValues getSortedNumericMock(List dimList, List docsWithField) { return new SortedNumericDocValues() { int index = -1; diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java index 637072c8886c1..5b5ca378ee7ff 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -25,7 +25,6 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.IsEqual.equalTo; -import static org.hamcrest.core.StringContains.containsString; public class FlatObjectFieldMapperTests extends MapperTestCase { private static final String FIELD_TYPE = "flat_object"; @@ -133,9 +132,10 @@ public void testDefaults() throws Exception { public void testNullValue() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); - MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.nullField("field")))); - assertThat(e.getMessage(), containsString("object mapping for [_doc] tried to parse field [field] as object")); - + ParsedDocument parsedDocument = mapper.parse(source(b -> b.nullField("field"))); + assertEquals(1, parsedDocument.docs().size()); + IndexableField[] fields = parsedDocument.rootDoc().getFields("field"); + assertEquals(0, fields.length); } @Override diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index 96487db6dd512..b27ef49303205 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -62,6 +62,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; @@ -73,18 +74,22 @@ import org.opensearch.index.mapper.NumberFieldMapper.NumberType; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.MultiValueMode; +import org.opensearch.search.query.BitmapDocValuesQuery; import org.junit.Before; import java.io.ByteArrayInputStream; import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.function.Supplier; +import org.roaringbitmap.RoaringBitmap; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static 
org.hamcrest.Matchers.equalTo; @@ -947,4 +952,33 @@ public void testFetchSourceValue() throws IOException { assertEquals(Collections.singletonList(2.71f), fetchSourceValue(nullValueMapper, "")); assertEquals(Collections.singletonList(2.71f), fetchSourceValue(nullValueMapper, null)); } + + public void testBitmapQuery() throws IOException { + RoaringBitmap r = new RoaringBitmap(); + byte[] array = new byte[r.serializedSizeInBytes()]; + r.serialize(ByteBuffer.wrap(array)); + BytesArray bitmap = new BytesArray(array); + + NumberFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberType.INTEGER); + assertEquals( + new IndexOrDocValuesQuery(NumberType.bitmapIndexQuery("field", r), new BitmapDocValuesQuery("field", r)), + ft.bitmapQuery(bitmap) + ); + + ft = new NumberFieldType("field", NumberType.INTEGER, false, false, true, true, null, Collections.emptyMap()); + assertEquals(new BitmapDocValuesQuery("field", r), ft.bitmapQuery(bitmap)); + + Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig()); + DirectoryReader reader = DirectoryReader.open(w); + assertEquals(new MatchNoDocsQuery(), ft.bitmapQuery(bitmap).rewrite(newSearcher(reader))); + reader.close(); + w.close(); + dir.close(); + + NumberType type = randomValueOtherThan(NumberType.INTEGER, () -> randomFrom(NumberType.values())); + ft = new NumberFieldMapper.NumberFieldType("field", type); + NumberFieldType finalFt = ft; + assertThrows(IllegalArgumentException.class, () -> finalFt.bitmapQuery(bitmap)); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 3fa97825cdfc6..449b251dddca1 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -52,10 +52,11 @@ public void teardown() { } public void testValidStarTree() throws IOException { - MapperService mapperService = createMapperService(getExpandedMapping("status", "size")); + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); for (CompositeMappedFieldType type : compositeFieldTypes) { StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals(2, starTreeFieldType.getDimensions().size()); assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); @@ -65,8 +66,11 @@ public void testValidStarTree() throws IOException { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(2, starTreeFieldType.getMetrics().size()); assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList(MetricStat.SUM, MetricStat.AVG); + + // Assert COUNT and SUM gets added when AVG is defined + List expectedMetrics = Arrays.asList(MetricStat.AVG, MetricStat.VALUE_COUNT, MetricStat.SUM); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, 
starTreeFieldType.getStarTreeConfig().getBuildMode()); @@ -77,6 +81,67 @@ public void testValidStarTree() throws IOException { } } + public void testMetricsWithJustSum() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithJustSum("status", "size")); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.DAY_OF_MONTH, + Rounding.DateTimeUnit.MONTH_OF_YEAR + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert AVG gets added when both of its base metrics is already present + List expectedMetrics = List.of(MetricStat.SUM); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals( + new HashSet<>(Arrays.asList("@timestamp", "status")), + starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() + ); + } + } + + public void testMetricsWithCountAndSum() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithSumAndCount("status", "size")); + Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + Rounding.DateTimeUnit.DAY_OF_MONTH, + Rounding.DateTimeUnit.MONTH_OF_YEAR + ); + assertEquals(expectedTimeUnits, dateDim.getIntervals()); + assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); + + // Assert AVG gets added when both of its base metrics is already present + List expectedMetrics = List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + Metric metric = starTreeFieldType.getMetrics().get(1); + assertEquals("_doc_count", metric.getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), metric.getMetrics()); + + assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals( + new HashSet<>(Arrays.asList("@timestamp", "status")), + starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() + ); + } + } + public void testValidStarTreeDefaults() throws IOException { MapperService mapperService = 
createMapperService(getMinMapping()); Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); @@ -91,15 +156,17 @@ public void testValidStarTreeDefaults() throws IOException { ); assertEquals(expectedTimeUnits, dateDim.getIntervals()); assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals(3, starTreeFieldType.getMetrics().size()); assertEquals("status", starTreeFieldType.getMetrics().get(0).getField()); - List expectedMetrics = Arrays.asList( - MetricStat.AVG, - MetricStat.VALUE_COUNT, - MetricStat.SUM, - MetricStat.MAX, - MetricStat.MIN - ); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + + assertEquals("metric_field", starTreeFieldType.getMetrics().get(1).getField()); + expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(1).getMetrics()); + Metric metric = starTreeFieldType.getMetrics().get(2); + assertEquals("_doc_count", metric.getField()); + assertEquals(List.of(MetricStat.DOC_COUNT), metric.getMetrics()); assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); @@ -109,7 +176,7 @@ public void testValidStarTreeDefaults() throws IOException { public void testInvalidDim() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getExpandedMapping("invalid", "size")) + () -> createMapperService(getExpandedMappingWithJustAvg("invalid", "size")) ); assertEquals("Failed to parse mapping [_doc]: unknown dimension field [invalid]", ex.getMessage()); } @@ -117,7 +184,7 @@ public void testInvalidDim() { public void testInvalidMetric() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getExpandedMapping("status", "invalid")) + () -> createMapperService(getExpandedMappingWithJustAvg("status", "invalid")) ); assertEquals("Failed to parse mapping [_doc]: unknown metric field [invalid]", ex.getMessage()); } @@ -136,7 +203,7 @@ public void testNoMetrics() { public void testInvalidParam() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> createMapperService(getInvalidMapping(false, false, false, false, true)) + () -> createMapperService(getInvalidMapping(false, false, false, false, true, false)) ); assertEquals( "Failed to parse mapping [_doc]: Star tree mapping definition has unsupported parameters: [invalid : {invalid=invalid}]", @@ -182,6 +249,14 @@ public void testInvalidMetricType() { ); } + public void testInvalidMetricTypeWithDocCount() { + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> createMapperService(getInvalidMapping(false, false, false, false, false, true)) + ); + assertEquals("Failed to parse mapping [_doc]: Invalid metric stat: _doc_count", ex.getMessage()); + } + public void testInvalidDimType() { MapperParsingException ex = expectThrows( MapperParsingException.class, @@ -232,6 +307,9 @@ public void testMetric() { assertEquals(MetricStat.MIN, MetricStat.fromTypeName("min")); assertEquals(MetricStat.SUM, MetricStat.fromTypeName("sum")); assertEquals(MetricStat.AVG, MetricStat.fromTypeName("avg")); + 
+ assertEquals(List.of(MetricStat.VALUE_COUNT, MetricStat.SUM), MetricStat.AVG.getBaseMetrics()); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> MetricStat.fromTypeName("invalid")); assertEquals("Invalid metric stat: invalid", ex.getMessage()); } @@ -310,7 +388,7 @@ public void testStarTreeField() { } public void testValidations() throws IOException { - MapperService mapperService = createMapperService(getExpandedMapping("status", "size")); + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Settings settings = Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), true).build(); CompositeIndexSettings enabledCompositeIndexSettings = new CompositeIndexSettings( settings, @@ -370,7 +448,7 @@ public void testValidations() throws IOException { ); } - private XContentBuilder getExpandedMapping(String dim, String metric) throws IOException { + private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) throws IOException { return topMapping(b -> { b.startObject("composite"); b.startObject("startree"); @@ -399,7 +477,6 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE b.startObject(); b.field("name", metric); b.startArray("stats"); - b.value("sum"); b.value("avg"); b.endArray(); b.endObject(); @@ -421,6 +498,107 @@ private XContentBuilder getExpandedMapping(String dim, String metric) throws IOE }); } + private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + b.startArray("skip_star_node_creation_for_dimensions"); + { + b.value("@timestamp"); + b.value("status"); + } + b.endArray(); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("day"); + b.value("month"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", dim); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", metric); + b.startArray("stats"); + b.value("sum"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } + + private XContentBuilder getExpandedMappingWithSumAndCount(String dim, String metric) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", 100); + b.startArray("skip_star_node_creation_for_dimensions"); + { + b.value("@timestamp"); + b.value("status"); + } + b.endArray(); + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.startArray("calendar_intervals"); + b.value("day"); + b.value("month"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", dim); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", metric); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.endArray(); + b.endObject(); + b.endArray(); + 
b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } + private XContentBuilder getMinMapping() throws IOException { return getMinMapping(false, false, false, false); } @@ -546,7 +724,8 @@ private XContentBuilder getInvalidMapping( boolean invalidSkipDims, boolean invalidDimType, boolean invalidMetricType, - boolean invalidParam + boolean invalidParam, + boolean invalidDocCountMetricType ) throws IOException { return topMapping(b -> { b.startObject("composite"); @@ -583,6 +762,12 @@ private XContentBuilder getInvalidMapping( b.endObject(); b.startObject(); b.field("name", "metric_field"); + if (invalidDocCountMetricType) { + b.startArray("stats"); + b.value("_doc_count"); + b.value("avg"); + b.endArray(); + } b.endObject(); b.endArray(); b.endObject(); @@ -681,7 +866,7 @@ private XContentBuilder getInvalidMappingWithDv( private XContentBuilder getInvalidMapping(boolean singleDim, boolean invalidSkipDims, boolean invalidDimType, boolean invalidMetricType) throws IOException { - return getInvalidMapping(singleDim, invalidSkipDims, invalidDimType, invalidMetricType, false); + return getInvalidMapping(singleDim, invalidSkipDims, invalidDimType, invalidMetricType, false, false); } protected boolean supportsOrIgnoresBoost() { diff --git a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java index 97f372dc04a1b..cfe9fed3bc97c 100644 --- a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java @@ -46,6 +46,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.get.GetRequest; import org.opensearch.action.get.GetResponse; +import org.opensearch.common.document.DocumentField; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.ParsingException; @@ -59,15 +60,20 @@ import org.junit.Before; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; +import org.roaringbitmap.RoaringBitmap; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.instanceOf; @@ -120,10 +126,9 @@ protected TermsQueryBuilder doCreateTestQueryBuilder() { } private TermsLookup randomTermsLookup() { - // Randomly choose between a typeless terms lookup and one with an explicit type to make sure we are TermsLookup lookup = new TermsLookup(randomAlphaOfLength(10), randomAlphaOfLength(10), termsPath); - // testing both cases. lookup.routing(randomBoolean() ? 
randomAlphaOfLength(10) : null); + lookup.store(randomBoolean()); return lookup; } @@ -245,7 +250,17 @@ public GetResponse executeGet(GetRequest getRequest) { } catch (IOException ex) { throw new OpenSearchException("boom", ex); } - return new GetResponse(new GetResult(getRequest.index(), getRequest.id(), 0, 1, 0, true, new BytesArray(json), null, null)); + Map documentField = new HashMap<>(); + List nonNullTerms = new ArrayList<>(); + for (Object obj : randomTerms) { + if (obj != null) { + nonNullTerms.add(obj); + } + } + documentField.put(termsPath, new DocumentField(termsPath, nonNullTerms)); + return new GetResponse( + new GetResult(getRequest.index(), getRequest.id(), 0, 1, 0, true, new BytesArray(json), documentField, null) + ); } public void testNumeric() throws IOException { @@ -388,4 +403,50 @@ protected QueryBuilder parseQuery(XContentParser parser) throws IOException { } } + public void testFromJsonWithValueType() throws IOException { + String json = "{\n" + + " \"terms\": {\n" + + " \"student_id\": [\"OjAAAAEAAAAAAAEAEAAAAG8A3gA=\"],\n" + + " \"boost\" : 1.0,\n" + + " \"value_type\": \"bitmap\"\n" + + " }\n" + + "}"; + + TermsQueryBuilder parsed = (TermsQueryBuilder) parseQuery(json); + checkGeneratedJson(json, parsed); + assertEquals(json, 1, parsed.values().size()); + } + + public void testFromJsonWithValueTypeFail() { + String json = "{\n" + + " \"terms\": {\n" + + " \"student_id\": [\"OjAAAAEAAAAAAAEAEAAAAG8A3gA=\", \"2\"],\n" + + " \"boost\" : 1.0,\n" + + " \"value_type\": \"bitmap\"\n" + + " }\n" + + "}"; + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(json)); + assertEquals( + "Invalid value for bitmap type: Expected a single-element array with a base64 encoded serialized bitmap.", + e.getMessage() + ); + } + + public void testTermsLookupBitmap() throws IOException { + RoaringBitmap bitmap = new RoaringBitmap(); + bitmap.add(111); + bitmap.add(333); + byte[] array = new byte[bitmap.serializedSizeInBytes()]; + bitmap.serialize(ByteBuffer.wrap(array)); + randomTerms = List.of(new BytesArray(array)); // this will be fetched back by terms lookup + + TermsQueryBuilder query = new TermsQueryBuilder(INT_FIELD_NAME, randomTermsLookup().store(true)).valueType( + TermsQueryBuilder.ValueType.BITMAP + ); + QueryShardContext context = createShardContext(); + QueryBuilder rewritten = rewriteQuery(query, new QueryShardContext(context)); + Query luceneQuery = rewritten.toQuery(context); + assertTrue(luceneQuery instanceof IndexOrDocValuesQuery); + } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index ec48032df4a15..a6db37285fe6f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -25,23 +25,29 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import 
org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.test.OpenSearchTestCase; import java.math.BigInteger; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; @@ -60,6 +66,7 @@ import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; @@ -537,4 +544,541 @@ private Map getRemoteStoreNodeAttributes() { remoteStoreNodeAttributes.put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, "my-translog-repo-1"); return remoteStoreNodeAttributes; } + + private void setupRemotePinnedTimestampFeature(boolean enabled) { + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), enabled).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + } + + public void testGetPinnedTimestampLockedFilesFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + // Pinned timestamps 800, 900, 1000, 2000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + Map metadataFilePinnedTimestampCache = new HashMap<>(); + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(0, implicitLockedFiles.size()); + assertEquals(0, metadataFilePinnedTimestampCache.size()); + } + + public void testGetPinnedTimestampLockedFilesWithEmptyMetadataFiles() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Collections.emptyList(); + Set pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L)); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNoPinnedTimestamps() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt"); + Set pinnedTimestampSet = Collections.emptySet(); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + 
metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNullMetadataFiles() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = null; + Set pinnedTimestampSet = new HashSet<>(Arrays.asList(1L, 2L, 3L)); + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + public void testGetPinnedTimestampLockedFilesWithNullPinnedTimestampSet() { + setupRemotePinnedTimestampFeature(true); + List metadataFiles = Arrays.asList("file1.txt", "file2.txt", "file3.txt"); + Set pinnedTimestampSet = null; + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFiles, + pinnedTimestampSet, + new HashMap<>(), + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ); + assertTrue(implicitLockedFiles.isEmpty()); + } + + private Tuple, Set> testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List metadataFileTimestamps, + Set pinnedTimetamps, + Map metadataFilePinnedTimestampCache + ) { + String metadataPrefix = "metadata__1__2__3__4__5__"; + Map metadataFiles = new HashMap<>(); + for (Long metadataFileTimestamp : metadataFileTimestamps) { + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + } + return new Tuple<>( + metadataFiles, + RemoteStoreUtils.getPinnedTimestampLockedFiles( + new ArrayList<>(metadataFiles.values()), + pinnedTimetamps, + metadataFilePinnedTimestampCache, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ) + ); + } + + private Tuple, Set> testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map metadataFileTimestampsPrimaryTermMap, + Set pinnedTimetamps, + Map metadataFilePinnedTimestampCache + ) { + setupRemotePinnedTimestampFeature(true); + Map metadataFiles = new HashMap<>(); + for (Map.Entry metadataFileTimestampPrimaryTerm : metadataFileTimestampsPrimaryTermMap.entrySet()) { + String primaryTerm = RemoteStoreUtils.invertLong(metadataFileTimestampPrimaryTerm.getValue()); + String metadataPrefix = "metadata__" + primaryTerm + "__2__3__4__5__"; + long metadataFileTimestamp = metadataFileTimestampPrimaryTerm.getKey(); + metadataFiles.put(metadataFileTimestamp, metadataPrefix + RemoteStoreUtils.invertLong(metadataFileTimestamp)); + } + return new Tuple<>( + metadataFiles, + RemoteStoreUtils.getPinnedTimestampLockedFiles( + new ArrayList<>(metadataFiles.values()), + pinnedTimetamps, + metadataFilePinnedTimestampCache, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getTimestamp, + RemoteSegmentStoreDirectory.MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen + ) + ); + } + + public void testGetPinnedTimestampLockedFilesWithPinnedTimestamps() { + setupRemotePinnedTimestampFeature(true); + + Map metadataFilePinnedTimestampCache = new HashMap<>(); + + // Pinned timestamps 800, 900 + // Metadata with timestamp 990 + // No metadata matches the timestamp + 
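// neither pinned timestamp (800 or 900) has a metadata file at or before it, so nothing should be reported as locked +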
Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L), + Set.of(800L, 900L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(0, implicitLockedFiles.size()); + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990 + // Metadata timestamp 990 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(990L))); + // This is still 0 as we don't cache the latest metadata file as it can change (explained in the next test case) + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990, 995 + // Metadata timestamp 995 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(995L))); + // This is still 0 as we don't cache the latest metadata file as it can change + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000 + // Metadata with timestamp 990, 995, 1000 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L), + Set.of(800L, 900L, 1000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + // This is still 0 as we don't cache the latest metadata file as it can change + assertEquals(0, metadataFilePinnedTimestampCache.size()); + + // Pinned timestamps 800, 900, 1000, 2000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L))); + // Now we cache all the matches except the last one. 
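+ // 1000 -> metadataFiles(1000) is cached below; 1001 is the latest metadata file, so its match for pinned timestamp 2000 is not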
+ assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L)); + + // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000 + // Metadata with timestamp 990, 995, 1000, 1001 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1001 <= Pinned Timestamp 2000 + // Metadata timestamp 1001 <= Pinned Timestamp 3000 + // Metadata timestamp 1001 <= Pinned Timestamp 4000 + // Metadata timestamp 1001 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L), + Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1001L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L)); + + // Pinned timestamps 800, 900, 1000, 2000, 3000, 4000, 5000 + // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300 + // Metadata timestamp 1000 <= Pinned Timestamp 1000 + // Metadata timestamp 1900 <= Pinned Timestamp 2000 + // Metadata timestamp 2300 <= Pinned Timestamp 3000 + // Metadata timestamp 2300 <= Pinned Timestamp 4000 + // Metadata timestamp 2300 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L), + Set.of(800L, 900L, 1000L, 2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(3, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L))); + // Now we cache all the matches except the last one. + assertEquals(2, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1000L), metadataFilePinnedTimestampCache.get(1000L)); + assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L)); + + // Pinned timestamps 2000, 3000, 4000, 5000 + // Metadata with timestamp 990, 995, 1000, 1001, 1900, 2300 + // Metadata timestamp 1900 <= Pinned Timestamp 2000 + // Metadata timestamp 2300 <= Pinned Timestamp 3000 + // Metadata timestamp 2300 <= Pinned Timestamp 4000 + // Metadata timestamp 2300 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(990L, 995L, 1000L, 1001L, 1900L, 2300L), + Set.of(2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(2300L))); + // Now we cache all the matches except the last one. 
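+ // only the 2000 -> metadataFiles(1900) mapping is cacheable; 2300 is the newest metadata file, so its matches stay uncached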
+ assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L)); + + // Pinned timestamps 2000, 3000, 4000, 5000 + // Metadata with timestamp 1001, 1900, 2300, 3000, 3001, 5500, 6000 + // Metadata timestamp 1900 <= Pinned Timestamp 2000 + // Metadata timestamp 3000 <= Pinned Timestamp 3000 + // Metadata timestamp 3001 <= Pinned Timestamp 4000 + // Metadata timestamp 3001 <= Pinned Timestamp 5000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(1001L, 1900L, 2300L, 3000L, 3001L, 5500L, 6000L), + Set.of(2000L, 3000L, 4000L, 5000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(3, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(1900L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(3000L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L))); + // Now we cache all the matches except the last one. + assertEquals(4, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(1900L), metadataFilePinnedTimestampCache.get(2000L)); + assertEquals(metadataFiles.get(3000L), metadataFilePinnedTimestampCache.get(3000L)); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L)); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L)); + + // Pinned timestamps 4000, 5000, 6000, 7000 + // Metadata with timestamp 2300, 3000, 3001, 5500, 6000 + // Metadata timestamp 3001 <= Pinned Timestamp 4000 + // Metadata timestamp 3001 <= Pinned Timestamp 5000 + // Metadata timestamp 6000 <= Pinned Timestamp 6000 + // Metadata timestamp 6000 <= Pinned Timestamp 7000 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + List.of(2300L, 3000L, 3001L, 5500L, 6000L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(2, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(3001L))); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6000L))); + // Now we cache all the matches except the last one. + assertEquals(2, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(4000L)); + assertEquals(metadataFiles.get(3001L), metadataFilePinnedTimestampCache.get(5000L)); + } + + public void testGetPinnedTimestampLockedFilesWithPinnedTimestampsDifferentPrefix() { + setupRemotePinnedTimestampFeature(true); + + Map metadataFilePinnedTimestampCache = new HashMap<>(); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 6 - 6998 + // 5 - 6995 + // 5 - 6990 + Tuple, Set> metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6998L, 6L, 6995L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + Map metadataFiles = metadataAndLocks.v1(); + Set implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L))); + // Now we cache all the matches except the last one. 
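+ // 6998 is the newest file at or before pinned timestamp 7000 and, since 7002 is the latest metadata file overall, it can be cached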
+ assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 5 - 6998 + // 5 - 6995 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6998L, 5L, 6995L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 6 - 6998 + // 5 - 7001 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 6998L, 6L, 7001L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6998L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6998L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 7002 + // 5 - 7005 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(7002L, 6L, 7005L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6990L))); + // Now we cache all the matches except the last one. + assertEquals(1, metadataFilePinnedTimestampCache.size()); + assertEquals(metadataFiles.get(6990L), metadataFilePinnedTimestampCache.get(7000L)); + + // Pinned timestamp 7000 + // Primary Term - Timestamp in md file + // 6 - 6999 + // 5 - 7005 + // 5 - 6990 + metadataFilePinnedTimestampCache = new HashMap<>(); + metadataAndLocks = testGetPinnedTimestampLockedFilesWithPinnedTimestamps( + Map.of(6999L, 6L, 7005L, 5L, 6990L, 5L), + Set.of(4000L, 5000L, 6000L, 7000L), + metadataFilePinnedTimestampCache + ); + metadataFiles = metadataAndLocks.v1(); + implicitLockedFiles = metadataAndLocks.v2(); + + assertEquals(1, implicitLockedFiles.size()); + assertTrue(implicitLockedFiles.contains(metadataFiles.get(6999L))); + // Now we cache all the matches except the last one. 
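+ // the only match (6999) is also treated as the most recent metadata file here, so the cache stays empty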
+ assertEquals(0, metadataFilePinnedTimestampCache.size()); + } + + public void testFilterOutMetadataFilesBasedOnAgeFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + List metadataFiles = new ArrayList<>(); + + for (int i = 0; i < randomIntBetween(5, 10); i++) { + metadataFiles.add((System.currentTimeMillis() - randomIntBetween(-150000, 150000)) + "_file" + i + ".txt"); + } + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertEquals(metadataFiles, result); + } + + public void testFilterOutMetadataFilesBasedOnAge_AllFilesOldEnough() { + setupRemotePinnedTimestampFeature(true); + + List metadataFiles = Arrays.asList( + (System.currentTimeMillis() - 150000) + "_file1.txt", + (System.currentTimeMillis() - 300000) + "_file2.txt", + (System.currentTimeMillis() - 450000) + "_file3.txt" + ); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertEquals(metadataFiles, result); + } + + public void testFilterOutMetadataFilesBasedOnAge_SomeFilesTooNew() { + setupRemotePinnedTimestampFeature(true); + + String file1 = (System.currentTimeMillis() - 150000) + "_file1.txt"; + String file2 = (System.currentTimeMillis() - 300000) + "_file2.txt"; + String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt"; + + List metadataFiles = Arrays.asList(file1, file2, file3); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + List expected = Arrays.asList(file1, file2); + assertEquals(expected, result); + } + + public void testFilterOutMetadataFilesBasedOnAge_AllFilesTooNew() { + setupRemotePinnedTimestampFeature(true); + + String file1 = (System.currentTimeMillis() + 150000) + "_file1.txt"; + String file2 = (System.currentTimeMillis() + 300000) + "_file2.txt"; + String file3 = (System.currentTimeMillis() + 450000) + "_file3.txt"; + + List metadataFiles = Arrays.asList(file1, file2, file3); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertTrue(result.isEmpty()); + } + + public void testFilterOutMetadataFilesBasedOnAge_EmptyInputList() { + setupRemotePinnedTimestampFeature(true); + + List metadataFiles = Arrays.asList(); + + List result = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> Long.valueOf(file.split("_")[0]), + System.currentTimeMillis() + ); + assertTrue(result.isEmpty()); + } + + public void testIsPinnedTimestampStateStaleFeatureDisabled() { + setupRemotePinnedTimestampFeature(false); + assertFalse(RemoteStoreUtils.isPinnedTimestampStateStale()); + } + + public void testIsPinnedTimestampStateStaleFeatureEnabled() { + setupRemotePinnedTimestampFeature(true); + assertTrue(RemoteStoreUtils.isPinnedTimestampStateStale()); + } } diff --git a/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java index ff9b62a341deb..2c55d26261fe0 100644 --- a/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/BaseRemoteSegmentStoreDirectoryTests.java @@ -43,16 +43,9 @@ public class 
BaseRemoteSegmentStoreDirectoryTests extends IndexShardTestCase { protected SegmentInfos segmentInfos; protected ThreadPool threadPool; - protected final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 12, - 23, - 34, - 1, - 1, - "node-1" - ); + protected String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1, "node-1"); - protected final String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + protected String metadataFilenameDup = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( 12, 23, 34, @@ -60,30 +53,9 @@ public class BaseRemoteSegmentStoreDirectoryTests extends IndexShardTestCase { 1, "node-2" ); - protected final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 12, - 13, - 34, - 1, - 1, - "node-1" - ); - protected final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 10, - 38, - 34, - 1, - 1, - "node-1" - ); - protected final String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( - 10, - 36, - 34, - 1, - 1, - "node-1" - ); + protected String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1, "node-1"); + protected String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1, "node-1"); + protected String metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 36, 34, 1, 1, "node-1"); public void setupRemoteSegmentStoreDirectory() throws IOException { remoteDataDirectory = mock(RemoteDirectory.class); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java new file mode 100644 index 0000000000000..b4f93d706bb1e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryWithPinnedTimestampTests.java @@ -0,0 +1,292 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.util.Version; +import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; +import org.opensearch.gateway.remote.model.RemotePinnedTimestamps; +import org.opensearch.gateway.remote.model.RemoteStorePinnedTimestampsBlobStore; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.mockito.Mockito; + +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; +import static org.opensearch.test.RemoteStoreTestUtils.createMetadataFileBytes; +import static org.hamcrest.CoreMatchers.is; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteSegmentStoreDirectoryWithPinnedTimestampTests extends RemoteSegmentStoreDirectoryTests { + + Runnable updatePinnedTimstampTask; + BlobStoreTransferService blobStoreTransferService; + RemoteStorePinnedTimestampsBlobStore remoteStorePinnedTimestampsBlobStore; + RemoteStorePinnedTimestampService remoteStorePinnedTimestampServiceSpy; + + @Before + public void setupPinnedTimestamp() throws IOException { + RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings( + Settings.builder().put(CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true).build(), + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); + + Supplier repositoriesServiceSupplier = mock(Supplier.class); + Settings settings = Settings.builder() + .put(Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "remote-repo") + .build(); + RepositoriesService repositoriesService = mock(RepositoriesService.class); + when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); + BlobStoreRepository blobStoreRepository = mock(BlobStoreRepository.class); + when(repositoriesService.repository("remote-repo")).thenReturn(blobStoreRepository); + + when(threadPool.schedule(any(), any(), any())).then(invocationOnMock -> { + updatePinnedTimstampTask = invocationOnMock.getArgument(0); + updatePinnedTimstampTask.run(); + return null; + }).then(subsequentInvocationsOnMock -> null); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = new 
RemoteStorePinnedTimestampService( + repositoriesServiceSupplier, + settings, + threadPool, + clusterService + ); + remoteStorePinnedTimestampServiceSpy = Mockito.spy(remoteStorePinnedTimestampService); + + remoteStorePinnedTimestampsBlobStore = mock(RemoteStorePinnedTimestampsBlobStore.class); + blobStoreTransferService = mock(BlobStoreTransferService.class); + when(remoteStorePinnedTimestampServiceSpy.pinnedTimestampsBlobStore()).thenReturn(remoteStorePinnedTimestampsBlobStore); + when(remoteStorePinnedTimestampServiceSpy.blobStoreTransferService()).thenReturn(blobStoreTransferService); + + doAnswer(invocationOnMock -> { + ActionListener> actionListener = invocationOnMock.getArgument(3); + actionListener.onResponse(new ArrayList<>()); + return null; + }).when(blobStoreTransferService).listAllInSortedOrder(any(), any(), eq(1), any()); + + remoteStorePinnedTimestampServiceSpy.start(); + + metadataWithOlderTimestamp(); + } + + private void metadataWithOlderTimestamp() { + metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 300000 + ); + metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 400000 + ); + metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 500000 + ); + metadataFilename4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 36, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() - 600000 + ); + } + + public void testInitializeToSpecificTimestampNoMetadataFiles() throws IOException { + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(new ArrayList<>()); + assertNull(remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L)); + } + + public void testInitializeToSpecificTimestampNoMdMatchingTimestamp() throws IOException { + String metadataPrefix = "metadata__1__2__3__4__5__"; + List metadataFiles = new ArrayList<>(); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(4000)); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(metadataFiles); + assertNull(remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L)); + } + + public void testInitializeToSpecificTimestampMatchingMdFile() throws IOException { + String metadataPrefix = "metadata__1__2__3__4__5__"; + List metadataFiles = new ArrayList<>(); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(1000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(2000)); + metadataFiles.add(metadataPrefix + RemoteStoreUtils.invertLong(3000)); + + Map metadata = new HashMap<>(); + metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major); + metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024::" + Version.LATEST.major); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + 
RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(metadataFiles); + when(remoteMetadataDirectory.getBlobStream(metadataPrefix + RemoteStoreUtils.invertLong(1000))).thenReturn( + createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint(), segmentInfos) + ); + + RemoteSegmentMetadata remoteSegmentMetadata = remoteSegmentStoreDirectory.initializeToSpecificTimestamp(1234L); + assertNotNull(remoteSegmentMetadata); + Map uploadedSegments = remoteSegmentStoreDirectory + .getSegmentsUploadedToRemoteStore(); + assertEquals(2, uploadedSegments.size()); + assertTrue(uploadedSegments.containsKey("_0.cfe")); + assertTrue(uploadedSegments.containsKey("_0.cfs")); + } + + public void testDeleteStaleCommitsNoPinnedTimestampMdFilesLatest() throws Exception { + metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 23, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() + ); + metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 12, + 13, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() + ); + metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename( + 10, + 38, + 34, + 1, + 1, + "node-1", + System.currentTimeMillis() + ); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), + anyInt() + ) + ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3)); + + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + // populateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + // But as the oldest metadata file's timestamp is within time threshold since last successful fetch, + // GC will skip deleting any data or metadata files. 
+ remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2); + + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteDataDirectory, times(0)).deleteFile(any()); + verify(remoteMetadataDirectory, times(0)).deleteFile(any()); + } + + public void testDeleteStaleCommitsPinnedTimestampMdFile() throws Exception { + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), + anyInt() + ) + ).thenReturn(List.of(metadataFilename, metadataFilename2, metadataFilename3)); + + doAnswer(invocationOnMock -> { + ActionListener> actionListener = invocationOnMock.getArgument(3); + actionListener.onResponse(List.of(new PlainBlobMetadata("pinned_timestamp_123", 1000))); + return null; + }).when(blobStoreTransferService).listAllInSortedOrder(any(), any(), eq(1), any()); + + long pinnedTimestampMatchingMetadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getTimestamp(metadataFilename2) + 10; + when(remoteStorePinnedTimestampsBlobStore.read(any())).thenReturn(new RemotePinnedTimestamps.PinnedTimestamps(Map.of(pinnedTimestampMatchingMetadataFilename2, List.of("xyz")))); + when(remoteStorePinnedTimestampsBlobStore.getBlobPathForUpload(any())).thenReturn(new BlobPath()); + + final Map> metadataFilenameContentMapping = populateMetadata(); + final List filesToBeDeleted = metadataFilenameContentMapping.get(metadataFilename3) + .values() + .stream() + .map(metadata -> metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1]) + .collect(Collectors.toList()); + + updatePinnedTimstampTask.run(); + + remoteSegmentStoreDirectory.init(); + + // popluateMetadata() adds stub to return 3 metadata files + // We are passing lastNMetadataFilesToKeep=2 here so that oldest 1 metadata file will be deleted + remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(1); + + for (final String file : filesToBeDeleted) { + verify(remoteDataDirectory).deleteFile(file); + } + assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true))); + verify(remoteMetadataDirectory).deleteFile(metadataFilename3); + verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename2); + } +} diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 6757dbc184961..b5350a39e8599 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -31,12 +31,15 @@ package org.opensearch.indices; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.IndexShardStats; +import org.opensearch.action.search.SearchType; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexGraveyard; @@ -44,6 +47,7 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader.DelegatingCacheHelper; 
import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -76,8 +80,11 @@ import org.opensearch.plugins.EnginePlugin; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.test.TestSearchContext; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -627,4 +634,32 @@ public void testClusterRemoteTranslogBufferIntervalDefault() { indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval() ); } + + public void testDirectoryReaderWithoutDelegatingCacheHelperNotCacheable() throws IOException { + IndicesService indicesService = getIndicesService(); + final IndexService indexService = createIndex("test"); + ShardSearchRequest request = mock(ShardSearchRequest.class); + when(request.requestCache()).thenReturn(true); + + TestSearchContext context = new TestSearchContext(indexService.getBigArrays(), indexService) { + @Override + public SearchType searchType() { + return SearchType.QUERY_THEN_FETCH; + } + }; + + ContextIndexSearcher searcher = mock(ContextIndexSearcher.class); + context.setSearcher(searcher); + DirectoryReader reader = mock(DirectoryReader.class); + when(searcher.getDirectoryReader()).thenReturn(reader); + when(searcher.getIndexReader()).thenReturn(reader); + IndexReader.CacheHelper notDelegatingCacheHelper = mock(IndexReader.CacheHelper.class); + DelegatingCacheHelper delegatingCacheHelper = mock(DelegatingCacheHelper.class); + + for (boolean useDelegatingCacheHelper : new boolean[] { true, false }) { + IndexReader.CacheHelper cacheHelper = useDelegatingCacheHelper ? delegatingCacheHelper : notDelegatingCacheHelper; + when(reader.getReaderCacheHelper()).thenReturn(cacheHelper); + assertEquals(useDelegatingCacheHelper, indicesService.canCache(request, context)); + } + } } diff --git a/server/src/test/java/org/opensearch/indices/TermsLookupTests.java b/server/src/test/java/org/opensearch/indices/TermsLookupTests.java index 8a7867729f2c1..3f45bc86104dd 100644 --- a/server/src/test/java/org/opensearch/indices/TermsLookupTests.java +++ b/server/src/test/java/org/opensearch/indices/TermsLookupTests.java @@ -48,12 +48,15 @@ public void testTermsLookup() { String id = randomAlphaOfLengthBetween(1, 10); String path = randomAlphaOfLengthBetween(1, 10); String routing = randomAlphaOfLengthBetween(1, 10); + boolean store = randomBoolean(); TermsLookup termsLookup = new TermsLookup(index, id, path); termsLookup.routing(routing); + termsLookup.store(store); assertEquals(index, termsLookup.index()); assertEquals(id, termsLookup.id()); assertEquals(path, termsLookup.path()); assertEquals(routing, termsLookup.routing()); + assertEquals(store, termsLookup.store()); } public void testIllegalArguments() { @@ -109,6 +112,6 @@ public void testXContentParsing() throws IOException { public static TermsLookup randomTermsLookup() { return new TermsLookup(randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10).replace('.', '_')).routing( randomBoolean() ? 
randomAlphaOfLength(10) : null - ); + ).store(randomBoolean()); } } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 1caa2c99fc3b8..e8a0f70ee3563 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -54,6 +54,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; @@ -1413,6 +1414,106 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get(); } + /** + * Tests that the slice count is calculated correctly when concurrent search is enabled + * If concurrent search enabled - + * pick index level slice count setting if index level setting is set + * else pick default cluster level slice count setting + * @throws IOException + */ + public void testConcurrentSegmentSearchSliceCount() throws IOException { + + String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + ClusterService clusterService = getInstanceFromNode(ClusterService.class); + ShardId shardId = new ShardId(indexService.index(), 0); + long nowInMillis = System.currentTimeMillis(); + String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10); + SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(randomBoolean()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + nowInMillis, + clusterAlias, + Strings.EMPTY_ARRAY + ); + // enable concurrent search + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true)) + .get(); + + Integer[][] scenarios = { + // cluster setting, index setting, expected slice count + // expected value null will pick up default value from settings + { null, null, clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING) }, + { 4, null, 4 }, + { null, 3, 3 }, + { 4, 3, 3 }, }; + + for (Integer[] sliceCounts : scenarios) { + Integer clusterSliceCount = sliceCounts[0]; + Integer indexSliceCount = sliceCounts[1]; + Integer targetSliceCount = sliceCounts[2]; + + if (clusterSliceCount != null) { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), clusterSliceCount) + ) + .get(); + } else { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().putNull(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey()) + ) + .get(); + } + if (indexSliceCount != null) { + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings( + Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(), indexSliceCount) + ) + .get(); + } else { + client().admin() 
+ .indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().putNull(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey())) + .get(); + } + + try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { + searchContext.evaluateRequestShouldUseConcurrentSearch(); + assertEquals(targetSliceCount.intValue(), searchContext.getTargetMaxSliceCount()); + } + } + // cleanup + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) + .putNull(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey()) + ) + .get(); + } + /** * Test that the Search Context for concurrent segment search enabled is set correctly at the time of construction. * The same is used throughout the context object lifetime even if cluster setting changes before the request completion. diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/IncludeExcludeTests.java similarity index 58% rename from server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java rename to server/src/test/java/org/opensearch/search/aggregations/bucket/terms/IncludeExcludeTests.java index f223648de6ef4..2356e2dc6d855 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/IncludeExcludeTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.search.aggregations.support; +package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.SortedSetDocValues; @@ -44,13 +44,14 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.fielddata.AbstractSortedSetDocValues; import org.opensearch.search.DocValueFormat; -import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude.OrdinalsFilter; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.Collections; +import java.util.SortedSet; import java.util.TreeSet; +import java.util.concurrent.atomic.LongAdder; public class IncludeExcludeTests extends OpenSearchTestCase { @@ -297,4 +298,251 @@ private IncludeExclude serializeMixedRegex(IncludeExclude incExc) throws IOExcep } } + private static BytesRef[] toBytesRefArray(String... values) { + BytesRef[] bytesRefs = new BytesRef[values.length]; + for (int i = 0; i < values.length; i++) { + bytesRefs[i] = new BytesRef(values[i]); + } + return bytesRefs; + } + + public void testPrefixOrds() throws IOException { + IncludeExclude includeExclude = new IncludeExclude("(color|length|size):.*", "color:g.*"); + + OrdinalsFilter ordinalsFilter = includeExclude.convertToOrdinalsFilter(DocValueFormat.RAW); + + // Which of the following match the filter or not? 
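+ // include pattern (color|length|size):.* combined with exclude pattern color:g.*; the inline comments below give the expected verdict per term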
+ BytesRef[] bytesRefs = toBytesRefArray( + "color:blue", // true + "color:crimson", // true + "color:green", // false (excluded) + "color:gray", // false (excluded) + "depth:10in", // false + "depth:12in", // false + "depth:17in", // false + "length:long", // true + "length:medium", // true + "length:short", // true + "material:cotton", // false + "material:linen", // false + "material:polyester", // false + "size:large", // true + "size:medium", // true + "size:small", // true + "width:13in", // false + "width:27in", // false + "width:55in" // false + ); + boolean[] expectedFilter = new boolean[] { + true, + true, + false, + false, + false, + false, + false, + true, + true, + true, + false, + false, + false, + true, + true, + true, + false, + false, + false }; + + LongAdder lookupCount = new LongAdder(); + SortedSetDocValues sortedSetDocValues = new AbstractSortedSetDocValues() { + @Override + public boolean advanceExact(int target) { + return false; + } + + @Override + public long nextOrd() { + return 0; + } + + @Override + public int docValueCount() { + return 1; + } + + @Override + public BytesRef lookupOrd(long ord) { + lookupCount.increment(); + int ordIndex = Math.toIntExact(ord); + return bytesRefs[ordIndex]; + } + + @Override + public long getValueCount() { + return bytesRefs.length; + } + }; + + LongBitSet acceptedOrds = ordinalsFilter.acceptedGlobalOrdinals(sortedSetDocValues); + long prefixLookupCount = lookupCount.longValue(); + assertEquals(expectedFilter.length, acceptedOrds.length()); + for (int i = 0; i < expectedFilter.length; i++) { + assertEquals(expectedFilter[i], acceptedOrds.get(i)); + } + + // Now repeat, but this time, the prefix optimization won't work (because of the .+ on the exclude filter) + includeExclude = new IncludeExclude("(color|length|size):.*", "color:g.+"); + ordinalsFilter = includeExclude.convertToOrdinalsFilter(DocValueFormat.RAW); + acceptedOrds = ordinalsFilter.acceptedGlobalOrdinals(sortedSetDocValues); + long regexpLookupCount = lookupCount.longValue() - prefixLookupCount; + + // The filter should be functionally the same + assertEquals(expectedFilter.length, acceptedOrds.length()); + for (int i = 0; i < expectedFilter.length; i++) { + assertEquals(expectedFilter[i], acceptedOrds.get(i)); + } + + // But the full regexp requires more lookups + assertTrue(regexpLookupCount > prefixLookupCount); + } + + public void testExtractPrefixes() { + // Positive tests + assertEquals(bytesRefs("a"), IncludeExclude.extractPrefixes("a.*", 1000)); + assertEquals(bytesRefs("a", "b"), IncludeExclude.extractPrefixes("(a|b).*", 1000)); + assertEquals(bytesRefs("ab"), IncludeExclude.extractPrefixes("a(b).*", 1000)); + assertEquals(bytesRefs("ab", "ac"), IncludeExclude.extractPrefixes("a(b|c).*", 1000)); + assertEquals(bytesRefs("aabb", "aacc"), IncludeExclude.extractPrefixes("aa(bb|cc).*", 1000)); + + // No regex means no prefixes + assertEquals(bytesRefs(), IncludeExclude.extractPrefixes(null, 1000)); + + // Negative tests + assertNull(IncludeExclude.extractPrefixes(".*", 1000)); // Literal match-all has no prefixes + assertNull(IncludeExclude.extractPrefixes("ab?.*", 1000)); // Don't support optionals ye + // The following cover various cases involving infinite possible prefixes + assertNull(IncludeExclude.extractPrefixes("ab*.*", 1000)); + assertNull(IncludeExclude.extractPrefixes("a*(b|c).*", 1000)); + assertNull(IncludeExclude.extractPrefixes("a(b*|c)d.*", 1000)); + assertNull(IncludeExclude.extractPrefixes("a(b|c*)d.*", 1000)); + 
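// once a starred sub-pattern can repeat before the .* suffix, the set of possible prefixes is unbounded and extraction returns null +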
assertNull(IncludeExclude.extractPrefixes("a(b|c*)d*.*", 1000)); + + // Test with too many possible prefixes -- 10 * 10 * 10 + 1 > 1000 + assertNull( + IncludeExclude.extractPrefixes( + "((a1|a2|a3|a4|a5|a6|a7|a8|a9|a10)" + "(b1|b2|b3|b4|b5|b6|b7|b8|b9|b10)" + "(c1|c2|c3|c4|c5|c6|c7|c8|c9|c10)|x).*", + 1000 + ) + ); + // Test with too many possible prefixes -- 10 * 10 * 11 > 1000 + assertNull( + IncludeExclude.extractPrefixes( + "((a1|a2|a3|a4|a5|a6|a7|a8|a9|a10)" + "(b1|b2|b3|b4|b5|b6|b7|b8|b9|b10)" + "(c1|c2|c3|c4|c5|c6|c7|c8|c9|c10|c11)).*", + 1000 + ) + ); + } + + private static SortedSet bytesRefs(String... strings) { + SortedSet bytesRefs = new TreeSet<>(); + for (String string : strings) { + bytesRefs.add(new BytesRef(string)); + } + return bytesRefs; + } + + private static SortedSetDocValues toDocValues(BytesRef[] bytesRefs) { + return new AbstractSortedSetDocValues() { + @Override + public boolean advanceExact(int target) { + return false; + } + + @Override + public long nextOrd() { + return 0; + } + + @Override + public int docValueCount() { + return 1; + } + + @Override + public BytesRef lookupOrd(long ord) { + int ordIndex = Math.toIntExact(ord); + return bytesRefs[ordIndex]; + } + + @Override + public long getValueCount() { + return bytesRefs.length; + } + }; + } + + public void testOnlyIncludeExcludePrefix() throws IOException { + String incExcString = "(color:g|width:).*"; + IncludeExclude excludeOnly = new IncludeExclude(null, incExcString); + + OrdinalsFilter ordinalsFilter = excludeOnly.convertToOrdinalsFilter(DocValueFormat.RAW); + // Which of the following match the filter or not? + BytesRef[] bytesRefs = toBytesRefArray( + "color:blue", // true + "color:crimson", // true + "color:green", // false (excluded) + "color:gray", // false (excluded) + "depth:10in", // true + "depth:12in", // true + "depth:17in", // true + "length:long", // true + "length:medium", // true + "length:short", // true + "material:cotton", // true + "material:linen", // true + "material:polyester", // true + "size:large", // true + "size:medium", // true + "size:small", // true + "width:13in", // false + "width:27in", // false + "width:55in" // false + ); + boolean[] expectedFilter = new boolean[] { + true, + true, + false, + false, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + false, + false, + false }; + LongBitSet longBitSet = ordinalsFilter.acceptedGlobalOrdinals(toDocValues(bytesRefs)); + + for (int i = 0; i < expectedFilter.length; i++) { + assertEquals(expectedFilter[i], longBitSet.get(i)); + } + + // Now repeat, but this time include only. + IncludeExclude includeOnly = new IncludeExclude(incExcString, null); + ordinalsFilter = includeOnly.convertToOrdinalsFilter(DocValueFormat.RAW); + longBitSet = ordinalsFilter.acceptedGlobalOrdinals(toDocValues(bytesRefs)); + + // The previously excluded ordinals should be included and vice versa. 
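+ // e.g. color:green and width:13in flip to accepted while color:blue flips to rejected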
+ for (int i = 0; i < expectedFilter.length; i++) { + assertEquals(!expectedFilter[i], longBitSet.get(i)); + } + } } diff --git a/server/src/test/java/org/opensearch/search/query/BitmapDocValuesQueryTests.java b/server/src/test/java/org/opensearch/search/query/BitmapDocValuesQueryTests.java new file mode 100644 index 0000000000000..6e293d1ec69fd --- /dev/null +++ b/server/src/test/java/org/opensearch/search/query/BitmapDocValuesQueryTests.java @@ -0,0 +1,149 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.query; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.IntField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import org.roaringbitmap.RoaringBitmap; + +public class BitmapDocValuesQueryTests extends OpenSearchTestCase { + private Directory dir; + private IndexWriter w; + private DirectoryReader reader; + private IndexSearcher searcher; + + @Before + public void initSearcher() throws IOException { + dir = newDirectory(); + w = new IndexWriter(dir, newIndexWriterConfig()); + } + + @After + public void closeAllTheReaders() throws IOException { + reader.close(); + w.close(); + dir.close(); + } + + public void testScore() throws IOException { + Document d = new Document(); + d.add(new IntField("product_id", 1, Field.Store.NO)); + w.addDocument(d); + + d = new Document(); + d.add(new IntField("product_id", 2, Field.Store.NO)); + w.addDocument(d); + + d = new Document(); + d.add(new IntField("product_id", 3, Field.Store.NO)); + w.addDocument(d); + + d = new Document(); + d.add(new IntField("product_id", 4, Field.Store.NO)); + w.addDocument(d); + + w.commit(); + reader = DirectoryReader.open(w); + searcher = newSearcher(reader); + + RoaringBitmap bitmap = new RoaringBitmap(); + bitmap.add(1); + bitmap.add(4); + BitmapDocValuesQuery query = new BitmapDocValuesQuery("product_id", bitmap); + + Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + + List actual = new LinkedList<>(); + for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) { + // use doc values to get the actual value of the matching docs and assert + // cannot directly check the docId because test can randomize segment numbers + SortedNumericDocValues dv = DocValues.getSortedNumeric(leaf.reader(), "product_id"); + Scorer scorer = weight.scorer(leaf); + DocIdSetIterator disi = scorer.iterator(); + int docId; + while ((docId = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + dv.advanceExact(docId); + for (int count = 0; count < dv.docValueCount(); ++count) { + actual.add((int) dv.nextValue()); + } + } + } + List 
expected = List.of(1, 4); + assertEquals(expected, actual); + } + + public void testScoreMultiValues() throws IOException { + Document d = new Document(); + d.add(new IntField("product_id", 1, Field.Store.NO)); + w.addDocument(d); + + d = new Document(); + d.add(new IntField("product_id", 2, Field.Store.NO)); + d.add(new IntField("product_id", 3, Field.Store.NO)); + w.addDocument(d); + + d = new Document(); + d.add(new IntField("product_id", 3, Field.Store.NO)); + w.addDocument(d); + + d = new Document(); + d.add(new IntField("product_id", 4, Field.Store.NO)); + w.addDocument(d); + + w.commit(); + reader = DirectoryReader.open(w); + searcher = newSearcher(reader); + + RoaringBitmap bitmap = new RoaringBitmap(); + bitmap.add(3); + BitmapDocValuesQuery query = new BitmapDocValuesQuery("product_id", bitmap); + + Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + + Set<Integer> actual = new HashSet<>(); + for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) { + // use doc values to get the actual value of the matching docs and assert + // cannot directly check the docId because test can randomize segment numbers + SortedNumericDocValues dv = DocValues.getSortedNumeric(leaf.reader(), "product_id"); + Scorer scorer = weight.scorer(leaf); + DocIdSetIterator disi = scorer.iterator(); + int docId; + while ((docId = disi.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + dv.advanceExact(docId); + for (int count = 0; count < dv.docValueCount(); ++count) { + actual.add((int) dv.nextValue()); + } + } + } + Set<Integer> expected = Set.of(2, 3); + assertEquals(expected, actual); + } +} diff --git a/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java b/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java new file mode 100644 index 0000000000000..0f183555781d3 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/WorkloadManagementSettingsTests.java @@ -0,0 +1,241 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.wlm; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.wlm.WorkloadManagementSettings.NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME; +import static org.opensearch.wlm.WorkloadManagementSettings.NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME; +import static org.opensearch.wlm.WorkloadManagementSettings.NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME; +import static org.opensearch.wlm.WorkloadManagementSettings.NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME; + +public class WorkloadManagementSettingsTests extends OpenSearchTestCase { + + /** + * Tests the invalid value for {@code wlm.query_group.node.memory_rejection_threshold} + * When the value is set more than {@code wlm.query_group.node.memory_cancellation_threshold} accidentally during + * new feature development.
This test is to ensure that {@link WorkloadManagementSettings} holds the + * invariant {@code nodeLevelRejectionThreshold < nodeLevelCancellationThreshold} + */ + public void testInvalidMemoryInstantiationOfWorkloadManagementSettings() { + Settings settings = Settings.builder() + .put(NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, 0.8) + .put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.7) + .build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + assertThrows(IllegalArgumentException.class, () -> new WorkloadManagementSettings(settings, cs)); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * When the value is set more than {@code wlm.query_group.node.cpu_cancellation_threshold} accidentally during + * new feature development. This test is to ensure that {@link WorkloadManagementSettings} holds the + * invariant {@code nodeLevelRejectionThreshold < nodeLevelCancellationThreshold} + */ + public void testInvalidCpuInstantiationOfWorkloadManagementSettings() { + Settings settings = Settings.builder() + .put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.8) + .put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.7) + .build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + assertThrows(IllegalArgumentException.class, () -> new WorkloadManagementSettings(settings, cs)); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * Using setNodeLevelCpuRejectionThreshold function + */ + public void testValidNodeLevelCpuRejectionThresholdCase1() { + Settings settings = Settings.builder().put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + workloadManagementSettings.setNodeLevelCpuRejectionThreshold(0.7); + assertEquals(0.7, workloadManagementSettings.getNodeLevelCpuRejectionThreshold(), 1e-9); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.cpu_rejection_threshold} + */ + public void testValidNodeLevelCpuRejectionThresholdCase2() { + Settings settings = Settings.builder().put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.79).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertEquals(0.79, workloadManagementSettings.getNodeLevelCpuRejectionThreshold(), 1e-9); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * When the value is set more than {@literal 0.9} + */ + public void testInvalidNodeLevelCpuRejectionThresholdCase1() { + Settings settings = Settings.builder().put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.8).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelCpuRejectionThreshold(0.95)); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.cpu_rejection_threshold} + * When the value is set more than {@code wlm.query_group.node.cpu_cancellation_threshold} + */ 
+ public void testInvalidNodeLevelCpuRejectionThresholdCase2() { + Settings settings = Settings.builder() + .put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.7) + .put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8) + .build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelCpuRejectionThreshold(0.85)); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + */ + public void testValidNodeLevelCpuCancellationThresholdCase1() { + Settings settings = Settings.builder().put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertEquals(0.8, workloadManagementSettings.getNodeLevelCpuRejectionThreshold(), 1e-9); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + * Using setNodeLevelCpuCancellationThreshold function + */ + public void testValidNodeLevelCpuCancellationThresholdCase2() { + Settings settings = Settings.builder().put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.8).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + workloadManagementSettings.setNodeLevelCpuCancellationThreshold(0.83); + assertEquals(0.83, workloadManagementSettings.getNodeLevelCpuCancellationThreshold(), 1e-9); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + * When the value is set more than {@literal 0.95} + */ + public void testInvalidNodeLevelCpuCancellationThresholdCase1() { + Settings settings = Settings.builder().put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.9).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelCpuCancellationThreshold(0.96)); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.cpu_cancellation_threshold} + * When the value is set less than {@code wlm.query_group.node.cpu_rejection_threshold} + */ + public void testInvalidNodeLevelCpuCancellationThresholdCase2() { + Settings settings = Settings.builder() + .put(NODE_CPU_REJECTION_THRESHOLD_SETTING_NAME, 0.7) + .put(NODE_CPU_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8) + .build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelCpuCancellationThreshold(0.65)); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.memory_cancellation_threshold} + */ + public void testValidNodeLevelMemoryCancellationThresholdCase1() { + Settings settings = Settings.builder().put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8).build(); + 
ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertEquals(0.8, workloadManagementSettings.getNodeLevelMemoryCancellationThreshold(), 1e-9); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.memory_cancellation_threshold} + * Using setNodeLevelMemoryCancellationThreshold function + */ + public void testValidNodeLevelMemoryCancellationThresholdCase2() { + Settings settings = Settings.builder().put(NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, 0.8).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + workloadManagementSettings.setNodeLevelMemoryCancellationThreshold(0.83); + assertEquals(0.83, workloadManagementSettings.getNodeLevelMemoryCancellationThreshold(), 1e-9); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.memory_cancellation_threshold} + * When the value is set more than {@literal 0.95} + */ + public void testInvalidNodeLevelMemoryCancellationThresholdCase1() { + Settings settings = Settings.builder().put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.9).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelMemoryCancellationThreshold(0.96)); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.memory_cancellation_threshold} + * When the value is set less than {@code wlm.query_group.node.memory_rejection_threshold} + */ + public void testInvalidNodeLevelMemoryCancellationThresholdCase2() { + Settings settings = Settings.builder() + .put(NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, 0.7) + .put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8) + .build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelMemoryCancellationThreshold(0.65)); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.memory_rejection_threshold} + */ + public void testValidNodeLevelMemoryRejectionThresholdCase1() { + Settings settings = Settings.builder().put(NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, 0.79).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertEquals(0.79, workloadManagementSettings.getNodeLevelMemoryRejectionThreshold(), 1e-9); + } + + /** + * Tests the valid value for {@code wlm.query_group.node.memory_rejection_threshold} + * Using setNodeLevelMemoryRejectionThreshold function + */ + public void testValidNodeLevelMemoryRejectionThresholdCase2() { + Settings settings = Settings.builder().put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.9).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new 
WorkloadManagementSettings(settings, cs); + workloadManagementSettings.setNodeLevelMemoryRejectionThreshold(0.86); + assertEquals(0.86, workloadManagementSettings.getNodeLevelMemoryRejectionThreshold(), 1e-9); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.memory_rejection_threshold} + * When the value is set more than {@literal 0.9} + */ + public void testInvalidNodeLevelMemoryRejectionThresholdCase1() { + Settings settings = Settings.builder().put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.9).build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelMemoryRejectionThreshold(0.92)); + } + + /** + * Tests the invalid value for {@code wlm.query_group.node.memory_rejection_threshold} + * When the value is set more than {@code wlm.query_group.node.memory_cancellation_threshold} + */ + public void testInvalidNodeLevelMemoryRejectionThresholdCase2() { + Settings settings = Settings.builder() + .put(NODE_MEMORY_REJECTION_THRESHOLD_SETTING_NAME, 0.7) + .put(NODE_MEMORY_CANCELLATION_THRESHOLD_SETTING_NAME, 0.8) + .build(); + ClusterSettings cs = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + WorkloadManagementSettings workloadManagementSettings = new WorkloadManagementSettings(settings, cs); + assertThrows(IllegalArgumentException.class, () -> workloadManagementSettings.setNodeLevelMemoryRejectionThreshold(0.85)); + } +} diff --git a/settings.gradle b/settings.gradle index ae9f5384be592..b79c2aee135fc 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.develocity" version "3.17.6" + id "com.gradle.develocity" version "3.18" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 411509dfe5acc..b5cd12ef0c11f 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -55,7 +55,7 @@ dependencies { exclude group: 'com.nimbusds' exclude module: "commons-configuration2" } - api "dnsjava:dnsjava:3.6.0" + api "dnsjava:dnsjava:3.6.1" api "org.codehaus.jettison:jettison:${versions.jettison}" api "org.apache.commons:commons-compress:${versions.commonscompress}" api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index b86cce682c68e..911aa92340de6 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2792,6 +2792,7 @@ private static Settings buildRemoteStoreNodeAttributes( } settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(PathType.values())); settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()); + settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), randomBoolean()); return settings.build(); } diff --git a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java 
b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java index cd6bf02efef6f..9956c651618d3 100644 --- a/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/opensearch/transport/nio/MockNioTransport.java @@ -467,7 +467,7 @@ private void logLongRunningExecutions() { final Thread thread = entry.getKey(); final String stackTrace = Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n")); final Thread.State threadState = thread.getState(); - if (blockedSinceInNanos == registry.get(thread)) { + if (blockedSinceInNanos.equals(registry.get(thread))) { logger.warn( "Potentially blocked execution on network thread [{}] [{}] [{} milliseconds]: \n{}", thread.getName(), diff --git a/test/framework/src/main/resources/org/opensearch/analysis/common/test1.json b/test/framework/src/main/resources/org/opensearch/analysis/common/test1.json index e69c2db6ff400..18e30ec6f87d5 100644 --- a/test/framework/src/main/resources/org/opensearch/analysis/common/test1.json +++ b/test/framework/src/main/resources/org/opensearch/analysis/common/test1.json @@ -19,8 +19,22 @@ "type":"myfilter" }, "dict_dec":{ - "type":"dictionary_decompounder", - "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"] + "type":"dictionary_decompounder", + "word_list":["donau", "dampf", "schiff", "spargel", "creme", "suppe"] + }, + "hyphen_dec":{ + "type":"hyphenation_decompounder", + "word_list":["læse", "hest"], + "hyphenation_patterns_path": "da_UTF8.xml" + }, + "hyphen_dec_no_sub_matches":{ + "type":"hyphenation_decompounder", + "word_list":["basketball", "basket", "ball", "kurv"], + "hyphenation_patterns_path": "da_UTF8.xml", + "only_longest_match": false, + "no_sub_matches": true, + "no_overlapping_matches": false + } }, "analyzer":{ @@ -45,8 +59,16 @@ "filter":["lowercase", "stop", "czech_stem"] }, "decompoundingAnalyzer":{ - "tokenizer":"standard", - "filter":["dict_dec"] + "tokenizer":"standard", + "filter":["dict_dec"] + }, + "hyphenationAnalyzer":{ + "tokenizer":"standard", + "filter":["hyphen_dec"] + }, + "hyphenationAnalyzerNoSubMatches":{ + "tokenizer":"standard", + "filter":["hyphen_dec_no_sub_matches"] } } } diff --git a/test/framework/src/main/resources/org/opensearch/analysis/common/test1.yml b/test/framework/src/main/resources/org/opensearch/analysis/common/test1.yml index 82f933296a314..30862a9d0be8e 100644 --- a/test/framework/src/main/resources/org/opensearch/analysis/common/test1.yml +++ b/test/framework/src/main/resources/org/opensearch/analysis/common/test1.yml @@ -15,6 +15,18 @@ index : dict_dec : type : dictionary_decompounder word_list : [donau, dampf, schiff, spargel, creme, suppe] + hyphen_dec : + type : hyphenation_decompounder + word_list : [læse, hest] + hyphenation_patterns_path: "da_UTF8.xml" + hyphen_dec_no_sub_matches : + type : hyphenation_decompounder + word_list : [basketball, basket, ball, kurv] + hyphenation_patterns_path: "da_UTF8.xml" + only_longest_match: False + no_sub_matches: True + no_overlapping_matches: False + analyzer : standard : type : standard @@ -37,3 +49,9 @@ index : decompoundingAnalyzer : tokenizer : standard filter : [dict_dec] + hyphenationAnalyzer : + tokenizer : standard + filter : [hyphen_dec] + hyphenationAnalyzerNoSubMatches : + tokenizer : standard + filter : [hyphen_dec_no_sub_matches]
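For reference, a minimal standalone sketch of how the bitmap-filtering query added above can be exercised outside the test framework, mirroring BitmapDocValuesQueryTests. This is an illustrative example under assumptions, not part of the change itself: it reuses the same Lucene and RoaringBitmap dependencies as the tests, the hypothetical class name BitmapQuerySketch, and the "product_id" field from the tests, and it only counts matching documents instead of collecting their doc values.

// Assumed dependencies: lucene-core, org.roaringbitmap:RoaringBitmap, and the server module providing BitmapDocValuesQuery.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.opensearch.search.query.BitmapDocValuesQuery;
import org.roaringbitmap.RoaringBitmap;

public class BitmapQuerySketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig())) {
            // Index four documents, each with a single integer doc value under "product_id".
            for (int id : new int[] { 1, 2, 3, 4 }) {
                Document d = new Document();
                d.add(new IntField("product_id", id, Field.Store.NO));
                w.addDocument(d);
            }
            w.commit();
            try (DirectoryReader reader = DirectoryReader.open(w)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                // The "large terms list" is encoded as a RoaringBitmap rather than an array of terms.
                RoaringBitmap bitmap = RoaringBitmap.bitmapOf(1, 4);
                int hits = searcher.count(new BitmapDocValuesQuery("product_id", bitmap));
                System.out.println("matching docs: " + hits); // expected: 2
            }
        }
    }
}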