diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 732f2f9b96ae3..1c80f5048a611 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -239,5 +239,38 @@ "data_instance_config": "4vCPU, 32G Mem, 16G Heap" }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_15": { + "description": "Search only test-procedure for big5, uses lucene-10 index snapshot to restore the data for OS-3.0.0", + "supported_major_versions": ["3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "big5", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}", + "CAPTURE_NODE_STAT": "true", + "TEST_PROCEDURE": "restore-from-snapshot" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_16": { + "description": "Benchmarking config for NESTED workload, benchmarks nested queries with inner-hits", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "nested", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" + } } -} diff --git a/.github/workflows/benchmark-pull-request.yml b/.github/workflows/benchmark-pull-request.yml index c494df6e27ce3..e6ccc31160bf9 100644 --- a/.github/workflows/benchmark-pull-request.yml +++ b/.github/workflows/benchmark-pull-request.yml @@ -4,7 +4,10 @@ on: types: [created] jobs: run-performance-benchmark-on-pull-request: - if: ${{ (github.event.issue.pull_request) && (contains(github.event.comment.body, '"run-benchmark-test"')) }} + if: | + github.repository == 'opensearch-project/OpenSearch' && + github.event.issue.pull_request && + contains(github.event.comment.body, '"run-benchmark-test"') runs-on: ubuntu-latest permissions: id-token: write @@ -111,7 +114,7 @@ jobs: uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} - result-encoding: string + result-encoding: json script: | // Get the collaborators - filtered to maintainer permissions const maintainersResponse = await github.request('GET /repos/{owner}/{repo}/collaborators', { @@ -121,12 +124,12 @@ jobs: affiliation: 'all', per_page: 100 }); - return maintainersResponse.data.map(item => item.login).join(', '); + return maintainersResponse.data.map(item => item.login); - uses: trstringer/manual-approval@v1 - if: (!contains(steps.get_approvers.outputs.result, github.event.comment.user.login)) + if: ${{ !contains(fromJSON(steps.get_approvers.outputs.result), github.event.comment.user.login) }} with: secret: ${{ github.TOKEN }} - approvers: ${{ steps.get_approvers.outputs.result }} + approvers: ${{ join(fromJSON(steps.get_approvers.outputs.result), ', ') }} minimum-approvals: 1 issue-title: 'Request to approve/deny benchmark run for PR #${{ env.PR_NUMBER }}' issue-body: "Please approve or deny the benchmark run for PR #${{ 
env.PR_NUMBER }}" diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index ef842bb405d60..0000000000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Developer Certificate of Origin Check - -on: [pull_request] - -jobs: - dco-check: - runs-on: ubuntu-latest - - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@v1.3.1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@v1.1.0 - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} - diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 3697750dab97a..923c82028cd1b 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v2.1.0 + uses: lycheeverse/lychee-action@v2.2.0 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index ad3fc5e6ba29e..40e4111e11942 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,10 +18,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489)) +- [Workload Management] Add Workload Management IT ([#16359](https://github.com/opensearch-project/OpenSearch/pull/16359)) - Support prefix list for remote repository attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271)) - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)). - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/)) +- Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods ([#16923](https://github.com/opensearch-project/OpenSearch/pull/16923)) - Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)). 
+- Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678)) +- Introduce a setting to disable download of full cluster state from remote on term mismatch ([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) +- Added ability to retrieve value from DocValues in a flat_object field ([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) +- Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534)) +- Changes to support IP field in star tree indexing ([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/)) +- Support object fields in star-tree index ([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/)) +- Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field ([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) @@ -30,22 +39,33 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.apis:google-api-services-compute` from v1-rev20240407-2.0.0 to v1-rev20241105-2.0.0 ([#16502](https://github.com/opensearch-project/OpenSearch/pull/16502), [#16548](https://github.com/opensearch-project/OpenSearch/pull/16548), [#16613](https://github.com/opensearch-project/OpenSearch/pull/16613)) - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) -- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.2.2 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612)) +- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.3.0 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612), [#16854](https://github.com/opensearch-project/OpenSearch/pull/16854)) - Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.47 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807)) -- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.1.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610)) +- Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897)) - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655)) - Bump `Netty` from 4.1.114.Final to 4.1.115.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661)) - Bump `org.xerial.snappy:snappy-java` from 1.1.10.6 to 1.1.10.7 ([#16665](https://github.com/opensearch-project/OpenSearch/pull/16665)) - Bump `codecov/codecov-action` from 4 to 5 ([#16667](https://github.com/opensearch-project/OpenSearch/pull/16667)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.2
([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.3 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718), [#16858](https://github.com/opensearch-project/OpenSearch/pull/16858)) - Bump `jackson` from 2.17.2 to 2.18.2 ([#16733](https://github.com/opensearch-project/OpenSearch/pull/16733)) -- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.12 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716)) +- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.15 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716), [#16898](https://github.com/opensearch-project/OpenSearch/pull/16898)) - Bump `com.azure:azure-identity` from 1.13.2 to 1.14.2 ([#16778](https://github.com/opensearch-project/OpenSearch/pull/16778)) +- Bump Apache Lucene from 9.12.0 to 9.12.1 ([#16846](https://github.com/opensearch-project/OpenSearch/pull/16846)) +- Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855)) +- Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857)) +- Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856)) +- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895)) +- Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896)) +- Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918)) +- Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919)) +- Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951)) +- Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) +- Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909)) ### Deprecated - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712)) @@ -67,6 +87,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Ensure consistency of system flag on IndexMetadata after diff is applied ([#16644](https://github.com/opensearch-project/OpenSearch/pull/16644)) - Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763)) - Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) +- Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) +- Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) +- Always 
use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) +- Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) ### Security diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index a74781ac44720..6842f0e541abe 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -229,8 +229,7 @@ public Set<File> getJarsToScan() { @TaskAction public void runThirdPartyAudit() throws IOException { Set<File> jars = getJarsToScan(); - - extractJars(jars); + Set<File> extractedJars = extractJars(jars); final String forbiddenApisOutput = runForbiddenAPIsCli(); @@ -248,7 +247,7 @@ public void runThirdPartyAudit() throws IOException { Set<String> jdkJarHellClasses = null; if (this.jarHellEnabled) { - jdkJarHellClasses = runJdkJarHellCheck(); + jdkJarHellClasses = runJdkJarHellCheck(extractedJars); } if (missingClassExcludes != null) { @@ -301,16 +300,26 @@ private void logForbiddenAPIsOutput(String forbiddenApisOutput) { getLogger().error("Forbidden APIs output:\n{}==end of forbidden APIs==", forbiddenApisOutput); } - private void extractJars(Set<File> jars) { + /** + * Extract project jars to build directory as specified by getJarExpandDir. + * Handle multi release jars by keeping versions closest to `targetCompatibility` version. + * @param jars to extract to build dir + * @return File set of extracted jars + */ + private Set<File> extractJars(Set<File> jars) { + Set<File> extractedJars = new TreeSet<>(); File jarExpandDir = getJarExpandDir(); // We need to clean up to make sure old dependencies don't linger getProject().delete(jarExpandDir); jars.forEach(jar -> { + String jarPrefix = jar.getName().replace(".jar", ""); + File jarSubDir = new File(jarExpandDir, jarPrefix); + extractedJars.add(jarSubDir); FileTree jarFiles = getProject().zipTree(jar); getProject().copy(spec -> { spec.from(jarFiles); - spec.into(jarExpandDir); + spec.into(jarSubDir); // exclude classes from multi release jars spec.exclude("META-INF/versions/**"); }); @@ -329,7 +338,7 @@ private void extractJars(Set<File> jars) { Integer.parseInt(targetCompatibility.get().getMajorVersion()) ).forEach(majorVersion -> getProject().copy(spec -> { spec.from(getProject().zipTree(jar)); - spec.into(jarExpandDir); + spec.into(jarSubDir); String metaInfPrefix = "META-INF/versions/" + majorVersion; spec.include(metaInfPrefix + "/**"); // Drop the version specific prefix @@ -337,6 +346,8 @@ private void extractJars(Set<File> jars) { spec.setIncludeEmptyDirs(false); })); }); + + return extractedJars; } private void assertNoJarHell(Set<String> jdkJarHellClasses) { @@ -398,7 +409,12 @@ private String runForbiddenAPIsCli() throws IOException { return forbiddenApisOutput; } - private Set<String> runJdkJarHellCheck() throws IOException { + /** + * Execute java with JDK_JAR_HELL_MAIN_CLASS against provided jars with OpenSearch core in the classpath. + * @param jars to scan for jarHell violations. + * @return standard out of jarHell process.
+ */ + private Set<String> runJdkJarHellCheck(Set<File> jars) throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class); ExecResult execResult = execOps.getExecOps().javaexec(spec -> { @@ -407,9 +423,8 @@ private Set<String> runJdkJarHellCheck() throws IOException { getRuntimeConfiguration(), getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) ); - spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS); - spec.args(getJarExpandDir()); + spec.args(jars); spec.setIgnoreExitValue(true); if (javaHome != null) { spec.setExecutable(javaHome + "/bin/java"); diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 3db2a6e7c2733..83bec727b1502 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.24.2" + implementation "org.apache.logging.log4j:log4j-core:2.24.3" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 25af649bb4aed..e1fa4de5a0caa 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.10.0" + id "com.netflix.nebula.ospackage-base" version "11.10.1" } void addProcessFilesTask(String type, boolean jdk) { diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index be59e1d3a5ab6..f357fb248520c 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -1,6 +1,6 @@ [versions] opensearch = "3.0.0" -lucene = "9.12.0" +lucene = "9.12.1" bundled_jdk_vendor = "adoptium" bundled_jdk = "23.0.1+11" @@ -27,7 +27,7 @@ google_http_client = "1.44.1" google_auth = "1.29.0" tdigest = "3.3" hdrhistogram = "2.2.2" -grpc = "1.68.0" +grpc = "1.68.2" # when updating the JNA version, also update the version in buildSrc/build.gradle jna = "5.13.0" diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 751da941d25dd..5a98a60e806ea 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -170,7 +170,6 @@ configure([ project(":libs:opensearch-common"), project(":libs:opensearch-core"), project(":libs:opensearch-compress"), - project(":plugins:events-correlation-engine"), project(":server") ]) { project.tasks.withType(MissingJavadocTask) { diff --git a/libs/core/licenses/lucene-core-9.12.0.jar.sha1 b/libs/core/licenses/lucene-core-9.12.0.jar.sha1 deleted file mode 100644 index e55f896dedb63..0000000000000 --- a/libs/core/licenses/lucene-core-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fdb055d569bb20bfce9618fe2b01c29bab7f290c \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.12.1.jar.sha1 b/libs/core/licenses/lucene-core-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2521c91a81d64 --- /dev/null +++ b/libs/core/licenses/lucene-core-9.12.1.jar.sha1 @@ -0,0 +1 @@ +91447c90c1180122142773b5baddaf8547124794 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index ec0a18dbbf882..dd804fcc6db70 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++
b/libs/core/src/main/java/org/opensearch/Version.java @@ -112,9 +112,9 @@ public class Version implements Comparable<Version>, ToXContentFragment { public static final Version V_2_17_1 = new Version(2170199, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_17_2 = new Version(2170299, org.apache.lucene.util.Version.LUCENE_9_11_1); public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0); - public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_0); + public static final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version CURRENT = V_3_0_0; public static Version fromId(int id) { diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 deleted file mode 100644 index 476049a66cc08..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ba843374a0aab3dfe0b11cb28b251844d85bf5b \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..9e0a5c2d7df21 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.12.1.jar.sha1 @@ -0,0 +1 @@ +667ee99f31c8e42eac70b0adcf8deb4232935430 \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java index 55dc23f665d2e..b3f6f7d0730fd 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/PainlessModulePlugin.java @@ -66,6 +66,7 @@ import org.opensearch.script.ScriptContext; import org.opensearch.script.ScriptEngine; import org.opensearch.script.ScriptService; +import org.opensearch.script.UpdateScript; import org.opensearch.search.aggregations.pipeline.MovingFunctionScript; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -109,6 +110,11 @@ public final class PainlessModulePlugin extends Plugin implements ScriptPlugin, ingest.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.ingest.txt")); map.put(IngestScript.CONTEXT, ingest); + // Functions available to update scripts + List<Allowlist> update = new ArrayList<>(Allowlist.BASE_ALLOWLISTS); + update.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.update.txt")); + map.put(UpdateScript.CONTEXT, update); + // Functions available to derived fields List<Allowlist> derived = new ArrayList<>(Allowlist.BASE_ALLOWLISTS); derived.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.derived.txt")); diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt
b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt new file mode 100644 index 0000000000000..144614b3862b0 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.update.txt @@ -0,0 +1,14 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +# This file contains an allowlist for the update scripts + +class java.lang.String { + String org.opensearch.painless.api.Augmentation sha1() + String org.opensearch.painless.api.Augmentation sha256() +} diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml index cb118ed9d562f..e0f3068810ed8 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/15_update.yml @@ -123,3 +123,39 @@ - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.type: "illegal_argument_exception" } - match: { error.reason: "Iterable object is self-referencing itself" } + +# update script supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Update script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: test_1 + id: 1 + body: + foo: bar + + - do: + update: + index: test_1 + id: 1 + body: + script: + lang: painless + source: "ctx._source.foo_sha1 = ctx._source.foo.sha1();ctx._source.foo_sha256 = ctx._source.foo.sha256();" + + - match: { _index: test_1 } + - match: { _id: "1" } + - match: { _version: 2 } + + - do: + get: + index: test_1 + id: 1 + + - match: { _source.foo: bar } + - match: { _source.foo_sha1: "62cdb7020ff920e5aa642c3d4066950dd1f01f4d" } + - match: { _source.foo_sha256: "fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml index 9c38b13bb1ff0..5c218aa00ca4f 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/85_scripting.yml @@ -440,3 +440,41 @@ lang: painless source: syntax errors are fun! 
- match: {error.reason: 'compile error'} + +# script in reindex supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: twitter + id: 1 + body: { "user": "foobar" } + - do: + indices.refresh: {} + + - do: + reindex: + refresh: true + body: + source: + index: twitter + dest: + index: new_twitter + script: + lang: painless + source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256() + - match: {created: 1} + - match: {noops: 0} + + - do: + get: + index: new_twitter + id: 1 + + - match: { _source.user: foobar } + - match: { _source.user_sha1: "8843d7f92416211de9ebb963ff4ce28125932878" } + - match: { _source.user_sha256: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" } diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml index a8de49d812677..b52b1428e08bb 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/80_scripting.yml @@ -432,3 +432,38 @@ lang: painless source: syntax errors are fun! - match: {error.reason: 'compile error'} + +# script in update_by_query supports java.lang.String.sha1() and java.lang.String.sha256() methods +# related issue: https://github.com/opensearch-project/OpenSearch/issues/16423 +--- +"Script supports sha1() and sha256() method for strings": + - skip: + version: " - 2.18.99" + reason: "introduced in 2.19.0" + - do: + index: + index: twitter + id: 1 + body: { "user": "foobar" } + - do: + indices.refresh: {} + + - do: + update_by_query: + index: twitter + refresh: true + body: + script: + lang: painless + source: ctx._source.user_sha1 = ctx._source.user.sha1();ctx._source.user_sha256 = ctx._source.user.sha256() + - match: {updated: 1} + - match: {noops: 0} + + - do: + get: + index: twitter + id: 1 + + - match: { _source.user: foobar } + - match: { _source.user_sha1: "8843d7f92416211de9ebb963ff4ce28125932878" } + - match: { _source.user_sha256: "c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2" } diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 deleted file mode 100644 index 31398b27708a3..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9232b6a4882979118d3281b98dfdb6e0e1cb5ca \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..acb73de8b5dc9 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.1.jar.sha1 @@ -0,0 +1 @@ +abaef4767ad64289e62abdd4606bf6ed2ddea0fd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 deleted file mode 100644 index fa4c9d2d09d6e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -a3a6950ffc22e76a082e1b3cefb022b9f7870d29 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..916778086a6bd --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.1.jar.sha1 @@ -0,0 +1 @@ +635c41143b896f402589d29e33695dcfabae9cc5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 deleted file mode 100644 index 576b924286d2d..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e71f85b72ed3939039ba8897b28b065dd11918b9 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..9c057370df5d1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e265410a6a4d9cd23b2e9c73321e6bd307bc1422 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 deleted file mode 100644 index c8c146bbd0d25..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6baa3ae7ab20d6e644cf0bedb271c50a44c0e259 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..30db9fc8d69e2 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.1.jar.sha1 @@ -0,0 +1 @@ +3787b8edc0cfad21998abc6aeb9d2cbf152b4b26 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 deleted file mode 100644 index 54ea0b19f2a7b..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f183e1e8b1eaaa4dec444774a285bb8b66518522 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..96f8d70e6ee53 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e935f600bf153c46f5725198ca9352c32025f274 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 deleted file mode 100644 index 5442a40f5bba2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b81a609934e65d12ab9d2d84bc2ea6f56a360e57 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..d6d5f1c2609ff --- /dev/null +++ 
b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.1.jar.sha1 @@ -0,0 +1 @@ +c4e1c94b1adbd1cb9dbdc0d3c2d2c33beabfc777 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 deleted file mode 100644 index 60fd4015cfde0..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bec069f286b45f20b743c81e84202369cd0467e7 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..661f3062458e2 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d8e4716dab6d829e7b37a8b185cbd242650aeb9e \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 deleted file mode 100644 index bf45716c5b8ce..0000000000000 --- a/plugins/discovery-gce/licenses/grpc-api-1.68.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/discovery-gce/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/events-correlation-engine/build.gradle b/plugins/events-correlation-engine/build.gradle deleted file mode 100644 index c3eff30012b1d..0000000000000 --- a/plugins/events-correlation-engine/build.gradle +++ /dev/null @@ -1,21 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - * - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -apply plugin: 'opensearch.java-rest-test' -apply plugin: 'opensearch.internal-cluster-test' - -opensearchplugin { - description 'OpenSearch Events Correlation Engine.' - classname 'org.opensearch.plugin.correlation.EventsCorrelationPlugin' -} - -dependencies { -} diff --git a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java b/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java deleted file mode 100644 index 028848a91213e..0000000000000 --- a/plugins/events-correlation-engine/src/internalClusterTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTransportIT.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation; - -import org.apache.lucene.search.join.ScoreMode; -import org.opensearch.action.admin.cluster.node.info.NodeInfo; -import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.query.NestedQueryBuilder; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse; -import org.opensearch.plugin.correlation.rules.model.CorrelationQuery; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.PluginInfo; -import org.opensearch.rest.RestRequest; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.Assert; - -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * Transport Action tests for events-correlation-plugin - */ -public class EventsCorrelationPluginTransportIT extends OpenSearchIntegTestCase { - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(EventsCorrelationPlugin.class); - } - - /** - * test events-correlation-plugin is installed - */ - public void testPluginsAreInstalled() { - NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); - nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); - NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); - List pluginInfos = nodesInfoResponse.getNodes() - .stream() - .flatMap( - (Function>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() - ) - .collect(Collectors.toList()); - Assert.assertTrue( - pluginInfos.stream() - .anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.correlation.EventsCorrelationPlugin")) - ); - } - - /** - * test creating a correlation rule - * @throws Exception Exception - */ - public void testCreatingACorrelationRule() throws Exception { - List correlationQueries = Arrays.asList( - new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", "@timestamp", List.of("s3")), - new CorrelationQuery("app_logs", "keywords:PermissionDenied", "@timestamp", List.of("others_application")) - ); - CorrelationRule correlationRule = new CorrelationRule("s3 to app logs", correlationQueries); - IndexCorrelationRuleRequest request = new IndexCorrelationRuleRequest(correlationRule, RestRequest.Method.POST); - - IndexCorrelationRuleResponse response = client().execute(IndexCorrelationRuleAction.INSTANCE, request).get(); - Assert.assertEquals(RestStatus.CREATED, response.getStatus()); - - NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery( - "correlate", - QueryBuilders.matchQuery("correlate.index", "s3_access_logs"), - ScoreMode.None - ); - SearchSourceBuilder searchSourceBuilder = new 
SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.fetchSource(true); - - SearchRequest searchRequest = new SearchRequest(); - searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX); - searchRequest.source(searchSourceBuilder); - - SearchResponse searchResponse = client().search(searchRequest).get(); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - } - - /** - * test filtering correlation rules - * @throws Exception Exception - */ - public void testFilteringCorrelationRules() throws Exception { - List correlationQueries1 = Arrays.asList( - new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", "@timestamp", List.of("s3")), - new CorrelationQuery("app_logs", "keywords:PermissionDenied", "@timestamp", List.of("others_application")) - ); - CorrelationRule correlationRule1 = new CorrelationRule("s3 to app logs", correlationQueries1); - IndexCorrelationRuleRequest request1 = new IndexCorrelationRuleRequest(correlationRule1, RestRequest.Method.POST); - client().execute(IndexCorrelationRuleAction.INSTANCE, request1).get(); - - List correlationQueries2 = Arrays.asList( - new CorrelationQuery("windows", "host.hostname:EC2AMAZ*", "@timestamp", List.of("windows")), - new CorrelationQuery("app_logs", "endpoint:/customer_records.txt", "@timestamp", List.of("others_application")) - ); - CorrelationRule correlationRule2 = new CorrelationRule("windows to app logs", correlationQueries2); - IndexCorrelationRuleRequest request2 = new IndexCorrelationRuleRequest(correlationRule2, RestRequest.Method.POST); - client().execute(IndexCorrelationRuleAction.INSTANCE, request2).get(); - - NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery( - "correlate", - QueryBuilders.matchQuery("correlate.index", "s3_access_logs"), - ScoreMode.None - ); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.fetchSource(true); - - SearchRequest searchRequest = new SearchRequest(); - searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX); - searchRequest.source(searchSourceBuilder); - - SearchResponse searchResponse = client().search(searchRequest).get(); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - } - - /** - * test creating a correlation rule with no timestamp field - * @throws Exception Exception - */ - @SuppressWarnings("unchecked") - public void testCreatingACorrelationRuleWithNoTimestampField() throws Exception { - List correlationQueries = Arrays.asList( - new CorrelationQuery("s3_access_logs", "aws.cloudtrail.eventName:ReplicateObject", null, List.of("s3")), - new CorrelationQuery("app_logs", "keywords:PermissionDenied", null, List.of("others_application")) - ); - CorrelationRule correlationRule = new CorrelationRule("s3 to app logs", correlationQueries); - IndexCorrelationRuleRequest request = new IndexCorrelationRuleRequest(correlationRule, RestRequest.Method.POST); - - IndexCorrelationRuleResponse response = client().execute(IndexCorrelationRuleAction.INSTANCE, request).get(); - Assert.assertEquals(RestStatus.CREATED, response.getStatus()); - - NestedQueryBuilder queryBuilder = QueryBuilders.nestedQuery( - "correlate", - QueryBuilders.matchQuery("correlate.index", "s3_access_logs"), - ScoreMode.None - ); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.fetchSource(true); - - SearchRequest searchRequest = new 
SearchRequest(); - searchRequest.indices(CorrelationRule.CORRELATION_RULE_INDEX); - searchRequest.source(searchSourceBuilder); - - SearchResponse searchResponse = client().search(searchRequest).get(); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - Assert.assertEquals( - "_timestamp", - ((List>) (searchResponse.getHits().getHits()[0].getSourceAsMap().get("correlate"))).get(0) - .get("timestampField") - ); - } -} diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java deleted file mode 100644 index 414fe1948f053..0000000000000 --- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/CorrelationVectorsEngineIT.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation; - -import org.apache.hc.core5.http.Header; -import org.apache.hc.core5.http.HttpEntity; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.ResponseException; -import org.opensearch.client.RestClient; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.common.Strings; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.IndexSettings; -import org.opensearch.test.rest.OpenSearchRestTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * Correlation Vectors Engine e2e tests - */ -public class CorrelationVectorsEngineIT extends OpenSearchRestTestCase { - - private static final int DIMENSION = 4; - private static final String PROPERTIES_FIELD_NAME = "properties"; - private static final String TYPE_FIELD_NAME = "type"; - private static final String CORRELATION_VECTOR_TYPE = "correlation_vector"; - private static final String DIMENSION_FIELD_NAME = "dimension"; - private static final int M = 16; - private static final int EF_CONSTRUCTION = 128; - private static final String INDEX_NAME = "test-index-1"; - private static final Float[][] TEST_VECTORS = new Float[][] { - { 1.0f, 1.0f, 1.0f, 1.0f }, - { 2.0f, 2.0f, 2.0f, 2.0f }, - { 3.0f, 3.0f, 3.0f, 3.0f } }; - private static final float[][] TEST_QUERY_VECTORS = new float[][] { - { 1.0f, 1.0f, 1.0f, 1.0f }, - { 2.0f, 2.0f, 2.0f, 2.0f }, - { 3.0f, 3.0f, 3.0f, 3.0f } }; - private static final Map> VECTOR_SIMILARITY_TO_SCORE = Map.of( - VectorSimilarityFunction.EUCLIDEAN, - (similarity) -> 1 / (1 + similarity), - VectorSimilarityFunction.DOT_PRODUCT, - (similarity) -> (1 + similarity) / 2, - VectorSimilarityFunction.COSINE, - (similarity) -> (1 + similarity) / 2 - ); - - /** - * test the e2e storage and query layer of events-correlation-engine - * @throws 
IOException IOException - */ - @SuppressWarnings("unchecked") - public void testQuery() throws IOException { - String textField = "text-field"; - String luceneField = "lucene-field"; - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject(PROPERTIES_FIELD_NAME) - .startObject(textField) - .field(TYPE_FIELD_NAME, "text") - .endObject() - .startObject(luceneField) - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, DIMENSION) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", M) - .field("ef_construction", EF_CONSTRUCTION) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); - - String mapping = builder.toString(); - createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); - - for (int idx = 0; idx < TEST_VECTORS.length; ++idx) { - addCorrelationDoc( - INDEX_NAME, - String.valueOf(idx + 1), - List.of(textField, luceneField), - List.of(java.util.UUID.randomUUID().toString(), TEST_VECTORS[idx]) - ); - } - refreshAllIndices(); - Assert.assertEquals(TEST_VECTORS.length, getDocCount(INDEX_NAME)); - - int k = 2; - for (float[] query : TEST_QUERY_VECTORS) { - - String correlationQuery = "{\n" - + " \"query\": {\n" - + " \"correlation\": {\n" - + " \"lucene-field\": {\n" - + " \"vector\": \n" - + Arrays.toString(query) - + " ,\n" - + " \"k\": 2,\n" - + " \"boost\": 1\n" - + " }\n" - + " }\n" - + " }\n" - + "}"; - - Response response = searchCorrelationIndex(INDEX_NAME, correlationQuery, k); - Map responseBody = entityAsMap(response); - Assert.assertEquals(2, ((List) ((Map) responseBody.get("hits")).get("hits")).size()); - @SuppressWarnings("unchecked") - double actualScore1 = Double.parseDouble( - ((List>) ((Map) responseBody.get("hits")).get("hits")).get(0).get("_score").toString() - ); - @SuppressWarnings("unchecked") - double actualScore2 = Double.parseDouble( - ((List>) ((Map) responseBody.get("hits")).get("hits")).get(1).get("_score").toString() - ); - @SuppressWarnings("unchecked") - List hit1 = ((Map>) ((List>) ((Map) responseBody.get("hits")) - .get("hits")).get(0).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList()); - float[] resultVector1 = new float[hit1.size()]; - for (int i = 0; i < hit1.size(); ++i) { - resultVector1[i] = hit1.get(i); - } - - @SuppressWarnings("unchecked") - List hit2 = ((Map>) ((List>) ((Map) responseBody.get("hits")) - .get("hits")).get(1).get("_source")).get(luceneField).stream().map(Double::floatValue).collect(Collectors.toList()); - float[] resultVector2 = new float[hit2.size()]; - for (int i = 0; i < hit2.size(); ++i) { - resultVector2[i] = hit2.get(i); - } - - double rawScore1 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector1, query); - Assert.assertEquals(rawScore1, actualScore1, 0.0001); - double rawScore2 = VectorSimilarityFunction.EUCLIDEAN.compare(resultVector2, query); - Assert.assertEquals(rawScore2, actualScore2, 0.0001); - } - } - - /** - * unhappy test for the e2e storage and query layer of events-correlation-engine with no index exist - */ - public void testQueryWithNoIndexExist() { - float[] query = new float[] { 1.0f, 1.0f, 1.0f, 1.0f }; - String correlationQuery = "{\n" - + " \"query\": {\n" - + " \"correlation\": {\n" - + " \"lucene-field\": {\n" - + " \"vector\": \n" - + Arrays.toString(query) - + " ,\n" - + " \"k\": 2,\n" - + " \"boost\": 1\n" - + " }\n" - + " }\n" - 
+ " }\n" - + "}"; - Exception ex = assertThrows(ResponseException.class, () -> { searchCorrelationIndex(INDEX_NAME, correlationQuery, 2); }); - String expectedMessage = String.format(Locale.ROOT, "no such index [%s]", INDEX_NAME); - String actualMessage = ex.getMessage(); - Assert.assertTrue(actualMessage.contains(expectedMessage)); - } - - /** - * unhappy test for the e2e storage and query layer of events-correlation-engine with wrong mapping - */ - public void testQueryWithWrongMapping() throws IOException { - String textField = "text-field"; - String luceneField = "lucene-field"; - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .startObject(PROPERTIES_FIELD_NAME) - .startObject(textField) - .field(TYPE_FIELD_NAME, "text") - .endObject() - .startObject(luceneField) - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field("test", DIMENSION) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", M) - .field("ef_construction", EF_CONSTRUCTION) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); - - String mapping = builder.toString(); - Exception ex = assertThrows(ResponseException.class, () -> { - createTestIndexWithMappingJson(client(), INDEX_NAME, mapping, getCorrelationDefaultIndexSettings()); - }); - - String expectedMessage = String.format( - Locale.ROOT, - "unknown parameter [test] on mapper [%s] of type [correlation_vector]", - luceneField - ); - String actualMessage = ex.getMessage(); - Assert.assertTrue(actualMessage.contains(expectedMessage)); - } - - private String createTestIndexWithMappingJson(RestClient client, String index, String mapping, Settings settings) throws IOException { - Request request = new Request("PUT", "/" + index); - String entity = "{\"settings\": " + Strings.toString(MediaTypeRegistry.JSON, settings); - if (mapping != null) { - entity = entity + ",\"mappings\" : " + mapping; - } - - entity = entity + "}"; - if (!settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)) { - expectSoftDeletesWarning(request, index); - } - - request.setJsonEntity(entity); - client.performRequest(request); - return index; - } - - private Settings getCorrelationDefaultIndexSettings() { - return Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).put("index.correlation", true).build(); - } - - private void addCorrelationDoc(String index, String docId, List fieldNames, List vectors) throws IOException { - Request request = new Request("POST", "/" + index + "/_doc/" + docId + "?refresh=true"); - - XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); - for (int i = 0; i < fieldNames.size(); i++) { - builder.field(fieldNames.get(i), vectors.get(i)); - } - builder.endObject(); - - request.setJsonEntity(builder.toString()); - Response response = client().performRequest(request); - assertEquals(request.getEndpoint() + ": failed", RestStatus.CREATED, RestStatus.fromCode(response.getStatusLine().getStatusCode())); - } - - private Response searchCorrelationIndex(String index, String correlationQuery, int resultSize) throws IOException { - Request request = new Request("POST", "/" + index + "/_search"); - - request.addParameter("size", Integer.toString(resultSize)); - request.addParameter("explain", Boolean.toString(true)); - request.addParameter("search_type", "query_then_fetch"); - request.setJsonEntity(correlationQuery); - - Response response = client().performRequest(request); 
- Assert.assertEquals("Search failed", RestStatus.OK, restStatus(response)); - return response; - } - - private int getDocCount(String index) throws IOException { - Response response = makeRequest( - client(), - "GET", - String.format(Locale.getDefault(), "/%s/_count", index), - Collections.emptyMap(), - null - ); - Assert.assertEquals(RestStatus.OK, restStatus(response)); - return Integer.parseInt(entityAsMap(response).get("count").toString()); - } - - private Response makeRequest( - RestClient client, - String method, - String endpoint, - Map params, - HttpEntity entity, - Header... headers - ) throws IOException { - Request request = new Request(method, endpoint); - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - - for (Header header : headers) { - options.addHeader(header.getName(), header.getValue()); - } - request.setOptions(options.build()); - request.addParameters(params); - if (entity != null) { - request.setEntity(entity); - } - return client.performRequest(request); - } - - private RestStatus restStatus(Response response) { - return RestStatus.fromCode(response.getStatusLine().getStatusCode()); - } -} diff --git a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java b/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java deleted file mode 100644 index 3791a5cdf5db0..0000000000000 --- a/plugins/events-correlation-engine/src/javaRestTest/java/org/opensearch/plugin/correlation/EventsCorrelationPluginRestIT.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation; - -import org.opensearch.action.search.SearchResponse; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.test.rest.OpenSearchRestTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -/** - * Rest Action tests for events-correlation-plugin - */ -public class EventsCorrelationPluginRestIT extends OpenSearchRestTestCase { - - /** - * test events-correlation-plugin is installed - * @throws IOException IOException - */ - @SuppressWarnings("unchecked") - public void testPluginsAreInstalled() throws IOException { - Request request = new Request("GET", "/_cat/plugins?s=component&h=name,component,version,description&format=json"); - Response response = client().performRequest(request); - List pluginsList = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.getEntity().getContent() - ).list(); - Assert.assertTrue( - pluginsList.stream() - .map(o -> (Map) o) - .anyMatch(plugin -> plugin.get("component").equals("events-correlation-engine")) - ); - } - - /** - * test creating a correlation rule - * @throws IOException IOException - */ - public void testCreatingACorrelationRule() throws IOException { - Request request = new Request("POST", "/_correlation/rules"); - request.setJsonEntity(sampleCorrelationRule()); - Response response = client().performRequest(request); - - Assert.assertEquals(201, response.getStatusLine().getStatusCode()); - - Map responseMap = entityAsMap(response); - String id = responseMap.get("_id").toString(); - - request = new Request("POST", "/.opensearch-correlation-rules-config/_search"); - request.setJsonEntity(matchIdQuery(id)); - response = client().performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - SearchResponse searchResponse = SearchResponse.fromXContent( - createParser(JsonXContent.jsonXContent, response.getEntity().getContent()) - ); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - } - - /** - * test creating a correlation rule with no timestamp field - * @throws IOException IOException - */ - @SuppressWarnings("unchecked") - public void testCreatingACorrelationRuleWithNoTimestampField() throws IOException { - Request request = new Request("POST", "/_correlation/rules"); - request.setJsonEntity(sampleCorrelationRuleWithNoTimestamp()); - Response response = client().performRequest(request); - - Assert.assertEquals(201, response.getStatusLine().getStatusCode()); - - Map responseMap = entityAsMap(response); - String id = responseMap.get("_id").toString(); - - request = new Request("POST", "/.opensearch-correlation-rules-config/_search"); - request.setJsonEntity(matchIdQuery(id)); - response = client().performRequest(request); - - Assert.assertEquals(200, response.getStatusLine().getStatusCode()); - SearchResponse searchResponse = SearchResponse.fromXContent( - createParser(JsonXContent.jsonXContent, response.getEntity().getContent()) - ); - Assert.assertEquals(1L, searchResponse.getHits().getTotalHits().value); - Assert.assertEquals( - "_timestamp", - ((List>) (searchResponse.getHits().getHits()[0].getSourceAsMap().get("correlate"))).get(0) - .get("timestampField") - ); - } - - private 
String sampleCorrelationRule() { - return "{\n" - + " \"name\": \"s3 to app logs\",\n" - + " \"correlate\": [\n" - + " {\n" - + " \"index\": \"s3_access_logs\",\n" - + " \"query\": \"aws.cloudtrail.eventName:ReplicateObject\",\n" - + " \"timestampField\": \"@timestamp\",\n" - + " \"tags\": [\n" - + " \"s3\"\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"index\": \"app_logs\",\n" - + " \"query\": \"keywords:PermissionDenied\",\n" - + " \"timestampField\": \"@timestamp\",\n" - + " \"tags\": [\n" - + " \"others_application\"\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}"; - } - - private String sampleCorrelationRuleWithNoTimestamp() { - return "{\n" - + " \"name\": \"s3 to app logs\",\n" - + " \"correlate\": [\n" - + " {\n" - + " \"index\": \"s3_access_logs\",\n" - + " \"query\": \"aws.cloudtrail.eventName:ReplicateObject\",\n" - + " \"tags\": [\n" - + " \"s3\"\n" - + " ]\n" - + " },\n" - + " {\n" - + " \"index\": \"app_logs\",\n" - + " \"query\": \"keywords:PermissionDenied\",\n" - + " \"tags\": [\n" - + " \"others_application\"\n" - + " ]\n" - + " }\n" - + " ]\n" - + "}"; - } - - private String matchIdQuery(String id) { - return "{\n" + " \"query\" : {\n" + " \"match\":{\n" + " \"_id\": \"" + id + "\"\n" + " }\n" + " }\n" + "}"; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java deleted file mode 100644 index 9637042974d03..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/EventsCorrelationPlugin.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation; - -import org.opensearch.action.ActionRequest; -import org.opensearch.client.Client; -import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.IndexScopedSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.settings.SettingsFilter; -import org.opensearch.core.action.ActionResponse; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.env.Environment; -import org.opensearch.env.NodeEnvironment; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.codec.CodecServiceFactory; -import org.opensearch.index.mapper.Mapper; -import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecService; -import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; -import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryBuilder; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.resthandler.RestIndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.transport.TransportIndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.settings.EventsCorrelationSettings; -import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices; -import org.opensearch.plugins.ActionPlugin; -import org.opensearch.plugins.EnginePlugin; -import org.opensearch.plugins.MapperPlugin; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.SearchPlugin; -import org.opensearch.repositories.RepositoriesService; -import org.opensearch.rest.RestController; -import org.opensearch.rest.RestHandler; -import org.opensearch.script.ScriptService; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.watcher.ResourceWatcherService; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.function.Supplier; - -/** - * Plugin class for events-correlation-engine - */ -public class EventsCorrelationPlugin extends Plugin implements ActionPlugin, MapperPlugin, SearchPlugin, EnginePlugin { - - /** - * events-correlation-engine base uri - */ - public static final String PLUGINS_BASE_URI = "/_correlation"; - /** - * events-correlation-engine rules uri - */ - public static final String CORRELATION_RULES_BASE_URI = PLUGINS_BASE_URI + "/rules"; - - private CorrelationRuleIndices correlationRuleIndices; - - /** - * Default constructor - */ - public EventsCorrelationPlugin() {} - - @Override - public Collection createComponents( - Client client, - ClusterService clusterService, - ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, - ScriptService scriptService, - NamedXContentRegistry xContentRegistry, - Environment environment, - NodeEnvironment nodeEnvironment, - NamedWriteableRegistry namedWriteableRegistry, - IndexNameExpressionResolver indexNameExpressionResolver, - Supplier repositoriesServiceSupplier - ) { - correlationRuleIndices = new CorrelationRuleIndices(client, clusterService); - return 
List.of(correlationRuleIndices); - } - - @Override - public List<RestHandler> getRestHandlers( - Settings settings, - RestController restController, - ClusterSettings clusterSettings, - IndexScopedSettings indexScopedSettings, - SettingsFilter settingsFilter, - IndexNameExpressionResolver indexNameExpressionResolver, - Supplier<DiscoveryNodes> nodesInCluster - ) { - return List.of(new RestIndexCorrelationRuleAction()); - } - - @Override - public Map<String, Mapper.TypeParser> getMappers() { - return Collections.singletonMap(CorrelationVectorFieldMapper.CONTENT_TYPE, new VectorFieldMapper.TypeParser()); - } - - @Override - public Optional<CodecServiceFactory> getCustomCodecServiceFactory(IndexSettings indexSettings) { - if (indexSettings.getValue(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING)) { - return Optional.of(CorrelationCodecService::new); - } - return Optional.empty(); - } - - @Override - public List<QuerySpec<?>> getQueries() { - return Collections.singletonList( - new QuerySpec<>( - CorrelationQueryBuilder.NAME_FIELD.getPreferredName(), - CorrelationQueryBuilder::new, - CorrelationQueryBuilder::parse - ) - ); - } - - @Override - public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { - return List.of(new ActionPlugin.ActionHandler<>(IndexCorrelationRuleAction.INSTANCE, TransportIndexCorrelationRuleAction.class)); - } - - @Override - public List<Setting<?>> getSettings() { - return List.of(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING, EventsCorrelationSettings.CORRELATION_TIME_WINDOW); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java deleted file mode 100644 index fef9200a73091..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContext.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ToXContentFragment; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.mapper.MapperParsingException; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Defines vector similarity function, m and ef_construction hyper parameters field mappings for correlation_vector type.
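- * e.g. a correlation_ctx value such as { "similarityFunction": "EUCLIDEAN", "parameters": { "m": 16, "ef_construction": 128 } } maps to these fields (illustrative values).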
- * - * @opensearch.internal - */ -public class CorrelationParamsContext implements ToXContentFragment, Writeable { - - /** - * Vector Similarity Function field - */ - public static final String VECTOR_SIMILARITY_FUNCTION = "similarityFunction"; - /** - * Parameters field to define m and ef_construction - */ - public static final String PARAMETERS = "parameters"; - - private final VectorSimilarityFunction similarityFunction; - private final Map<String, Object> parameters; - - /** - * Parameterized ctor for CorrelationParamsContext - * @param similarityFunction Vector Similarity Function - * @param parameters Parameters to define m and ef_construction - */ - public CorrelationParamsContext(VectorSimilarityFunction similarityFunction, Map<String, Object> parameters) { - this.similarityFunction = similarityFunction; - this.parameters = parameters; - } - - /** - * Parameterized ctor for CorrelationParamsContext - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationParamsContext(StreamInput sin) throws IOException { - this.similarityFunction = VectorSimilarityFunction.valueOf(sin.readString()); - if (sin.available() > 0) { - this.parameters = sin.readMap(); - } else { - this.parameters = null; - } - } - - /** - * Parse into CorrelationParamsContext - * @param in Object - * @return CorrelationParamsContext - */ - public static CorrelationParamsContext parse(Object in) { - if (!(in instanceof Map)) { - throw new MapperParsingException("Unable to parse CorrelationParamsContext"); - } - - @SuppressWarnings("unchecked") - Map<String, Object> contextMap = (Map<String, Object>) in; - VectorSimilarityFunction similarityFunction = VectorSimilarityFunction.EUCLIDEAN; - Map<String, Object> parameters = new HashMap<>(); - - if (contextMap.containsKey(VECTOR_SIMILARITY_FUNCTION)) { - Object value = contextMap.get(VECTOR_SIMILARITY_FUNCTION); - - if (value != null && !(value instanceof String)) { - throw new MapperParsingException(String.format(Locale.getDefault(), "%s must be a string", VECTOR_SIMILARITY_FUNCTION)); - } - - try { - similarityFunction = VectorSimilarityFunction.valueOf((String) value); - } catch (IllegalArgumentException ex) { - throw new MapperParsingException(String.format(Locale.getDefault(), "Invalid %s: %s", VECTOR_SIMILARITY_FUNCTION, value)); - } - } - if (contextMap.containsKey(PARAMETERS)) { - Object value = contextMap.get(PARAMETERS); - if (!(value instanceof Map)) { - throw new MapperParsingException("Unable to parse parameters for Correlation context"); - } - - @SuppressWarnings("unchecked") - Map<String, Object> valueMap = (Map<String, Object>) value; - parameters.putAll(valueMap); - } - return new CorrelationParamsContext(similarityFunction, parameters); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(VECTOR_SIMILARITY_FUNCTION, similarityFunction.name()); - if (parameters == null) { - builder.field(PARAMETERS, (String) null); - } else { - builder.startObject(PARAMETERS); - for (Map.Entry<String, Object> parameter : parameters.entrySet()) { - builder.field(parameter.getKey(), parameter.getValue()); - } - builder.endObject(); - } - builder.endObject(); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(similarityFunction.name()); - if (this.parameters != null) { - out.writeMap(parameters); - } - } - - /** - * get Vector Similarity Function - * @return Vector Similarity Function - */ - public VectorSimilarityFunction getSimilarityFunction() { - return similarityFunction; - } - - /** - * Get Parameters to define m and ef_construction - * @return Parameters to define m and ef_construction - */ - public Map<String, Object> getParameters() { - return parameters; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java deleted file mode 100644 index 61efd6b9a87ae..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/VectorField.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexableFieldType; -import org.apache.lucene.util.BytesRef; -import org.opensearch.common.io.stream.BytesStreamOutput; - -import java.io.IOException; - -/** - * Generic Vector Field defining a correlation vector name, float array. - * - * @opensearch.internal - */ -public class VectorField extends Field { - - /** - * Parameterized ctor for VectorField - * @param name name of the field - * @param value float array value for the field - * @param type type of the field - */ - public VectorField(String name, float[] value, IndexableFieldType type) { - super(name, new BytesRef(), type); - try { - final byte[] floatToByte = floatToByteArray(value); - this.setBytesValue(floatToByte); - } catch (IOException ex) { - throw new RuntimeException(ex); - } - } - - /** - * converts float array based vector to byte array. - * @param input float array - * @return byte array - */ - protected static byte[] floatToByteArray(float[] input) throws IOException { - BytesStreamOutput objectStream = new BytesStreamOutput(); - objectStream.writeFloatArray(input); - return objectStream.bytes().toBytesRef().bytes; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java deleted file mode 100644 index 00b55eb75995c..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/BasePerFieldCorrelationVectorsFormat.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec; - -import org.apache.lucene.codecs.KnnVectorsFormat; -import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.mapper.CorrelationVectorFieldMapper; - -import java.util.Locale; -import java.util.Map; -import java.util.Optional; -import java.util.function.BiFunction; -import java.util.function.Supplier; - -/** - * Class to define the hyper-parameters m and ef_construction for insert and store of correlation vectors into HNSW graphs based lucene index.
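- * e.g. a correlation_vector field mapped with parameters { "m": 16, "ef_construction": 128 } resolves to formatSupplier.apply(16, 128), while non-correlation fields fall back to defaultFormatSupplier.get() (illustrative values).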
- * - * @opensearch.internal - */ -public abstract class BasePerFieldCorrelationVectorsFormat extends PerFieldKnnVectorsFormat { - /** - * the m hyper-parameter for constructing HNSW graphs; see Lucene's HnswGraph.html javadoc. - */ - public static final String METHOD_PARAMETER_M = "m"; - /** - * the ef_construction hyper-parameter for constructing HNSW graphs; see Lucene's HnswGraph.html javadoc. - */ - public static final String METHOD_PARAMETER_EF_CONSTRUCTION = "ef_construction"; - - private final Optional<MapperService> mapperService; - private final int defaultMaxConnections; - private final int defaultBeamWidth; - private final Supplier<KnnVectorsFormat> defaultFormatSupplier; - private final BiFunction<Integer, Integer, KnnVectorsFormat> formatSupplier; - - /** - * Parameterized ctor of BasePerFieldCorrelationVectorsFormat - * @param mapperService mapper service - * @param defaultMaxConnections default m - * @param defaultBeamWidth default ef_construction - * @param defaultFormatSupplier default format supplier - * @param formatSupplier format supplier - */ - public BasePerFieldCorrelationVectorsFormat( - Optional<MapperService> mapperService, - int defaultMaxConnections, - int defaultBeamWidth, - Supplier<KnnVectorsFormat> defaultFormatSupplier, - BiFunction<Integer, Integer, KnnVectorsFormat> formatSupplier - ) { - this.mapperService = mapperService; - this.defaultMaxConnections = defaultMaxConnections; - this.defaultBeamWidth = defaultBeamWidth; - this.defaultFormatSupplier = defaultFormatSupplier; - this.formatSupplier = formatSupplier; - } - - @Override - public KnnVectorsFormat getKnnVectorsFormatForField(String field) { - if (!isCorrelationVectorFieldType(field)) { - return defaultFormatSupplier.get(); - } - - var type = (CorrelationVectorFieldMapper.CorrelationVectorFieldType) mapperService.orElseThrow( - () -> new IllegalArgumentException( - String.format(Locale.getDefault(), "Cannot read field type for field [%s] because mapper service is not available", field) - ) - ).fieldType(field); - - var params = type.getCorrelationParams().getParameters(); - int maxConnections = getMaxConnections(params); - int beamWidth = getBeamWidth(params); - - return formatSupplier.apply(maxConnections, beamWidth); - } - - private boolean isCorrelationVectorFieldType(final String field) { - return mapperService.isPresent() - && mapperService.get().fieldType(field) instanceof CorrelationVectorFieldMapper.CorrelationVectorFieldType; - } - - private int getMaxConnections(final Map<String, Object> params) { - if (params != null && params.containsKey(METHOD_PARAMETER_M)) { - return (int) params.get(METHOD_PARAMETER_M); - } - return defaultMaxConnections; - } - - private int getBeamWidth(final Map<String, Object> params) { - if (params != null && params.containsKey(METHOD_PARAMETER_EF_CONSTRUCTION)) { - return (int) params.get(METHOD_PARAMETER_EF_CONSTRUCTION); - } - return defaultBeamWidth; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java deleted file mode 100644 index 09d5e1d2c19e3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecService.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.plugin.correlation.core.index.codec; - -import org.apache.lucene.codecs.Codec; -import org.opensearch.index.codec.CodecService; -import org.opensearch.index.codec.CodecServiceConfig; -import org.opensearch.index.mapper.MapperService; - -/** - * custom Correlation Codec Service - * - * @opensearch.internal - */ -public class CorrelationCodecService extends CodecService { - - private final MapperService mapperService; - - /** - * Parameterized ctor for CorrelationCodecService - * @param codecServiceConfig Generic codec service config - */ - public CorrelationCodecService(CodecServiceConfig codecServiceConfig) { - super(codecServiceConfig.getMapperService(), codecServiceConfig.getIndexSettings(), codecServiceConfig.getLogger()); - mapperService = codecServiceConfig.getMapperService(); - } - - @Override - public Codec codec(String name) { - return CorrelationCodecVersion.current().getCorrelationCodecSupplier().apply(super.codec(name), mapperService); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java deleted file mode 100644 index 9dbb695f14b78..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec; - -import org.apache.lucene.backward_codecs.lucene99.Lucene99Codec; -import org.apache.lucene.codecs.Codec; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec; -import org.opensearch.plugin.correlation.core.index.codec.correlation990.PerFieldCorrelationVectorsFormat; - -import java.util.Optional; -import java.util.function.BiFunction; -import java.util.function.Supplier; - -/** - * CorrelationCodecVersion enum - * - * @opensearch.internal - */ -public enum CorrelationCodecVersion { - V_9_9_0( - "CorrelationCodec", - new Lucene99Codec(), - new PerFieldCorrelationVectorsFormat(Optional.empty()), - (userCodec, mapperService) -> new CorrelationCodec(userCodec, new PerFieldCorrelationVectorsFormat(Optional.of(mapperService))), - CorrelationCodec::new - ); - - private static final CorrelationCodecVersion CURRENT = V_9_9_0; - private final String codecName; - private final Codec defaultCodecDelegate; - private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; - private final BiFunction correlationCodecSupplier; - private final Supplier defaultCorrelationCodecSupplier; - - CorrelationCodecVersion( - String codecName, - Codec defaultCodecDelegate, - PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat, - BiFunction correlationCodecSupplier, - Supplier defaultCorrelationCodecSupplier - ) { - this.codecName = codecName; - this.defaultCodecDelegate = defaultCodecDelegate; - this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat; - this.correlationCodecSupplier = correlationCodecSupplier; - this.defaultCorrelationCodecSupplier = defaultCorrelationCodecSupplier; - } - - /** - * get codec name - * @return codec name - */ - 
public String getCodecName() { - return codecName; - } - - /** - * get default codec delegate - * @return default codec delegate - */ - public Codec getDefaultCodecDelegate() { - return defaultCodecDelegate; - } - - /** - * get correlation vectors format - * @return correlation vectors format - */ - public PerFieldCorrelationVectorsFormat getPerFieldCorrelationVectorsFormat() { - return perFieldCorrelationVectorsFormat; - } - - /** - * get correlation codec supplier - * @return correlation codec supplier - */ - public BiFunction getCorrelationCodecSupplier() { - return correlationCodecSupplier; - } - - /** - * get default correlation codec supplier - * @return default correlation codec supplier - */ - public Supplier getDefaultCorrelationCodecSupplier() { - return defaultCorrelationCodecSupplier; - } - - /** - * static method to get correlation codec version - * @return correlation codec version - */ - public static final CorrelationCodecVersion current() { - return CURRENT; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java deleted file mode 100644 index 022972e2e06c3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec.correlation990; - -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.FilterCodec; -import org.apache.lucene.codecs.KnnVectorsFormat; -import org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion; - -/** - * Correlation Codec class - * - * @opensearch.internal - */ -public class CorrelationCodec extends FilterCodec { - private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_9_0; - private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; - - /** - * ctor for CorrelationCodec - */ - public CorrelationCodec() { - this(VERSION.getDefaultCodecDelegate(), VERSION.getPerFieldCorrelationVectorsFormat()); - } - - /** - * Parameterized ctor for CorrelationCodec - * @param delegate codec delegate - * @param perFieldCorrelationVectorsFormat correlation vectors format - */ - public CorrelationCodec(Codec delegate, PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat) { - super(VERSION.getCodecName(), delegate); - this.perFieldCorrelationVectorsFormat = perFieldCorrelationVectorsFormat; - } - - @Override - public KnnVectorsFormat knnVectorsFormat() { - return perFieldCorrelationVectorsFormat; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java deleted file mode 100644 index 89cc0b614a1a5..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java +++ /dev/null @@ 
-1,35 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec.correlation990; - -import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat; - -import java.util.Optional; - -/** - * Class to define the hyper-parameters m and ef_construction for insert and store of correlation vectors into HNSW graphs based lucene index. - */ -public class PerFieldCorrelationVectorsFormat extends BasePerFieldCorrelationVectorsFormat { - - /** - * Parameterized ctor for PerFieldCorrelationVectorsFormat - * @param mapperService mapper service - */ - public PerFieldCorrelationVectorsFormat(final Optional<MapperService> mapperService) { - super( - mapperService, - Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN, - Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH, - Lucene99HnswVectorsFormat::new, - Lucene99HnswVectorsFormat::new - ); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java deleted file mode 100644 index fc2a9de58a73a..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * custom Lucene 9.9 codec package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index.codec.correlation990; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java deleted file mode 100644 index 862b7cd253f04..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -/** - * custom codec package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index.codec; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java deleted file mode 100644 index 18c9dd222e2cf..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapper.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.mapper; - -import org.apache.lucene.codecs.KnnVectorsFormat; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.KnnFloatVectorField; -import org.apache.lucene.document.StoredField; -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.common.Explicit; -import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.ParseContext; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.plugin.correlation.core.index.VectorField; - -import java.io.IOException; -import java.util.Locale; -import java.util.Optional; - -/** - * Field mapper for the correlation vector type - * - * @opensearch.internal - */ -public class CorrelationVectorFieldMapper extends VectorFieldMapper { - - private static final int LUCENE_MAX_DIMENSION = KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS; - - private final FieldType vectorFieldType; - - /** - * Parameterized ctor for CorrelationVectorFieldMapper - * @param input Object containing name of the field, type and other details. 
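- * The mapped dimension is validated below against Lucene's KnnVectorsFormat.DEFAULT_MAX_DIMENSIONS.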
- */ - public CorrelationVectorFieldMapper(final CreateLuceneFieldMapperInput input) { - super( - input.getName(), - input.getMappedFieldType(), - input.getMultiFields(), - input.getCopyTo(), - input.getIgnoreMalformed(), - input.isStored(), - input.isHasDocValues() - ); - - this.correlationParams = input.getCorrelationParams(); - final VectorSimilarityFunction vectorSimilarityFunction = this.correlationParams.getSimilarityFunction(); - - final int dimension = input.getMappedFieldType().getDimension(); - if (dimension > LUCENE_MAX_DIMENSION) { - throw new IllegalArgumentException( - String.format( - Locale.ROOT, - "Dimension value cannot be greater than [%s] but got [%s] for vector [%s]", - LUCENE_MAX_DIMENSION, - dimension, - input.getName() - ) - ); - } - - this.fieldType = KnnFloatVectorField.createFieldType(dimension, vectorSimilarityFunction); - - if (this.hasDocValues) { - this.vectorFieldType = buildDocValuesFieldType(); - } else { - this.vectorFieldType = null; - } - } - - private static FieldType buildDocValuesFieldType() { - FieldType field = new FieldType(); - field.setDocValuesType(DocValuesType.BINARY); - field.freeze(); - return field; - } - - @Override - protected void parseCreateField(ParseContext context, int dimension) throws IOException { - Optional arrayOptional = getFloatsFromContext(context, dimension); - - if (arrayOptional.isEmpty()) { - return; - } - final float[] array = arrayOptional.get(); - - KnnFloatVectorField point = new KnnFloatVectorField(name(), array, fieldType); - - context.doc().add(point); - if (fieldType.stored()) { - context.doc().add(new StoredField(name(), point.toString())); - } - if (hasDocValues && vectorFieldType != null) { - context.doc().add(new VectorField(name(), array, vectorFieldType)); - } - context.path().remove(); - } - - static class CreateLuceneFieldMapperInput { - String name; - - CorrelationVectorFieldType mappedFieldType; - - FieldMapper.MultiFields multiFields; - - FieldMapper.CopyTo copyTo; - - Explicit ignoreMalformed; - boolean stored; - boolean hasDocValues; - - CorrelationParamsContext correlationParams; - - public CreateLuceneFieldMapperInput( - String name, - CorrelationVectorFieldType mappedFieldType, - FieldMapper.MultiFields multiFields, - FieldMapper.CopyTo copyTo, - Explicit ignoreMalformed, - boolean stored, - boolean hasDocValues, - CorrelationParamsContext correlationParams - ) { - this.name = name; - this.mappedFieldType = mappedFieldType; - this.multiFields = multiFields; - this.copyTo = copyTo; - this.ignoreMalformed = ignoreMalformed; - this.stored = stored; - this.hasDocValues = hasDocValues; - this.correlationParams = correlationParams; - } - - public String getName() { - return name; - } - - public CorrelationVectorFieldType getMappedFieldType() { - return mappedFieldType; - } - - public FieldMapper.MultiFields getMultiFields() { - return multiFields; - } - - public FieldMapper.CopyTo getCopyTo() { - return copyTo; - } - - public Explicit getIgnoreMalformed() { - return ignoreMalformed; - } - - public boolean isStored() { - return stored; - } - - public boolean isHasDocValues() { - return hasDocValues; - } - - public CorrelationParamsContext getCorrelationParams() { - return correlationParams; - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java deleted file mode 100644 index 
5ac6d92792295..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/VectorFieldMapper.java +++ /dev/null @@ -1,399 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.mapper; - -import org.apache.lucene.search.FieldExistsQuery; -import org.apache.lucene.search.Query; -import org.opensearch.common.Explicit; -import org.opensearch.common.xcontent.support.XContentMapValues; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.Mapper; -import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.index.mapper.ParametrizedFieldMapper; -import org.opensearch.index.mapper.ParseContext; -import org.opensearch.index.mapper.TextSearchInfo; -import org.opensearch.index.mapper.ValueFetcher; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.QueryShardException; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.search.lookup.SearchLookup; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Optional; - -/** - * Parameterized field mapper for Correlation Vector type - * - * @opensearch.internal - */ -public abstract class VectorFieldMapper extends ParametrizedFieldMapper { - - /** - * name of Correlation Vector type - */ - public static final String CONTENT_TYPE = "correlation_vector"; - /** - * dimension of the correlation vectors - */ - public static final String DIMENSION = "dimension"; - /** - * context e.g. 
parameters and vector similarity function of Correlation Vector type - */ - public static final String CORRELATION_CONTEXT = "correlation_ctx"; - - private static VectorFieldMapper toType(FieldMapper in) { - return (VectorFieldMapper) in; - } - - /** - * definition of VectorFieldMapper.Builder - */ - public static class Builder extends ParametrizedFieldMapper.Builder { - protected Boolean ignoreMalformed; - - protected final Parameter<Boolean> stored = Parameter.boolParam("store", false, m -> toType(m).stored, false); - protected final Parameter<Boolean> hasDocValues = Parameter.boolParam("doc_values", false, m -> toType(m).hasDocValues, true); - protected final Parameter<Integer> dimension = new Parameter<>(DIMENSION, false, () -> -1, (n, c, o) -> { - if (o == null) { - throw new IllegalArgumentException("Dimension cannot be null"); - } - int value; - try { - value = XContentMapValues.nodeIntegerValue(o); - } catch (Exception ex) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "Unable to parse [dimension] from provided value [%s] for vector [%s]", o, name) - ); - } - if (value <= 0) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "Dimension value must be greater than 0 for vector: %s", name) - ); - } - return value; - }, m -> toType(m).dimension); - - protected final Parameter<CorrelationParamsContext> correlationParamsContext = new Parameter<>( - CORRELATION_CONTEXT, - false, - () -> null, - (n, c, o) -> CorrelationParamsContext.parse(o), - m -> toType(m).correlationParams - ); - - protected final Parameter<Map<String, String>> meta = Parameter.metaParam(); - - /** - * Parameterized ctor for VectorFieldMapper.Builder - * @param name name - */ - public Builder(String name) { - super(name); - } - - @Override - protected List<Parameter<?>> getParameters() { - return Arrays.asList(stored, hasDocValues, dimension, meta, correlationParamsContext); - } - - protected Explicit<Boolean> ignoreMalformed(BuilderContext context) { - if (ignoreMalformed != null) { - return new Explicit<>(ignoreMalformed, true); - } - if (context.indexSettings() != null) { - return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false); - } - return Defaults.IGNORE_MALFORMED; - } - - @Override - public ParametrizedFieldMapper build(BuilderContext context) { - final CorrelationParamsContext correlationParams = correlationParamsContext.getValue(); - final MultiFields multiFieldsBuilder = this.multiFieldsBuilder.build(this, context); - final CopyTo copyToBuilder = copyTo.build(); - final Explicit<Boolean> ignoreMalformed = ignoreMalformed(context); - final Map<String, String> metaValue = meta.getValue(); - - final CorrelationVectorFieldType mappedFieldType = new CorrelationVectorFieldType( - buildFullName(context), - metaValue, - dimension.getValue(), - correlationParams - ); - - CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput createLuceneFieldMapperInput = - new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput( - name, - mappedFieldType, - multiFieldsBuilder, - copyToBuilder, - ignoreMalformed, - stored.get(), - hasDocValues.get(), - correlationParams - ); - return new CorrelationVectorFieldMapper(createLuceneFieldMapperInput); - } - } - - /** - * definition of VectorFieldMapper.TypeParser - */ - public static class TypeParser implements Mapper.TypeParser { - - /** - * default constructor of VectorFieldMapper.TypeParser - */ - public TypeParser() {} - - @Override - public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext context) throws MapperParsingException { - Builder builder = new VectorFieldMapper.Builder(name); -
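- // Builder#parse populates the store, doc_values, dimension and correlation_ctx parameters declared above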
builder.parse(name, context, node); - - if (builder.dimension.getValue() == -1) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "Dimension value missing for vector: %s", name)); - } - return builder; - } - } - - /** - * definition of VectorFieldMapper.CorrelationVectorFieldType - */ - public static class CorrelationVectorFieldType extends MappedFieldType { - int dimension; - CorrelationParamsContext correlationParams; - - /** - * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType - * @param name name of the field - * @param meta meta of the field - * @param dimension dimension of the field - */ - public CorrelationVectorFieldType(String name, Map<String, String> meta, int dimension) { - this(name, meta, dimension, null); - } - - /** - * Parameterized ctor for VectorFieldMapper.CorrelationVectorFieldType - * @param name name of the field - * @param meta meta of the field - * @param dimension dimension of the field - * @param correlationParams correlation params for the field - */ - public CorrelationVectorFieldType( - String name, - Map<String, String> meta, - int dimension, - CorrelationParamsContext correlationParams - ) { - super(name, false, false, true, TextSearchInfo.NONE, meta); - this.dimension = dimension; - this.correlationParams = correlationParams; - } - - @Override - public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String s) { - throw new UnsupportedOperationException("Correlation vectors do not support fields search"); - } - - @Override - public String typeName() { - return CONTENT_TYPE; - } - - @Override - public Query existsQuery(QueryShardContext context) { - return new FieldExistsQuery(name()); - } - - @Override - public Query termQuery(Object o, QueryShardContext context) { - throw new QueryShardException( - context, - String.format( - Locale.getDefault(), - "Correlation vectors do not support exact searching; use correlation queries instead: [%s]", - name() - ) - ); - } - - /** - * get dimension - * @return dimension - */ - public int getDimension() { - return dimension; - } - - /** - * get correlation params - * @return correlation params - */ - public CorrelationParamsContext getCorrelationParams() { - return correlationParams; - } - } - - protected Explicit<Boolean> ignoreMalformed; - protected boolean stored; - protected boolean hasDocValues; - protected Integer dimension; - protected CorrelationParamsContext correlationParams; - - /** - * Parameterized ctor for VectorFieldMapper - * @param simpleName name of field - * @param mappedFieldType field type of field - * @param multiFields multi fields - * @param copyTo copy to - * @param ignoreMalformed ignore malformed - * @param stored stored field - * @param hasDocValues has doc values - */ - public VectorFieldMapper( - String simpleName, - CorrelationVectorFieldType mappedFieldType, - FieldMapper.MultiFields multiFields, - FieldMapper.CopyTo copyTo, - Explicit<Boolean> ignoreMalformed, - boolean stored, - boolean hasDocValues - ) { - super(simpleName, mappedFieldType, multiFields, copyTo); - this.ignoreMalformed = ignoreMalformed; - this.stored = stored; - this.hasDocValues = hasDocValues; - this.dimension = mappedFieldType.getDimension(); - } - - @Override - protected VectorFieldMapper clone() { - return (VectorFieldMapper) super.clone(); - } - - @Override - protected String contentType() { - return CONTENT_TYPE; - } - - @Override - protected void parseCreateField(ParseContext parseContext) throws IOException { - parseCreateField(parseContext, fieldType().getDimension()); - } - -
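- // subclasses receive the mapped dimension and index the parsed vector (see CorrelationVectorFieldMapper)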
protected abstract void parseCreateField(ParseContext parseContext, int dimension) throws IOException; - - Optional getFloatsFromContext(ParseContext context, int dimension) throws IOException { - context.path().add(simpleName()); - - List vector = new ArrayList<>(); - XContentParser.Token token = context.parser().currentToken(); - float value; - if (token == XContentParser.Token.START_ARRAY) { - token = context.parser().nextToken(); - while (token != XContentParser.Token.END_ARRAY) { - value = context.parser().floatValue(); - - if (Float.isNaN(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be NaN"); - } - - if (Float.isInfinite(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be infinity"); - } - vector.add(value); - token = context.parser().nextToken(); - } - } else if (token == XContentParser.Token.VALUE_NUMBER) { - value = context.parser().floatValue(); - if (Float.isNaN(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be NaN"); - } - - if (Float.isInfinite(value)) { - throw new IllegalArgumentException("Correlation vector values cannot be infinity"); - } - vector.add(value); - context.parser().nextToken(); - } else if (token == XContentParser.Token.VALUE_NULL) { - context.path().remove(); - return Optional.empty(); - } - - if (dimension != vector.size()) { - String errorMessage = String.format( - Locale.ROOT, - "Vector dimension mismatch. Expected: %d, Given: %d", - dimension, - vector.size() - ); - throw new IllegalArgumentException(errorMessage); - } - - float[] array = new float[vector.size()]; - int i = 0; - for (Float f : vector) { - array[i++] = f; - } - return Optional.of(array); - } - - @Override - protected boolean docValuesByDefault() { - return true; - } - - @Override - public ParametrizedFieldMapper.Builder getMergeBuilder() { - return new VectorFieldMapper.Builder(simpleName()).init(this); - } - - @Override - public boolean parsesArrayValue() { - return true; - } - - @Override - public CorrelationVectorFieldType fieldType() { - return (CorrelationVectorFieldType) super.fieldType(); - } - - @Override - protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { - super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || ignoreMalformed.explicit()) { - builder.field(Names.IGNORE_MALFORMED, ignoreMalformed.value()); - } - } - - /** - * Class for constants used in parent class VectorFieldMapper - */ - public static class Names { - public static final String IGNORE_MALFORMED = "ignore_malformed"; - } - - /** - * Class for constants used in parent class VectorFieldMapper - */ - public static class Defaults { - public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java deleted file mode 100644 index cfc0ffdfa81f1..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/** - * package to wrap Lucene KnnFloatVectorField and KnnFloatVectorQuery for Opensearch events-correlation-engine - */ -package org.opensearch.plugin.correlation.core.index; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java deleted file mode 100644 index e95b68e855cca..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilder.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.query; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.search.Query; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.ParsingException; -import org.opensearch.core.common.Strings; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.MappedFieldType; -import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.index.query.AbstractQueryBuilder; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.WithFieldName; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Objects; - -/** - * Constructs a query to get correlated events or documents for a particular event or document. - * - * @opensearch.internal - */ -public class CorrelationQueryBuilder extends AbstractQueryBuilder implements WithFieldName { - - private static final Logger log = LogManager.getLogger(CorrelationQueryBuilder.class); - protected static final ParseField VECTOR_FIELD = new ParseField("vector"); - protected static final ParseField K_FIELD = new ParseField("k"); - protected static final ParseField FILTER_FIELD = new ParseField("filter"); - /** - * max number of neighbors that can be retrieved. 
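- * (the parameterized constructor rejects any k greater than this limit)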
- */ - public static int K_MAX = 10000; - - /** - * name of the query - */ - public static final ParseField NAME_FIELD = new ParseField("correlation"); - - private String fieldName; - private float[] vector; - private int k = 0; - private double boost; - private QueryBuilder filter; - - private CorrelationQueryBuilder() {} - - /** - * parameterized ctor for CorrelationQueryBuilder - * @param fieldName field name for query - * @param vector query vector - * @param k number of nearby neighbors - */ - public CorrelationQueryBuilder(String fieldName, float[] vector, int k) { - this(fieldName, vector, k, null); - } - - /** - * parameterized ctor for CorrelationQueryBuilder - * @param fieldName field name for query - * @param vector query vector - * @param k number of nearby neighbors - * @param filter optional filter query - */ - public CorrelationQueryBuilder(String fieldName, float[] vector, int k, QueryBuilder filter) { - if (Strings.isNullOrEmpty(fieldName)) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] requires fieldName", NAME_FIELD.getPreferredName()) - ); - } - if (vector == null) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] requires query vector", NAME_FIELD.getPreferredName()) - ); - } - if (vector.length == 0) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] query vector is empty", NAME_FIELD.getPreferredName()) - ); - } - if (k <= 0) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "[%s] requires k > 0", NAME_FIELD.getPreferredName())); - } - if (k > K_MAX) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "[%s] requires k <= %d", NAME_FIELD.getPreferredName(), K_MAX) - ); - } - - this.fieldName = fieldName; - this.vector = vector; - this.k = k; - this.filter = filter; - } - - /** - * parameterized ctor for CorrelationQueryBuilder - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationQueryBuilder(StreamInput sin) throws IOException { - super(sin); - this.fieldName = sin.readString(); - this.vector = sin.readFloatArray(); - this.k = sin.readInt(); - this.filter = sin.readOptionalNamedWriteable(QueryBuilder.class); - } - - private static float[] objectsToFloats(List<Object> objs) { - float[] vector = new float[objs.size()]; - for (int i = 0; i < objs.size(); ++i) { - vector[i] = ((Number) objs.get(i)).floatValue(); - } - return vector; - } - - /** - * parse into CorrelationQueryBuilder - * @param xcp XContentParser - * @return CorrelationQueryBuilder - */ - public static CorrelationQueryBuilder parse(XContentParser xcp) throws IOException { - String fieldName = null; - List<Object> vector = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - - int k = 0; - QueryBuilder filter = null; - String queryName = null; - String currentFieldName = null; - XContentParser.Token token; - while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = xcp.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, currentFieldName); - fieldName = currentFieldName; - while ((token = xcp.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = xcp.currentName(); - } else if (token.isValue() || token == XContentParser.Token.START_ARRAY) { - if (VECTOR_FIELD.match(currentFieldName,
xcp.getDeprecationHandler())) { - vector = xcp.list(); - } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { - boost = xcp.floatValue(); - } else if (K_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { - k = (Integer) NumberFieldMapper.NumberType.INTEGER.parse(xcp.objectBytes(), false); - } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, xcp.getDeprecationHandler())) { - queryName = xcp.text(); - } else { - throw new ParsingException( - xcp.getTokenLocation(), - "[" + NAME_FIELD.getPreferredName() + "] query does not support [" + currentFieldName + "]" - ); - } - } else if (token == XContentParser.Token.START_OBJECT) { - String tokenName = xcp.currentName(); - if (FILTER_FIELD.getPreferredName().equals(tokenName)) { - filter = parseInnerQueryBuilder(xcp); - } else { - throw new ParsingException( - xcp.getTokenLocation(), - "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "]" - ); - } - } else { - throw new ParsingException( - xcp.getTokenLocation(), - "[" + NAME_FIELD.getPreferredName() + "] unknown token [" + token + "] after [" + currentFieldName + "]" - ); - } - } - } else { - throwParsingExceptionOnMultipleFields(NAME_FIELD.getPreferredName(), xcp.getTokenLocation(), fieldName, xcp.currentName()); - fieldName = xcp.currentName(); - vector = xcp.list(); - } - } - - assert vector != null; - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(fieldName, objectsToFloats(vector), k, filter); - correlationQueryBuilder.queryName(queryName); - correlationQueryBuilder.boost(boost); - return correlationQueryBuilder; - } - - public void setFieldName(String fieldName) { - this.fieldName = fieldName; - } - - /** - * get field name - * @return field name - */ - @Override - public String fieldName() { - return fieldName; - } - - public void setVector(float[] vector) { - this.vector = vector; - } - - /** - * get query vector - * @return query vector - */ - public Object vector() { - return vector; - } - - public void setK(int k) { - this.k = k; - } - - /** - * get number of nearby neighbors - * @return number of nearby neighbors - */ - public int getK() { - return k; - } - - public void setBoost(double boost) { - this.boost = boost; - } - - /** - * get boost - * @return boost - */ - public double getBoost() { - return boost; - } - - public void setFilter(QueryBuilder filter) { - this.filter = filter; - } - - /** - * get optional filter - * @return optional filter - */ - public QueryBuilder getFilter() { - return filter; - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(fieldName); - out.writeFloatArray(vector); - out.writeInt(k); - out.writeOptionalNamedWriteable(filter); - } - - @Override - public void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(fieldName); - - builder.field(VECTOR_FIELD.getPreferredName(), vector); - builder.field(K_FIELD.getPreferredName(), k); - if (filter != null) { - builder.field(FILTER_FIELD.getPreferredName(), filter); - } - printBoostAndQueryName(builder); - builder.endObject(); - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - MappedFieldType mappedFieldType = context.fieldMapper(fieldName); - - if (!(mappedFieldType instanceof VectorFieldMapper.CorrelationVectorFieldType)) { - throw new IllegalArgumentException(String.format(Locale.getDefault(), "Field '%s' is not a correlation_vector type.", this.fieldName)); - } - -
VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = - (VectorFieldMapper.CorrelationVectorFieldType) mappedFieldType; - int fieldDimension = correlationVectorFieldType.getDimension(); - - if (fieldDimension != vector.length) { - throw new IllegalArgumentException( - String.format( - Locale.getDefault(), - "Query vector has invalid dimension: %d. Dimension should be: %d", - vector.length, - fieldDimension - ) - ); - } - - String indexName = context.index().getName(); - CorrelationQueryFactory.CreateQueryRequest createQueryRequest = new CorrelationQueryFactory.CreateQueryRequest( - indexName, - this.fieldName, - this.vector, - this.k, - this.filter, - context - ); - return CorrelationQueryFactory.create(createQueryRequest); - } - - @Override - protected boolean doEquals(CorrelationQueryBuilder other) { - return Objects.equals(fieldName, other.fieldName) && Arrays.equals(vector, other.vector) && Objects.equals(k, other.k); - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldName, vector, k); - } - - @Override - public String getWriteableName() { - return NAME_FIELD.getPreferredName(); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java deleted file mode 100644 index d5db299bfa3a5..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryFactory.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.query; - -import org.apache.lucene.search.KnnFloatVectorQuery; -import org.apache.lucene.search.Query; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryShardContext; - -import java.io.IOException; -import java.util.Optional; - -/** - * CorrelationQueryFactory util class is used to construct a Lucene KnnFloatVectorQuery. - * - * @opensearch.internal - */ -public class CorrelationQueryFactory { - - /** - * static method which takes input params to construct a Lucene KnnFloatVectorQuery. - * @param createQueryRequest object parameter containing inputs for constructing Lucene KnnFloatVectorQuery. - * @return generic Lucene Query object - */ - public static Query create(CreateQueryRequest createQueryRequest) { - final String indexName = createQueryRequest.getIndexName(); - final String fieldName = createQueryRequest.getFieldName(); - final int k = createQueryRequest.getK(); - final float[] vector = createQueryRequest.getVector(); - - if (createQueryRequest.getFilter().isPresent()) { - final QueryShardContext context = createQueryRequest.getContext() - .orElseThrow(() -> new RuntimeException("Shard context cannot be null")); - - try { - final Query filterQuery = createQueryRequest.getFilter().get().toQuery(context); - return new KnnFloatVectorQuery(fieldName, vector, k, filterQuery); - } catch (IOException ex) { - throw new RuntimeException("Cannot create knn query with filter", ex); - } - } - return new KnnFloatVectorQuery(fieldName, vector, k); - } - - /** - * class containing params to construct a Lucene KnnFloatVectorQuery. 
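- * e.g. create(new CreateQueryRequest("my-index", "my_vector", new float[] { 1.0f, 2.0f }, 5, null, null)) builds new KnnFloatVectorQuery("my_vector", vector, 5), since no filter is supplied (illustrative names and values).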
- * - * @opensearch.internal - */ - public static class CreateQueryRequest { - private String indexName; - - private String fieldName; - - private float[] vector; - - private int k; - - private QueryBuilder filter; - - private QueryShardContext context; - - /** - * Parameterized ctor for CreateQueryRequest - * @param indexName index name - * @param fieldName field name - * @param vector query vector - * @param k number of nearby neighbors - * @param filter additional filter query - * @param context QueryShardContext - */ - public CreateQueryRequest( - String indexName, - String fieldName, - float[] vector, - int k, - QueryBuilder filter, - QueryShardContext context - ) { - this.indexName = indexName; - this.fieldName = fieldName; - this.vector = vector; - this.k = k; - this.filter = filter; - this.context = context; - } - - /** - * get index name - * @return get index name - */ - public String getIndexName() { - return indexName; - } - - /** - * get field name - * @return get field name - */ - public String getFieldName() { - return fieldName; - } - - /** - * get vector - * @return get vector - */ - public float[] getVector() { - return vector; - } - - /** - * get number of nearby neighbors - * @return number of nearby neighbors - */ - public int getK() { - return k; - } - - /** - * get optional filter query - * @return get optional filter query - */ - public Optional getFilter() { - return Optional.ofNullable(filter); - } - - /** - * get optional query shard context - * @return get optional query shard context - */ - public Optional getContext() { - return Optional.ofNullable(context); - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java deleted file mode 100644 index 2cf5db786a60f..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/query/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * correlation query builder package - */ -package org.opensearch.plugin.correlation.core.index.query; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java deleted file mode 100644 index 82be787af5a72..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
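Reviewer note: the rendered diff has dropped generic type parameters throughout (for example, `Optional getFilter()` and `Optional getContext()` were presumably `Optional<QueryBuilder>` and `Optional<QueryShardContext>`). The request object's null-tolerant accessors pair with `orElseThrow` at the call site, as `create()` above shows. A compact stand-alone sketch of that pattern, with a hypothetical stand-in type:

```java
import java.util.Optional;

public class OptionalAccessorSketch {
    // Hypothetical stand-in for the plugin's QueryShardContext.
    record ShardContext(String indexName) {}

    private final ShardContext context;  // may be null

    OptionalAccessorSketch(ShardContext context) {
        this.context = context;
    }

    // Nullable field surfaced as an Optional, matching the deleted getters.
    Optional<ShardContext> getContext() {
        return Optional.ofNullable(context);
    }

    public static void main(String[] args) {
        OptionalAccessorSketch request = new OptionalAccessorSketch(new ShardContext("logs"));
        // Call-site contract from the deleted create(): fail fast when a
        // required context is absent.
        ShardContext ctx = request.getContext()
            .orElseThrow(() -> new RuntimeException("Shard context cannot be null"));
        System.out.println(ctx.indexName());
    }
}
```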
- */ - -/** - * base package of events-correlation-engine - */ -package org.opensearch.plugin.correlation; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java deleted file mode 100644 index ab6f05ec0e6a3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleAction.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.action; - -import org.opensearch.action.ActionType; - -/** - * Transport Action for indexing correlation rules. - * - * @opensearch.internal - */ -public class IndexCorrelationRuleAction extends ActionType { - - /** - * Instance of IndexCorrelationRuleAction - */ - public static final IndexCorrelationRuleAction INSTANCE = new IndexCorrelationRuleAction(); - /** - * Name of IndexCorrelationRuleAction - */ - public static final String NAME = "cluster:admin/correlation/rules"; - - private IndexCorrelationRuleAction() { - super(NAME, IndexCorrelationRuleResponse::new); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java deleted file mode 100644 index 3fe25d144059d..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleRequest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.action; - -import org.opensearch.action.ActionRequest; -import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.rest.RestRequest; - -import java.io.IOException; - -/** - * A request to index correlation rules. 
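Reviewer note: `extends ActionType` above presumably lost a type parameter in rendering; the `IndexCorrelationRuleResponse::new` reader passed to `super` implies it was `ActionType<IndexCorrelationRuleResponse>`. A self-contained sketch of the singleton-`ActionType` pattern, using a hypothetical response type so it compiles without the deleted classes:

```java
import org.opensearch.action.ActionType;
import org.opensearch.core.action.ActionResponse;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;

import java.io.IOException;

// Hypothetical response type standing in for IndexCorrelationRuleResponse.
class RuleResponseSketch extends ActionResponse {
    RuleResponseSketch(StreamInput in) throws IOException {}

    @Override
    public void writeTo(StreamOutput out) throws IOException {}
}

public class IndexRuleActionSketch extends ActionType<RuleResponseSketch> {
    public static final IndexRuleActionSketch INSTANCE = new IndexRuleActionSketch();
    public static final String NAME = "cluster:admin/correlation/rules";

    private IndexRuleActionSketch() {
        // NAME doubles as the transport identifier and the privilege string;
        // the reader tells the transport layer how to deserialize responses.
        // The private constructor keeps INSTANCE the only instance.
        super(NAME, RuleResponseSketch::new);
    }
}
```

Callers then invoke the action via `client.execute(INSTANCE, request, listener)`, which is exactly what the REST handler further down in this diff does.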
- * - * @opensearch.internal - */ -public class IndexCorrelationRuleRequest extends ActionRequest { - - private String correlationRuleId; - - private CorrelationRule correlationRule; - - private RestRequest.Method method; - - /** - * Parameterized ctor for IndexCorrelationRuleRequest - * @param correlationRule correlation rule - * @param method Rest method of request PUT or POST - */ - public IndexCorrelationRuleRequest(CorrelationRule correlationRule, RestRequest.Method method) { - super(); - this.correlationRuleId = ""; - this.correlationRule = correlationRule; - this.method = method; - } - - /** - * Parameterized ctor for IndexCorrelationRuleRequest - * @param correlationRuleId correlation rule id - * @param correlationRule correlation rule - * @param method Rest method of request PUT or POST - */ - public IndexCorrelationRuleRequest(String correlationRuleId, CorrelationRule correlationRule, RestRequest.Method method) { - super(); - this.correlationRuleId = correlationRuleId; - this.correlationRule = correlationRule; - this.method = method; - } - - /** - * StreamInput ctor of IndexCorrelationRuleRequest - * @param sin StreamInput - * @throws IOException IOException - */ - public IndexCorrelationRuleRequest(StreamInput sin) throws IOException { - this(sin.readString(), CorrelationRule.readFrom(sin), sin.readEnum(RestRequest.Method.class)); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(correlationRuleId); - correlationRule.writeTo(out); - } - - /** - * get correlation rule id - * @return correlation rule id - */ - public String getCorrelationRuleId() { - return correlationRuleId; - } - - /** - * get correlation rule - * @return correlation rule - */ - public CorrelationRule getCorrelationRule() { - return correlationRule; - } - - /** - * get Rest method - * @return Rest method - */ - public RestRequest.Method getMethod() { - return method; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java deleted file mode 100644 index 8102e6585825e..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/IndexCorrelationRuleResponse.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.action; - -import org.opensearch.core.ParseField; -import org.opensearch.core.action.ActionResponse; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; - -import java.io.IOException; - -/** - * Transport Response for indexing correlation rules. 
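Reviewer note: in the deleted `IndexCorrelationRuleRequest`, the `StreamInput` constructor reads three values (id, rule, REST method) but `writeTo` writes only the first two; the method enum appears to be dropped on the wire. A stand-alone illustration of the symmetry rule, using `java.io` streams as stand-ins for `StreamInput`/`StreamOutput` (all types here are illustrative, not the plugin's):

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

enum Method { POST, PUT }

final class RequestWireFormat {
    final String id;
    final Method method;

    RequestWireFormat(String id, Method method) {
        this.id = id;
        this.method = method;
    }

    // Every value read here must be written by writeTo, in the same order.
    RequestWireFormat(DataInputStream in) throws IOException {
        this(in.readUTF(), Method.valueOf(in.readUTF()));
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeUTF(id);
        out.writeUTF(method.name());  // the step the deleted code appears to omit
    }
}
```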
- * - * @opensearch.internal - */ -public class IndexCorrelationRuleResponse extends ActionResponse implements ToXContentObject { - - private static final ParseField _ID = new ParseField("_id"); - private static final ParseField _VERSION = new ParseField("_version"); - - private String id; - - private Long version; - - private RestStatus status; - - private CorrelationRule correlationRule; - - /** - * Parameterized ctor for IndexCorrelationRuleResponse - * @param version version of rule - * @param status Rest status of indexing rule - * @param correlationRule correlation rule - */ - public IndexCorrelationRuleResponse(String id, Long version, RestStatus status, CorrelationRule correlationRule) { - super(); - this.id = id; - this.version = version; - this.status = status; - this.correlationRule = correlationRule; - } - - /** - * StreamInput ctor of IndexCorrelationRuleResponse - * @param sin StreamInput - * @throws IOException IOException - */ - public IndexCorrelationRuleResponse(StreamInput sin) throws IOException { - this(sin.readString(), sin.readLong(), sin.readEnum(RestStatus.class), CorrelationRule.readFrom(sin)); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject().field(_ID.getPreferredName(), id).field(_VERSION.getPreferredName(), version); - - builder.field("rule", correlationRule); - return builder.endObject(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(id); - out.writeLong(version); - out.writeEnum(status); - correlationRule.writeTo(out); - } - - /** - * get id - * @return id of rule - */ - public String getId() { - return id; - } - - /** - * get status - * @return Rest status of indexing rule - */ - public RestStatus getStatus() { - return status; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java deleted file mode 100644 index c01f2936a20ca..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/action/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions, Requests and Responses for correlation rules - */ -package org.opensearch.plugin.correlation.rules.action; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java deleted file mode 100644 index 3797e0c7043dc..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationQuery.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.rules.model; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * Correlation Query DSL - * { - * "index": "s3_access_logs", - * "query": "aws.cloudtrail.eventName:ReplicateObject", - * "timestampField": "@timestamp", - * "tags": [ - * "s3" - * ] - * } - */ -public class CorrelationQuery implements Writeable, ToXContentObject { - - private static final Logger log = LogManager.getLogger(CorrelationQuery.class); - private static final ParseField INDEX_FIELD = new ParseField("index"); - private static final ParseField QUERY_FIELD = new ParseField("query"); - private static final ParseField TIMESTAMP_FIELD = new ParseField("timestampField"); - private static final ParseField TAGS_FIELD = new ParseField("tags"); - private static final ObjectParser PARSER = new ObjectParser<>("CorrelationQuery", CorrelationQuery::new); - - static { - PARSER.declareString(CorrelationQuery::setIndex, INDEX_FIELD); - PARSER.declareString(CorrelationQuery::setQuery, QUERY_FIELD); - PARSER.declareStringOrNull(CorrelationQuery::setTimestampField, TIMESTAMP_FIELD); - PARSER.declareField((xcp, query, context) -> { - List tags = new ArrayList<>(); - XContentParser.Token currentToken = xcp.currentToken(); - if (currentToken == XContentParser.Token.START_ARRAY) { - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - tags.add(xcp.text()); - } - } - query.setTags(tags); - }, TAGS_FIELD, ObjectParser.ValueType.STRING_ARRAY); - } - - private String index; - - private String query; - - private String timestampField; - - private List tags; - - private CorrelationQuery() { - this.timestampField = "_timestamp"; - } - - /** - * Parameterized ctor of Correlation Query - * @param index event index to correlate - * @param query query to filter relevant events for correlations from index - * @param timestampField timestamp field in the index - * @param tags tags to store additional metadata as part of correlation queries. - */ - public CorrelationQuery(String index, String query, String timestampField, List tags) { - this.index = index; - this.query = query; - this.timestampField = timestampField != null ? 
timestampField : "_timestamp"; - this.tags = tags; - } - - /** - * StreamInput ctor of Correlation Query - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationQuery(StreamInput sin) throws IOException { - this(sin.readString(), sin.readString(), sin.readString(), sin.readStringList()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(index); - out.writeString(query); - out.writeString(timestampField); - out.writeStringCollection(tags); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(INDEX_FIELD.getPreferredName(), index) - .field(QUERY_FIELD.getPreferredName(), query) - .field(TIMESTAMP_FIELD.getPreferredName(), timestampField) - .field(TAGS_FIELD.getPreferredName(), tags); - return builder.endObject(); - } - - /** - * parse into CorrelationQuery - * @param xcp XContentParser - * @return CorrelationQuery - */ - public static CorrelationQuery parse(XContentParser xcp) { - return PARSER.apply(xcp, null); - } - - /** - * convert StreamInput to CorrelationQuery - * @param sin StreamInput - * @return CorrelationQuery - * @throws IOException IOException - */ - public static CorrelationQuery readFrom(StreamInput sin) throws IOException { - return new CorrelationQuery(sin); - } - - /** - * Set index - * @param index event index to correlate - */ - public void setIndex(String index) { - this.index = index; - } - - /** - * Get index - * @return event index to correlate - */ - public String getIndex() { - return index; - } - - /** - * Set query - * @param query query to filter relevant events for correlations from index - */ - public void setQuery(String query) { - this.query = query; - } - - /** - * Get query - * @return query to filter relevant events for correlations from index - */ - public String getQuery() { - return query; - } - - /** - * Set timestamp field - * @param timestampField timestamp field in the index - */ - public void setTimestampField(String timestampField) { - this.timestampField = timestampField != null ? timestampField : "_timestamp"; - } - - /** - * Get timestamp field - * @return timestamp field in the index - */ - public String getTimestampField() { - return timestampField; - } - - /** - * Set tags - * @param tags tags to store additional metadata as part of correlation queries. - */ - public void setTags(List tags) { - this.tags = tags; - } - - /** - * Get tags - * @return tags to store additional metadata as part of correlation queries. - */ - public List getTags() { - return tags; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java deleted file mode 100644 index 6978d7248e199..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/CorrelationRule.java +++ /dev/null @@ -1,244 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
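Reviewer note: the rendering has also eaten this parser's generics — `ObjectParser PARSER` was presumably `ObjectParser<CorrelationQuery, Void>`, consistent with `PARSER.apply(xcp, null)` in `parse()`. A minimal compilable reconstruction of the wiring, using the imports the deleted file itself declares; replacing the hand-rolled tags loop with `declareStringArray` is an assumed simplification, not the original code:

```java
import org.opensearch.core.ParseField;
import org.opensearch.core.xcontent.ObjectParser;

import java.util.ArrayList;
import java.util.List;

public class CorrelationQuerySketch {
    private static final ParseField INDEX_FIELD = new ParseField("index");
    private static final ParseField QUERY_FIELD = new ParseField("query");
    private static final ParseField TIMESTAMP_FIELD = new ParseField("timestampField");
    private static final ParseField TAGS_FIELD = new ParseField("tags");

    // Generic parameters restored: value type, then parse context (unused here).
    private static final ObjectParser<CorrelationQuerySketch, Void> PARSER =
        new ObjectParser<>("CorrelationQuery", CorrelationQuerySketch::new);

    static {
        PARSER.declareString(CorrelationQuerySketch::setIndex, INDEX_FIELD);
        PARSER.declareString(CorrelationQuerySketch::setQuery, QUERY_FIELD);
        PARSER.declareStringOrNull(CorrelationQuerySketch::setTimestampField, TIMESTAMP_FIELD);
        PARSER.declareStringArray(CorrelationQuerySketch::setTags, TAGS_FIELD);
    }

    private String index;
    private String query;
    private String timestampField = "_timestamp";  // same default as the deleted model
    private List<String> tags = new ArrayList<>();

    public void setIndex(String index) { this.index = index; }
    public void setQuery(String query) { this.query = query; }
    public void setTimestampField(String ts) { this.timestampField = ts != null ? ts : "_timestamp"; }
    public void setTags(List<String> tags) { this.tags = tags; }
}
```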
- */ - -package org.opensearch.plugin.correlation.rules.model; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.core.ParseField; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.core.xcontent.ObjectParser; -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Objects; - -/** - * Correlation Rule DSL - * { - * "name": "s3 to app logs", - * "correlate": [ - * { - * "index": "s3_access_logs", - * "query": "aws.cloudtrail.eventName:ReplicateObject", - * "timestampField": "@timestamp", - * "tags": [ - * "s3" - * ] - * } - * ] - * } - * - * @opensearch.api - * @opensearch.experimental - */ -public class CorrelationRule implements Writeable, ToXContentObject { - - private static final Logger log = LogManager.getLogger(CorrelationRule.class); - - /** - * Correlation Rule Index - */ - public static final String CORRELATION_RULE_INDEX = ".opensearch-correlation-rules-config"; - - private static final ParseField ID_FIELD = new ParseField("id"); - private static final ParseField VERSION_FIELD = new ParseField("version"); - private static final ParseField NAME_FIELD = new ParseField("name"); - private static final ParseField CORRELATION_QUERIES_FIELD = new ParseField("correlate"); - private static final ObjectParser PARSER = new ObjectParser<>("CorrelationRule", CorrelationRule::new); - - static { - PARSER.declareString(CorrelationRule::setId, ID_FIELD); - PARSER.declareLong(CorrelationRule::setVersion, VERSION_FIELD); - PARSER.declareString(CorrelationRule::setName, NAME_FIELD); - PARSER.declareField((xcp, rule, context) -> { - List correlationQueries = new ArrayList<>(); - XContentParser.Token currentToken = xcp.currentToken(); - if (currentToken == XContentParser.Token.START_ARRAY) { - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - correlationQueries.add(CorrelationQuery.parse(xcp)); - } - } - rule.setCorrelationQueries(correlationQueries); - }, CORRELATION_QUERIES_FIELD, ObjectParser.ValueType.OBJECT_ARRAY); - } - - private String id; - - private Long version; - - private String name; - - private List correlationQueries; - - private CorrelationRule() {} - - /** - * Parameterized ctor of Correlation Rule - * @param name name of rule - * @param correlationQueries list of correlation queries part of rule - */ - public CorrelationRule(String name, List correlationQueries) { - this("", 1L, name, correlationQueries); - } - - /** - * Parameterized ctor of Correlation Rule - * @param id id of rule - * @param version version of rule - * @param name name of rule - * @param correlationQueries list of correlation queries part of rule - */ - public CorrelationRule(String id, Long version, String name, List correlationQueries) { - this.id = id; - this.version = version; - this.name = name; - this.correlationQueries = correlationQueries; - } - - /** - * StreamInput ctor of Correlation Rule - * @param sin StreamInput - * @throws IOException IOException - */ - public CorrelationRule(StreamInput sin) throws IOException { - this(sin.readString(), sin.readLong(), sin.readString(), sin.readList(CorrelationQuery::readFrom)); - } - - @Override - public XContentBuilder toXContent(XContentBuilder 
builder, Params params) throws IOException { - builder.startObject(); - - builder.field(ID_FIELD.getPreferredName(), id); - builder.field(VERSION_FIELD.getPreferredName(), version); - builder.field(NAME_FIELD.getPreferredName(), name); - - CorrelationQuery[] correlationQueries = new CorrelationQuery[] {}; - correlationQueries = this.correlationQueries.toArray(correlationQueries); - builder.field(CORRELATION_QUERIES_FIELD.getPreferredName(), correlationQueries); - return builder.endObject(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(id); - out.writeLong(version); - out.writeString(name); - - for (CorrelationQuery query : correlationQueries) { - query.writeTo(out); - } - } - - /** - * parse into CorrelationRule - * @param xcp XContentParser - * @param id id of rule - * @param version version of rule - * @return CorrelationRule - */ - public static CorrelationRule parse(XContentParser xcp, String id, Long version) { - return PARSER.apply(xcp, null); - } - - /** - * convert StreamInput to CorrelationRule - * @param sin StreamInput - * @return CorrelationRule - * @throws IOException IOException - */ - public static CorrelationRule readFrom(StreamInput sin) throws IOException { - return new CorrelationRule(sin); - } - - /** - * set id - * @param id id of rule - */ - public void setId(String id) { - this.id = id; - } - - /** - * get id - * @return id of rule - */ - public String getId() { - return id; - } - - /** - * set version - * @param version version of rule - */ - public void setVersion(Long version) { - this.version = version; - } - - /** - * get version - * @return version of rule - */ - public Long getVersion() { - return version; - } - - /** - * set name - * @param name name of rule - */ - public void setName(String name) { - this.name = name; - } - - /** - * get name - * @return name of rule - */ - public String getName() { - return name; - } - - /** - * set correlation queries - * @param correlationQueries set correlation queries for the rule - */ - public void setCorrelationQueries(List correlationQueries) { - this.correlationQueries = correlationQueries; - } - - /** - * get correlation queries - * @return correlation queries for the rule - */ - public List getCorrelationQueries() { - return correlationQueries; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - CorrelationRule that = (CorrelationRule) o; - return id.equals(that.id) - && version.equals(that.version) - && name.equals(that.name) - && correlationQueries.equals(that.correlationQueries); - } - - @Override - public int hashCode() { - return Objects.hash(id, version, name, correlationQueries); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java deleted file mode 100644 index b04b7be3c62e3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/model/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
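Reviewer note: `CorrelationRule.writeTo` streams the queries in a bare loop, while the `StreamInput` constructor reads them back with `sin.readList(CorrelationQuery::readFrom)`, which expects a length-prefixed list; the element count appears to be missing on the write side. A fragment (not a standalone unit) of what the symmetric method would presumably look like:

```java
@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeString(id);
    out.writeLong(version);
    out.writeString(name);
    // writeCollection emits the element count followed by the elements,
    // matching readList on the deserializing side.
    out.writeCollection(correlationQueries);
}
```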
- */ - -/** - * data models for correlation rules - */ -package org.opensearch.plugin.correlation.rules.model; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java deleted file mode 100644 index 3b2b7eb02ae5f..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/RestIndexCorrelationRuleAction.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.resthandler; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.client.node.NodeClient; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.plugin.correlation.EventsCorrelationPlugin; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.rest.BaseRestHandler; -import org.opensearch.rest.BytesRestResponse; -import org.opensearch.rest.RestChannel; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestResponse; -import org.opensearch.rest.action.RestResponseListener; - -import java.io.IOException; -import java.util.List; -import java.util.Locale; - -/** - * Rest action for indexing correlation rules. 
- * - * @opensearch.api - */ -public class RestIndexCorrelationRuleAction extends BaseRestHandler { - - private static final Logger log = LogManager.getLogger(RestIndexCorrelationRuleAction.class); - - /** - * Default constructor - */ - public RestIndexCorrelationRuleAction() {} - - @Override - public String getName() { - return "index_correlation_rule_action"; - } - - @Override - public List routes() { - return List.of( - new Route(RestRequest.Method.POST, EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI), - new Route( - RestRequest.Method.PUT, - String.format(Locale.ROOT, "%s/{%s}", EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI, "rule_id") - ) - ); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - log.debug(String.format(Locale.ROOT, "%s %s", request.method(), EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI)); - - String id = request.param("rule_id", ""); - - XContentParser xcp = request.contentParser(); - - CorrelationRule correlationRule = CorrelationRule.parse(xcp, id, 1L); - IndexCorrelationRuleRequest indexCorrelationRuleRequest = new IndexCorrelationRuleRequest(id, correlationRule, request.method()); - return channel -> client.execute( - IndexCorrelationRuleAction.INSTANCE, - indexCorrelationRuleRequest, - indexCorrelationRuleResponse(channel, request.method()) - ); - } - - private RestResponseListener indexCorrelationRuleResponse( - RestChannel channel, - RestRequest.Method restMethod - ) { - return new RestResponseListener<>(channel) { - @Override - public RestResponse buildResponse(IndexCorrelationRuleResponse response) throws Exception { - RestStatus returnStatus = RestStatus.CREATED; - if (restMethod == RestRequest.Method.PUT) { - returnStatus = RestStatus.OK; - } - - BytesRestResponse restResponse = new BytesRestResponse( - returnStatus, - response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS) - ); - - if (restMethod == RestRequest.Method.POST) { - String location = String.format( - Locale.ROOT, - "%s/%s", - EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI, - response.getId() - ); - restResponse.addHeader("Location", location); - } - - return restResponse; - } - }; - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java deleted file mode 100644 index 607ec355801ad..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/resthandler/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
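The REST handler that follows splits POST and PUT along standard create-vs-update lines: POST answers 201 Created plus a Location header derived from the generated rule id, PUT answers 200 OK with no header. A stand-alone distillation of that logic; the `/_correlation/rules` path is an assumption for illustration, since the real value lives in `EventsCorrelationPlugin.CORRELATION_RULES_BASE_URI`:

```java
import java.util.Locale;
import java.util.Optional;

public class IndexRuleHttpSemantics {
    enum Method { POST, PUT }

    record Result(int status, Optional<String> location) {}

    static Result respond(Method method, String baseUri, String ruleId) {
        if (method == Method.POST) {
            // Create: 201 plus a Location header pointing at the new rule.
            return new Result(201, Optional.of(String.format(Locale.ROOT, "%s/%s", baseUri, ruleId)));
        }
        // Update in place: 200, no Location header.
        return new Result(200, Optional.empty());
    }

    public static void main(String[] args) {
        System.out.println(respond(Method.POST, "/_correlation/rules", "rule-1"));
    }
}
```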
- */ - -/** - * Rest Handlers for correlation rules - */ -package org.opensearch.plugin.correlation.rules.resthandler; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java deleted file mode 100644 index 7b4fb670c4aee..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/TransportIndexCorrelationRuleAction.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.rules.transport; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.admin.indices.create.CreateIndexResponse; -import org.opensearch.action.index.IndexRequest; -import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.client.Client; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.inject.Inject; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.core.action.ActionListener; -import org.opensearch.core.rest.RestStatus; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleAction; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleRequest; -import org.opensearch.plugin.correlation.rules.action.IndexCorrelationRuleResponse; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; -import org.opensearch.plugin.correlation.utils.CorrelationRuleIndices; -import org.opensearch.plugin.correlation.utils.IndexUtils; -import org.opensearch.rest.RestRequest; -import org.opensearch.tasks.Task; -import org.opensearch.transport.TransportService; - -import java.io.IOException; -import java.util.Locale; - -/** - * Transport Action for indexing correlation rules. 
- * - * @opensearch.internal - */ -public class TransportIndexCorrelationRuleAction extends HandledTransportAction { - - private static final Logger log = LogManager.getLogger(TransportIndexCorrelationRuleAction.class); - - private final Client client; - - private final CorrelationRuleIndices correlationRuleIndices; - - private final ClusterService clusterService; - - /** - * Parameterized ctor for Transport Action - * @param transportService TransportService - * @param client OS client - * @param actionFilters ActionFilters - * @param clusterService ClusterService - * @param correlationRuleIndices CorrelationRuleIndices which manages lifecycle of correlation rule index - */ - @Inject - public TransportIndexCorrelationRuleAction( - TransportService transportService, - Client client, - ActionFilters actionFilters, - ClusterService clusterService, - CorrelationRuleIndices correlationRuleIndices - ) { - super(IndexCorrelationRuleAction.NAME, transportService, actionFilters, IndexCorrelationRuleRequest::new); - this.client = client; - this.clusterService = clusterService; - this.correlationRuleIndices = correlationRuleIndices; - } - - @Override - protected void doExecute(Task task, IndexCorrelationRuleRequest request, ActionListener listener) { - AsyncIndexCorrelationRuleAction asyncAction = new AsyncIndexCorrelationRuleAction(request, listener); - asyncAction.start(); - } - - private class AsyncIndexCorrelationRuleAction { - private final IndexCorrelationRuleRequest request; - - private final ActionListener listener; - - AsyncIndexCorrelationRuleAction(IndexCorrelationRuleRequest request, ActionListener listener) { - this.request = request; - this.listener = listener; - } - - void start() { - try { - if (correlationRuleIndices.correlationRuleIndexExists() == false) { - try { - correlationRuleIndices.initCorrelationRuleIndex(new ActionListener<>() { - @Override - public void onResponse(CreateIndexResponse response) { - try { - onCreateMappingsResponse(response); - indexCorrelationRule(); - } catch (IOException e) { - onFailures(e); - } - } - - @Override - public void onFailure(Exception e) { - onFailures(e); - } - }); - } catch (IOException e) { - onFailures(e); - } - } else if (!IndexUtils.correlationRuleIndexUpdated) { - IndexUtils.updateIndexMapping( - CorrelationRule.CORRELATION_RULE_INDEX, - CorrelationRuleIndices.correlationRuleIndexMappings(), - clusterService.state(), - client.admin().indices(), - new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse response) { - onUpdateMappingsResponse(response); - try { - indexCorrelationRule(); - } catch (IOException e) { - onFailures(e); - } - } - - @Override - public void onFailure(Exception e) { - onFailures(e); - } - } - ); - } else { - indexCorrelationRule(); - } - } catch (IOException ex) { - onFailures(ex); - } - } - - void indexCorrelationRule() throws IOException { - IndexRequest indexRequest; - if (request.getMethod() == RestRequest.Method.POST) { - indexRequest = new IndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .source(request.getCorrelationRule().toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - .timeout(TimeValue.timeValueSeconds(60)); - } else { - indexRequest = new IndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).setRefreshPolicy( - WriteRequest.RefreshPolicy.IMMEDIATE - ) - .source(request.getCorrelationRule().toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - 
.id(request.getCorrelationRuleId()) - .timeout(TimeValue.timeValueSeconds(60)); - } - - client.index(indexRequest, new ActionListener<>() { - @Override - public void onResponse(IndexResponse response) { - if (response.status().equals(RestStatus.CREATED) || response.status().equals(RestStatus.OK)) { - CorrelationRule ruleResponse = request.getCorrelationRule(); - ruleResponse.setId(response.getId()); - onOperation(ruleResponse); - } else { - onFailures(new OpenSearchStatusException(response.toString(), RestStatus.INTERNAL_SERVER_ERROR)); - } - } - - @Override - public void onFailure(Exception e) { - onFailures(e); - } - }); - } - - private void onCreateMappingsResponse(CreateIndexResponse response) throws IOException { - if (response.isAcknowledged()) { - log.info(String.format(Locale.ROOT, "Created %s with mappings.", CorrelationRule.CORRELATION_RULE_INDEX)); - IndexUtils.correlationRuleIndexUpdated(); - } else { - log.error(String.format(Locale.ROOT, "Create %s mappings call not acknowledged.", CorrelationRule.CORRELATION_RULE_INDEX)); - throw new OpenSearchStatusException( - String.format(Locale.getDefault(), "Create %s mappings call not acknowledged", CorrelationRule.CORRELATION_RULE_INDEX), - RestStatus.INTERNAL_SERVER_ERROR - ); - } - } - - private void onUpdateMappingsResponse(AcknowledgedResponse response) { - if (response.isAcknowledged()) { - log.info(String.format(Locale.ROOT, "Created %s with mappings.", CorrelationRule.CORRELATION_RULE_INDEX)); - IndexUtils.correlationRuleIndexUpdated(); - } else { - log.error(String.format(Locale.ROOT, "Create %s mappings call not acknowledged.", CorrelationRule.CORRELATION_RULE_INDEX)); - throw new OpenSearchStatusException( - String.format(Locale.getDefault(), "Create %s mappings call not acknowledged", CorrelationRule.CORRELATION_RULE_INDEX), - RestStatus.INTERNAL_SERVER_ERROR - ); - } - } - - private void onOperation(CorrelationRule correlationRule) { - finishHim(correlationRule, null); - } - - private void onFailures(Exception t) { - finishHim(null, t); - } - - private void finishHim(CorrelationRule correlationRule, Exception t) { - if (t != null) { - listener.onFailure(t); - } else { - listener.onResponse( - new IndexCorrelationRuleResponse( - correlationRule.getId(), - correlationRule.getVersion(), - request.getMethod() == RestRequest.Method.POST ? RestStatus.CREATED : RestStatus.OK, - correlationRule - ) - ); - } - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java deleted file mode 100644 index 7a47efbb9bb45..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/rules/transport/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Transport Actions for correlation rules. 
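The transport action's `start()` above covers three paths: create the rule index and then write, update stale mappings and then write, or write directly; every branch funnels failures into one listener via `onFailures`. The same control flow in miniature, with simplified stand-in types (the mapping-update branch is omitted for brevity):

```java
import java.util.function.Consumer;

public class EnsureThenWrite {
    // Simplified stand-ins; the real flow uses CorrelationRuleIndices,
    // IndexUtils, and OpenSearch ActionListeners.
    interface Store {
        boolean indexExists();
        void createIndex(Runnable onSuccess, Consumer<Exception> onFailure);
        void indexDoc(Runnable onSuccess, Consumer<Exception> onFailure);
    }

    static void run(Store store, Runnable onDone, Consumer<Exception> onError) {
        if (!store.indexExists()) {
            // Create first; write only once creation succeeds, mirroring the
            // nested ActionListener in the deleted start().
            store.createIndex(() -> store.indexDoc(onDone, onError), onError);
        } else {
            store.indexDoc(onDone, onError);
        }
    }
}
```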
- */ -package org.opensearch.plugin.correlation.rules.transport; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java deleted file mode 100644 index 2e2dbbffbeaa2..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettings.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.settings; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.unit.TimeValue; - -import java.util.concurrent.TimeUnit; - -import static org.opensearch.common.settings.Setting.Property.IndexScope; - -/** - * Settings for events-correlation-engine. - * - * @opensearch.api - * @opensearch.experimental - */ -public class EventsCorrelationSettings { - /** - * Correlation Index setting name - */ - public static final String CORRELATION_INDEX = "index.correlation"; - /** - * Boolean setting to check if an OS index is a correlation index. - */ - public static final Setting IS_CORRELATION_INDEX_SETTING = Setting.boolSetting(CORRELATION_INDEX, false, IndexScope); - /** - * Global time window setting for Correlations - */ - public static final Setting CORRELATION_TIME_WINDOW = Setting.positiveTimeSetting( - "plugins.security_analytics.correlation_time_window", - new TimeValue(5, TimeUnit.MINUTES), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - - /** - * Default constructor - */ - public EventsCorrelationSettings() {} -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java deleted file mode 100644 index 795291cd0de2e..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/settings/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * Settings for events-correlation-engine - */ -package org.opensearch.plugin.correlation.settings; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java deleted file mode 100644 index 3656bd413733a..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/CorrelationRuleIndices.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
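The two settings above follow the usual pattern: an index-scoped boolean flag plus a dynamic node-scoped time window defaulting to five minutes. A minimal sketch of reading them through the `Setting` API; it compiles only while the (now deleted) `EventsCorrelationSettings` class is still on the classpath:

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.plugin.correlation.settings.EventsCorrelationSettings;

public class SettingsReadSketch {
    public static void main(String[] args) {
        // Index-scoped flag: read from the index's settings.
        Settings indexSettings = Settings.builder().put("index.correlation", true).build();
        boolean isCorrelationIndex = EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING.get(indexSettings);

        // Node-scoped window: unset here, so the declared 5-minute default applies.
        TimeValue window = EventsCorrelationSettings.CORRELATION_TIME_WINDOW.get(Settings.EMPTY);

        System.out.println(isCorrelationIndex + " / " + window);
    }
}
```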
- */ - -package org.opensearch.plugin.correlation.utils; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.admin.indices.create.CreateIndexRequest; -import org.opensearch.action.admin.indices.create.CreateIndexResponse; -import org.opensearch.client.Client; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.Settings; -import org.opensearch.core.action.ActionListener; -import org.opensearch.plugin.correlation.rules.model.CorrelationRule; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.Objects; - -/** - * Correlation Rule Index manager - * - * @opensearch.internal - */ -public class CorrelationRuleIndices { - private static final Logger log = LogManager.getLogger(CorrelationRuleIndices.class); - - private final Client client; - - private final ClusterService clusterService; - - /** - * Parameterized ctor for CorrelationRuleIndices - * @param client OS Client - * @param clusterService ClusterService - */ - public CorrelationRuleIndices(Client client, ClusterService clusterService) { - this.client = client; - this.clusterService = clusterService; - } - - /** - * get correlation rule index mappings - * @return mappings of correlation rule index - * @throws IOException IOException - */ - public static String correlationRuleIndexMappings() throws IOException { - return new String( - Objects.requireNonNull(CorrelationRuleIndices.class.getClassLoader().getResourceAsStream("mappings/correlation-rules.json")) - .readAllBytes(), - Charset.defaultCharset() - ); - } - - /** - * init the correlation rule index - * @param actionListener listener - * @throws IOException IOException - */ - public void initCorrelationRuleIndex(ActionListener actionListener) throws IOException { - if (correlationRuleIndexExists() == false) { - CreateIndexRequest indexRequest = new CreateIndexRequest(CorrelationRule.CORRELATION_RULE_INDEX).mapping( - correlationRuleIndexMappings() - ).settings(Settings.builder().put("index.hidden", true).build()); - client.admin().indices().create(indexRequest, actionListener); - } - } - - /** - * check if correlation rule index exists - * @return boolean - */ - public boolean correlationRuleIndexExists() { - ClusterState clusterState = clusterService.state(); - return clusterState.getRoutingTable().hasIndex(CorrelationRule.CORRELATION_RULE_INDEX); - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java deleted file mode 100644 index 362be3d2932e3..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/IndexUtils.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
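Reviewer note: `correlationRuleIndexMappings()` above reads the bundled JSON with `Charset.defaultCharset()` and never closes the stream. Since the mappings file ships as UTF-8 JSON, an explicit charset plus try-with-resources would presumably be safer; a stand-alone sketch:

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Objects;

final class MappingResource {
    // Reads a bundled classpath resource; an explicit UTF-8 charset avoids
    // platform-dependent decoding, and try-with-resources closes the stream.
    static String load(String path) throws IOException {
        try (InputStream in = Objects.requireNonNull(
                MappingResource.class.getClassLoader().getResourceAsStream(path),
                "missing classpath resource: " + path)) {
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }
}
```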
- */ - -package org.opensearch.plugin.correlation.utils; - -import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.client.IndicesAdminClient; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.core.action.ActionListener; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentParser; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; - -import static org.opensearch.core.ParseField.CommonFields._META; - -/** - * Index Management utils - * - * @opensearch.internal - */ -public class IndexUtils { - private static final Integer NO_SCHEMA_VERSION = 0; - private static final String SCHEMA_VERSION = "schema_version"; - - /** - * manages the mappings lifecycle for correlation rule index - */ - public static Boolean correlationRuleIndexUpdated = false; - - private IndexUtils() {} - - /** - * updates the status of correlationRuleIndexUpdated to true - */ - public static void correlationRuleIndexUpdated() { - correlationRuleIndexUpdated = true; - } - - /** - * util method which decides based on schema version whether to update an index. - * @param index IndexMetadata - * @param mapping new mappings - * @return Boolean - * @throws IOException IOException - */ - public static Boolean shouldUpdateIndex(IndexMetadata index, String mapping) throws IOException { - Integer oldVersion = NO_SCHEMA_VERSION; - Integer newVersion = getSchemaVersion(mapping); - - Map indexMapping = index.mapping().sourceAsMap(); - if (indexMapping != null - && indexMapping.containsKey(_META.getPreferredName()) - && indexMapping.get(_META.getPreferredName()) instanceof HashMap) { - Map metaData = (HashMap) indexMapping.get(_META.getPreferredName()); - if (metaData.containsKey(SCHEMA_VERSION)) { - oldVersion = (Integer) metaData.get(SCHEMA_VERSION); - } - } - return newVersion > oldVersion; - } - - /** - * Gets the schema version for the mapping - * @param mapping mappings as input - * @return schema version - * @throws IOException IOException - */ - public static Integer getSchemaVersion(String mapping) throws IOException { - XContentParser xcp = MediaTypeRegistry.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, mapping); - - while (!xcp.isClosed()) { - XContentParser.Token token = xcp.currentToken(); - if (token != null && token != XContentParser.Token.END_OBJECT && token != XContentParser.Token.START_OBJECT) { - if (!Objects.equals(xcp.currentName(), _META.getPreferredName())) { - xcp.nextToken(); - xcp.skipChildren(); - } else { - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - switch (xcp.currentName()) { - case SCHEMA_VERSION: - int version = xcp.intValue(); - if (version < 0) { - throw new IllegalArgumentException( - String.format(Locale.getDefault(), "%s cannot be negative", SCHEMA_VERSION) - ); - } - return version; - default: - xcp.nextToken(); - } - } - } - } - xcp.nextToken(); - } - return NO_SCHEMA_VERSION; - } - - /** - * updates the mappings for the index. 
- * @param index index for which mapping needs to be updated - * @param mapping new mappings - * @param clusterState ClusterState - * @param client Admin client - * @param actionListener listener - * @throws IOException IOException - */ - public static void updateIndexMapping( - String index, - String mapping, - ClusterState clusterState, - IndicesAdminClient client, - ActionListener actionListener - ) throws IOException { - if (clusterState.metadata().indices().containsKey(index)) { - if (shouldUpdateIndex(clusterState.metadata().index(index), mapping)) { - PutMappingRequest putMappingRequest = new PutMappingRequest(index).source(mapping, MediaTypeRegistry.JSON); - client.putMapping(putMappingRequest, actionListener); - } else { - actionListener.onResponse(new AcknowledgedResponse(true)); - } - } - } -} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java deleted file mode 100644 index 798196c47df20..0000000000000 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/utils/package-info.java +++ /dev/null @@ -1,12 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * utils package for events-correlation-engine - */ -package org.opensearch.plugin.correlation.utils; diff --git a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec deleted file mode 100644 index 013c17e4a9736..0000000000000 --- a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ /dev/null @@ -1 +0,0 @@ -org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec diff --git a/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json b/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json deleted file mode 100644 index 7741b160eca24..0000000000000 --- a/plugins/events-correlation-engine/src/main/resources/mappings/correlation-rules.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "_meta" : { - "schema_version": 1 - }, - "properties": { - "name": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "correlate": { - "type": "nested", - "properties": { - "index": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "query": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "tags": { - "type": "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "timestampField": { - "type": "text", - "analyzer" : "whitespace", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - } - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java deleted file mode 100644 index 005ffa2097b03..0000000000000 
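The deleted `IndexUtils` gate is easy to restate: look up `_meta.schema_version` in the live index mapping (absent means version 0) and update only when the bundled mapping carries a higher version, which the `correlation-rules.json` above pins at 1. A stand-alone distillation over the `sourceAsMap()`-style map structure:

```java
import java.util.Map;

final class SchemaVersionGate {
    static final int NO_SCHEMA_VERSION = 0;

    // Extract _meta.schema_version from a mapping source map; a missing or
    // malformed _meta block counts as version 0, as in the deleted code.
    @SuppressWarnings("unchecked")
    static int schemaVersion(Map<String, Object> mappingSource) {
        Object meta = mappingSource.get("_meta");
        if (meta instanceof Map) {
            Object v = ((Map<String, Object>) meta).get("schema_version");
            if (v instanceof Integer) {
                return (Integer) v;
            }
        }
        return NO_SCHEMA_VERSION;
    }

    static boolean shouldUpdate(Map<String, Object> liveMapping, int bundledVersion) {
        return bundledVersion > schemaVersion(liveMapping);
    }

    public static void main(String[] args) {
        Map<String, Object> live = Map.of("_meta", Map.of("schema_version", 1));
        System.out.println(shouldUpdate(live, 2));  // true: bundled mapping is newer
    }
}
```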
--- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/EventsCorrelationPluginTests.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation; - -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -public class EventsCorrelationPluginTests extends OpenSearchTestCase { - - public void testDummy() { - Assert.assertEquals(1, 1); - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java deleted file mode 100644 index 19ce3b33514d8..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/CorrelationParamsContextTests.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.index.VectorSimilarityFunction; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.PARAMETERS; -import static org.opensearch.plugin.correlation.core.index.CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION; - -/** - * Unit tests for CorrelationsParamsContext - */ -public class CorrelationParamsContextTests extends OpenSearchTestCase { - - /** - * Test reading from and writing to streams - */ - public void testStreams() throws IOException { - int efConstruction = 321; - int m = 12; - - Map parameters = new HashMap<>(); - parameters.put("m", m); - parameters.put("ef_construction", efConstruction); - - CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters); - - BytesStreamOutput streamOutput = new BytesStreamOutput(); - context.writeTo(streamOutput); - - CorrelationParamsContext copy = new CorrelationParamsContext(streamOutput.bytes().streamInput()); - Assert.assertEquals(context.getSimilarityFunction(), copy.getSimilarityFunction()); - Assert.assertEquals(context.getParameters(), copy.getParameters()); - } - - /** - * test get vector similarity function - */ - public void testVectorSimilarityFunction() { - int efConstruction = 321; - int m = 12; - - Map parameters = new HashMap<>(); - parameters.put("m", m); - parameters.put("ef_construction", efConstruction); - - CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters); - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction()); - } - - 
/** - * test get parameters - */ - public void testParameters() { - int efConstruction = 321; - int m = 12; - - Map parameters = new HashMap<>(); - parameters.put("m", m); - parameters.put("ef_construction", efConstruction); - - CorrelationParamsContext context = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, parameters); - Assert.assertEquals(parameters, context.getParameters()); - } - - /** - * test parse method with invalid input - * @throws IOException IOException - */ - public void testParse_Invalid() throws IOException { - // Invalid input type - Integer invalidIn = 12; - expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(invalidIn)); - - // Invalid vector similarity function - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(CorrelationParamsContext.VECTOR_SIMILARITY_FUNCTION, 0) - .endObject(); - - final Map in2 = xContentBuilderToMap(xContentBuilder); - expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in2)); - - // Invalid parameters - xContentBuilder = XContentFactory.jsonBuilder().startObject().field(PARAMETERS, 0).endObject(); - - final Map in4 = xContentBuilderToMap(xContentBuilder); - expectThrows(MapperParsingException.class, () -> CorrelationParamsContext.parse(in4)); - } - - /** - * test parse with null parameters - * @throws IOException IOException - */ - public void testParse_NullParameters() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN) - .field(PARAMETERS, (String) null) - .endObject(); - Map in = xContentBuilderToMap(xContentBuilder); - Assert.assertThrows(MapperParsingException.class, () -> { CorrelationParamsContext.parse(in); }); - } - - /** - * test parse method - * @throws IOException IOException - */ - public void testParse_Valid() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN) - .startObject(PARAMETERS) - .field("m", 2) - .field("ef_construction", 128) - .endObject() - .endObject(); - - Map in = xContentBuilderToMap(xContentBuilder); - CorrelationParamsContext context = CorrelationParamsContext.parse(in); - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, context.getSimilarityFunction()); - Assert.assertEquals(Map.of("m", 2, "ef_construction", 128), context.getParameters()); - } - - /** - * test toXContent method - * @throws IOException IOException - */ - public void testToXContent() throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(VECTOR_SIMILARITY_FUNCTION, VectorSimilarityFunction.EUCLIDEAN) - .startObject(PARAMETERS) - .field("m", 2) - .field("ef_construction", 128) - .endObject() - .endObject(); - - Map in = xContentBuilderToMap(xContentBuilder); - CorrelationParamsContext context = CorrelationParamsContext.parse(in); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder = context.toXContent(builder, ToXContent.EMPTY_PARAMS); - - Map out = xContentBuilderToMap(builder); - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN.name(), out.get(VECTOR_SIMILARITY_FUNCTION)); - } - - private Map xContentBuilderToMap(XContentBuilder xContentBuilder) { - return XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(); - } -} diff --git 
a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java deleted file mode 100644 index 32c71dcd37196..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/VectorFieldTests.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index; - -import org.apache.lucene.document.FieldType; -import org.opensearch.ExceptionsHelper; -import org.opensearch.OpenSearchException; -import org.opensearch.common.Randomness; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.BytesStreamInput; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Random; - -/** - * Unit tests for VectorField - */ -public class VectorFieldTests extends OpenSearchTestCase { - - private final Random random = Randomness.get(); - - /** - * test VectorField ctor - */ - public void testVectorField_ctor() { - VectorField field = new VectorField("test-field", new float[] { 1.0f, 1.0f }, new FieldType()); - Assert.assertEquals("test-field", field.name()); - } - - /** - * test float vector to array serializer - * @throws IOException IOException - */ - public void testVectorAsArraySerializer() throws IOException { - final float[] vector = getArrayOfRandomFloats(20); - - final BytesStreamOutput objectStream = new BytesStreamOutput(); - objectStream.writeFloatArray(vector); - final byte[] serializedVector = objectStream.bytes().toBytesRef().bytes; - - final byte[] actualSerializedVector = VectorField.floatToByteArray(vector); - - Assert.assertNotNull(actualSerializedVector); - Assert.assertArrayEquals(serializedVector, actualSerializedVector); - - final float[] actualDeserializedVector = byteToFloatArray(actualSerializedVector); - Assert.assertNotNull(actualDeserializedVector); - Assert.assertArrayEquals(vector, actualDeserializedVector, 0.1f); - } - - /** - * test byte array to float vector failures - */ - public void testByteToFloatArrayFailures() { - final byte[] serializedVector = "test-dummy".getBytes(StandardCharsets.UTF_8); - expectThrows(OpenSearchException.class, () -> { byteToFloatArray(serializedVector); }); - } - - private float[] getArrayOfRandomFloats(int length) { - float[] vector = new float[length]; - for (int i = 0; i < 20; ++i) { - vector[i] = random.nextFloat(); - } - return vector; - } - - private static float[] byteToFloatArray(byte[] byteStream) { - try (BytesStreamInput objectStream = new BytesStreamInput(byteStream)) { - return objectStream.readFloatArray(); - } catch (IOException ex) { - throw ExceptionsHelper.convertToOpenSearchException(ex); - } - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java deleted file mode 100644 index 7223b450a136c..0000000000000 --- 
a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.codec.correlation990; - -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.FieldType; -import org.apache.lucene.document.KnnFloatVectorField; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.SerialMergeScheduler; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.store.Directory; -import org.apache.lucene.tests.index.RandomIndexWriter; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; -import org.opensearch.plugin.correlation.core.index.query.CorrelationQueryFactory; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; - -import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION; -import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M; -import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_9_0; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -/** - * Unit tests for custom correlation codec - */ -public class CorrelationCodecTests extends OpenSearchTestCase { - - private static final String FIELD_NAME_ONE = "test_vector_one"; - private static final String FIELD_NAME_TWO = "test_vector_two"; - - /** - * test correlation vector index - * @throws Exception Exception - */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8329") - public void testCorrelationVectorIndex() throws Exception { - Function perFieldCorrelationVectorsProvider = - mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService)); - Function correlationCodecProvider = (correlationVectorsFormat -> new CorrelationCodec( - V_9_9_0.getDefaultCodecDelegate(), - correlationVectorsFormat - )); - testCorrelationVectorIndex(correlationCodecProvider, perFieldCorrelationVectorsProvider); - } - - private void testCorrelationVectorIndex( - final Function codecProvider, - final Function perFieldCorrelationVectorsProvider - ) throws Exception { - final MapperService mapperService = mock(MapperService.class); - final CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext( - VectorSimilarityFunction.EUCLIDEAN, - Map.of(METHOD_PARAMETER_M, 16, METHOD_PARAMETER_EF_CONSTRUCTION, 256) - ); - - final VectorFieldMapper.CorrelationVectorFieldType mappedFieldType1 = new VectorFieldMapper.CorrelationVectorFieldType( - FIELD_NAME_ONE, - Map.of(), - 3, - correlationParamsContext - ); - final 
VectorFieldMapper.CorrelationVectorFieldType mappedFieldType2 = new VectorFieldMapper.CorrelationVectorFieldType( - FIELD_NAME_TWO, - Map.of(), - 2, - correlationParamsContext - ); - when(mapperService.fieldType(eq(FIELD_NAME_ONE))).thenReturn(mappedFieldType1); - when(mapperService.fieldType(eq(FIELD_NAME_TWO))).thenReturn(mappedFieldType2); - - var perFieldCorrelationVectorsFormatSpy = spy(perFieldCorrelationVectorsProvider.apply(mapperService)); - final Codec codec = codecProvider.apply(perFieldCorrelationVectorsFormatSpy); - - Directory dir = newFSDirectory(createTempDir()); - IndexWriterConfig iwc = newIndexWriterConfig(); - iwc.setMergeScheduler(new SerialMergeScheduler()); - iwc.setCodec(codec); - - final FieldType luceneFieldType = KnnFloatVectorField.createFieldType(3, VectorSimilarityFunction.EUCLIDEAN); - float[] array = { 1.0f, 3.0f, 4.0f }; - KnnFloatVectorField vectorField = new KnnFloatVectorField(FIELD_NAME_ONE, array, luceneFieldType); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc); - Document doc = new Document(); - doc.add(vectorField); - writer.addDocument(doc); - writer.commit(); - IndexReader reader = writer.getReader(); - writer.close(); - - verify(perFieldCorrelationVectorsFormatSpy).getKnnVectorsFormatForField(eq(FIELD_NAME_ONE)); - - IndexSearcher searcher = new IndexSearcher(reader); - Query query = CorrelationQueryFactory.create( - new CorrelationQueryFactory.CreateQueryRequest("dummy", FIELD_NAME_ONE, new float[] { 1.0f, 0.0f, 0.0f }, 1, null, null) - ); - - assertEquals(1, searcher.count(query)); - - reader.close(); - dir.close(); - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java deleted file mode 100644 index 674f35069a742..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/mapper/CorrelationVectorFieldMapperTests.java +++ /dev/null @@ -1,310 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.plugin.correlation.core.index.mapper; - -import org.apache.lucene.document.KnnFloatVectorField; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.VectorSimilarityFunction; -import org.apache.lucene.search.FieldExistsQuery; -import org.opensearch.Version; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.Explicit; -import org.opensearch.common.settings.IndexScopedSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.mapper.ContentPath; -import org.opensearch.index.mapper.FieldMapper; -import org.opensearch.index.mapper.Mapper; -import org.opensearch.index.mapper.MapperParsingException; -import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.ParseContext; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.QueryShardException; -import org.opensearch.plugin.correlation.core.index.CorrelationParamsContext; -import org.opensearch.search.lookup.SearchLookup; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -import org.mockito.Mockito; - -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Unit tests for correlation vector field mapper - */ -public class CorrelationVectorFieldMapperTests extends OpenSearchTestCase { - - private static final String CORRELATION_VECTOR_TYPE = "correlation_vector"; - private static final String DIMENSION_FIELD_NAME = "dimension"; - private static final String TYPE_FIELD_NAME = "type"; - - /** - * test builder construction from parse of correlation params context - * @throws IOException IOException - */ - public void testBuilder_parse_fromCorrelationParamsContext() throws IOException { - String fieldName = "test-field-name"; - String indexName = "test-index-name"; - Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); - - VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); - - int efConstruction = 321; - int m = 12; - int dimension = 10; - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, dimension) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", m) - .field("ef_construction", efConstruction) - .endObject() - .endObject() - .endObject(); - - VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), - buildParserContext(indexName, settings) - ); - Mapper.BuilderContext builderContext = new Mapper.BuilderContext(settings, new ContentPath()); - builder.build(builderContext); - - Assert.assertEquals(VectorSimilarityFunction.EUCLIDEAN, builder.correlationParamsContext.getValue().getSimilarityFunction()); - Assert.assertEquals(321, 
builder.correlationParamsContext.getValue().getParameters().get("ef_construction")); - - XContentBuilder xContentBuilderEmptyParams = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, dimension) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .endObject() - .endObject(); - - VectorFieldMapper.Builder builderEmptyParams = (VectorFieldMapper.Builder) typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilderEmptyParams), true, xContentBuilderEmptyParams.contentType()) - .v2(), - buildParserContext(indexName, settings) - ); - - Assert.assertEquals( - VectorSimilarityFunction.EUCLIDEAN, - builderEmptyParams.correlationParamsContext.getValue().getSimilarityFunction() - ); - Assert.assertTrue(builderEmptyParams.correlationParamsContext.getValue().getParameters().isEmpty()); - } - - /** - * test type parser construction throw error for invalid dimension of correlation vectors - * @throws IOException IOException - */ - public void testTypeParser_parse_fromCorrelationParamsContext_InvalidDimension() throws IOException { - String fieldName = "test-field-name"; - String indexName = "test-index-name"; - Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); - - VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); - - int efConstruction = 321; - int m = 12; - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, 2000) - .startObject("correlation_ctx") - .field("similarityFunction", VectorSimilarityFunction.EUCLIDEAN.name()) - .startObject("parameters") - .field("m", m) - .field("ef_construction", efConstruction) - .endObject() - .endObject() - .endObject(); - - VectorFieldMapper.Builder builder = (VectorFieldMapper.Builder) typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), - buildParserContext(indexName, settings) - ); - - expectThrows(IllegalArgumentException.class, () -> builder.build(new Mapper.BuilderContext(settings, new ContentPath()))); - } - - /** - * test type parser construction error for invalid vector similarity function - * @throws IOException IOException - */ - public void testTypeParser_parse_fromCorrelationParamsContext_InvalidVectorSimilarityFunction() throws IOException { - String fieldName = "test-field-name"; - String indexName = "test-index-name"; - Settings settings = Settings.builder().put(settings(Version.CURRENT).build()).build(); - - VectorFieldMapper.TypeParser typeParser = new VectorFieldMapper.TypeParser(); - - int efConstruction = 321; - int m = 12; - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() - .startObject() - .field(TYPE_FIELD_NAME, CORRELATION_VECTOR_TYPE) - .field(DIMENSION_FIELD_NAME, 2000) - .startObject("correlation_ctx") - .field("similarityFunction", "invalid") - .startObject("parameters") - .field("m", m) - .field("ef_construction", efConstruction) - .endObject() - .endObject() - .endObject(); - - expectThrows( - MapperParsingException.class, - () -> typeParser.parse( - fieldName, - XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true, xContentBuilder.contentType()).v2(), - buildParserContext(indexName, settings) - ) - ); - } - - /** - * test parseCreateField in CorrelationVectorFieldMapper - 
* @throws IOException ioexception - */ - public void testCorrelationVectorFieldMapper_parseCreateField() throws IOException { - String fieldName = "test-field-name"; - int dimension = 10; - float[] testVector = createInitializedFloatArray(dimension, 1.0f); - CorrelationParamsContext correlationParamsContext = new CorrelationParamsContext(VectorSimilarityFunction.EUCLIDEAN, Map.of()); - - VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType( - fieldName, - Map.of(), - dimension, - correlationParamsContext - ); - - CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput input = new CorrelationVectorFieldMapper.CreateLuceneFieldMapperInput( - fieldName, - correlationVectorFieldType, - FieldMapper.MultiFields.empty(), - FieldMapper.CopyTo.empty(), - new Explicit<>(true, true), - false, - false, - correlationParamsContext - ); - - ParseContext.Document document = new ParseContext.Document(); - ContentPath contentPath = new ContentPath(); - ParseContext parseContext = mock(ParseContext.class); - when(parseContext.doc()).thenReturn(document); - when(parseContext.path()).thenReturn(contentPath); - - CorrelationVectorFieldMapper correlationVectorFieldMapper = Mockito.spy(new CorrelationVectorFieldMapper(input)); - doReturn(Optional.of(testVector)).when(correlationVectorFieldMapper).getFloatsFromContext(parseContext, dimension); - - correlationVectorFieldMapper.parseCreateField(parseContext, dimension); - - List fields = document.getFields(); - assertEquals(1, fields.size()); - IndexableField field = fields.get(0); - - Assert.assertTrue(field instanceof KnnFloatVectorField); - KnnFloatVectorField knnFloatVectorField = (KnnFloatVectorField) field; - Assert.assertArrayEquals(testVector, knnFloatVectorField.vectorValue(), 0.001f); - } - - /** - * test CorrelationVectorFieldType subclass - */ - public void testCorrelationVectorFieldType() { - String fieldName = "test-field-name"; - int dimension = 10; - QueryShardContext context = mock(QueryShardContext.class); - SearchLookup searchLookup = mock(SearchLookup.class); - - VectorFieldMapper.CorrelationVectorFieldType correlationVectorFieldType = new VectorFieldMapper.CorrelationVectorFieldType( - fieldName, - Map.of(), - dimension - ); - Assert.assertThrows(QueryShardException.class, () -> { correlationVectorFieldType.termQuery(new Object(), context); }); - Assert.assertThrows( - UnsupportedOperationException.class, - () -> { correlationVectorFieldType.valueFetcher(context, searchLookup, ""); } - ); - Assert.assertTrue(correlationVectorFieldType.existsQuery(context) instanceof FieldExistsQuery); - Assert.assertEquals(VectorFieldMapper.CONTENT_TYPE, correlationVectorFieldType.typeName()); - } - - /** - * test constants in VectorFieldMapper - */ - public void testVectorFieldMapperConstants() { - Assert.assertNotNull(VectorFieldMapper.Defaults.IGNORE_MALFORMED); - Assert.assertNotNull(VectorFieldMapper.Names.IGNORE_MALFORMED); - } - - private IndexMetadata buildIndexMetaData(String index, Settings settings) { - return IndexMetadata.builder(index) - .settings(settings) - .numberOfShards(1) - .numberOfReplicas(0) - .version(7) - .mappingVersion(0) - .settingsVersion(0) - .aliasesVersion(0) - .creationDate(0) - .build(); - } - - private Mapper.TypeParser.ParserContext buildParserContext(String index, Settings settings) { - IndexSettings indexSettings = new IndexSettings( - buildIndexMetaData(index, settings), - Settings.EMPTY, - new IndexScopedSettings(Settings.EMPTY, new 
HashSet<>(IndexScopedSettings.BUILT_IN_INDEX_SETTINGS)) - ); - - MapperService mapperService = mock(MapperService.class); - when(mapperService.getIndexSettings()).thenReturn(indexSettings); - - return new Mapper.TypeParser.ParserContext( - null, - mapperService, - type -> new VectorFieldMapper.TypeParser(), - Version.CURRENT, - null, - null, - null - ); - } - - private static float[] createInitializedFloatArray(int dimension, float value) { - float[] array = new float[dimension]; - Arrays.fill(array, value); - return array; - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java deleted file mode 100644 index 3e567d0c04e53..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/query/CorrelationQueryBuilderTests.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.plugin.correlation.core.index.query; - -import org.apache.lucene.search.KnnFloatVectorQuery; -import org.opensearch.Version; -import org.opensearch.cluster.ClusterModule; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.Strings; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.index.Index; -import org.opensearch.core.xcontent.MediaTypeRegistry; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.NumberFieldMapper; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.plugin.correlation.core.index.mapper.VectorFieldMapper; -import org.opensearch.plugins.SearchPlugin; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; - -import java.io.IOException; -import java.util.List; -import java.util.Optional; - -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Unit tests for Correlation Query Builder - */ -public class CorrelationQueryBuilderTests extends OpenSearchTestCase { - - private static final String FIELD_NAME = "myvector"; - private static final int K = 1; - private static final TermQueryBuilder TERM_QUERY = QueryBuilders.termQuery("field", "value"); - private static final float[] QUERY_VECTOR = new float[] { 1.0f, 2.0f, 3.0f, 4.0f }; - - /** - * test invalid number of nearby neighbors - */ - public void testInvalidK() { - float[] queryVector = { 1.0f, 1.0f }; - - expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, -K)); - expectThrows(IllegalArgumentException.class, () -> new 
CorrelationQueryBuilder(FIELD_NAME, queryVector, 0)); - expectThrows( - IllegalArgumentException.class, - () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, CorrelationQueryBuilder.K_MAX + 1) - ); - } - - /** - * test empty vector scenario - */ - public void testEmptyVector() { - final float[] queryVector = null; - expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector, 1)); - final float[] queryVector1 = new float[] {}; - expectThrows(IllegalArgumentException.class, () -> new CorrelationQueryBuilder(FIELD_NAME, queryVector1, 1)); - } - - /** - * test serde with xcontent - * @throws IOException IOException - */ - public void testFromXContent() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - builder.startObject(correlationQueryBuilder.fieldName()); - builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector()); - builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK()); - builder.endObject(); - builder.endObject(); - XContentParser contentParser = createParser(builder); - contentParser.nextToken(); - CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(actualBuilder, correlationQueryBuilder); - } - - /** - * test serde with xcontent from a raw JSON string - * @throws IOException IOException - */ - public void testFromXContentFromString() throws IOException { - String correlationQuery = "{\n" - + " \"myvector\" : {\n" - + " \"vector\" : [\n" - + " 1.0,\n" - + " 2.0,\n" - + " 3.0,\n" - + " 4.0\n" - + " ],\n" - + " \"k\" : 1,\n" - + " \"boost\" : 1.0\n" - + " }\n" - + "}"; - XContentParser contentParser = createParser(JsonXContent.jsonXContent, correlationQuery); - contentParser.nextToken(); - CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(correlationQuery.replace("\n", "").replace(" ", ""), Strings.toString(MediaTypeRegistry.JSON, actualBuilder)); - } - - /** - * test serde with xcontent with filters - * @throws IOException IOException - */ - public void testFromXContentWithFilters() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - builder.startObject(correlationQueryBuilder.fieldName()); - builder.field(CorrelationQueryBuilder.VECTOR_FIELD.getPreferredName(), correlationQueryBuilder.vector()); - builder.field(CorrelationQueryBuilder.K_FIELD.getPreferredName(), correlationQueryBuilder.getK()); - builder.field(CorrelationQueryBuilder.FILTER_FIELD.getPreferredName(), correlationQueryBuilder.getFilter()); - builder.endObject(); - builder.endObject(); - XContentParser contentParser = createParser(builder); - contentParser.nextToken(); - CorrelationQueryBuilder actualBuilder = CorrelationQueryBuilder.parse(contentParser); - Assert.assertEquals(actualBuilder, correlationQueryBuilder); - } - - /** - * test conversion to KnnFloatVectorQuery logic - * @throws IOException IOException - */ - public void testDoToQuery() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext
mockQueryShardContext = mock(QueryShardContext.class); - VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockCorrVectorField.getDimension()).thenReturn(4); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext); - Assert.assertEquals(FIELD_NAME, query.getField()); - Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f); - Assert.assertEquals(K, query.getK()); - } - - /** - * test conversion to KnnFloatVectorQuery logic with filter - * @throws IOException IOException - */ - public void testDoToQueryWithFilter() throws IOException { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, TERM_QUERY); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext mockQueryShardContext = mock(QueryShardContext.class); - VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockCorrVectorField.getDimension()).thenReturn(4); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - KnnFloatVectorQuery query = (KnnFloatVectorQuery) correlationQueryBuilder.doToQuery(mockQueryShardContext); - Assert.assertEquals(FIELD_NAME, query.getField()); - Assert.assertArrayEquals(QUERY_VECTOR, query.getTargetCopy(), 0.1f); - Assert.assertEquals(K, query.getK()); - Assert.assertEquals(TERM_QUERY.toQuery(mockQueryShardContext), query.getFilter()); - } - - /** - * test conversion to KnnFloatVectorQuery logic failure with invalid dimensions - */ - public void testDoToQueryInvalidDimensions() { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext mockQueryShardContext = mock(QueryShardContext.class); - VectorFieldMapper.CorrelationVectorFieldType mockCorrVectorField = mock(VectorFieldMapper.CorrelationVectorFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockCorrVectorField.getDimension()).thenReturn(400); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext)); - } - - /** - * test conversion to KnnFloatVectorQuery logic failure with invalid field type - */ - public void testDoToQueryInvalidFieldType() { - CorrelationQueryBuilder correlationQueryBuilder = new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - Index dummyIndex = new Index("dummy", "dummy"); - QueryShardContext mockQueryShardContext = mock(QueryShardContext.class); - NumberFieldMapper.NumberFieldType mockCorrVectorField = mock(NumberFieldMapper.NumberFieldType.class); - when(mockQueryShardContext.index()).thenReturn(dummyIndex); - when(mockQueryShardContext.fieldMapper(anyString())).thenReturn(mockCorrVectorField); - expectThrows(IllegalArgumentException.class, () -> correlationQueryBuilder.doToQuery(mockQueryShardContext)); - } - - /** - * test serialization of Correlation Query Builder - * @throws Exception if serialization fails - */ - public void
testSerialization() throws Exception { - assertSerialization(Optional.empty()); - assertSerialization(Optional.of(TERM_QUERY)); - } - - private void assertSerialization(final Optional<QueryBuilder> queryBuilderOptional) throws IOException { - final CorrelationQueryBuilder builder = queryBuilderOptional.isPresent() - ? new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K, queryBuilderOptional.get()) - : new CorrelationQueryBuilder(FIELD_NAME, QUERY_VECTOR, K); - - try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setVersion(Version.CURRENT); - output.writeNamedWriteable(builder); - - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), writableRegistry())) { - in.setVersion(Version.CURRENT); - final QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class); - - assertNotNull(deserializedQuery); - assertTrue(deserializedQuery instanceof CorrelationQueryBuilder); - final CorrelationQueryBuilder deserializedKnnQueryBuilder = (CorrelationQueryBuilder) deserializedQuery; - assertEquals(FIELD_NAME, deserializedKnnQueryBuilder.fieldName()); - assertArrayEquals(QUERY_VECTOR, (float[]) deserializedKnnQueryBuilder.vector(), 0.0f); - assertEquals(K, deserializedKnnQueryBuilder.getK()); - if (queryBuilderOptional.isPresent()) { - assertNotNull(deserializedKnnQueryBuilder.getFilter()); - assertEquals(queryBuilderOptional.get(), deserializedKnnQueryBuilder.getFilter()); - } else { - assertNull(deserializedKnnQueryBuilder.getFilter()); - } - } - } - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - List<NamedXContentRegistry.Entry> list = ClusterModule.getNamedXWriteables(); - SearchPlugin.QuerySpec<TermQueryBuilder> spec = new SearchPlugin.QuerySpec<>( - TermQueryBuilder.NAME, - TermQueryBuilder::new, - TermQueryBuilder::fromXContent - ); - list.add(new NamedXContentRegistry.Entry(QueryBuilder.class, spec.getName(), (p, c) -> spec.getParser().fromXContent(p))); - NamedXContentRegistry registry = new NamedXContentRegistry(list); - return registry; - } - - @Override - protected NamedWriteableRegistry writableRegistry() { - final List<NamedWriteableRegistry.Entry> entries = ClusterModule.getNamedWriteables(); - entries.add( - new NamedWriteableRegistry.Entry( - QueryBuilder.class, - CorrelationQueryBuilder.NAME_FIELD.getPreferredName(), - CorrelationQueryBuilder::new - ) - ); - entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new)); - return new NamedWriteableRegistry(entries); - } -} diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java deleted file mode 100644 index 45cb47b05b5c2..0000000000000 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/settings/EventsCorrelationSettingsTests.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license.
- */ - -package org.opensearch.plugin.correlation.settings; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.plugin.correlation.EventsCorrelationPlugin; -import org.opensearch.test.OpenSearchTestCase; -import org.junit.Assert; -import org.junit.Before; - -import java.util.List; -import java.util.concurrent.TimeUnit; - -/** - * Unit tests for Correlation Engine settings - */ -public class EventsCorrelationSettingsTests extends OpenSearchTestCase { - - private EventsCorrelationPlugin plugin; - - @Before - public void setup() { - plugin = new EventsCorrelationPlugin(); - } - - /** - * test all plugin settings returned - */ - public void testAllPluginSettingsReturned() { - List<Setting<?>> expectedSettings = List.of( - EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING, - EventsCorrelationSettings.CORRELATION_TIME_WINDOW - ); - - List<Setting<?>> settings = plugin.getSettings(); - Assert.assertTrue(settings.containsAll(expectedSettings)); - } - - /** - * test settings get value - */ - public void testSettingsGetValue() { - Settings settings = Settings.builder().put("index.correlation", true).build(); - Assert.assertEquals(EventsCorrelationSettings.IS_CORRELATION_INDEX_SETTING.get(settings), true); - settings = Settings.builder() - .put("plugins.security_analytics.correlation_time_window", new TimeValue(10, TimeUnit.MINUTES)) - .build(); - Assert.assertEquals(EventsCorrelationSettings.CORRELATION_TIME_WINDOW.get(settings), new TimeValue(10, TimeUnit.MINUTES)); - } -} diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 4f30ea9ea7e22..2948ca12904f5 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -89,7 +89,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.2.2' + api 'org.apache.xmlbeans:xmlbeans:5.3.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 deleted file mode 100644 index 613c1028dbd6d..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -586ffe10ae9864e19e85c24bd060790a70586f72 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 new file mode 100644 index 0000000000000..4dbb0149da890 --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.3.0.jar.sha1 @@ -0,0 +1 @@ +f93c3ba820d7240b7fec4ec5bc35e7223cc6fc1f \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index d419f6fafeb30..ad12ec9003e64 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,11 +44,11 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.51.0' + api 'com.azure:azure-core:1.54.1' api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.28.0' - api 'com.azure:azure-core-http-netty:1.15.5' + api 'com.azure:azure-core-http-netty:1.15.7' api "io.netty:netty-codec-dns:${versions.netty}" api
"io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -61,8 +61,8 @@ dependencies { // Start of transitive dependencies for azure-identity api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" - api 'com.microsoft.azure:msal4j:1.17.2' - api 'com.nimbusds:oauth2-oidc-sdk:11.19.1' + api 'com.microsoft.azure:msal4j:1.18.0' + api 'com.nimbusds:oauth2-oidc-sdk:11.20.1' api 'com.nimbusds:nimbus-jose-jwt:9.41.1' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' @@ -108,7 +108,6 @@ thirdPartyAudit { // Optional and not enabled by Elasticsearch 'com.google.common.util.concurrent.internal.InternalFutureFailureAccess', 'com.google.common.util.concurrent.internal.InternalFutures', - 'com.azure.core.credential.ProofOfPossessionOptions', 'com.azure.storage.internal.avro.implementation.AvroObject', 'com.azure.storage.internal.avro.implementation.AvroReader', 'com.azure.storage.internal.avro.implementation.AvroReaderFactory', diff --git a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 deleted file mode 100644 index 7200f59af2f9a..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.51.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ff5d0aedf75ca45ec0ace24673f790d2f7a57096 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 new file mode 100644 index 0000000000000..9246d0dd8443a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.54.1.jar.sha1 @@ -0,0 +1 @@ +9ae0cc4a8ff02a0146510ec9e1c06ab48950a66b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 deleted file mode 100644 index 2f5239cc26148..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -44d99705d3759e2ad7ee8110f811d4ed304a6a7c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 new file mode 100644 index 0000000000000..d72f835c69903 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 @@ -0,0 +1 @@ +a83247eeeb7f63f891e725228d54c3c24132c66a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 deleted file mode 100644 index b5219ee17e9fa..0000000000000 --- a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6211e3d71d0388929babaa0ff0951b30d001852 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 new file mode 100644 index 0000000000000..292259e9d862d --- /dev/null +++ b/plugins/repository-azure/licenses/msal4j-1.18.0.jar.sha1 @@ -0,0 +1 @@ +a47e4e9257a5d9cdb8282c331278492968e06250 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 deleted file mode 100644 index 7d83b0e8ca639..0000000000000 --- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.19.1.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -58db85a807a56ae76baffa519772271ad5808195 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 new file mode 100644 index 0000000000000..7527d31eb1d37 --- /dev/null +++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 @@ -0,0 +1 @@ +8d1ecd62d31945534a7cd63062c3c48ff0df9c43 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 deleted file mode 100644 index bf45716c5b8ce..0000000000000 --- a/plugins/repository-gcs/licenses/grpc-api-1.68.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a9f25c58d8d5b0fcf37ae889a50fec87e34ac08 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 944de326d144c..5bea51706cfae 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -59,6 +59,7 @@ import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; +import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; import org.opensearch.test.BackgroundIndexer; @@ -153,7 +154,6 @@ protected Settings nodeSettings(int nodeOrdinal) { // Disable request throttling because some random values in tests might generate too many failures for the S3 client .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false) .put(S3ClientSettings.PROXY_TYPE_SETTING.getConcreteSettingForNamespace("test").getKey(), ProxySettings.ProxyType.DIRECT) - .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) .put(super.nodeSettings(nodeOrdinal)) .setSecureSettings(secureSettings); @@ -253,22 +253,27 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - GenericStatsMetricPublisher genericStatsMetricPublisher = new GenericStatsMetricPublisher(10000L, 10, 10000L, 10); - + AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( + S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), + normalExecutorBuilder.getStreamReader(), + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader(), + transferSemaphoresHolder + ); return new S3Repository( metadata, registry, service, clusterService, recoverySettings, - null, - null, - null, - null, - null, - false, - null, - null, + asyncUploadUtils, + urgentExecutorBuilder, + priorityExecutorBuilder, + 
normalExecutorBuilder, + s3AsyncService, + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()), + normalPrioritySizeBasedBlockingQ, + lowPrioritySizeBasedBlockingQ, genericStatsMetricPublisher ) { diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index f0e40db965646..7db9a0d3ba790 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -55,14 +55,6 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { - @Override - protected Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) - .build(); - } - @Override @Before @SuppressForbidden(reason = "Need to set system property here for AWS SDK v2") diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 8bbef168de89c..7397c3132c17c 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -25,7 +25,6 @@ import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; @@ -120,6 +119,7 @@ public AmazonAsyncS3Reference client( if (existing != null && existing.tryIncRef()) { return existing; } + final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); @@ -235,17 +235,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { + RetryPolicy retryPolicy = SocketAccess.doPrivileged( + () -> RetryPolicy.builder() + .numRetries(clientSettings.maxRetries) + .throttlingBackoffStrategy( + clientSettings.throttleRetries ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) : BackoffStrategy.none() + ) + .build() + ); + return ClientOverrideConfiguration.builder() - .retryPolicy( - RetryPolicy.builder() - .numRetries(clientSettings.maxRetries) - .throttlingBackoffStrategy( - clientSettings.throttleRetries - ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) - : BackoffStrategy.none() - ) - .build() - ) + .retryPolicy(retryPolicy) .apiCallAttemptTimeout(Duration.ofMillis(clientSettings.requestTimeoutMillis)) .build(); } @@ -346,12 +346,7 @@ static AwsCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c // valid paths. 
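// The override below now simply delegates to S3Service.setDefaultAwsProfilePath(), so the sync
// (S3Service) and async (S3AsyncService) clients share a single implementation of the AWS
// profile-path default instead of duplicating the System.setProperty logic.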
@SuppressForbidden(reason = "Need to provide this override to v2 SDK so that path does not default to home path") private static void setDefaultAwsProfilePath() { - if (ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.getStringValue().isEmpty()) { - System.setProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), System.getProperty("opensearch.path.conf")); - } - if (ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue().isEmpty()) { - System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), System.getProperty("opensearch.path.conf")); - } + S3Service.setDefaultAwsProfilePath(); } private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { @@ -443,5 +438,6 @@ public AwsCredentials resolveCredentials() { @Override public void close() { releaseCachedClients(); + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 1a402e8431e25..8690a5c91680a 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -43,9 +43,6 @@ import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.Delete; -import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -55,9 +52,7 @@ import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.s3.model.ObjectAttributes; -import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.S3Error; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; @@ -68,7 +63,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.StreamContext; @@ -101,11 +96,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; @@ -381,125 +373,17 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS } @Override - public DeleteResult delete() throws IOException { - final AtomicLong deletedBlobs = new AtomicLong(); - final AtomicLong deletedBytes = new AtomicLong(); - try 
(AmazonS3Reference clientReference = blobStore.clientReference()) { - ListObjectsV2Iterable listObjectsIterable = SocketAccess.doPrivileged( - () -> clientReference.get() - .listObjectsV2Paginator( - ListObjectsV2Request.builder() - .bucket(blobStore.bucket()) - .prefix(keyPath) - .overrideConfiguration( - o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher) - ) - .build() - ) - ); - - Iterator listObjectsResponseIterator = listObjectsIterable.iterator(); - while (listObjectsResponseIterator.hasNext()) { - ListObjectsV2Response listObjectsResponse = SocketAccess.doPrivileged(listObjectsResponseIterator::next); - List blobsToDelete = listObjectsResponse.contents().stream().map(s3Object -> { - deletedBlobs.incrementAndGet(); - deletedBytes.addAndGet(s3Object.size()); - - return s3Object.key(); - }).collect(Collectors.toList()); - - if (!listObjectsResponseIterator.hasNext()) { - blobsToDelete.add(keyPath); - } - - doDeleteBlobs(blobsToDelete, false); - } - } catch (SdkException e) { - throw new IOException("Exception when deleting blob container [" + keyPath + "]", e); - } - - return new DeleteResult(deletedBlobs.get(), deletedBytes.get()); + public DeleteResult delete() { + PlainActionFuture future = new PlainActionFuture<>(); + deleteAsync(future); + return future.actionGet(); } @Override - public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { - doDeleteBlobs(blobNames, true); - } - - private void doDeleteBlobs(List blobNames, boolean relative) throws IOException { - if (blobNames.isEmpty()) { - return; - } - final Set outstanding; - if (relative) { - outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet()); - } else { - outstanding = new HashSet<>(blobNames); - } - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes - final List deleteRequests = new ArrayList<>(); - final List partition = new ArrayList<>(); - for (String key : outstanding) { - partition.add(key); - if (partition.size() == blobStore.getBulkDeletesSize()) { - deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); - partition.clear(); - } - } - if (partition.isEmpty() == false) { - deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); - } - SocketAccess.doPrivilegedVoid(() -> { - SdkException aex = null; - for (DeleteObjectsRequest deleteRequest : deleteRequests) { - List keysInRequest = deleteRequest.delete() - .objects() - .stream() - .map(ObjectIdentifier::key) - .collect(Collectors.toList()); - try { - DeleteObjectsResponse deleteObjectsResponse = clientReference.get().deleteObjects(deleteRequest); - outstanding.removeAll(keysInRequest); - outstanding.addAll(deleteObjectsResponse.errors().stream().map(S3Error::key).collect(Collectors.toSet())); - if (!deleteObjectsResponse.errors().isEmpty()) { - logger.warn( - () -> new ParameterizedMessage( - "Failed to delete some blobs {}", - deleteObjectsResponse.errors() - .stream() - .map(s3Error -> "[" + s3Error.key() + "][" + s3Error.code() + "][" + s3Error.message() + "]") - .collect(Collectors.toList()) - ) - ); - } - } catch (SdkException e) { - // The AWS client threw any unexpected exception and did not execute the request at all so we do not - // remove any keys from the outstanding deletes set. 
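// ExceptionsHelper.useOrSuppress keeps the first SdkException as the primary failure and
// attaches later ones as suppressed exceptions, so a single combined exception is rethrown
// only after every bulk delete request has been attempted.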
- aex = ExceptionsHelper.useOrSuppress(aex, e); - } - } - if (aex != null) { - throw aex; - } - }); - } catch (Exception e) { - throw new IOException("Failed to delete blobs [" + outstanding + "]", e); - } - assert outstanding.isEmpty(); - } - - private DeleteObjectsRequest bulkDelete(String bucket, List blobs) { - return DeleteObjectsRequest.builder() - .bucket(bucket) - .delete( - Delete.builder() - .objects(blobs.stream().map(blob -> ObjectIdentifier.builder().key(blob).build()).collect(Collectors.toList())) - .quiet(true) - .build() - ) - .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) - .build(); + public void deleteBlobsIgnoringIfNotExists(List blobNames) { + PlainActionFuture future = new PlainActionFuture<>(); + deleteBlobsAsyncIgnoringIfNotExists(blobNames, future); + future.actionGet(); } @Override @@ -886,7 +770,11 @@ public void deleteAsync(ActionListener completionListener) { try (AmazonAsyncS3Reference asyncClientReference = blobStore.asyncClientReference()) { S3AsyncClient s3AsyncClient = asyncClientReference.get().client(); - ListObjectsV2Request listRequest = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath).build(); + ListObjectsV2Request listRequest = ListObjectsV2Request.builder() + .bucket(blobStore.bucket()) + .prefix(keyPath) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher)) + .build(); ListObjectsV2Publisher listPublisher = s3AsyncClient.listObjectsV2Paginator(listRequest); AtomicLong deletedBlobs = new AtomicLong(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 1048ec784ec4e..72a812339e387 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -93,19 +93,19 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private static final String NORMAL_TRANSFER_QUEUE_CONSUMER = "normal_transfer_queue_consumer"; protected final S3Service service; - private final S3AsyncService s3AsyncService; + protected final S3AsyncService s3AsyncService; private final Path configPath; - private AsyncExecutorContainer urgentExecutorBuilder; - private AsyncExecutorContainer priorityExecutorBuilder; - private AsyncExecutorContainer normalExecutorBuilder; + protected AsyncExecutorContainer urgentExecutorBuilder; + protected AsyncExecutorContainer priorityExecutorBuilder; + protected AsyncExecutorContainer normalExecutorBuilder; private ExecutorService lowTransferQConsumerService; private ExecutorService normalTransferQConsumerService; - private SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ; - private SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ; - private TransferSemaphoresHolder transferSemaphoresHolder; - private GenericStatsMetricPublisher genericStatsMetricPublisher; + protected SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ; + protected SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ; + protected TransferSemaphoresHolder transferSemaphoresHolder; + protected GenericStatsMetricPublisher genericStatsMetricPublisher; public S3RepositoryPlugin(final Settings settings, final Path configPath) { this(settings, configPath, new S3Service(configPath), new S3AsyncService(configPath)); @@ -387,5 
+387,8 @@ public void reload(Settings settings) { public void close() throws IOException { service.close(); s3AsyncService.close(); + urgentExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + priorityExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + normalExecutorBuilder.getAsyncTransferEventLoopGroup().close(); } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 2cb11541d924f..53371cd1529ce 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -48,6 +48,7 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.DeletedObject; import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts; import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; @@ -92,7 +93,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -102,6 +102,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -286,9 +287,8 @@ public int numberOfPagesFetched() { } } - public void testDelete() throws IOException { + public void testDelete() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); int bulkDeleteSize = 5; @@ -297,147 +297,314 @@ public void testDelete() throws IOException { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); final int totalPageCount = 3; final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - 
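A robustness note on the three getAsyncTransferEventLoopGroup().close() calls added to S3RepositoryPlugin.close() earlier in this hunk: those executor containers appear to be initialized only in createComponents(), so if a plugin instance can be constructed and closed without components ever being created (as some unit tests do), close() would hit a NullPointerException. A hedged, self-contained sketch of a null-guarded variant (field names are illustrative, not the plugin's):

import java.io.Closeable;
import java.io.IOException;

// Sketch: closing optionally-initialized resources without risking an NPE.
// The three fields mirror the plugin's urgent/priority/normal executor containers,
// which may still be null if createComponents() never ran.
class GuardedClose implements Closeable {
    Closeable urgentGroup;
    Closeable priorityGroup;
    Closeable normalGroup;

    @Override
    public void close() throws IOException {
        closeIfSet(urgentGroup);
        closeIfSet(priorityGroup);
        closeIfSet(normalGroup);
    }

    private static void closeIfSet(Closeable resource) throws IOException {
        if (resource != null) {
            resource.close();
        }
    }

    public static void main(String[] args) throws IOException {
        GuardedClose plugin = new GuardedClose();
        plugin.urgentGroup = () -> System.out.println("urgent event loop group closed");
        plugin.close(); // priorityGroup and normalGroup are null and are skipped safely
    }
}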
when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); - final List<String> keysDeleted = new ArrayList<>(); AtomicInteger deleteCount = new AtomicInteger(); + List<ListObjectsV2Response> responses = new ArrayList<>(); + List<S3Object> allObjects = new ArrayList<>(); + long totalSize = 0; + + for (int i = 0; i < totalPageCount; i++) { + List<S3Object> pageObjects = new ArrayList<>(); + for (int j = 0; j < s3ObjectsPerPage; j++) { + pageObjects.add(S3Object.builder().key(randomAlphaOfLength(10)).size(s3ObjectSize).build()); + totalSize += s3ObjectSize; + } + allObjects.addAll(pageObjects); + responses.add(ListObjectsV2Response.builder().contents(pageObjects).build()); + } + + AtomicInteger counter = new AtomicInteger(); doAnswer(invocation -> { - DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); - deleteCount.getAndIncrement(); - logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size()); - keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList())); - return DeleteObjectsResponse.builder().build(); - }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + int currentCounter = counter.getAndIncrement(); + if (currentCounter < responses.size()) { + subscriber.onNext(responses.get(currentCounter)); + } + if (currentCounter == responses.size() - 1) { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) ); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - DeleteResult deleteResult = blobContainer.delete(); - assertEquals(s3ObjectSize * s3ObjectsPerPage * totalPageCount, deleteResult.bytesDeleted()); - assertEquals(s3ObjectsPerPage * totalPageCount, deleteResult.blobsDeleted()); - // keysDeleted will have blobPath also - assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); - assertTrue(keysDeleted.contains(blobPath.buildAsString())); - // keysDeleted will have blobPath also - assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get()); - keysDeleted.remove(blobPath.buildAsString()); - assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<DeleteResult> resultRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + resultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Unexpected failure: " + e.getMessage()); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + DeleteResult result = resultRef.get(); + + assertEquals(totalSize, result.bytesDeleted()); + assertEquals(allObjects.size(), result.blobsDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + int expectedDeleteCalls = (int) Math.ceil((double) allObjects.size() / bulkDeleteSize); +
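The rewritten tests in this hunk all follow the same await-and-capture shape for exercising deleteAsync(): park the callback's outcome in AtomicReferences, trip a CountDownLatch, and assert on the test thread after a bounded wait. A condensed, self-contained sketch of the pattern (the Listener interface below stands in for OpenSearch's ActionListener):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

// Sketch of the await-and-capture pattern used by the tests above.
class AsyncAssertPattern {
    interface Listener<T> { void onResponse(T value); void onFailure(Exception e); }

    // Stand-in for deleteAsync(): completes the listener on another thread.
    static void doWorkAsync(Listener<String> listener) {
        CompletableFuture.runAsync(() -> listener.onResponse("ok"));
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        AtomicReference<String> resultRef = new AtomicReference<>();
        AtomicReference<Exception> exceptionRef = new AtomicReference<>();

        doWorkAsync(new Listener<String>() {
            @Override public void onResponse(String value) { resultRef.set(value); latch.countDown(); }
            @Override public void onFailure(Exception e) { exceptionRef.set(e); latch.countDown(); }
        });

        // The bounded wait keeps a callback that never fires from hanging the suite forever.
        if (!latch.await(5, TimeUnit.SECONDS)) throw new AssertionError("callback never fired");
        if (exceptionRef.get() != null) throw new AssertionError(exceptionRef.get());
        System.out.println("result: " + resultRef.get());
    }
}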
verify(s3AsyncClient, times(expectedDeleteCalls)).deleteObjects(any(DeleteObjectsRequest.class)); } - public void testDeleteItemLevelErrorsDuringDelete() { + public void testDeleteItemLevelErrorsDuringDelete() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + int bulkDeleteSize = 3; // Small size to force multiple delete requests + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + final int totalObjects = 10; + List<S3Object> s3Objects = new ArrayList<>(); + for (int i = 0; i < totalObjects; i++) { + s3Objects.add(S3Object.builder().key("key-" + i).size(100L).build()); + } - final List<String> keysFailedDeletion = new ArrayList<>(); + AtomicBoolean onNext = new AtomicBoolean(false); doAnswer(invocation -> { - DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); - int i = 0; - for (ObjectIdentifier objectIdentifier : deleteObjectsRequest.delete().objects()) { + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (onNext.compareAndSet(false, true)) { + subscriber.onNext(ListObjectsV2Response.builder().contents(s3Objects).build()); + } else { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + // Simulate item-level errors during delete + AtomicInteger deleteCallCount = new AtomicInteger(0); + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenAnswer(invocation -> { + DeleteObjectsRequest request = invocation.getArgument(0); + List<S3Error> errors = new ArrayList<>(); + List<DeletedObject> deletedObjects = new ArrayList<>(); + + for (int i = 0; i < request.delete().objects().size(); i++) { if (i % 2 == 0) { - keysFailedDeletion.add(objectIdentifier.key()); + errors.add( + S3Error.builder() + .key(request.delete().objects().get(i).key()) + .code("InternalError") + .message("Simulated error") + .build() + ); + } else { +
deletedObjects.add(DeletedObject.builder().key(request.delete().objects().get(i).key()).build()); } - i++; } - return DeleteObjectsResponse.builder() - .errors(keysFailedDeletion.stream().map(key -> S3Error.builder().key(key).build()).collect(Collectors.toList())) - .build(); - }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); + + deleteCallCount.incrementAndGet(); + return CompletableFuture.completedFuture(DeleteObjectsResponse.builder().errors(errors).deleted(deletedObjects).build()); + }); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(AssertionError.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<DeleteResult> resultRef = new AtomicReference<>(); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + resultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + + assertNull("Unexpected exception: " + exceptionRef.get(), exceptionRef.get()); + DeleteResult result = resultRef.get(); + assertNotNull("Expected DeleteResult but got null", result); + + // We expect half of the objects to be deleted successfully, + // but as of today the blob delete count and bytes are updated at listing time, before item-level delete errors are known. + assertEquals(totalObjects, result.blobsDeleted()); + assertEquals(totalObjects * 100L, result.bytesDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + + // Calculate expected number of deleteObjects calls + int expectedDeleteCalls = (int) Math.ceil((double) totalObjects / bulkDeleteSize); + assertEquals(expectedDeleteCalls, deleteCallCount.get()); } - public void testDeleteSdkExceptionDuringListOperation() { + public void testDeleteSdkExceptionDuringListOperation() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); +
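The doAnswer blocks in these tests hand-roll a Reactive Streams publisher: onSubscribe() fires immediately, and each request(n) either emits the next canned page, completes, or (in the failure test continuing below) calls onError. The same stub, written against the JDK's java.util.concurrent.Flow interfaces so the sketch compiles without the AWS SDK or Mockito on the classpath:

import java.util.List;
import java.util.concurrent.Flow;

// Scripted publisher: emits one pre-baked page per request(n), then completes,
// mirroring the mocked ListObjectsV2Publisher behavior above.
final class ScriptedPublisher<T> implements Flow.Publisher<T> {
    private final List<T> pages;

    ScriptedPublisher(List<T> pages) { this.pages = pages; }

    @Override
    public void subscribe(Flow.Subscriber<? super T> subscriber) {
        subscriber.onSubscribe(new Flow.Subscription() {
            private int cursor = 0;
            private boolean done = false;

            @Override
            public void request(long n) {
                if (done) return; // ignore demand after completion
                if (cursor < pages.size()) {
                    subscriber.onNext(pages.get(cursor++));
                }
                if (cursor == pages.size()) {
                    done = true;
                    subscriber.onComplete();
                }
            }

            @Override
            public void cancel() { done = true; }
        });
    }

    public static void main(String[] args) {
        new ScriptedPublisher<>(List.of("page-1", "page-2", "page-3")).subscribe(new Flow.Subscriber<String>() {
            private Flow.Subscription subscription;

            @Override public void onSubscribe(Flow.Subscription s) { subscription = s; s.request(1); }
            @Override public void onNext(String page) {
                System.out.println("got " + page);
                subscription.request(1); // pull the next page, as the paginator's consumer does
            }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
            @Override public void onComplete() { System.out.println("listing complete"); }
        });
    }
}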
subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onError(new RuntimeException("Simulated listing error")); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(IOException.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected failure but got success"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + assertEquals(IOException.class, exceptionRef.get().getClass()); + assertEquals("Failed to list objects for deletion", exceptionRef.get().getMessage()); } - public void testDeleteSdkExceptionDuringDeleteOperation() { + public void testDeleteSdkExceptionDuringDeleteOperation() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); + int bulkDeleteSize = 5; final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onNext( + ListObjectsV2Response.builder().contents(S3Object.builder().key("test-key").size(100L).build()).build() + ); + subscriber.onComplete(); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); - when(client.deleteObjects(any(DeleteObjectsRequest.class))).thenThrow(SdkException.builder().build()); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + CompletableFuture<DeleteObjectsResponse> failedFuture = new
CompletableFuture<>(); + failedFuture.completeExceptionally(new RuntimeException("Simulated delete error")); + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(IOException.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected failure but got success"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + logger.error("", exceptionRef.get()); + assertTrue(exceptionRef.get() instanceof CompletionException); + assertEquals("java.lang.RuntimeException: Simulated delete error", exceptionRef.get().getMessage()); } public void testExecuteSingleUpload() throws IOException { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java index 9ac1564c807c3..c0ee9cb6d980f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java @@ -8,6 +8,7 @@ package org.opensearch.repositories.s3; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeUnit; import org.opensearch.common.unit.SizeValue; @@ -25,6 +26,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class S3RepositoryPluginTests extends OpenSearchTestCase { @@ -37,8 +40,6 @@ public void testGetExecutorBuilders() throws IOException { ThreadPool threadPool = null; try (S3RepositoryPlugin plugin = new S3RepositoryPlugin(settings, configPath)) { List<ExecutorBuilder<?>> executorBuilders = plugin.getExecutorBuilders(settings); - assertNotNull(executorBuilders); - assertFalse(executorBuilders.isEmpty()); threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); final Executor executor = threadPool.executor(URGENT_FUTURE_COMPLETION); assertNotNull(executor); @@ -57,6 +58,12 @@ assertThat(info.getMax(), equalTo(size)); assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(size)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + plugin.createComponents(null, clusterService, threadPool, null, null, null, null, null, null, null, null); + assertNotNull(executorBuilders); + assertFalse(executorBuilders.isEmpty()); + final int availableProcessors = Runtime.getRuntime().availableProcessors(); if (processors > availableProcessors) { assertWarnings( diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle new file mode 100644 index 0000000000000..47f62b2b8c3f3 --- /dev/null +++ b/plugins/transport-grpc/build.gradle @@ -0,0 +1,168 @@ +import org.gradle.api.attributes.java.TargetJvmEnvironment + +/* + *
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +opensearchplugin { + description 'gRPC based transport implementation' + classname 'org.opensearch.transport.grpc.GrpcPlugin' +} + +dependencies { + compileOnly "com.google.code.findbugs:jsr305:3.0.2" + runtimeOnly "com.google.guava:guava:${versions.guava}" + implementation "com.google.errorprone:error_prone_annotations:2.24.1" + implementation "com.google.guava:failureaccess:1.0.1" + implementation "io.grpc:grpc-api:${versions.grpc}" + implementation "io.grpc:grpc-core:${versions.grpc}" + implementation "io.grpc:grpc-netty-shaded:${versions.grpc}" + implementation "io.grpc:grpc-protobuf-lite:${versions.grpc}" + implementation "io.grpc:grpc-protobuf:${versions.grpc}" + implementation "io.grpc:grpc-services:${versions.grpc}" + implementation "io.grpc:grpc-stub:${versions.grpc}" + implementation "io.grpc:grpc-util:${versions.grpc}" + implementation "io.perfmark:perfmark-api:0.26.0" +} + +tasks.named("dependencyLicenses").configure { + mapping from: /grpc-.*/, to: 'grpc' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.aayushatharva.brotli4j.Brotli4jLoader', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', + 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', + 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', + // classes are missing + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.openssl.PEMEncryptedKeyPair', + 'org.bouncycastle.openssl.PEMParser', + 'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter', + 'org.bouncycastle.openssl.jcajce.JceOpenSSLPKCS8DecryptorProviderBuilder', + 'org.bouncycastle.openssl.jcajce.JcePEMDecryptorProviderBuilder', + 'org.bouncycastle.pkcs.PKCS8EncryptedPrivateKeyInfo', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.helpers.FormattingTuple', + 'org.slf4j.helpers.MessageFormatter', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + 'org.slf4j.spi.LocationAwareLogger', + + 'com.google.gson.stream.JsonReader', + 
'com.google.gson.stream.JsonToken', + 'com.google.protobuf.util.Durations', + 'com.google.protobuf.util.Timestamps', + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.google.rpc.Status', + 'com.google.rpc.Status$Builder', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration' + ) + + ignoreViolations( + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$1', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$2', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$3', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$4', + 'io.grpc.netty.shaded.io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator$5', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$1', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$2', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$3', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$4', + 'io.grpc.netty.shaded.io.netty.util.internal.PlatformDependent0$6', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 
'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeLongArrayAccess', + 'io.grpc.netty.shaded.io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess' + ) +} diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 new file mode 100644 index 0000000000000..67723f6f51248 --- /dev/null +++ b/plugins/transport-grpc/licenses/error_prone_annotations-2.24.1.jar.sha1 @@ -0,0 +1 @@ +32b299e45105aa9b0df8279c74dc1edfcf313ff0 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/error_prone_annotations-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt b/plugins/transport-grpc/licenses/error_prone_annotations-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 new file mode 100644 index 0000000000000..4798b37e20691 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-1.0.1.jar.sha1 @@ -0,0 +1 @@ +1dcf1de382a0bf95a3d8b0849546c88bac1292c9 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/failureaccess-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt b/plugins/transport-grpc/licenses/failureaccess-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/grpc-LICENSE.txt b/plugins/transport-grpc/licenses/grpc-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/grpc-NOTICE.txt b/plugins/transport-grpc/licenses/grpc-NOTICE.txt new file mode 100644 index 0000000000000..f70c5620cf75a --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-NOTICE.txt @@ -0,0 +1,62 @@ +Copyright 2014 The gRPC Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +----------------------------------------------------------------------- + +This product contains a modified portion of 'OkHttp', an open source +HTTP & SPDY client for Android and Java applications, which can be obtained +at: + + * LICENSE: + * okhttp/third_party/okhttp/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/square/okhttp + * LOCATION_IN_GRPC: + * okhttp/third_party/okhttp + +This product contains a modified portion of 'Envoy', an open source +cloud-native high-performance edge/middle/service proxy, which can be +obtained at: + + * LICENSE: + * xds/third_party/envoy/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/envoy/NOTICE + * HOMEPAGE: + * https://www.envoyproxy.io + * LOCATION_IN_GRPC: + * xds/third_party/envoy + +This product contains a modified portion of 'protoc-gen-validate (PGV)', +an open source protoc plugin to generate polyglot message validators, +which can be obtained at: + + * LICENSE: + * xds/third_party/protoc-gen-validate/LICENSE (Apache License 2.0) + * NOTICE: + * xds/third_party/protoc-gen-validate/NOTICE + * HOMEPAGE: + * https://github.com/envoyproxy/protoc-gen-validate + * LOCATION_IN_GRPC: + * xds/third_party/protoc-gen-validate + +This product contains a modified portion of 'udpa', +an open source universal data plane API, which can be obtained at: + + * LICENSE: + * xds/third_party/udpa/LICENSE (Apache License 2.0) + * HOMEPAGE: + * https://github.com/cncf/udpa + * LOCATION_IN_GRPC: + * xds/third_party/udpa diff --git a/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..1844172dec982 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-api-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a257a5dd25dda1c97a99b56d5b9c1e56c12ae554 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 
b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e20345d29e914 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-core-1.68.2.jar.sha1 @@ -0,0 +1 @@ +b0fd51a1c029785d1c9ae2cfc80a296b60dfcfdb \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..53fa705a66129 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-netty-shaded-1.68.2.jar.sha1 @@ -0,0 +1 @@ +8ea4186fbdcc5432664364ed53e03cf0d458c3ec \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..e861b41837f33 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-protobuf-1.68.2.jar.sha1 @@ -0,0 +1 @@ +35b28e0d57874021cd31e76dd4a795f76a82471e \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..b2401f9752829 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-protobuf-lite-1.68.2.jar.sha1 @@ -0,0 +1 @@ +a53064b896adcfefe74362a33e111492351dfc03 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..c4edf923791e5 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-services-1.68.2.jar.sha1 @@ -0,0 +1 @@ +6c2a0b0640544b9010a42bcf76f2791116a75c9d \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..118464f8f48ff --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-stub-1.68.2.jar.sha1 @@ -0,0 +1 @@ +d58ee1cf723b4b5536d44b67e328c163580a8d98 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 new file mode 100644 index 0000000000000..c3261b012e502 --- /dev/null +++ b/plugins/transport-grpc/licenses/grpc-util-1.68.2.jar.sha1 @@ -0,0 +1 @@ +2d195570e9256d1357d584146a8e6b19587d4044 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 new file mode 100644 index 0000000000000..27d5304e326df --- /dev/null +++ b/plugins/transport-grpc/licenses/guava-33.2.1-jre.jar.sha1 @@ -0,0 +1 @@ +818e780da2c66c63bbb6480fef1f3855eeafa3e4 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/guava-LICENSE.txt b/plugins/transport-grpc/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/transport-grpc/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/transport-grpc/licenses/guava-NOTICE.txt b/plugins/transport-grpc/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 new file mode 100644 index 0000000000000..abf1becd13298 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-0.26.0.jar.sha1 @@ -0,0 +1 @@ +ef65452adaf20bf7d12ef55913aba24037b82738 \ No newline at end of file diff --git a/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt new file mode 100644 index 0000000000000..7d74b6569cf64 --- /dev/null +++ b/plugins/transport-grpc/licenses/perfmark-api-NOTICE.txt @@ -0,0 +1,40 @@ +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +----------------------------------------------------------------------- + +This product contains a modified portion of 'Catapult', an open source +Trace Event viewer for Chome, Linux, and Android applications, which can +be obtained at: + + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/catapult/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/catapult-project/catapult + +This product contains a modified portion of 'Polymer', a library for Web +Components, which can be obtained at: + * LICENSE: + * traceviewer/src/main/resources/io/perfmark/traceviewer/third_party/polymer/LICENSE (New BSD License) + * HOMEPAGE: + * https://github.com/Polymer/polymer + + +This product contains a modified portion of 'ASM', an open source +Java Bytecode library, which can be obtained at: + + * LICENSE: + * agent/src/main/resources/io/perfmark/agent/third_party/asm/LICENSE (BSD style License) + * HOMEPAGE: + * https://asm.ow2.io/ diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java new file mode 100644 index 0000000000000..0a464e135350b --- /dev/null +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+package org.opensearch.transport.grpc;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.core.indices.breaker.CircuitBreakerService;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.telemetry.tracing.Tracer;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT;
+import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT;
+
+/**
+ * Main class for the gRPC plugin.
+ */
+public final class GrpcPlugin extends Plugin implements NetworkPlugin {
+
+    /**
+     * Creates a new GrpcPlugin instance.
+     */
+    public GrpcPlugin() {}
+
+    @Override
+    public Map<String, Supplier<AuxTransport>> getAuxTransports(
+        Settings settings,
+        ThreadPool threadPool,
+        CircuitBreakerService circuitBreakerService,
+        NetworkService networkService,
+        ClusterSettings clusterSettings,
+        Tracer tracer
+    ) {
+        return Collections.singletonMap(
+            GRPC_TRANSPORT_SETTING_KEY,
+            () -> new Netty4GrpcServerTransport(settings, Collections.emptyList(), networkService)
+        );
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return List.of(
+            SETTING_GRPC_PORTS,
+            SETTING_GRPC_HOST,
+            SETTING_GRPC_PUBLISH_HOST,
+            SETTING_GRPC_BIND_HOST,
+            SETTING_GRPC_WORKER_COUNT,
+            SETTING_GRPC_PUBLISH_PORT
+        );
+    }
+}
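For orientation: the plugin wires the transport into the node through getAuxTransports, but the transport can also be constructed directly, as the unit test later in this diff does. A minimal sketch under stated assumptions — the port range 9400-9500 and the empty service list are illustrative choices, not defaults introduced by this change:

```java
package org.opensearch.transport.grpc; // same package, since boundAddress() is package-private

import java.util.List;

import org.opensearch.common.network.NetworkService;
import org.opensearch.common.settings.Settings;

public class GrpcTransportSketch {
    public static void main(String[] args) throws Exception {
        // "9400-9500" is an illustrative port range, not a default from this change.
        Settings settings = Settings.builder()
            .put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), "9400-9500")
            .build();
        // No BindableService instances are registered here; the health and
        // reflection services are added by the transport itself during bind.
        try (Netty4GrpcServerTransport transport =
                 new Netty4GrpcServerTransport(settings, List.of(), new NetworkService(List.of()))) {
            transport.start(); // binds the first free port in the range
            System.out.println(transport.boundAddress().publishAddress());
            transport.stop();
        }
    }
}
```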
diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
new file mode 100644
index 0000000000000..61c0722772b92
--- /dev/null
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
@@ -0,0 +1,277 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.grpc;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.transport.PortsRange;
+import org.opensearch.common.util.concurrent.OpenSearchExecutors;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
+import org.opensearch.plugins.NetworkPlugin;
+import org.opensearch.transport.BindTransportException;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Function;
+
+import io.grpc.BindableService;
+import io.grpc.InsecureServerCredentials;
+import io.grpc.Server;
+import io.grpc.netty.shaded.io.grpc.netty.NettyServerBuilder;
+import io.grpc.netty.shaded.io.netty.channel.EventLoopGroup;
+import io.grpc.netty.shaded.io.netty.channel.nio.NioEventLoopGroup;
+import io.grpc.netty.shaded.io.netty.channel.socket.nio.NioServerSocketChannel;
+import io.grpc.protobuf.services.HealthStatusManager;
+import io.grpc.protobuf.services.ProtoReflectionService;
+
+import static java.util.Collections.emptyList;
+import static org.opensearch.common.settings.Setting.intSetting;
+import static org.opensearch.common.settings.Setting.listSetting;
+import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory;
+import static org.opensearch.transport.Transport.resolveTransportPublishPort;
+
+/**
+ * Netty4 gRPC server implemented as a LifecycleComponent.
+ * Services injected through BindableService list.
+ */
+public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
+    private static final Logger logger = LogManager.getLogger(Netty4GrpcServerTransport.class);
+
+    /**
+     * Type key for configuring settings of this auxiliary transport.
+     */
+    public static final String GRPC_TRANSPORT_SETTING_KEY = "experimental-transport-grpc";
+
+    /**
+     * Port range on which to bind.
+     * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORTS where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
+     */
+    public static final Setting<PortsRange> SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(
+        GRPC_TRANSPORT_SETTING_KEY
+    );
+
+    /**
+     * Port published to peers for this server.
+     */
+    public static final Setting<Integer> SETTING_GRPC_PUBLISH_PORT = intSetting("grpc.publish_port", -1, -1, Setting.Property.NodeScope);
+
+    /**
+     * Host list to bind and publish.
+     * For distinct bind/publish hosts configure SETTING_GRPC_BIND_HOST + SETTING_GRPC_PUBLISH_HOST separately.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_HOST = listSetting(
+        "grpc.host",
+        emptyList(),
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Host list to bind.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_BIND_HOST = listSetting(
+        "grpc.bind_host",
+        SETTING_GRPC_HOST,
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Host list published to peers.
+     */
+    public static final Setting<List<String>> SETTING_GRPC_PUBLISH_HOST = listSetting(
+        "grpc.publish_host",
+        SETTING_GRPC_HOST,
+        Function.identity(),
+        Setting.Property.NodeScope
+    );
+
+    /**
+     * Configure size of thread pool backing this transport server.
+     */
+    public static final Setting<Integer> SETTING_GRPC_WORKER_COUNT = new Setting<>(
+        "grpc.netty.worker_count",
+        (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)),
+        (s) -> Setting.parseInt(s, 1, "grpc.netty.worker_count"),
+        Setting.Property.NodeScope
+    );
+
+    private final Settings settings;
+    private final NetworkService networkService;
+    private final List<BindableService> services;
+    private final CopyOnWriteArrayList<Server> servers = new CopyOnWriteArrayList<>();
+    private final String[] bindHosts;
+    private final String[] publishHosts;
+    private final PortsRange port;
+    private final int nettyEventLoopThreads;
+
+    private volatile BoundTransportAddress boundAddress;
+    private volatile EventLoopGroup eventLoopGroup;
+
+    /**
+     * Creates a new Netty4GrpcServerTransport instance.
+     * @param settings the configured settings.
+     * @param services the gRPC compatible services to be registered with the server.
+     * @param networkService the bind/publish addresses.
+     */
+    public Netty4GrpcServerTransport(Settings settings, List<BindableService> services, NetworkService networkService) {
+        this.settings = Objects.requireNonNull(settings);
+        this.services = Objects.requireNonNull(services);
+        this.networkService = Objects.requireNonNull(networkService);
+
+        final List<String> httpBindHost = SETTING_GRPC_BIND_HOST.get(settings);
+        this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray(
+            Strings.EMPTY_ARRAY
+        );
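+
+        // Publish hosts resolve the same way: grpc.publish_host, then grpc.host,
+        // then the global network.publish_host setting when both are unset.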
+        final List<String> httpPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
+        this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
+            .toArray(Strings.EMPTY_ARRAY);
+
+        this.port = SETTING_GRPC_PORTS.get(settings);
+        this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings);
+    }
+
+    BoundTransportAddress boundAddress() {
+        return this.boundAddress;
+    }
+
+    @Override
+    protected void doStart() {
+        boolean success = false;
+        try {
+            this.eventLoopGroup = new NioEventLoopGroup(nettyEventLoopThreads, daemonThreadFactory(settings, "grpc_event_loop"));
+            bindServer();
+            success = true;
+            logger.info("Started gRPC server on port {}", port);
+        } finally {
+            if (!success) {
+                doStop();
+            }
+        }
+    }
+
+    @Override
+    protected void doStop() {
+        for (Server server : servers) {
+            if (server != null) {
+                server.shutdown();
+                try {
+                    server.awaitTermination(30, TimeUnit.SECONDS);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    logger.warn("Interrupted while shutting down gRPC server");
+                } finally {
+                    server.shutdownNow();
+                }
+            }
+        }
+        if (eventLoopGroup != null) {
+            try {
+                eventLoopGroup.shutdownGracefully(0, 10, TimeUnit.SECONDS).await();
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                logger.warn("Failed to shut down event loop group");
+            }
+        }
+    }
+
+    @Override
+    protected void doClose() {
+
+    }
+
+    private void bindServer() {
+        InetAddress[] hostAddresses;
+        try {
+            hostAddresses = networkService.resolveBindHostAddresses(bindHosts);
+        } catch (IOException e) {
+            throw new BindTransportException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e);
+        }
+
+        List<TransportAddress> boundAddresses = new ArrayList<>(hostAddresses.length);
+        for (InetAddress address : hostAddresses) {
+            boundAddresses.add(bindAddress(address, port));
+        }
+
+        final InetAddress publishInetAddress;
+        try {
+            publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts);
+        } catch (Exception e) {
+            throw new BindTransportException("Failed to resolve publish address", e);
+        }
+
+        final int publishPort = resolveTransportPublishPort(SETTING_GRPC_PUBLISH_PORT.get(settings), boundAddresses, publishInetAddress);
+        if (publishPort < 0) {
+            throw new BindTransportException(
+                "Failed to auto-resolve grpc publish port, multiple bound addresses "
+                    + boundAddresses
+                    + " with distinct ports and none of them matched the publish address ("
+                    + publishInetAddress
+                    + "). "
" + + "Please specify a unique port by setting " + + SETTING_GRPC_PORTS.getKey() + + " or " + + SETTING_GRPC_PUBLISH_PORT.getKey() + ); + } + + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); + this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); + logger.info("{}", boundAddress); + } + + private TransportAddress bindAddress(InetAddress hostAddress, PortsRange portRange) { + AtomicReference lastException = new AtomicReference<>(); + AtomicReference addr = new AtomicReference<>(); + + boolean success = portRange.iterate(portNumber -> { + try { + + final InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber); + final NettyServerBuilder serverBuilder = NettyServerBuilder.forAddress(address, InsecureServerCredentials.create()) + .bossEventLoopGroup(eventLoopGroup) + .workerEventLoopGroup(eventLoopGroup) + .channelType(NioServerSocketChannel.class) + .addService(new HealthStatusManager().getHealthService()) + .addService(ProtoReflectionService.newInstance()); + + services.forEach(serverBuilder::addService); + + Server srv = serverBuilder.build().start(); + servers.add(srv); + addr.set(new TransportAddress(hostAddress, portNumber)); + logger.debug("Bound gRPC to address {{}}", address); + return true; + } catch (Exception e) { + lastException.set(e); + return false; + } + }); + + if (!success) { + throw new RuntimeException("Failed to bind to " + hostAddress + " on ports " + portRange, lastException.get()); + } + + return addr.get(); + } +} diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java similarity index 59% rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java rename to plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java index 4fdc622c3d886..4a5d9d02b5b91 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java @@ -7,6 +7,7 @@ */ /** - * correlation field mapper package + * gRPC transport implementation for OpenSearch. + * Provides network communication using the gRPC protocol. */ -package org.opensearch.plugin.correlation.core.index.mapper; +package org.opensearch.transport.grpc; diff --git a/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..398de576b6c5a --- /dev/null +++ b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
similarity index 59%
rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
rename to plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
index 4fdc622c3d886..4a5d9d02b5b91 100644
--- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/mapper/package-info.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/package-info.java
@@ -7,6 +7,7 @@
  */

 /**
- * correlation field mapper package
+ * gRPC transport implementation for OpenSearch.
+ * Provides network communication using the gRPC protocol.
  */
-package org.opensearch.plugin.correlation.core.index.mapper;
+package org.opensearch.transport.grpc;
diff --git a/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..398de576b6c5a
--- /dev/null
+++ b/plugins/transport-grpc/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant codeBase "${codebase.grpc-netty-shaded}" {
+  // for reading the system-wide configuration for the backlog of established sockets
+  permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read";
+
+  // netty makes and accepts socket connections
+  permission java.net.SocketPermission "*", "accept,connect";
+
+  // Netty sets custom classloader for some of its internal threads
+  permission java.lang.RuntimePermission "*", "setContextClassLoader";
+};
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
new file mode 100644
index 0000000000000..ebeff62c2c23c
--- /dev/null
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.transport.grpc;
+
+import org.opensearch.common.network.NetworkService;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.test.OpenSearchTestCase;
+import org.hamcrest.MatcherAssert;
+import org.junit.Before;
+
+import java.util.List;
+
+import io.grpc.BindableService;
+
+import static org.hamcrest.Matchers.emptyArray;
+import static org.hamcrest.Matchers.not;
+
+public class Netty4GrpcServerTransportTests extends OpenSearchTestCase {
+
+    private NetworkService networkService;
+    private List<BindableService> services;
+
+    @Before
+    public void setup() {
+        networkService = new NetworkService(List.of());
+        services = List.of();
+    }
+
+    public void test() {
+        try (Netty4GrpcServerTransport transport = new Netty4GrpcServerTransport(createSettings(), services, networkService)) {
+            transport.start();
+
+            MatcherAssert.assertThat(transport.boundAddress().boundAddresses(), not(emptyArray()));
+            assertNotNull(transport.boundAddress().publishAddress().address());
+
+            transport.stop();
+        }
+    }
+
+    private static Settings createSettings() {
+        return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build();
+    }
+}
diff --git a/release-notes/opensearch.release-notes-1.3.20.md b/release-notes/opensearch.release-notes-1.3.20.md
index 44cd62e31a928..b3cc89fb37985 100644
--- a/release-notes/opensearch.release-notes-1.3.20.md
+++ b/release-notes/opensearch.release-notes-1.3.20.md
@@ -8,6 +8,7 @@
 - Bump `commonsio` to 2.16.0 ([#16780](https://github.com/opensearch-project/OpenSearch/pull/16780))
 - Bump `protobuf-java` to 3.25.5 ([#16792](https://github.com/opensearch-project/OpenSearch/pull/16792))
 - Bump `snappy-java` to 1.1.10.7 ([#16792](https://github.com/opensearch-project/OpenSearch/pull/16792))
+- Bump `mime4j-core` to 0.8.11 ([#16810](https://github.com/opensearch-project/OpenSearch/pull/16810))

 ### Fixed
 - Update help output for _cat ([#14722](https://github.com/opensearch-project/OpenSearch/pull/14722))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
index 9ec39660a4928..266b41c6b5a77
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml
+++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml @@ -1,8 +1,9 @@ --- # The test setup includes: -# - Create flat_object mapping for flat_object_doc_values_test index -# - Index 9 example documents -# - Search tests about doc_values and index +# - 1.Create flat_object mapping for flat_object_doc_values_test index +# - 2.Index 9 example documents +# - 3.Search tests about doc_values and index +# - 4.Fetch doc_value from flat_object field setup: - skip: @@ -44,6 +45,8 @@ setup: {"order":"order7","issue":{"labels":{"number":7,"name":"abc7","status":1}}} {"index":{"_index":"flat_object_doc_values_test","_id":"8"}} {"order":"order8","issue":{"labels":{"number":8,"name":"abc8","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"9"}} + {"order":"order9","issue":{"labels":{"number":9,"name":"abC8","status":1}}} --- # Delete Index when connection is teardown @@ -67,7 +70,53 @@ teardown: } } - - length: { hits.hits: 9 } + - length: { hits.hits: 10 } + + # Case Insensitive Term Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels.name: { + value: "abc8", + case_insensitive: "true" + } + } + } + ] + } + } + } + + - length: { hits.hits: 2 } + + # Case Insensitive Term Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels: { + value: "abc8", + case_insensitive: "true" + } + } + } + ] + } + } + } + + - length: { hits.hits: 2 } # Term Query with exact dot path. - do: @@ -786,3 +835,48 @@ teardown: - length: { hits.hits: 1 } - match: { hits.hits.0._source.order: "order8" } + + # Stored Fields with exact dot path. + - do: + search: + body: { + _source: false, + query: { + bool: { + must: [ + { + term: { + order: "order0" + } + } + ] + } + }, + stored_fields: "_none_", + docvalue_fields: [ "issue.labels.name","order" ] + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields: { "order" : [ "order0" ], "issue.labels.name": [ "abc0" ] } } + + - do: + search: + body: { + _source: false, + query: { + bool: { + must: [ + { + term: { + order: "order0" + } + } + ] + } + }, + stored_fields: "_none_", + docvalue_fields: [ "issue.labels.name" ] + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0.fields: { "issue.labels.name": [ "abc0" ] } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml index 647aaf2c9088b..53ed730925595 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -1121,8 +1121,8 @@ "search on fields with only doc_values enabled": - skip: features: [ "headers" ] - version: " - 2.18.99" - reason: "searching with only doc_values was finally added in 2.19.0" + version: " - 2.99.99" + reason: "searching with only doc_values was finally added in 3.0.0" - do: indices.create: index: test-doc-values @@ -1193,6 +1193,37 @@ - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2", "boolean": true, "date_nanos": "2020-10-29T12:12:12.123456789Z", "date": "2020-10-29T12:12:12.987Z" }' - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' - '{ "some_keyword": "5", 
"byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3", "boolean": false, "date_nanos": "2024-10-29T12:12:12.123456789Z", "date": "2024-10-29T12:12:12.987Z" }' + - '{ "index": { "_index": "test-doc-values", "_id": "4" } }' + - '{ "some_keyword": "Keyword1" }' + - '{ "index": { "_index": "test-doc-values", "_id": "5" } }' + - '{ "some_keyword": "keyword1" }' + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: { + "some_keyword": { + "value": "Keyword1" + } } + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: { + "some_keyword": { + "value": "keyword1", + "case_insensitive": "true" + } } + + - match: { hits.total: 2 } - do: search: diff --git a/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 deleted file mode 100644 index fd952034f3742..0000000000000 --- a/server/licenses/lucene-analysis-common-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4c2503cfaba37249e20ea877555cb52ee89d1ae1 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2b9a8cf6e43fd --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.12.1.jar.sha1 @@ -0,0 +1 @@ +86836497e35c1ab33259d9864ceb280c0016075e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 deleted file mode 100644 index 2993134edd610..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68fe98c94e9644a584ea1bf525e68d9406fc61ec \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..89d6ddbec3eec --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d0e79d06a0ed021663737e4df777ab7b80cd28c4 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.0.jar.sha1 b/server/licenses/lucene-core-9.12.0.jar.sha1 deleted file mode 100644 index e55f896dedb63..0000000000000 --- a/server/licenses/lucene-core-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fdb055d569bb20bfce9618fe2b01c29bab7f290c \ No newline at end of file diff --git a/server/licenses/lucene-core-9.12.1.jar.sha1 b/server/licenses/lucene-core-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..2521c91a81d64 --- /dev/null +++ b/server/licenses/lucene-core-9.12.1.jar.sha1 @@ -0,0 +1 @@ +91447c90c1180122142773b5baddaf8547124794 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.0.jar.sha1 b/server/licenses/lucene-grouping-9.12.0.jar.sha1 deleted file mode 100644 index 48388974bb38f..0000000000000 --- a/server/licenses/lucene-grouping-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ccf99f8db57aa97b2c1f95c5cc2a11156a043921 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.12.1.jar.sha1 b/server/licenses/lucene-grouping-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..61d7ff62ac3cc --- /dev/null +++ b/server/licenses/lucene-grouping-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e4bc3d0aa7eec4f41b4f350de0263a8d5625d2b3 \ No newline at end of file diff --git 
a/server/licenses/lucene-highlighter-9.12.0.jar.sha1 b/server/licenses/lucene-highlighter-9.12.0.jar.sha1 deleted file mode 100644 index 3d457579da892..0000000000000 --- a/server/licenses/lucene-highlighter-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e93429f66fbcd3b58d81f01223d6ce5688047296 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.12.1.jar.sha1 b/server/licenses/lucene-highlighter-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..57fc10a58b806 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.12.1.jar.sha1 @@ -0,0 +1 @@ +2eeedfcec47dd65969f36e88931ed452291dd43e \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.0.jar.sha1 b/server/licenses/lucene-join-9.12.0.jar.sha1 deleted file mode 100644 index c5f6d16598a60..0000000000000 --- a/server/licenses/lucene-join-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14c802d6955eaf11107375a2ada8fe8ec53b3e01 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.12.1.jar.sha1 b/server/licenses/lucene-join-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..8d46f20c39974 --- /dev/null +++ b/server/licenses/lucene-join-9.12.1.jar.sha1 @@ -0,0 +1 @@ +3c5e9ff2925a8373ae0d35c1d0a7b2465cebec9f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.0.jar.sha1 b/server/licenses/lucene-memory-9.12.0.jar.sha1 deleted file mode 100644 index e7ac44089c006..0000000000000 --- a/server/licenses/lucene-memory-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ffe090339540876b40df792aee51a42af6b3f37f \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.12.1.jar.sha1 b/server/licenses/lucene-memory-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..55de1c9322aa3 --- /dev/null +++ b/server/licenses/lucene-memory-9.12.1.jar.sha1 @@ -0,0 +1 @@ +e80eecfb1dcc324140387c8357c81e12c2a01937 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.0.jar.sha1 b/server/licenses/lucene-misc-9.12.0.jar.sha1 deleted file mode 100644 index afb546be4e032..0000000000000 --- a/server/licenses/lucene-misc-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad17704ee90eb926b6d3105f7027485cdadbecd9 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.12.1.jar.sha1 b/server/licenses/lucene-misc-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..86982eb1c900c --- /dev/null +++ b/server/licenses/lucene-misc-9.12.1.jar.sha1 @@ -0,0 +1 @@ +4e65d01d1c23f3f49dc325d552701bbefafee7ee \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.0.jar.sha1 b/server/licenses/lucene-queries-9.12.0.jar.sha1 deleted file mode 100644 index e24756e38dad2..0000000000000 --- a/server/licenses/lucene-queries-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ac2a62b0b55c5725bb65f0c5454f9f8a401cf43 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.12.1.jar.sha1 b/server/licenses/lucene-queries-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..f2087ec8eb623 --- /dev/null +++ b/server/licenses/lucene-queries-9.12.1.jar.sha1 @@ -0,0 +1 @@ +14f24315041b686683dba4bc679ca7dc6a505906 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.0.jar.sha1 b/server/licenses/lucene-queryparser-9.12.0.jar.sha1 deleted file mode 100644 index e93e00a063dd0..0000000000000 --- a/server/licenses/lucene-queryparser-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -55959399373876f4c184944315458dc6b88fbd81 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.12.1.jar.sha1 
b/server/licenses/lucene-queryparser-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..489e6719da342 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.12.1.jar.sha1 @@ -0,0 +1 @@ +aa6df09a99f8881d843e9863aa1713dc9f3ed24f \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.0.jar.sha1 b/server/licenses/lucene-sandbox-9.12.0.jar.sha1 deleted file mode 100644 index a3fd8446e0dbc..0000000000000 --- a/server/licenses/lucene-sandbox-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f65882536d681c11a1cbc920e5679201101e3603 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.12.1.jar.sha1 b/server/licenses/lucene-sandbox-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..c1d613e23f1fe --- /dev/null +++ b/server/licenses/lucene-sandbox-9.12.1.jar.sha1 @@ -0,0 +1 @@ +1a66485629d60779f039fc26360f4374ef1496e7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 deleted file mode 100644 index b0f11fb667faf..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d00cc7cc2279822ef6740f0677cafacfb439fa8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..c38b794ce9948 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.12.1.jar.sha1 @@ -0,0 +1 @@ +0a7379410eff21676472adc8ea76a57891ec83c2 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 deleted file mode 100644 index 858eee25ac191..0000000000000 --- a/server/licenses/lucene-spatial3d-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e3092632ca1d4427d3ebb2c866ac89d90f5b61ec \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..bc327a8cec830 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.12.1.jar.sha1 @@ -0,0 +1 @@ +d2fdea4edabb1f616f494999651c43abfd0aa124 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.0.jar.sha1 b/server/licenses/lucene-suggest-9.12.0.jar.sha1 deleted file mode 100644 index 973a7726d845d..0000000000000 --- a/server/licenses/lucene-suggest-9.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1c6636499317ebe498f3490a1ec8b86b8a363dd \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.12.1.jar.sha1 b/server/licenses/lucene-suggest-9.12.1.jar.sha1 new file mode 100644 index 0000000000000..961f6da619149 --- /dev/null +++ b/server/licenses/lucene-suggest-9.12.1.jar.sha1 @@ -0,0 +1 @@ +0660e0996ec7653fe0c13c608137e264645eecac \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java index 32d5b3db85629..a7cb4847b45e5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsActionIT.java @@ -8,9 +8,15 @@ package org.opensearch.action.admin.cluster.shards; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import 
org.opensearch.action.admin.indices.datastream.DataStreamTestCase; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.action.pagination.PageParams; +import org.opensearch.client.Requests; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; @@ -20,15 +26,19 @@ import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.search.SearchService.NO_TIMEOUT; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0, scope = OpenSearchIntegTestCase.Scope.TEST) -public class TransportCatShardsActionIT extends OpenSearchIntegTestCase { +public class TransportCatShardsActionIT extends DataStreamTestCase { public void testCatShardsWithSuccessResponse() throws InterruptedException { internalCluster().startClusterManagerOnlyNodes(1); @@ -125,4 +135,334 @@ public void onFailure(Exception e) { latch.await(); } + public void testListShardsWithHiddenIndex() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(2); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + ensureGreen(); + + // Verify result for a default query: "_list/shards" + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, 100); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 2, true); + + // Verify result when hidden index is explicitly queried: "_list/shards" + listShardsRequest = getListShardsTransportRequest(new String[] { "test-hidden-idx" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 2, true); + + // Verify result when hidden index is queried with wildcard: "_list/shards*" + // Since the ClusterStateAction underneath is invoked with lenientExpandOpen IndicesOptions, + // Wildcards for hidden indices should not get resolved. 
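+ // Because that expansion only covers open, non-hidden indices, the wildcard below is expected to resolve to nothing, leaving both the shard routings and the stats in the response empty.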
+ listShardsRequest = getListShardsTransportRequest(new String[] { "test-hidden-idx*" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-hidden-idx", 0, false); + } + + public void testListShardsWithClosedIndex() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(2); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + ensureGreen(); + + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verify result for a default query: "_list/shards" + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, 100); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 2, false); + + // Verify result when closed index is explicitly queried: "_list/shards" + listShardsRequest = getListShardsTransportRequest(new String[] { "test-closed-idx" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 2, false); + + // Verify result when closed index is queried with wildcard: "_list/shards*" + // Since the ClusterStateAction underneath is invoked with lenientExpandOpen IndicesOptions, + // Wildcards for closed indices should not get resolved. 
+ listShardsRequest = getListShardsTransportRequest(new String[] { "test-closed-idx*" }, 100); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), "test-closed-idx", 0, false); + } + + public void testListShardsWithClosedAndHiddenIndices() throws InterruptedException, ExecutionException { + final int numIndices = 4; + final int numShards = 1; + final int numReplicas = 2; + final int pageSize = 100; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verifying response for default queries: /_list/shards + // all the shards should be part of response, however stats should not be displayed for closed index + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, pageSize); + ActionFuture listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx"))); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-hidden-idx"))); + assertEquals(numIndices * numShards * (numReplicas + 1), listShardsResponse.get().getResponseShards().size()); + assertFalse( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .anyMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-closed-idx")) + ); + assertEquals( + (numIndices - 1) * numShards * (numReplicas + 1), + listShardsResponse.get().getIndicesStatsResponse().getShards().length + ); + + // Verifying responses when hidden indices are explicitly queried: /_list/shards/test-hidden-idx + // Shards for hidden index should appear in response along with stats + listShardsRequest.setIndices(List.of("test-hidden-idx").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().allMatch(shard -> shard.getIndexName().equals("test-hidden-idx"))); + assertTrue( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-hidden-idx")) + ); + assertEquals( + listShardsResponse.get().getResponseShards().size(), + listShardsResponse.get().getIndicesStatsResponse().getShards().length + ); + + // Verifying responses when hidden indices are queried with wildcards: /_list/shards/test-hidden-idx* + // 
Shards for hidden index should not appear in response with stats. + listShardsRequest.setIndices(List.of("test-hidden-idx*").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + + // Explicitly querying for closed index: /_list/shards/test-closed-idx + // should output closed shards without stats. + listShardsRequest.setIndices(List.of("test-closed-idx").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue(listShardsResponse.get().getResponseShards().stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx"))); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + + // Querying for closed index with wildcards: /_list/shards/test-closed-idx* + // should not output any closed shards. + listShardsRequest.setIndices(List.of("test-closed-idx*").toArray(new String[0])); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertEquals(0, listShardsResponse.get().getResponseShards().size()); + assertEquals(0, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + } + + public void testListShardsWithClosedIndicesAcrossPages() throws InterruptedException, ExecutionException { + final int numIndices = 4; + final int numShards = 1; + final int numReplicas = 2; + final int pageSize = numShards * (numReplicas + 1); + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test-open-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-open-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-closed-idx-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + // close index "test-closed-idx-1" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx-1")).get(); + ensureGreen(); + // close index "test-closed-idx-2" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx-2")).get(); + ensureGreen(); + + // Verifying response for default queries: /_list/shards + List<ShardRouting> responseShardRouting = new ArrayList<>(); + List<ShardStats> responseShardStats = new ArrayList<>(); + String nextToken = null; + CatShardsRequest listShardsRequest; + ActionFuture<CatShardsResponse> listShardsResponse; + do { + listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, nextToken, pageSize); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + nextToken = listShardsResponse.get().getPageToken().getNextToken(); + responseShardRouting.addAll(listShardsResponse.get().getResponseShards()); + responseShardStats.addAll(List.of(listShardsResponse.get().getIndicesStatsResponse().getShards())); + } while (nextToken != 
null); + + assertTrue(responseShardRouting.stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx-1"))); + assertTrue(responseShardRouting.stream().anyMatch(shard -> shard.getIndexName().equals("test-closed-idx-2"))); + assertEquals(numIndices * numShards * (numReplicas + 1), responseShardRouting.size()); + // ShardsStats should only appear for 2 open indices + assertFalse( + responseShardStats.stream().anyMatch(shardStats -> shardStats.getShardRouting().getIndexName().contains("test-closed-idx")) + ); + assertEquals(2 * numShards * (numReplicas + 1), responseShardStats.size()); + } + + public void testListShardsWithDataStream() throws Exception { + final int numDataNodes = 3; + String dataStreamName = "logs-test"; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(numDataNodes); + // Create an index template for data streams. + createDataStreamIndexTemplate("data-stream-template", List.of("logs-*")); + // Create data streams matching the "logs-*" index pattern. + createDataStream(dataStreamName); + ensureGreen(); + // Verifying default query's result. Data stream should have created a hidden backing index in the + // background and all the corresponding shards should appear in the response along with stats. + CatShardsRequest listShardsRequest = getListShardsTransportRequest(Strings.EMPTY_ARRAY, numDataNodes * numDataNodes); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), dataStreamName, numDataNodes + 1, true); + // Verifying result when data stream is directly queried. Again, all the shards with stats should appear + listShardsRequest = getListShardsTransportRequest(new String[] { dataStreamName }, numDataNodes * numDataNodes); + listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertSingleIndexResponseShards(listShardsResponse.get(), dataStreamName, numDataNodes + 1, true); + } + + public void testListShardsWithAliases() throws Exception { + final int numShards = 1; + final int numReplicas = 1; + final String aliasName = "test-alias"; + internalCluster().startClusterManagerOnlyNodes(1); + internalCluster().startDataOnlyNodes(3); + createIndex( + "test-closed-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .build() + ); + createIndex( + "test-hidden-idx", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas) + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .build() + ); + ensureGreen(); + + // Point test alias to both the indices (one being hidden while the other is closed) + final IndicesAliasesRequest request = new IndicesAliasesRequest().origin("allowed"); + request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test-closed-idx").alias(aliasName)); + assertAcked(client().admin().indices().aliases(request).actionGet()); + + request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("test-hidden-idx").alias(aliasName)); + assertAcked(client().admin().indices().aliases(request).actionGet()); + + // close index "test-closed-idx" + client().admin().indices().close(Requests.closeIndexRequest("test-closed-idx")).get(); + ensureGreen(); + + // Verifying result when an alias is explicitly queried. 
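+ // The alias resolves to both backing indices, so all four shard routings (1 shard x 2 copies x 2 indices) should be returned, while stats should cover only the two shards of the hidden index, since the closed index reports no stats.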
+ CatShardsRequest listShardsRequest = getListShardsTransportRequest(new String[] { aliasName }, 100); + ActionFuture<CatShardsResponse> listShardsResponse = client().execute(CatShardsAction.INSTANCE, listShardsRequest); + assertTrue( + listShardsResponse.get() + .getResponseShards() + .stream() + .allMatch(shard -> shard.getIndexName().equals("test-hidden-idx") || shard.getIndexName().equals("test-closed-idx")) + ); + assertTrue( + Arrays.stream(listShardsResponse.get().getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().equals("test-hidden-idx")) + ); + assertEquals(4, listShardsResponse.get().getResponseShards().size()); + assertEquals(2, listShardsResponse.get().getIndicesStatsResponse().getShards().length); + } + + private void assertSingleIndexResponseShards( + CatShardsResponse catShardsResponse, + String indexNamePattern, + final int totalNumShards, + boolean shardStatsExist + ) { + assertTrue(catShardsResponse.getResponseShards().stream().allMatch(shard -> shard.getIndexName().contains(indexNamePattern))); + assertEquals(totalNumShards, catShardsResponse.getResponseShards().size()); + if (shardStatsExist) { + assertTrue( + Arrays.stream(catShardsResponse.getIndicesStatsResponse().getShards()) + .allMatch(shardStats -> shardStats.getShardRouting().getIndexName().contains(indexNamePattern)) + ); + } + assertEquals(shardStatsExist ? totalNumShards : 0, catShardsResponse.getIndicesStatsResponse().getShards().length); + } + + private CatShardsRequest getListShardsTransportRequest(String[] indices, final int pageSize) { + return getListShardsTransportRequest(indices, null, pageSize); + } + + private CatShardsRequest getListShardsTransportRequest(String[] indices, String nextToken, final int pageSize) { + CatShardsRequest listShardsRequest = new CatShardsRequest(); + listShardsRequest.setCancelAfterTimeInterval(NO_TIMEOUT); + listShardsRequest.setIndices(indices); + listShardsRequest.setPageParams(new PageParams(nextToken, PageParams.PARAM_ASC_SORT_VALUE, pageSize)); + return listShardsRequest; + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index c91c4d7bbb63b..1d01f717aad1f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -26,6 +26,8 @@ import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit; import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; @@ -41,6 +43,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -56,7 +59,7 @@ public class StarTreeMapperIT extends OpenSearchIntegTestCase { .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); - private 
static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean ipdim) { + private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean wildcard) { try { return jsonBuilder().startObject() .startObject("composite") @@ -68,7 +71,7 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .endObject() .startArray("ordered_dimensions") .startObject() - .field("name", getDim(invalidDim, ipdim)) + .field("name", getDim(invalidDim, wildcard)) .endObject() .startObject() .field("name", "keyword_dv") @@ -102,6 +105,195 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .field("type", "keyword") .field("doc_values", false) .endObject() + .startObject("ip_no_dv") + .field("type", "ip") + .field("doc_values", false) + .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", true) + .endObject() + .startObject("wildcard") + .field("type", "wildcard") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createNestedTestMapping() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startObject("date_dimension") + .field("name", "timestamp") + .endObject() + .startArray("ordered_dimensions") + .startObject() + .field("name", "nested.nested1.status") + .endObject() + .startObject() + .field("name", "nested.nested1.keyword_dv") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "nested3.numeric_dv") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("nested3") + .startObject("properties") + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("nested") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("nested-not-startree") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createNestedTestMappingForArray() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startObject("date_dimension") + .field("name", "timestamp") + .endObject() + 
.startArray("ordered_dimensions") + .startObject() + .field("name", "status") + .endObject() + .startObject() + .field("name", "nested.nested1.keyword_dv") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "nested3.numeric_dv") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("status") + .field("type", "integer") + .endObject() + .startObject("nested3") + .startObject("properties") + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("nested") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("nested-not-startree") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() .startObject("ip") .field("type", "ip") .field("doc_values", false) @@ -362,11 +554,11 @@ private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, bo return mapping; } - private static String getDim(boolean hasDocValues, boolean isKeyword) { + private static String getDim(boolean hasDocValues, boolean isWildCard) { if (hasDocValues) { - return random().nextBoolean() ? "numeric" : "keyword"; - } else if (isKeyword) { - return "ip"; + return random().nextBoolean() ? "numeric" : random().nextBoolean() ? 
"keyword" : "ip_no_dv"; + } else if (isWildCard) { + return "wildcard"; } return "numeric_dv"; } @@ -467,6 +659,46 @@ public void testValidCompositeIndexWithDates() { } } + public void testValidCompositeIndexWithNestedFields() { + prepareCreate(TEST_INDEX).setMapping(createNestedTestMapping()).setSettings(settings).get(); + Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); + for (IndicesService service : dataNodeInstances) { + final Index index = resolveIndex("test"); + if (service.hasIndex(index)) { + IndexService indexService = service.indexService(index); + Set fts = indexService.mapperService().getCompositeFieldTypes(); + + for (CompositeMappedFieldType ft : fts) { + assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType); + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft; + assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR), + DataCubeDateTimeUnit.HALF_HOUR_OF_DAY + ); + for (int i = 0; i < dateDim.getIntervals().size(); i++) { + assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); + } + assertEquals("nested.nested1.status", starTreeFieldType.getDimensions().get(1).getField()); + assertTrue(starTreeFieldType.getDimensions().get(1) instanceof NumericDimension); + assertEquals("nested.nested1.keyword_dv", starTreeFieldType.getDimensions().get(2).getField()); + assertTrue(starTreeFieldType.getDimensions().get(2) instanceof OrdinalDimension); + assertEquals("nested3.numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals( + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, + starTreeFieldType.getStarTreeConfig().getBuildMode() + ); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + } + } + public void testValidCompositeIndexWithDuplicateDates() { prepareCreate(TEST_INDEX).setMapping(createDateTestMapping(true)).setSettings(settings).get(); Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); @@ -555,11 +787,156 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException { () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() ); assertEquals( - "object mapping for [_doc] with array for [numeric_dv] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [_doc] with array for [numeric_dv] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", ex.getMessage() ); } + public void testCompositeIndexWithArraysInNestedCompositeField() throws IOException { + // here nested.nested1.status is part of the composite field but "nested" field itself is an array + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // Attempt to index a document with an array field + XContentBuilder doc = 
jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startArray("nested") + .startObject() + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .endArray() + .endObject() + .endArray() + .endObject(); + // Indexing the document should fail with a MapperParsingException + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [_doc] with array for [nested] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInChildNestedCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // here nested.nested1.status is part of the composite field but "nested.nested1" field is an array + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested") + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .endArray() + .endObject() + .endObject(); + // Indexing the document should fail with a MapperParsingException + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [nested] with array for [nested1] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInNestedCompositeFieldSameNameAsNormalField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMappingForArray()).get(); + // here status is part of the composite field but "nested.nested1.status" field is an array which is not + // part of composite field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested") + .startObject("nested1") + .startArray("status") + .value(10) + .value(20) + .value(30) + .endArray() + .endObject() + .endObject() + .field("status", "200") + .endObject(); + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + int values = Integer.parseInt((String) hit.getSourceAsMap().get("status")); + assertEquals(200, values); + } + + public void testCompositeIndexWithNestedArraysInNonCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // Attempt to index a document with an array field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", 
"2023-06-01T12:00:00Z") + .startObject("nested-not-startree") + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 20) + .endObject() + .startObject() + .field("status", 30) + .endObject() + .endArray() + .endObject() + .endObject(); + + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + List values = (List) ((Map) (hit.getSourceAsMap().get("nested-not-startree"))).get("nested1"); + assertEquals(3, values.size()); + int i = 1; + for (Object val : values) { + Map valMap = (Map) val; + assertEquals(10 * i, valMap.get("status")); + i++; + } + } + public void testCompositeIndexWithArraysInNonCompositeField() throws IOException { prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); // Attempt to index a document with an array field @@ -748,7 +1125,7 @@ public void testUnsupportedDim() { () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, true)).get() ); assertEquals( - "Failed to parse mapping [_doc]: unsupported field type associated with dimension [ip] as part of star tree field [startree-1]", + "Failed to parse mapping [_doc]: unsupported field type associated with dimension [wildcard] as part of star tree field [startree-1]", ex.getMessage() ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java index a1b512c326ac5..f660695af9965 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java @@ -8,14 +8,20 @@ package org.opensearch.indices.replication; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; import java.nio.file.Path; +import java.util.List; +import java.util.Set; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SearchReplicaReplicationIT extends SegmentReplicationBaseIT { @@ -82,4 +88,47 @@ public void testReplication() throws Exception { waitForSearchableDocs(docCount, primary, replica); } + public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final List nodes = 
internalCluster().startDataOnlyNodes(2); + createIndex( + INDEX_NAME, + Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .put("number_of_search_only_replicas", 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + ensureGreen(INDEX_NAME); + + final int docCount = 5; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(docCount, nodes); + + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats(INDEX_NAME) + .setDetailed(true) + .execute() + .actionGet(); + + // Verify the number of indices + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size()); + // Verify total shards + assertEquals(2, segmentReplicationStatsResponse.getTotalShards()); + // Verify the number of primary shards + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size()); + + SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0); + Set replicaStats = perGroupStats.getReplicaStats(); + // Verify the number of replica stats + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats) { + assertNotNull(replicaStat.getCurrentReplicationState()); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index ebb911c739eb3..1c4585e38ee90 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -39,6 +39,9 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -1078,4 +1081,79 @@ public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws Interrup Thread.sleep(10000); ensureGreen(INDEX_NAME); } + + public void testSuccessfulShallowV1SnapshotPostIndexClose() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms")); + + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + logger.info("Create shallow snapshot setting enabled repo"); + String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + Path shallowSnapshotRepoPath = randomRepoPath(); + Settings.Builder settings = Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + 
createRepository(shallowSnapshotRepoName, "fs", settings); + + for (int i = 0; i < 10; i++) { + indexBulk(INDEX_NAME, 1); + } + flushAndRefresh(INDEX_NAME); + + logger.info("Verify shallow snapshot created before close"); + final String snapshot1 = "snapshot1"; + SnapshotInfo snapshotInfo1 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot1) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo1.state()); + assertTrue(snapshotInfo1.successfulShards() > 0); + assertEquals(0, snapshotInfo1.failedShards()); + + for (int i = 0; i < 10; i++) { + indexBulk(INDEX_NAME, 1); + } + + // close index + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(1000); + logger.info("Verify shallow snapshot created after close"); + final String snapshot2 = "snapshot2"; + + SnapshotInfo snapshotInfo2 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot2) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo2.state()); + assertTrue(snapshotInfo2.successfulShards() > 0); + assertEquals(0, snapshotInfo2.failedShards()); + + // delete the index + cluster().wipeIndices(INDEX_NAME); + // try restoring the snapshot + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(shallowSnapshotRepoName, snapshot2) + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(INDEX_NAME); + flushAndRefresh(INDEX_NAME); + assertBusy(() -> { assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), 20); }); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java index ef7da395d2151..79caef1f45a26 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java @@ -82,8 +82,7 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testSimpleTimeout() throws Exception { - final int numDocs = 1000; - for (int i = 0; i < numDocs; i++) { + for (int i = 0; i < 32; i++) { client().prepareIndex("test").setId(Integer.toString(i)).setSource("field", "value").get(); } refresh("test"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 40c9301ef4bce..d200b9177353a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -314,7 +314,7 @@ public void testSearchCancellationWithBackpressureDisabled() throws InterruptedE assertNull("SearchShardTask shouldn't have cancelled for monitor_only mode", caughtException); } - private static class ExceptionCatchingListener implements ActionListener { + public static class ExceptionCatchingListener implements ActionListener { private final CountDownLatch latch; private Exception exception = null; @@ -333,7 +333,11 @@ public void onFailure(Exception e) { 
latch.countDown(); } - private Exception getException() { + public CountDownLatch getLatch() { + return latch; + } + + public Exception getException() { return exception; } } @@ -349,7 +353,7 @@ private Supplier descriptionSupplier(String description) { return () -> description; } - interface TaskFactory { + public interface TaskFactory { T createTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 2ce96092203e8..60a6e59014e11 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -1023,7 +1023,7 @@ public void testDocValueFields() throws Exception { .startObject("ip_field") .field("type", "ip") .endObject() - .startObject("flat_object_field") + .startObject("flat_object_field1") .field("type", "flat_object") .endObject() .endObject() @@ -1050,9 +1050,11 @@ public void testDocValueFields() throws Exception { .field("boolean_field", true) .field("binary_field", new byte[] { 42, 100 }) .field("ip_field", "::1") - .field("flat_object_field") + .field("flat_object_field1") .startObject() + .field("fooa", "bara") .field("foo", "bar") + .field("foob", "barb") .endObject() .endObject() ) @@ -1075,7 +1077,7 @@ public void testDocValueFields() throws Exception { .addDocValueField("boolean_field") .addDocValueField("binary_field") .addDocValueField("ip_field") - .addDocValueField("flat_object_field"); + .addDocValueField("flat_object_field1.foo"); SearchResponse searchResponse = builder.get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(1L)); @@ -1097,7 +1099,7 @@ public void testDocValueFields() throws Exception { "keyword_field", "binary_field", "ip_field", - "flat_object_field" + "flat_object_field1.foo" ) ) ); @@ -1116,7 +1118,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field1.foo").getValue(), equalTo("bar")); builder = client().prepareSearch().setQuery(matchAllQuery()).addDocValueField("*field"); searchResponse = builder.get(); @@ -1139,8 +1141,7 @@ public void testDocValueFields() throws Exception { "text_field", "keyword_field", "binary_field", - "ip_field", - "flat_object_field" + "ip_field" ) ) ); @@ -1160,7 +1161,6 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); builder = client().prepareSearch() .setQuery(matchAllQuery()) @@ 
-1176,7 +1176,7 @@ public void testDocValueFields() throws Exception { .addDocValueField("boolean_field", "use_field_mapping") .addDocValueField("binary_field", "use_field_mapping") .addDocValueField("ip_field", "use_field_mapping") - .addDocValueField("flat_object_field", "use_field_mapping"); + .addDocValueField("flat_object_field1.foo", null); ; searchResponse = builder.get(); @@ -1199,7 +1199,7 @@ public void testDocValueFields() throws Exception { "keyword_field", "binary_field", "ip_field", - "flat_object_field" + "flat_object_field1.foo" ) ) ); @@ -1219,7 +1219,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field").getValue(), equalTo("flat_object_field.foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("flat_object_field1.foo").getValue(), equalTo("bar")); builder = client().prepareSearch() .setQuery(matchAllQuery()) diff --git a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java new file mode 100644 index 0000000000000..6b68a83da94e2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java @@ -0,0 +1,434 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchTask; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.backpressure.SearchBackpressureIT.ExceptionCatchingListener; +import org.opensearch.search.backpressure.SearchBackpressureIT.TaskFactory; +import org.opensearch.search.backpressure.SearchBackpressureIT.TestResponse; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.threadpool.ThreadPool.Names.SAME; +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.hamcrest.Matchers.instanceOf; + +public class WorkloadManagementIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + final static String PUT = "PUT"; + final static String MEMORY = "MEMORY"; + final static String CPU = "CPU"; + final static String ENABLED = "enabled"; + final static String DELETE = "DELETE"; + private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.SECONDS); + + public WorkloadManagementIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection<Object[]> parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Collection<Class<? extends Plugin>> 
nodePlugins() { + final List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestClusterUpdatePlugin.class); + return plugins; + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder() + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD.getKey(), 0.9) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.getKey(), 0.9) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testHighCPUInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighCPUInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, 
queryGroup.get_id());
+        assertNull("SearchTask should not have been cancelled in monitor mode", caughtException);
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public void testNoCancellation() throws InterruptedException {
+        QueryGroup queryGroup = new QueryGroup(
+            "name",
+            new MutableQueryGroupFragment(
+                MutableQueryGroupFragment.ResiliencyMode.ENFORCED,
+                Map.of(ResourceType.CPU, 0.8, ResourceType.MEMORY, 0.8)
+            )
+        );
+        updateQueryGroupInClusterState(PUT, queryGroup);
+        Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id());
+        assertNull(caughtException);
+        updateQueryGroupInClusterState(DELETE, queryGroup);
+    }
+
+    public Exception executeQueryGroupTask(String resourceType, String queryGroupId) throws InterruptedException {
+        ExceptionCatchingListener listener = new ExceptionCatchingListener();
+        client().execute(
+            TestQueryGroupTaskTransportAction.ACTION,
+            new TestQueryGroupTaskRequest(
+                resourceType,
+                queryGroupId,
+                (TaskFactory) (id, type, action, description, parentTaskId, headers) -> new SearchTask(
+                    id,
+                    type,
+                    action,
+                    () -> description,
+                    parentTaskId,
+                    headers
+                )
+            ),
+            listener
+        );
+        assertTrue(listener.getLatch().await(TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS));
+        return listener.getException();
+    }
+
+    public void updateQueryGroupInClusterState(String method, QueryGroup queryGroup) throws InterruptedException {
+        ExceptionCatchingListener listener = new ExceptionCatchingListener();
+        client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(queryGroup, method), listener);
+        assertTrue(listener.getLatch().await(TIMEOUT.getSeconds(), TimeUnit.SECONDS));
+        assertEquals(0, listener.getLatch().getCount());
+    }
+
+    public static class TestClusterUpdateRequest extends ClusterManagerNodeRequest<TestClusterUpdateRequest> {
+        final private String method;
+        final private QueryGroup queryGroup;
+
+        public TestClusterUpdateRequest(QueryGroup queryGroup, String method) {
+            this.method = method;
+            this.queryGroup = queryGroup;
+        }
+
+        public TestClusterUpdateRequest(StreamInput in) throws IOException {
+            super(in);
+            this.method = in.readString();
+            this.queryGroup = new QueryGroup(in);
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(method);
+            queryGroup.writeTo(out);
+        }
+
+        public QueryGroup getQueryGroup() {
+            return queryGroup;
+        }
+
+        public String getMethod() {
+            return method;
+        }
+    }
+
+    public static class TestClusterUpdateTransportAction extends TransportClusterManagerNodeAction<TestClusterUpdateRequest, TestResponse> {
+        public static final ActionType<TestResponse> ACTION = new ActionType<>("internal::test_cluster_update_action", TestResponse::new);
+
+        @Inject
+        public TestClusterUpdateTransportAction(
+            ThreadPool threadPool,
+            TransportService transportService,
+            ActionFilters actionFilters,
+            IndexNameExpressionResolver indexNameExpressionResolver,
+            ClusterService clusterService
+        ) {
+            super(
+                ACTION.name(),
+                transportService,
+                clusterService,
+                threadPool,
+                actionFilters,
+                TestClusterUpdateRequest::new,
+                indexNameExpressionResolver
+            );
+        }
+
+        @Override
+        protected String executor() {
+            return SAME;
+        }
+
+        @Override
+        protected TestResponse read(StreamInput in) throws IOException {
+            return new TestResponse(in);
+        }
+
+        @Override
+        protected ClusterBlockException checkBlock(TestClusterUpdateRequest request, ClusterState state) {
+            return
state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + TestClusterUpdateRequest request, + ClusterState clusterState, + ActionListener listener + ) { + clusterService.submitStateUpdateTask("query-group-persistence-service", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + Map currentGroups = currentState.metadata().queryGroups(); + QueryGroup queryGroup = request.getQueryGroup(); + String id = queryGroup.get_id(); + String method = request.getMethod(); + Metadata metadata; + if (method.equals(PUT)) { // create + metadata = Metadata.builder(currentState.metadata()).put(queryGroup).build(); + } else { // delete + metadata = Metadata.builder(currentState.metadata()).remove(currentGroups.get(id)).build(); + } + return ClusterState.builder(currentState).metadata(metadata).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new TestResponse()); + } + }); + } + } + + public static class TestQueryGroupTaskRequest extends ActionRequest { + private final String type; + private final String queryGroupId; + private TaskFactory taskFactory; + + public TestQueryGroupTaskRequest(String type, String queryGroupId, TaskFactory taskFactory) { + this.type = type; + this.queryGroupId = queryGroupId; + this.taskFactory = taskFactory; + } + + public TestQueryGroupTaskRequest(StreamInput in) throws IOException { + super(in); + this.type = in.readString(); + this.queryGroupId = in.readString(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return taskFactory.createTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(type); + out.writeString(queryGroupId); + } + + public String getType() { + return type; + } + + public String getQueryGroupId() { + return queryGroupId; + } + } + + public static class TestQueryGroupTaskTransportAction extends HandledTransportAction { + public static final ActionType ACTION = new ActionType<>("internal::test_query_group_task_action", TestResponse::new); + private final ThreadPool threadPool; + + @Inject + public TestQueryGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestQueryGroupTaskRequest::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionListener listener) { + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, request.getQueryGroupId()); + threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + CancellableTask cancellableTask = (CancellableTask) task; + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + assertEquals(request.getQueryGroupId(), ((QueryGroupTask) task).getQueryGroupId()); + long startTime = System.nanoTime(); + while (System.nanoTime() - startTime < TIMEOUT.getNanos()) { + doWork(request); + if (cancellableTask.isCancelled()) { + break; + } + } + if (cancellableTask.isCancelled()) { + throw new 
TaskCancelledException(cancellableTask.getReasonCancelled()); + } else { + listener.onResponse(new TestResponse()); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void doWork(TestQueryGroupTaskRequest request) throws InterruptedException { + switch (request.getType()) { + case "CPU": + long i = 0, j = 1, k = 1, iterations = 1000; + do { + j += i; + k *= j; + i++; + } while (i < iterations); + break; + case "MEMORY": + int bytesToAllocate = (int) (Runtime.getRuntime().totalMemory() * 0.01); + Byte[] bytes = new Byte[bytesToAllocate]; + int[] ints = new int[bytesToAllocate]; + break; + } + } + } + + public static class TestClusterUpdatePlugin extends Plugin implements ActionPlugin { + @Override + public List> getActions() { + return Arrays.asList( + new ActionHandler<>(TestClusterUpdateTransportAction.ACTION, TestClusterUpdateTransportAction.class), + new ActionHandler<>(TestQueryGroupTaskTransportAction.ACTION, TestQueryGroupTaskTransportAction.class) + ); + } + + @Override + public List> getClientActions() { + return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestQueryGroupTaskTransportAction.ACTION); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java index 7b36b7a10f4f2..01efa96a7369e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java @@ -18,6 +18,8 @@ import org.opensearch.action.support.HandledTransportAction; import org.opensearch.action.support.TimeoutTaskCancellationUtility; import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.breaker.ResponseLimitBreachedException; import org.opensearch.common.breaker.ResponseLimitSettings; import org.opensearch.common.inject.Inject; @@ -27,6 +29,7 @@ import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; +import java.util.List; import java.util.Objects; import static org.opensearch.common.breaker.ResponseLimitSettings.LimitEntity.SHARDS; @@ -98,9 +101,6 @@ public void onResponse(ClusterStateResponse clusterStateResponse) { shardsRequest.getPageParams(), clusterStateResponse ); - String[] indices = Objects.isNull(paginationStrategy) - ? shardsRequest.getIndices() - : paginationStrategy.getRequestedIndices().toArray(new String[0]); catShardsResponse.setNodes(clusterStateResponse.getState().getNodes()); catShardsResponse.setResponseShards( Objects.isNull(paginationStrategy) @@ -108,8 +108,12 @@ public void onResponse(ClusterStateResponse clusterStateResponse) { : paginationStrategy.getRequestedEntities() ); catShardsResponse.setPageToken(Objects.isNull(paginationStrategy) ? null : paginationStrategy.getResponseToken()); + + String[] indices = Objects.isNull(paginationStrategy) + ? shardsRequest.getIndices() + : filterClosedIndices(clusterStateResponse.getState(), paginationStrategy.getRequestedIndices()); // For paginated queries, if strategy outputs no shards to be returned, avoid fetching IndicesStats. 
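In condensed form, the OPEN-only filtering this comment alludes to, under assumed local names `requestedIndices` and `clusterState` (the full method appears at the end of this hunk):

```java
// Drop unknown and CLOSE-state indices before fetching IndicesStats, since the
// default strict open-and-forbid-closed resolution fails on concrete closed indices.
String[] indices = requestedIndices.stream()
    .filter(name -> {
        IndexMetadata metadata = clusterState.metadata().indices().get(name);
        return metadata != null && metadata.getState() != IndexMetadata.State.CLOSE;
    })
    .toArray(String[]::new);
```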
- if (shouldSkipIndicesStatsRequest(paginationStrategy)) { + if (shouldSkipIndicesStatsRequest(paginationStrategy, indices)) { catShardsResponse.setIndicesStatsResponse(IndicesStatsResponse.getEmptyResponse()); cancellableListener.onResponse(catShardsResponse); return; @@ -166,7 +170,19 @@ private void validateRequestLimit( } } - private boolean shouldSkipIndicesStatsRequest(ShardPaginationStrategy paginationStrategy) { - return Objects.nonNull(paginationStrategy) && paginationStrategy.getRequestedEntities().isEmpty(); + private boolean shouldSkipIndicesStatsRequest(ShardPaginationStrategy paginationStrategy, String[] indices) { + return Objects.nonNull(paginationStrategy) && (indices == null || indices.length == 0); + } + + /** + * Will be used by paginated query (_list/shards) to filter out closed indices (only consider OPEN) before fetching + * IndicesStats. Since pagination strategy always passes concrete indices to TransportIndicesStatsAction, + * the default behaviour of StrictExpandOpenAndForbidClosed leads to errors if closed indices are encountered. + */ + private String[] filterClosedIndices(ClusterState clusterState, List strategyIndices) { + return strategyIndices.stream().filter(index -> { + IndexMetadata metadata = clusterState.metadata().indices().get(index); + return metadata != null && metadata.getState().equals(IndexMetadata.State.CLOSE) == false; + }).toArray(String[]::new); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java index fc97d67c6c3af..44408c5043fcf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsAction.java @@ -21,7 +21,6 @@ import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.IndexService; import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationPressureService; import org.opensearch.index.SegmentReplicationShardStats; @@ -38,7 +37,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; /** * Transport action for shard segment replication operation. This transport action does not actually @@ -96,11 +97,11 @@ protected SegmentReplicationStatsResponse newResponse( ) { String[] shards = request.shards(); final List shardsToFetch = Arrays.stream(shards).map(Integer::valueOf).collect(Collectors.toList()); - // organize replica responses by allocationId. final Map replicaStats = new HashMap<>(); // map of index name to list of replication group stats. 
final Map> primaryStats = new HashMap<>(); + for (SegmentReplicationShardStatsResponse response : responses) { if (response != null) { if (response.getReplicaStats() != null) { @@ -109,6 +110,7 @@ protected SegmentReplicationStatsResponse newResponse( replicaStats.putIfAbsent(shardRouting.allocationId().getId(), response.getReplicaStats()); } } + if (response.getPrimaryStats() != null) { final ShardId shardId = response.getPrimaryStats().getShardId(); if (shardsToFetch.isEmpty() || shardsToFetch.contains(shardId.getId())) { @@ -126,15 +128,20 @@ protected SegmentReplicationStatsResponse newResponse( } } } - // combine the replica stats to the shard stat entry in each group. - for (Map.Entry> entry : primaryStats.entrySet()) { - for (SegmentReplicationPerGroupStats group : entry.getValue()) { - for (SegmentReplicationShardStats replicaStat : group.getReplicaStats()) { - replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null)); - } - } - } - return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, primaryStats, shardFailures); + + Map> replicationStats = primaryStats.entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue() + .stream() + .map(groupStats -> updateGroupStats(groupStats, replicaStats)) + .collect(Collectors.toList()) + ) + ); + + return new SegmentReplicationStatsResponse(totalShards, successfulShards, failedShards, replicationStats, shardFailures); } @Override @@ -144,9 +151,8 @@ protected SegmentReplicationStatsRequest readRequestFrom(StreamInput in) throws @Override protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplicationStatsRequest request, ShardRouting shardRouting) { - IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); ShardId shardId = shardRouting.shardId(); + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id()); if (indexShard.indexSettings().isSegRepEnabledOrRemoteNode() == false) { return null; @@ -156,11 +162,7 @@ protected SegmentReplicationShardStatsResponse shardOperation(SegmentReplication return new SegmentReplicationShardStatsResponse(pressureService.getStatsForShard(indexShard)); } - // return information about only on-going segment replication events. 
- if (request.activeOnly()) { - return new SegmentReplicationShardStatsResponse(targetService.getOngoingEventSegmentReplicationState(shardId)); - } - return new SegmentReplicationShardStatsResponse(targetService.getSegmentReplicationState(shardId)); + return new SegmentReplicationShardStatsResponse(getSegmentReplicationState(shardId, request.activeOnly())); } @Override @@ -181,4 +183,83 @@ protected ClusterBlockException checkRequestBlock( ) { return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); } + + private SegmentReplicationPerGroupStats updateGroupStats( + SegmentReplicationPerGroupStats groupStats, + Map replicaStats + ) { + // Update the SegmentReplicationState for each of the replicas + Set updatedReplicaStats = groupStats.getReplicaStats() + .stream() + .peek(replicaStat -> replicaStat.setCurrentReplicationState(replicaStats.getOrDefault(replicaStat.getAllocationId(), null))) + .collect(Collectors.toSet()); + + // Compute search replica stats + Set searchReplicaStats = computeSearchReplicaStats(groupStats.getShardId(), replicaStats); + + // Combine ReplicaStats and SearchReplicaStats + Set combinedStats = Stream.concat(updatedReplicaStats.stream(), searchReplicaStats.stream()) + .collect(Collectors.toSet()); + + return new SegmentReplicationPerGroupStats(groupStats.getShardId(), combinedStats, groupStats.getRejectedRequestCount()); + } + + private Set computeSearchReplicaStats( + ShardId shardId, + Map replicaStats + ) { + return replicaStats.values() + .stream() + .filter(segmentReplicationState -> segmentReplicationState.getShardRouting().shardId().equals(shardId)) + .filter(segmentReplicationState -> segmentReplicationState.getShardRouting().isSearchOnly()) + .map(segmentReplicationState -> { + ShardRouting shardRouting = segmentReplicationState.getShardRouting(); + SegmentReplicationShardStats segmentReplicationStats = computeSegmentReplicationShardStats(shardRouting); + segmentReplicationStats.setCurrentReplicationState(segmentReplicationState); + return segmentReplicationStats; + }) + .collect(Collectors.toSet()); + } + + SegmentReplicationShardStats computeSegmentReplicationShardStats(ShardRouting shardRouting) { + ShardId shardId = shardRouting.shardId(); + SegmentReplicationState completedSegmentReplicationState = targetService.getlatestCompletedEventSegmentReplicationState(shardId); + SegmentReplicationState ongoingSegmentReplicationState = targetService.getOngoingEventSegmentReplicationState(shardId); + + return new SegmentReplicationShardStats( + shardRouting.allocationId().getId(), + 0, + calculateBytesRemainingToReplicate(ongoingSegmentReplicationState), + 0, + getCurrentReplicationLag(ongoingSegmentReplicationState), + getLastCompletedReplicationLag(completedSegmentReplicationState) + ); + } + + private SegmentReplicationState getSegmentReplicationState(ShardId shardId, boolean isActiveOnly) { + if (isActiveOnly) { + return targetService.getOngoingEventSegmentReplicationState(shardId); + } else { + return targetService.getSegmentReplicationState(shardId); + } + } + + private long calculateBytesRemainingToReplicate(SegmentReplicationState ongoingSegmentReplicationState) { + if (ongoingSegmentReplicationState == null) { + return 0; + } + return ongoingSegmentReplicationState.getIndex() + .fileDetails() + .stream() + .mapToLong(index -> index.length() - index.recovered()) + .sum(); + } + + private long getCurrentReplicationLag(SegmentReplicationState ongoingSegmentReplicationState) { + return ongoingSegmentReplicationState != 
null ? ongoingSegmentReplicationState.getTimer().time() : 0; + } + + private long getLastCompletedReplicationLag(SegmentReplicationState completedSegmentReplicationState) { + return completedSegmentReplicationState != null ? completedSegmentReplicationState.getTimer().time() : 0; + } } diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 819e09312a0df..558b7370749d5 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -430,6 +430,13 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes if (remoteClusterStateService != null && termVersionResponse.isStatePresentInRemote()) { try { + logger.info( + () -> new ParameterizedMessage( + "Term version checker downloading full cluster state for term {}, version {}", + termVersion.getTerm(), + termVersion.getVersion() + ) + ); ClusterStateTermVersion clusterStateTermVersion = termVersionResponse.getClusterStateTermVersion(); Optional clusterMetadataManifest = remoteClusterStateService .getClusterMetadataManifestByTermVersion( @@ -454,7 +461,7 @@ private ClusterState getStateFromLocalNode(GetTermVersionResponse termVersionRes return clusterStateFromRemote; } } catch (Exception e) { - logger.trace("Error while fetching from remote cluster state", e); + logger.error("Error while fetching from remote cluster state", e); } } return null; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java index 1cab739a20838..22861e0ba5c31 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/term/TransportGetTermVersionAction.java @@ -98,7 +98,7 @@ private GetTermVersionResponse buildResponse(GetTermVersionRequest request, Clus ClusterStateTermVersion termVersion = new ClusterStateTermVersion(state); if (discovery instanceof Coordinator) { Coordinator coordinator = (Coordinator) discovery; - if (coordinator.isRemotePublicationEnabled()) { + if (coordinator.canDownloadFullStateFromRemote()) { return new GetTermVersionResponse(termVersion, coordinator.isRemotePublicationEnabled()); } } diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 53b1d990f9a0c..9f1dcbe8fb587 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -35,7 +35,9 @@ import org.opensearch.cli.Command; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; @@ -71,6 +73,9 @@ import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath; import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath; +import static 
org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING; /** * Initializes SecurityManager with necessary permissions. @@ -402,6 +407,7 @@ static void addFilePermissions(Permissions policy, Environment environment) thro private static void addBindPermissions(Permissions policy, Settings settings) { addSocketPermissionForHttp(policy, settings); addSocketPermissionForTransportProfiles(policy, settings); + addSocketPermissionForAux(policy, settings); } /** @@ -416,6 +422,29 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S addSocketPermissionForPortRange(policy, httpRange); } + /** + * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORTS. + * If an auxiliary transport type is enabled but has no corresponding port range setting fall back to AUX_PORT_DEFAULTS. + * + * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to. + * @param settings the {@link Settings} instance to read the gRPC settings from + */ + private static void addSocketPermissionForAux(final Permissions policy, final Settings settings) { + Set portsRanges = new HashSet<>(); + for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) { + Setting auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType); + if (auxTypePortSettings.exists(settings)) { + portsRanges.add(auxTypePortSettings.get(settings)); + } else { + portsRanges.add(new PortsRange(AUX_PORT_DEFAULTS)); + } + } + + for (PortsRange portRange : portsRanges) { + addSocketPermissionForPortRange(policy, portRange.getPortRangeString()); + } + } + /** * Add dynamic {@link SocketPermission} based on transport settings. This method will first check if there is a port range specified in * the transport profile specified by {@code profileSettings} and will fall back to {@code settings}. 
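To make the fallback concrete, a minimal sketch of node settings the method above would act on (the "grpc" and "other" type names are illustrative; the keys come from the affix settings referenced in the hunk):

```java
// Assumed illustration: "grpc" declares an explicit port range, "other" does not.
// addSocketPermissionForAux grants a SocketPermission for 9400-9500 for "grpc"
// and falls back to the AUX_PORT_DEFAULTS range for "other".
Settings settings = Settings.builder()
    .putList(AUX_TRANSPORT_TYPES_SETTING.getKey(), "grpc", "other")
    .put(AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace("grpc").getKey(), "9400-9500")
    .build();
```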
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 6fee2037501e7..ef0f49b8ae394 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -1906,4 +1906,12 @@ public boolean isRemotePublicationEnabled() { } return false; } + + public boolean canDownloadFullStateFromRemote() { + if (remoteClusterStateService != null) { + return remoteClusterStateService.isRemotePublicationEnabled() && remoteClusterStateService.canDownloadFromRemoteForReadAPI(); + } + return false; + } + } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 7275d72f2db9f..4ad5b80038048 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -258,7 +258,7 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest } if (applyFullState == true) { - logger.debug( + logger.info( () -> new ParameterizedMessage( "Downloading full cluster state for term {}, version {}, stateUUID {}", manifest.getClusterTerm(), diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java index a05938c176678..7999faece52ca 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/RemoteShardsBalancer.java @@ -247,11 +247,17 @@ void balance() { final Map nodePrimaryShardCount = calculateNodePrimaryShardCount(remoteRoutingNodes); int totalPrimaryShardCount = nodePrimaryShardCount.values().stream().reduce(0, Integer::sum); - totalPrimaryShardCount += routingNodes.unassigned().getNumPrimaries(); - int avgPrimaryPerNode = (totalPrimaryShardCount + routingNodes.size() - 1) / routingNodes.size(); + int unassignedRemotePrimaryShardCount = 0; + for (ShardRouting shard : routingNodes.unassigned()) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getShardPool(shard, allocation)) && shard.primary()) { + unassignedRemotePrimaryShardCount++; + } + } + totalPrimaryShardCount += unassignedRemotePrimaryShardCount; + final int avgPrimaryPerNode = (totalPrimaryShardCount + remoteRoutingNodes.size() - 1) / remoteRoutingNodes.size(); - ArrayDeque sourceNodes = new ArrayDeque<>(); - ArrayDeque targetNodes = new ArrayDeque<>(); + final ArrayDeque sourceNodes = new ArrayDeque<>(); + final ArrayDeque targetNodes = new ArrayDeque<>(); for (RoutingNode node : remoteRoutingNodes) { if (nodePrimaryShardCount.get(node.nodeId()) > avgPrimaryPerNode) { sourceNodes.add(node); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index d0b6f812e9ee2..6489f3cb33ce0 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -233,6 +233,13 @@ public ClusterState state() { return clusterState; } + /** + * Returns true if the 
appliedClusterState is not null + */ + public boolean isStateInitialised() { + return this.state.get() != null; + } + /** * Returns true if the appliedClusterState is not null */ diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index 1a79161d223e2..b4f2250f6dec9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -183,6 +183,13 @@ public ClusterState state() { return clusterApplierService.state(); } + /** + * Returns true if the state in appliedClusterState is not null + */ + public boolean isStateInitialised() { + return clusterApplierService.isStateInitialised(); + } + /** * The state that is persisted to store but may not be applied to cluster. * @return ClusterState diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index bb8da190a6f35..5d55fb52c323d 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -80,6 +80,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_KEY; +import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING; + /** * A module to handle registering and binding all network related classes. * @@ -157,6 +160,8 @@ public final class NetworkModule { private final Map> transportFactories = new HashMap<>(); private final Map> transportHttpFactories = new HashMap<>(); + private final Map> transportAuxFactories = new HashMap<>(); + private final List transportInterceptors = new ArrayList<>(); /** @@ -222,6 +227,18 @@ public NetworkModule( registerHttpTransport(entry.getKey(), entry.getValue()); } + Map> auxTransportFactory = plugin.getAuxTransports( + settings, + threadPool, + circuitBreakerService, + networkService, + clusterSettings, + tracer + ); + for (Map.Entry> entry : auxTransportFactory.entrySet()) { + registerAuxTransport(entry.getKey(), entry.getValue()); + } + Map> transportFactory = plugin.getTransports( settings, threadPool, @@ -305,6 +322,12 @@ private void registerHttpTransport(String key, Supplier fac } } + private void registerAuxTransport(String key, Supplier factory) { + if (transportAuxFactories.putIfAbsent(key, factory) != null) { + throw new IllegalArgumentException("transport for name: " + key + " is already registered"); + } + } + /** * Register an allocation command. *

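For context between these NetworkModule hunks: transportAuxFactories is populated from NetworkPlugin#getAuxTransports, so a plugin wires in an aux transport roughly as below (a hedged sketch; the plugin and transport class names are hypothetical, and the parameter list mirrors the call site in the earlier hunk):

```java
// Hypothetical plugin contributing one aux transport under the type name "grpc".
// registerAuxTransport above rejects a second factory registered for the same name.
public class MyAuxTransportPlugin extends Plugin implements NetworkPlugin {
    @Override
    public Map<String, Supplier<AuxTransport>> getAuxTransports(
        Settings settings,
        ThreadPool threadPool,
        CircuitBreakerService circuitBreakerService,
        NetworkService networkService,
        ClusterSettings clusterSettings,
        Tracer tracer
    ) {
        // MyGrpcAuxTransport is an assumed implementation of NetworkPlugin.AuxTransport
        return Collections.singletonMap("grpc", () -> new MyGrpcAuxTransport(settings));
    }
}
```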
@@ -346,6 +369,25 @@ public Supplier<HttpServerTransport> getHttpServerTransportSupplier() {
         return factory;
     }
 
+    /**
+     * Optional client/server transports that run in parallel to HttpServerTransport.
+     * Multiple transport types can be registered and enabled via AUX_TRANSPORT_TYPES_SETTING.
+     * An IllegalStateException is thrown if a transport type is enabled but not registered.
+     */
+    public List<NetworkPlugin.AuxTransport> getAuxServerTransportList() {
+        List<NetworkPlugin.AuxTransport> serverTransportSuppliers = new ArrayList<>();
+
+        for (String transportType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) {
+            final Supplier<NetworkPlugin.AuxTransport> factory = transportAuxFactories.get(transportType);
+            if (factory == null) {
+                throw new IllegalStateException("Unsupported " + AUX_TRANSPORT_TYPES_KEY + " [" + transportType + "]");
+            }
+            serverTransportSuppliers.add(factory.get());
+        }
+
+        return serverTransportSuppliers;
+    }
+
     public Supplier<Transport> getTransportSupplier() {
         final String name;
         if (TRANSPORT_TYPE_SETTING.exists(settings)) {
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
index 04a19e32c4ebc..f554e6d1dc591 100644
--- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java
@@ -149,6 +149,7 @@
 import org.opensearch.node.resource.tracker.ResourceTrackerSettings;
 import org.opensearch.persistent.PersistentTasksClusterService;
 import org.opensearch.persistent.decider.EnableAssignmentDecider;
+import org.opensearch.plugins.NetworkPlugin;
 import org.opensearch.plugins.PluginsService;
 import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlSettings;
 import org.opensearch.ratelimitting.admissioncontrol.settings.CpuBasedAdmissionControllerSettings;
@@ -362,6 +363,7 @@ public void apply(Settings value, Settings current, Settings previous) {
             NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED,
             NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION,
             NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME,
+            NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING,
             HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
             HttpTransportSettings.SETTING_CORS_ENABLED,
             HttpTransportSettings.SETTING_CORS_MAX_AGE,
@@ -738,6 +740,8 @@ public void apply(Settings value, Settings current, Settings previous) {
             RemoteClusterStateCleanupManager.REMOTE_CLUSTER_STATE_CLEANUP_INTERVAL_SETTING,
             RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING,
             RemoteClusterStateService.REMOTE_PUBLICATION_SETTING,
+            RemoteClusterStateService.REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API,
+
             INDEX_METADATA_UPLOAD_TIMEOUT_SETTING,
             GLOBAL_METADATA_UPLOAD_TIMEOUT_SETTING,
             METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING,
@@ -785,7 +789,6 @@ public void apply(Settings value, Settings current, Settings previous) {
 
             // Snapshot related Settings
             BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING,
-            BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING,
             BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD,
 
             SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING,
diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
index c5fc6d5cae6a7..778ab3e56cf76 100644
--- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java
@@ -129,6 +129,7 @@ public class
RemoteClusterStateService implements Closeable { * Gates the functionality of remote publication. */ public static final String REMOTE_PUBLICATION_SETTING_KEY = "cluster.remote_store.publication.enabled"; + public static final String REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API_KEY = "cluster.remote_state.download.serve_read_api.enabled"; public static final Setting REMOTE_PUBLICATION_SETTING = Setting.boolSetting( REMOTE_PUBLICATION_SETTING_KEY, @@ -137,6 +138,13 @@ public class RemoteClusterStateService implements Closeable { Property.Dynamic ); + public static final Setting REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API = Setting.boolSetting( + REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API_KEY, + true, + Property.NodeScope, + Property.Dynamic + ); + /** * Used to specify if cluster state metadata should be published to remote store */ @@ -235,6 +243,9 @@ public static RemoteClusterStateValidationMode parseString(String mode) { + "indices, coordination metadata updated : [{}], settings metadata updated : [{}], templates metadata " + "updated : [{}], custom metadata updated : [{}], indices routing updated : [{}]"; private volatile AtomicBoolean isPublicationEnabled; + + private volatile AtomicBoolean downloadFromRemoteForReadAPI; + private final String remotePathPrefix; private final RemoteClusterStateCache remoteClusterStateCache; @@ -281,6 +292,8 @@ public RemoteClusterStateService( && RemoteStoreNodeAttribute.isRemoteRoutingTableConfigured(settings) ); clusterSettings.addSettingsUpdateConsumer(REMOTE_PUBLICATION_SETTING, this::setRemotePublicationSetting); + this.downloadFromRemoteForReadAPI = new AtomicBoolean(clusterSettings.get(REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API)); + clusterSettings.addSettingsUpdateConsumer(REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API, this::setRemoteDownloadForReadAPISetting); this.remotePathPrefix = CLUSTER_REMOTE_STORE_STATE_PATH_PREFIX.get(settings); this.remoteRoutingTableService = RemoteRoutingTableServiceFactory.getService( repositoriesService, @@ -1124,6 +1137,14 @@ private void setRemotePublicationSetting(boolean remotePublicationSetting) { } } + private void setRemoteDownloadForReadAPISetting(boolean remoteDownloadForReadAPISetting) { + this.downloadFromRemoteForReadAPI.set(remoteDownloadForReadAPISetting); + } + + public boolean canDownloadFromRemoteForReadAPI() { + return this.downloadFromRemoteForReadAPI.get(); + } + // Package private for unit test RemoteRoutingTableService getRemoteRoutingTableService() { return this.remoteRoutingTableService; @@ -1473,8 +1494,22 @@ public ClusterState getClusterStateForManifest( try { ClusterState stateFromCache = remoteClusterStateCache.getState(clusterName, manifest); if (stateFromCache != null) { + logger.trace( + () -> new ParameterizedMessage( + "Found cluster state in cache for term {} and version {}", + manifest.getClusterTerm(), + manifest.getStateVersion() + ) + ); return stateFromCache; } + logger.info( + () -> new ParameterizedMessage( + "Cluster state not found in cache for term {} and version {}", + manifest.getClusterTerm(), + manifest.getStateVersion() + ) + ); final ClusterState clusterState; final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 991fbf12072be..7f78ae0b9d2ff 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java 
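One note before the http-transport hunks: REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API above is registered as a dynamic setting, so the gate can be flipped at runtime with an ordinary cluster settings update, e.g.:

```java
// Sketch: stop serving read APIs from remote cluster state without a restart.
// The key string matches REMOTE_STATE_DOWNLOAD_TO_SERVE_READ_API_KEY above.
client().admin()
    .cluster()
    .prepareUpdateSettings()
    .setPersistentSettings(Settings.builder().put("cluster.remote_state.download.serve_read_api.enabled", false))
    .get();
```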
@@ -62,6 +62,7 @@ import org.opensearch.telemetry.tracing.channels.TraceableRestChannel; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BindTransportException; +import org.opensearch.transport.Transport; import java.io.IOException; import java.net.InetAddress; @@ -71,7 +72,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -192,7 +192,25 @@ protected void bindServer() { throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); + final int publishPort = Transport.resolveTransportPublishPort( + SETTING_HTTP_PUBLISH_PORT.get(settings), + boundAddresses, + publishInetAddress + ); + if (publishPort < 0) { + throw new BindHttpException( + "Failed to auto-resolve http publish port, multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). " + + "Please specify a unique port by setting " + + SETTING_HTTP_PORT.getKey() + + " or " + + SETTING_HTTP_PUBLISH_PORT.getKey() + ); + } + TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), publishAddress); logger.info("{}", boundAddress); @@ -258,47 +276,6 @@ protected void doClose() {} */ protected abstract void stopInternal(); - // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); - - if (publishPort < 0) { - for (TransportAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.address().getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (TransportAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { - throw new BindHttpException( - "Failed to auto-resolve http publish port, multiple bound addresses " - + boundAddresses - + " with distinct ports and none of them matched the publish address (" - + publishInetAddress - + "). 
" - + "Please specify a unique port by setting " - + SETTING_HTTP_PORT.getKey() - + " or " - + SETTING_HTTP_PUBLISH_PORT.getKey() - ); - } - return publishPort; - } - public void onException(HttpChannel channel, Exception e) { channel.handleException(e); if (lifecycle.started() == false) { diff --git a/server/src/main/java/org/opensearch/http/HttpTracer.java b/server/src/main/java/org/opensearch/http/HttpTracer.java index de1da4a20e294..e31cca21f6a54 100644 --- a/server/src/main/java/org/opensearch/http/HttpTracer.java +++ b/server/src/main/java/org/opensearch/http/HttpTracer.java @@ -116,10 +116,11 @@ void traceResponse( ) { logger.trace( new ParameterizedMessage( - "[{}][{}][{}][{}][{}] sent response to [{}] success [{}]", + "[{}][{}][{}][{}][{}][{}] sent response to [{}] success [{}]", requestId, opaqueHeader, restResponse.status(), + restResponse.status().getStatus(), restResponse.contentType(), contentLength, httpChannel, diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java index 904d6a7aba5c6..ca52d8bf4bca0 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java @@ -33,9 +33,11 @@ import org.opensearch.index.compositeindex.datacube.startree.builder.StarTreesBuilder; import org.opensearch.index.compositeindex.datacube.startree.index.CompositeIndexValues; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.fielddata.IndexNumericFieldData; +import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.DocCountFieldMapper; -import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import java.io.IOException; @@ -44,6 +46,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -262,22 +265,38 @@ public SortedSetDocValues getSortedSet(FieldInfo field) { return DocValues.emptySortedSet(); } }); - } - // TODO : change this logic to evaluate for sortedNumericField specifically - else { + } else if (isSortedNumericField(compositeField)) { fieldProducerMap.put(compositeField, new EmptyDocValuesProducer() { @Override public SortedNumericDocValues getSortedNumeric(FieldInfo field) { return DocValues.emptySortedNumeric(); } }); + } else { + throw new IllegalStateException( + String.format(Locale.ROOT, "Unsupported DocValues field associated with the composite field : %s", compositeField) + ); } } compositeFieldSet.remove(compositeField); } private boolean isSortedSetField(String field) { - return mapperService.fieldType(field) instanceof KeywordFieldMapper.KeywordFieldType; + MappedFieldType ft = mapperService.fieldType(field); + assert ft.isAggregatable(); + return ft.fielddataBuilder( + "", + () -> { throw new UnsupportedOperationException("SearchLookup not available"); } + ) instanceof SortedSetOrdinalsIndexFieldData.Builder; + } + + private boolean isSortedNumericField(String field) { + MappedFieldType ft = mapperService.fieldType(field); + assert 
ft.isAggregatable(); + return ft.fielddataBuilder( + "", + () -> { throw new UnsupportedOperationException("SearchLookup not available"); } + ) instanceof IndexNumericFieldData.Builder; } @Override @@ -370,5 +389,4 @@ private static SegmentWriteState getSegmentWriteState(SegmentWriteState segmentW segmentWriteState.segmentSuffix ); } - } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java index e834706e2fa9d..b1e78d78d3ad2 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java @@ -24,7 +24,8 @@ import java.util.stream.Collectors; import static org.opensearch.index.compositeindex.datacube.DateDimension.CALENDAR_INTERVALS; -import static org.opensearch.index.compositeindex.datacube.KeywordDimension.KEYWORD; +import static org.opensearch.index.compositeindex.datacube.IpDimension.IP; +import static org.opensearch.index.compositeindex.datacube.OrdinalDimension.ORDINAL; /** * Dimension factory class mainly used to parse and create dimension from the mappings @@ -44,8 +45,10 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NumericDimension.NUMERIC: return new NumericDimension(name); - case KEYWORD: - return new KeywordDimension(name); + case ORDINAL: + return new OrdinalDimension(name); + case IP: + return new IpDimension(name); default: throw new IllegalArgumentException( String.format(Locale.ROOT, "unsupported field type associated with dimension [%s] as part of star tree field", name) @@ -69,8 +72,10 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NUMERIC: return new NumericDimension(name); - case KEYWORD: - return new KeywordDimension(name); + case ORDINAL: + return new OrdinalDimension(name); + case IP: + return new IpDimension(name); default: throw new IllegalArgumentException( String.format(Locale.ROOT, "unsupported field type associated with star tree dimension [%s]", name) diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java index d327f8ca1fa1e..f7911e72f36fc 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java @@ -30,8 +30,14 @@ public enum DimensionType { DATE, /** - * Represents a keyword dimension type. - * This is used for dimensions that contain keyword ordinals. + * Represents dimension types which uses ordinals. + * This is used for dimensions that contain sortedSet ordinals. */ - KEYWORD + ORDINAL, + + /** + * Represents an IP dimension type. + * This is used for dimensions that contain IP ordinals. 
+     */
+    IP
 }
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java
new file mode 100644
index 0000000000000..9c3682bd2e0ea
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/IpDimension.java
@@ -0,0 +1,82 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.compositeindex.datacube;
+
+import org.apache.lucene.index.DocValuesType;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.mapper.CompositeDataCubeFieldType;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.function.Consumer;
+
+/**
+ * Composite index IP dimension class
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public class IpDimension implements Dimension {
+    public static final String IP = "ip";
+    private final String field;
+
+    public IpDimension(String field) {
+        this.field = field;
+    }
+
+    @Override
+    public String getField() {
+        return field;
+    }
+
+    @Override
+    public int getNumSubDimensions() {
+        return 1;
+    }
+
+    @Override
+    public void setDimensionValues(Long value, Consumer<Long> dimSetter) {
+        // This will set the IP dimension value's ordinal
+        dimSetter.accept(value);
+    }
+
+    @Override
+    public List<String> getSubDimensionNames() {
+        return List.of(field);
+    }
+
+    @Override
+    public DocValuesType getDocValuesType() {
+        return DocValuesType.SORTED_SET;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(CompositeDataCubeFieldType.NAME, field);
+        builder.field(CompositeDataCubeFieldType.TYPE, IP);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        IpDimension dimension = (IpDimension) o;
+        return Objects.equals(field, dimension.getField());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(field);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java
similarity index 87%
rename from server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java
rename to server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java
index 58e248fd548d6..9cb4cd78bdaac 100644
--- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/KeywordDimension.java
+++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/OrdinalDimension.java
@@ -24,11 +24,11 @@
  * @opensearch.experimental
  */
 @ExperimentalApi
-public class KeywordDimension implements Dimension {
-    public static final String KEYWORD = "keyword";
+public class OrdinalDimension implements Dimension {
+    public static final String ORDINAL = "ordinal";
     private final String field;
 
-    public KeywordDimension(String field) {
+    public OrdinalDimension(String field) {
         this.field = field;
     }
 
@@ -62,7 +62,7 @@ public DocValuesType getDocValuesType() {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
IOException { builder.startObject(); builder.field(CompositeDataCubeFieldType.NAME, field); - builder.field(CompositeDataCubeFieldType.TYPE, KEYWORD); + builder.field(CompositeDataCubeFieldType.TYPE, ORDINAL); builder.endObject(); return builder; } @@ -71,7 +71,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - KeywordDimension dimension = (KeywordDimension) o; + OrdinalDimension dimension = (OrdinalDimension) o; return Objects.equals(field, dimension.getField()); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java index e538be5d5bece..e46cf6f56b36e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java @@ -152,7 +152,7 @@ private static MetricStat validateStarTreeMetricSupport( MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat(); field = ((MetricAggregatorFactory) aggregatorFactory).getField(); - if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { + if (field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { return metricStat; } } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java index 827792cdb1091..48da9b30ac1b0 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocValueFetcher.java @@ -43,6 +43,7 @@ import java.util.List; import static java.util.Collections.emptyList; +import static org.opensearch.index.mapper.FlatObjectFieldMapper.DOC_VALUE_NO_MATCH; /** * Value fetcher that loads from doc values. 
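Between the two DocValueFetcher hunks: the new sentinel lets a doc-value fetch on a single flat_object path drop values belonging to the field's other paths. An illustrative request mirroring the testDocValueFields change at the top of this patch (index name "test" assumed):

```java
// Values stored under other paths of the same flat_object format to
// DOC_VALUE_NO_MATCH and are skipped by fetchValues below, so only
// "flat_object_field1.foo" values come back.
SearchResponse searchResponse = client().prepareSearch("test")
    .setQuery(matchAllQuery())
    .addDocValueField("flat_object_field1.foo", null)
    .get();
```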
@@ -70,7 +71,10 @@ public List fetchValues(SourceLookup lookup) throws IOException { } List result = new ArrayList(leaf.docValueCount()); for (int i = 0, count = leaf.docValueCount(); i < count; ++i) { - result.add(leaf.nextValue()); + Object value = leaf.nextValue(); + if (value != DOC_VALUE_NO_MATCH) { + result.add(value); + } } return result; } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index 50ff816695156..134baa70f80c2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -661,12 +661,22 @@ private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapp throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; + String path = context.path().pathAsText(arrayFieldName); + boolean isNested = path.contains(".") || context.mapperService().isCompositeIndexFieldNestedField(path); // block array values for composite index fields - if (context.indexSettings().isCompositeIndex() && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) { + // Assume original index has 2 fields - status , nested.nested1.status + // case 1 : if status is part of composite index and nested.nested1.status is not part of composite index, + // then nested.nested1.status/nested.nested1/nested array should not be blocked + // case 2 : if nested.nested1.status is part of composite index and status is not part of composite index, + // then arrays in nested/nested.nested1 and nested.nested1.status fields should be blocked + // but arrays in status should not be blocked + if (context.indexSettings().isCompositeIndex() + && ((isNested == false && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) + || (isNested && context.mapperService().isCompositeIndexFieldNestedField(path)))) { throw new MapperParsingException( String.format( Locale.ROOT, - "object mapping for [%s] with array for [%s] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [%s] with array for [%s] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", mapper.name(), arrayFieldName ) diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 0ccdb40f9d33a..4fe821ff74d34 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; @@ -28,6 +27,7 @@ import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.JsonToStringXContentParser; import org.opensearch.core.common.ParsingException; +import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; @@ -36,11 +36,13 @@ import 
org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.lookup.SearchLookup; import java.io.IOException; import java.io.UncheckedIOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -63,6 +65,7 @@ public final class FlatObjectFieldMapper extends DynamicKeyFieldMapper { public static final String CONTENT_TYPE = "flat_object"; + public static final Object DOC_VALUE_NO_MATCH = new Object(); /** * In flat_object field mapper, field type is similar to keyword field type @@ -272,7 +275,7 @@ NamedAnalyzer normalizer() { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { failIfNoDocValues(); - return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.BYTES); + return new SortedSetOrdinalsIndexFieldData.Builder(valueFieldType().name(), CoreValuesSourceType.BYTES); } @Override @@ -304,6 +307,30 @@ protected String parseSourceValue(Object value) { }; } + @Override + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); + } + if (timeZone != null) { + throw new IllegalArgumentException( + "Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones" + ); + } + if (mappedFieldTypeName != null) { + return new FlatObjectDocValueFormat(mappedFieldTypeName + DOT_SYMBOL + name() + EQUAL_SYMBOL); + } else { + throw new IllegalArgumentException( + "Field [" + name() + "] of type [" + typeName() + "] does not support doc_value in root field" + ); + } + } + + @Override + public boolean isAggregatable() { + return false; + } + @Override public Object valueForDisplay(Object value) { if (value == null) { @@ -336,23 +363,17 @@ private KeywordFieldType valueFieldType() { return (mappedFieldTypeName == null) ? valueFieldType : valueAndPathFieldType; } + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return valueFieldType().termQueryCaseInsensitive(rewriteValue(inputToString(value)), context); + } + /** * redirect queries with rewrite value to rewriteSearchValue and directSubFieldName */ @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - - String searchValueString = inputToString(value); - String directSubFieldName = directSubfield(); - String rewriteSearchValue = rewriteValue(searchValueString); - - failIfNotIndexed(); - Query query; - query = new TermQuery(new Term(directSubFieldName, indexedValueForSearch(rewriteSearchValue))); - if (boost() != 1f) { - query = new BoostQuery(query, boost()); - } - return query; + return valueFieldType().termQuery(rewriteValue(inputToString(value)), context); } @Override @@ -530,6 +551,39 @@ public Query wildcardQuery( return valueFieldType().wildcardQuery(rewriteValue(value), method, caseInsensitve, context); } + /** + * A doc_value formatter for flat_object field. 
+ */ + public class FlatObjectDocValueFormat implements DocValueFormat { + private static final String NAME = "flat_object"; + private final String prefix; + + public FlatObjectDocValueFormat(String prefix) { + this.prefix = prefix; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) {} + + @Override + public Object format(BytesRef value) { + String parsedValue = inputToString(value); + if (parsedValue.startsWith(prefix) == false) { + return DOC_VALUE_NO_MATCH; + } + return parsedValue.substring(prefix.length()); + } + + @Override + public BytesRef parseBytesRef(String value) { + return new BytesRef((String) valueFieldType.rewriteForDocValue(rewriteValue(value))); + } + } } private final ValueFieldMapper valueFieldMapper; diff --git a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java index e23a48f94f450..1283aa302c111 100644 --- a/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/IpFieldMapper.java @@ -52,6 +52,7 @@ import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.network.NetworkAddress; +import org.opensearch.index.compositeindex.datacube.DimensionType; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -68,6 +69,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.BiFunction; import java.util.function.Supplier; @@ -161,6 +163,11 @@ public IpFieldMapper build(BuilderContext context) { ); } + @Override + public Optional getSupportedDataCubeDimensionType() { + return Optional.of(DimensionType.IP); + } + } public static final TypeParser PARSER = new TypeParser((n, c) -> { diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index df14a5811f6a0..4436e74c821c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -39,6 +39,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; @@ -259,7 +260,7 @@ public KeywordFieldMapper build(BuilderContext context) { @Override public Optional getSupportedDataCubeDimensionType() { - return Optional.of(DimensionType.KEYWORD); + return Optional.of(DimensionType.ORDINAL); } } @@ -398,6 +399,46 @@ protected Object rewriteForDocValue(Object value) { return value; } + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + if (isSearchable()) { + return super.termQueryCaseInsensitive(value, context); + } else { + BytesRef bytesRef = indexedValueForSearch(rewriteForDocValue(value)); + Term term = new Term(name(), bytesRef); + Query query = AutomatonQueries.createAutomatonQuery( + term, + AutomatonQueries.toCaseInsensitiveString(bytesRef.utf8ToString(), 
Operations.DEFAULT_DETERMINIZE_WORK_LIMIT), + MultiTermQuery.DOC_VALUES_REWRITE + ); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + if (isSearchable()) { + return super.termQuery(value, context); + } else { + Query query = SortedSetDocValuesField.newSlowRangeQuery( + name(), + indexedValueForSearch(rewriteForDocValue(value)), + indexedValueForSearch(rewriteForDocValue(value)), + true, + true + ); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + } + @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexedAndNoDocValues(); diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 84b0b1d69432d..5a7c6a0102052 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -228,6 +228,7 @@ public enum MergeReason { private volatile Set compositeMappedFieldTypes; private volatile Set fieldsPartOfCompositeMappings; + private volatile Set nestedFieldsPartOfCompositeMappings; public MapperService( IndexSettings indexSettings, @@ -554,10 +555,29 @@ private synchronized Map internalMerge(DocumentMapper ma private void buildCompositeFieldLookup() { Set fieldsPartOfCompositeMappings = new HashSet<>(); + Set nestedFieldsPartOfCompositeMappings = new HashSet<>(); + for (CompositeMappedFieldType fieldType : compositeMappedFieldTypes) { fieldsPartOfCompositeMappings.addAll(fieldType.fields()); + + for (String field : fieldType.fields()) { + String[] parts = field.split("\\."); + if (parts.length > 1) { + StringBuilder path = new StringBuilder(); + for (int i = 0; i < parts.length; i++) { + if (i == 0) { + path.append(parts[i]); + } else { + path.append(".").append(parts[i]); + } + nestedFieldsPartOfCompositeMappings.add(path.toString()); + } + } + } } + this.fieldsPartOfCompositeMappings = fieldsPartOfCompositeMappings; + this.nestedFieldsPartOfCompositeMappings = nestedFieldsPartOfCompositeMappings; } private boolean assertSerialization(DocumentMapper mapper) { @@ -690,6 +710,11 @@ public boolean isFieldPartOfCompositeIndex(String field) { return fieldsPartOfCompositeMappings.contains(field); } + public boolean isCompositeIndexFieldNestedField(String field) { + return nestedFieldsPartOfCompositeMappings.contains(field); + + } + public ObjectMapper getObjectMapper(String name) { return this.mapper == null ? 
null : this.mapper.objectMappers().get(name); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java index fb97f8c309a70..757de65248d33 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -290,6 +291,16 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context); } + @Override + public Query termQuery(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQuery(value, context)); + } + + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQueryCaseInsensitive(value, context)); + } + private List> getTermsFromTokenStream(TokenStream stream) throws IOException { final List> termArray = new ArrayList<>(); TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index 40f05a8b76755..7b361e12330a3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -23,6 +23,7 @@ import org.opensearch.search.lookup.SearchLookup; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -431,8 +432,46 @@ private static boolean isBuilderAllowedForMetric(Mapper.Builder builder) { return builder.isDataCubeMetricSupported(); } - private Optional findMapperBuilderByName(String field, List mappersBuilders) { - return mappersBuilders.stream().filter(builder -> builder.name().equals(field)).findFirst(); + private Optional findMapperBuilderByName(String name, List mappersBuilders) { + String[] parts = name.split("\\."); + + // Start with the top-level builders + Optional currentBuilder = mappersBuilders.stream() + .filter(builder -> builder.name().equals(parts[0])) + .findFirst(); + + // If we can't find the first part, or if there's only one part, return the result + if (currentBuilder.isEmpty() || parts.length == 1) { + return currentBuilder; + } + + // Navigate through the nested structure + try { + Mapper.Builder builder = currentBuilder.get(); + for (int i = 1; i < parts.length; i++) { + List childBuilders = getChildBuilders(builder); + int finalI = i; + builder = childBuilders.stream() + .filter(b -> b.name().equals(parts[finalI])) + .findFirst() + .orElseThrow( + () -> new IllegalArgumentException( + String.format(Locale.ROOT, "Could not find nested field [%s] in path [%s]", parts[finalI], name) + ) + ); + } + return Optional.of(builder); + } catch (Exception e) { + return Optional.empty(); + } + } + + // Helper method to get child builders from a parent builder + private List getChildBuilders(Mapper.Builder builder) { + if (builder instanceof 
ObjectMapper.Builder) { + return ((ObjectMapper.Builder) builder).mappersBuilders; + } + return Collections.emptyList(); } public Builder(String name, ObjectMapper.Builder objBuilder) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index eb3999718ca5b..f5de4dfb5a933 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1624,6 +1624,22 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { return luceneVersion == null ? indexSettings.getIndexVersionCreated().luceneVersion : luceneVersion; } + /** + * Fetches the last remote uploaded segment metadata file + * @return {@link RemoteSegmentMetadata} + * @throws IOException + */ + public RemoteSegmentMetadata fetchLastRemoteUploadedSegmentMetadata() throws IOException { + if (!indexSettings.isAssignedOnRemoteNode()) { + throw new IllegalStateException("Index is not assigned on Remote Node"); + } + RemoteSegmentMetadata lastUploadedMetadata = getRemoteDirectory().readLatestMetadataFile(); + if (lastUploadedMetadata == null) { + throw new FileNotFoundException("No metadata file found in remote store"); + } + return lastUploadedMetadata; + } + /** * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this * commit won't be freed until the commit / snapshot is closed. diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c78ee6711dcda..704a23890b07a 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1216,6 +1216,9 @@ protected Node( SearchExecutionStatsCollector.makeWrapper(responseCollectorService) ); final HttpServerTransport httpServerTransport = newHttpTransport(networkModule); + + pluginComponents.addAll(newAuxTransports(networkModule)); + final IndexingPressureService indexingPressureService = new IndexingPressureService(settings, clusterService); // Going forward, IndexingPressureService will have required constructs for exposing listeners/interfaces for plugin // development. Then we can deprecate Getter and Setter for IndexingPressureService in ClusterService (#478). 
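The `pluginComponents.addAll(newAuxTransports(networkModule))` call above hands auxiliary transports to Node's normal component lifecycle; the AuxTransport API itself is defined in the NetworkPlugin changes further down in this patch. A minimal sketch of a plugin supplying one, assuming the supplier map is keyed by the transport type used in the `aux.transport.{key}.ports` affix setting; the plugin and transport names here are hypothetical:

```java
import java.util.Map;
import java.util.function.Supplier;

import org.opensearch.common.network.NetworkService;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.indices.breaker.CircuitBreakerService;
import org.opensearch.plugins.NetworkPlugin;
import org.opensearch.plugins.Plugin;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.threadpool.ThreadPool;

// Hedged sketch (hypothetical plugin, not part of this PR): the shape of what
// Node#newAuxTransports collects. Because the transport ends up in
// pluginComponents, Node starts and stops it like any other lifecycle
// component rather than leaving lifecycle management to the plugin.
public class EchoAuxTransportPlugin extends Plugin implements NetworkPlugin {

    static final class EchoAuxTransport extends NetworkPlugin.AuxTransport {
        @Override
        protected void doStart() {
            // bind a server on the range from aux.transport.echo.ports
            // (AUX_PORT_DEFAULTS of 9400-9500 when the setting is absent)
        }

        @Override
        protected void doStop() {
            // stop accepting connections and close open channels
        }

        @Override
        protected void doClose() {
            // release any remaining resources
        }
    }

    @Override
    public Map<String, Supplier<NetworkPlugin.AuxTransport>> getAuxTransports(
        Settings settings,
        ThreadPool threadPool,
        CircuitBreakerService circuitBreakerService,
        NetworkService networkService,
        ClusterSettings clusterSettings,
        Tracer tracer
    ) {
        return Map.of("echo", EchoAuxTransport::new);
    }
}
```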
@@ -2113,6 +2116,10 @@ protected HttpServerTransport newHttpTransport(NetworkModule networkModule) { return networkModule.getHttpServerTransportSupplier().get(); } + protected List newAuxTransports(NetworkModule networkModule) { + return networkModule.getAuxServerTransportList(); + } + private static class LocalNodeFactory implements Function { private final SetOnce localNode = new SetOnce<>(); private final String persistentNodeId; diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 138ef6f71280d..516aa94534f94 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -31,9 +31,13 @@ package org.opensearch.plugins; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.PortsRange; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; @@ -49,8 +53,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.function.Supplier; +import static java.util.Collections.emptyList; +import static org.opensearch.common.settings.Setting.affixKeySetting; + /** * Plugin for extending network and transport related classes * @@ -58,6 +66,49 @@ */ public interface NetworkPlugin { + /** + * Auxiliary transports are lifecycle components with an associated port range. + * These pluggable client/server transport implementations have their lifecycle managed by Node. + * + * Auxiliary transports are additionally defined by a port range on which they bind. Opening permissions on these + * ports is awkward as {@link org.opensearch.bootstrap.Security} is configured previous to Node initialization during + * bootstrap. To allow pluggable AuxTransports access to configurable port ranges we require the port range be provided + * through an {@link org.opensearch.common.settings.Setting.AffixSetting} of the form 'AUX_SETTINGS_PREFIX.{aux-transport-key}.ports'. + */ + abstract class AuxTransport extends AbstractLifecycleComponent { + public static final String AUX_SETTINGS_PREFIX = "aux.transport."; + public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types"; + public static final String AUX_PORT_DEFAULTS = "9400-9500"; + public static final Setting.AffixSetting AUX_TRANSPORT_PORTS = affixKeySetting( + AUX_SETTINGS_PREFIX, + "ports", + key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope) + ); + + public static final Setting> AUX_TRANSPORT_TYPES_SETTING = Setting.listSetting( + AUX_TRANSPORT_TYPES_KEY, + emptyList(), + Function.identity(), + Setting.Property.NodeScope + ); + } + + /** + * Auxiliary transports are optional and run in parallel to the default HttpServerTransport. + * Returns a map of AuxTransport suppliers. 
+ */ + @ExperimentalApi + default Map> getAuxTransports( + Settings settings, + ThreadPool threadPool, + CircuitBreakerService circuitBreakerService, + NetworkService networkService, + ClusterSettings clusterSettings, + Tracer tracer + ) { + return Collections.emptyMap(); + } + /** * Returns a list of {@link TransportInterceptor} instances that are used to intercept incoming and outgoing * transport (inter-node) requests. This must not return null diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index b6030f4ded5e5..4ff699e8017ba 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -86,6 +86,8 @@ public class PluginInfo implements Writeable, ToXContentObject { private final String classname; private final String customFolderName; private final List extendedPlugins; + // Optional extended plugins are a subset of extendedPlugins that only contains the optional extended plugins + private final List optionalExtendedPlugins; private final boolean hasNativeController; /** @@ -149,7 +151,11 @@ public PluginInfo( this.javaVersion = javaVersion; this.classname = classname; this.customFolderName = customFolderName; - this.extendedPlugins = Collections.unmodifiableList(extendedPlugins); + this.extendedPlugins = extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList()); + this.optionalExtendedPlugins = extendedPlugins.stream() + .filter(PluginInfo::isOptionalExtension) + .map(s -> s.split(";")[0]) + .collect(Collectors.toUnmodifiableList()); this.hasNativeController = hasNativeController; } @@ -209,6 +215,16 @@ public PluginInfo(final StreamInput in) throws IOException { this.customFolderName = in.readString(); this.extendedPlugins = in.readStringList(); this.hasNativeController = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { + this.optionalExtendedPlugins = in.readStringList(); + } else { + this.optionalExtendedPlugins = new ArrayList<>(); + } + } + + static boolean isOptionalExtension(String extendedPlugin) { + String[] dependency = extendedPlugin.split(";"); + return dependency.length > 1 && "optional=true".equals(dependency[1]); } @Override @@ -234,6 +250,9 @@ This works for currently supported range notations (=,~) } out.writeStringCollection(extendedPlugins); out.writeBoolean(hasNativeController); + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { + out.writeStringCollection(optionalExtendedPlugins); + } } /** @@ -417,8 +436,17 @@ public String getFolderName() { * * @return the names of the plugins extended */ + public boolean isExtendedPluginOptional(String extendedPlugin) { + return optionalExtendedPlugins.contains(extendedPlugin); + } + + /** + * Other plugins this plugin extends through SPI + * + * @return the names of the plugins extended + */ public List getExtendedPlugins() { - return extendedPlugins; + return extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList()); } /** @@ -493,6 +521,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("custom_foldername", customFolderName); builder.field("extended_plugins", extendedPlugins); builder.field("has_native_controller", hasNativeController); + builder.field("optional_extended_plugins", optionalExtendedPlugins); } builder.endObject(); diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java 
b/server/src/main/java/org/opensearch/plugins/PluginsService.java index f08c9c738f1b4..9bc1f1334122e 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -524,7 +524,13 @@ private static void addSortedBundle( for (String dependency : bundle.plugin.getExtendedPlugins()) { Bundle depBundle = bundles.get(dependency); if (depBundle == null) { - throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + if (bundle.plugin.isExtendedPluginOptional(dependency)) { + logger.warn("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + logger.warn("Some features of this plugin may not function without the dependencies being installed.\n"); + continue; + } else { + throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + } } addSortedBundle(depBundle, bundles, sortedBundles, dependencyStack); assert sortedBundles.contains(depBundle); @@ -653,6 +659,9 @@ static void checkBundleJarHell(Set classpath, Bundle bundle, Map urls = new HashSet<>(); for (String extendedPlugin : exts) { Set pluginUrls = transitiveUrls.get(extendedPlugin); + if (pluginUrls == null && bundle.plugin.isExtendedPluginOptional(extendedPlugin)) { + continue; + } assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin; Set intersection = new HashSet<>(urls); @@ -704,6 +713,10 @@ private Plugin loadBundle(Bundle bundle, Map loaded) { List extendedLoaders = new ArrayList<>(); for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) { Plugin extendedPlugin = loaded.get(extendedPluginName); + if (extendedPlugin == null && bundle.plugin.isExtendedPluginOptional(extendedPluginName)) { + // extended plugin is optional and is not installed + continue; + } assert extendedPlugin != null; if (ExtensiblePlugin.class.isInstance(extendedPlugin) == false) { throw new IllegalStateException("Plugin [" + name + "] cannot extend non-extensible plugin [" + extendedPluginName + "]"); diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 138bc13140aea..259c4a6e09ce7 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -416,6 +416,45 @@ default void snapshotRemoteStoreIndexShard( throw new UnsupportedOperationException(); } + /** + * Adds a reference of remote store data for a index commit point. + *
<p>
+ * The index commit point can be obtained by using {@link org.opensearch.index.engine.Engine#acquireLastIndexCommit} method. + * Or for closed index can be obtained by reading last remote uploaded metadata by using {@link org.opensearch.index.shard.IndexShard#fetchLastRemoteUploadedSegmentMetadata()} method. + * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. + *
<p>
+ * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check + * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. + * @param store store to be snapshotted + * @param snapshotId snapshot id + * @param indexId id for the index being snapshotted + * @param snapshotIndexCommit commit point + * @param shardStateIdentifier a unique identifier of the state of the shard that is stored with the shard's snapshot and used + * to detect if the shard has changed between snapshots. If {@code null} is passed as the identifier + * snapshotting will be done by inspecting the physical files referenced by {@code snapshotIndexCommit} + * @param snapshotStatus snapshot status + * @param primaryTerm current Primary Term + * @param commitGeneration current commit generation + * @param startTime start time of the snapshot commit, this will be used as the start time for snapshot. + * @param indexFilesToFileLengthMap map of index files to file length + * @param listener listener invoked on completion + */ + default void snapshotRemoteStoreIndexShard( + Store store, + SnapshotId snapshotId, + IndexId indexId, + @Nullable IndexCommit snapshotIndexCommit, + @Nullable String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + long primaryTerm, + long commitGeneration, + long startTime, + @Nullable Map indexFilesToFileLengthMap, + ActionListener listener + ) { + throw new UnsupportedOperationException(); + } + /** * Restores snapshot of the shard. *
<p>
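The closed-index branch of the new `snapshotRemoteStoreIndexShard` overload is exercised by the SnapshotShardsService changes later in this patch. A minimal sketch of a caller, assuming the generation-typed `ActionListener<String>` that Repository's other shard-snapshot methods use; the wrapper class is hypothetical, while the calls mirror the diff:

```java
import java.io.IOException;
import java.util.Map;
import java.util.stream.Collectors;

import org.opensearch.core.action.ActionListener;
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
import org.opensearch.repositories.IndexId;
import org.opensearch.repositories.Repository;
import org.opensearch.snapshots.SnapshotId;

// Hedged sketch: snapshotting a closed remote-store index. There is no live
// IndexCommit to acquire, so term, generation, and per-file lengths all come
// from the last uploaded remote segment metadata, and the commit is passed as null.
final class ClosedIndexShardSnapshotSketch {
    static void snapshotClosedShard(
        Repository repository,
        IndexShard indexShard,
        SnapshotId snapshotId,
        IndexId indexId,
        IndexShardSnapshotStatus snapshotStatus,
        long startTime,
        ActionListener<String> listener
    ) throws IOException {
        RemoteSegmentMetadata metadata = indexShard.fetchLastRemoteUploadedSegmentMetadata();
        Map<String, Long> indexFilesToFileLengthMap = metadata.getMetadata()
            .entrySet()
            .stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getLength()));
        repository.snapshotRemoteStoreIndexShard(
            indexShard.store(),
            snapshotId,
            indexId,
            null, // snapshotIndexCommit: absent for a closed index
            null, // shardStateIdentifier
            snapshotStatus,
            metadata.getPrimaryTerm(),
            metadata.getGeneration(),
            startTime,
            indexFilesToFileLengthMap,
            listener
        );
    }
}
```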
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index a82c05dab0b44..93a7dc0cb06af 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -50,7 +50,6 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.RepositoryCleanupInProgress; @@ -70,7 +69,6 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -428,16 +426,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo Setting.Property.Final ); - /** - * Controls the fixed prefix for the snapshot shard blob path. cluster.snapshot.async-deletion.enable - */ - public static final Setting SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING = Setting.boolSetting( - "cluster.snapshot.async-deletion.enable", - true, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - protected volatile boolean supportURLRepo; private volatile int maxShardBlobDeleteBatch; @@ -531,8 +519,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo private final String snapshotShardPathPrefix; - private volatile boolean enableAsyncDeletion; - protected final long repositoryDataCacheThreshold; /** @@ -587,8 +573,6 @@ protected BlobStoreRepository( this.recoverySettings = recoverySettings; this.remoteStoreSettings = new RemoteStoreSettings(clusterService.getSettings(), clusterService.getClusterSettings()); this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings()); - this.enableAsyncDeletion = SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings().addSettingsUpdateConsumer(SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, this::setEnableAsyncDeletion); this.repositoryDataCacheThreshold = SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD.get(clusterService.getSettings()).getBytes(); } @@ -2219,15 +2203,7 @@ private void executeOneStaleIndexDelete( private DeleteResult deleteContainer(BlobContainer container) throws IOException { long startTime = System.nanoTime(); - DeleteResult deleteResult; - if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { - // Use deleteAsync and wait for the result - PlainActionFuture future = new PlainActionFuture<>(); - ((AsyncMultiStreamBlobContainer) container).deleteAsync(future); - deleteResult = future.actionGet(); - } else { - deleteResult = container.delete(); - } + DeleteResult deleteResult = container.delete(); logger.debug(new ParameterizedMessage("[{}] Deleted {} in {}ns", metadata.name(), container.path(), startTime - System.nanoTime())); return deleteResult; } @@ -2862,13 +2838,7 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna private void deleteFromContainer(BlobContainer container, List blobs) throws 
IOException { logger.trace(() -> new ParameterizedMessage("[{}] Deleting {} from [{}]", metadata.name(), blobs, container.path())); long startTime = System.nanoTime(); - if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { - PlainActionFuture future = new PlainActionFuture<>(); - ((AsyncMultiStreamBlobContainer) container).deleteBlobsAsyncIgnoringIfNotExists(blobs, future); - future.actionGet(); - } else { - container.deleteBlobsIgnoringIfNotExists(blobs); - } + container.deleteBlobsIgnoringIfNotExists(blobs); logger.debug( () -> new ParameterizedMessage( "[{}] Deletion {} from [{}] took {}ns", @@ -2994,7 +2964,12 @@ public String startVerification() { */ private BlobContainer testContainer(String seed) { BlobPath testBlobPath; - if (prefixModeVerification == true) { + + if (prefixModeVerification == true + && (clusterService.isStateInitialised() == false + || clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.V_2_17_0))) { + // During the remote store node bootstrap, the cluster state is not initialised + // Otherwise, the cluster state is initialised and available with the min node version information PathInput pathInput = PathInput.builder().basePath(basePath()).indexUUID(seed).build(); testBlobPath = PathType.HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); } else { @@ -3739,6 +3714,33 @@ private void writeAtomic(BlobContainer container, final String blobName, final B } } + @Override + public void snapshotRemoteStoreIndexShard( + Store store, + SnapshotId snapshotId, + IndexId indexId, + IndexCommit snapshotIndexCommit, + @Nullable String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + long primaryTerm, + long startTime, + ActionListener listener + ) { + snapshotRemoteStoreIndexShard( + store, + snapshotId, + indexId, + snapshotIndexCommit, + shardStateIdentifier, + snapshotStatus, + primaryTerm, + snapshotIndexCommit.getGeneration(), + startTime, + null, + listener + ); + } + @Override public void snapshotRemoteStoreIndexShard( Store store, @@ -3748,13 +3750,16 @@ public void snapshotRemoteStoreIndexShard( String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, long primaryTerm, + long commitGeneration, long startTime, + Map indexFilesToFileLengthMap, ActionListener listener ) { if (isReadOnly()) { listener.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository")); return; } + final ShardId shardId = store.shardId(); try { final String generation = snapshotStatus.generation(); @@ -3762,13 +3767,21 @@ public void snapshotRemoteStoreIndexShard( final BlobContainer shardContainer = shardContainer(indexId, shardId); long indexTotalFileSize = 0; - // local store is being used here to fetch the files metadata instead of remote store as currently - // remote store is mirroring the local store. - List fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames()); - Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit); - for (String fileName : fileNames) { - indexTotalFileSize += commitSnapshotMetadata.get(fileName).length(); + List fileNames; + + if (snapshotIndexCommit != null) { + // local store is being used here to fetch the files metadata instead of remote store as currently + // remote store is mirroring the local store. 
+ fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames()); + Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit); + for (String fileName : fileNames) { + indexTotalFileSize += commitSnapshotMetadata.get(fileName).length(); + } + } else { + fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet()); + indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum(); } + int indexTotalNumberOfFiles = fileNames.size(); snapshotStatus.moveToStarted( @@ -3779,7 +3792,7 @@ public void snapshotRemoteStoreIndexShard( indexTotalFileSize ); - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(commitGeneration); // now create and write the commit point logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); @@ -3790,7 +3803,7 @@ public void snapshotRemoteStoreIndexShard( snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), primaryTerm, - snapshotIndexCommit.getGeneration(), + commitGeneration, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), indexTotalNumberOfFiles, @@ -4737,8 +4750,4 @@ public String toString() { return name; } } - - public void setEnableAsyncDeletion(boolean enableAsyncDeletion) { - this.enableAsyncDeletion = enableAsyncDeletion; - } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index d862b2c2784de..41344fd06cbbc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -104,6 +104,6 @@ public String getStatsSubtype() { } public String getField() { - return config.fieldContext().field(); + return config.fieldContext() != null ? 
config.fieldContext().field() : null; } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 8da36bbb8d4bd..1e2264593310d 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.opensearch.cluster.SnapshotsInProgress.ShardState; import org.opensearch.cluster.SnapshotsInProgress.State; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; @@ -63,6 +64,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.IndexShardSnapshotStatus.Stage; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -74,7 +76,6 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -371,7 +372,9 @@ private void snapshot( ActionListener listener ) { try { - final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShardOrNull(shardId.id()); + final boolean closedIndex = indexService.getMetadata().getState() == IndexMetadata.State.CLOSE; if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } @@ -398,24 +401,42 @@ private void snapshot( if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); long primaryTerm = indexShard.getOperationPrimaryTerm(); - // we flush first to make sure we get the latest writes snapshotted - wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); - long commitGeneration = snapshotIndexCommit.getGeneration(); + long commitGeneration = 0L; + Map indexFilesToFileLengthMap = null; + IndexCommit snapshotIndexCommit = null; + try { + if (closedIndex) { + RemoteSegmentMetadata lastRemoteUploadedIndexCommit = indexShard.fetchLastRemoteUploadedSegmentMetadata(); + indexFilesToFileLengthMap = lastRemoteUploadedIndexCommit.getMetadata() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getLength())); + primaryTerm = lastRemoteUploadedIndexCommit.getPrimaryTerm(); + commitGeneration = lastRemoteUploadedIndexCommit.getGeneration(); + } else { + wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + } indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); - } catch (NoSuchFileException e) { - wrappedSnapshot.close(); - logger.warn( - "Exception while acquiring lock on primaryTerm = 
{} and generation = {}", - primaryTerm, - commitGeneration - ); - indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); - wrappedSnapshot = indexShard.acquireLastIndexCommit(false); - snapshotIndexCommit = wrappedSnapshot.get(); - commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (IOException e) { + if (closedIndex) { + logger.warn("Exception while reading latest metadata file from remote store"); + listener.onFailure(e); + } else { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } } try { repository.snapshotRemoteStoreIndexShard( @@ -423,11 +444,13 @@ private void snapshot( snapshot.getSnapshotId(), indexId, snapshotIndexCommit, - getShardStateId(indexShard, snapshotIndexCommit), + null, snapshotStatus, primaryTerm, + commitGeneration, startTime, - ActionListener.runBefore(listener, wrappedSnapshot::close) + indexFilesToFileLengthMap, + closedIndex ? listener : ActionListener.runBefore(listener, wrappedSnapshot::close) ); } catch (IndexShardSnapshotFailedException e) { logger.error( diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 269a4c87dfb72..59d3b110aeca8 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -198,7 +198,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_PURGE, ThreadPoolType.SCALING); map.put(Names.REMOTE_REFRESH_RETRY, ThreadPoolType.SCALING); map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); - map.put(Names.REMOTE_STATE_READ, ThreadPoolType.SCALING); + map.put(Names.REMOTE_STATE_READ, ThreadPoolType.FIXED); map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); map.put(Names.REMOTE_STATE_CHECKSUM, ThreadPoolType.FIXED); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); @@ -306,7 +306,7 @@ public ThreadPool( ); builders.put( Names.REMOTE_STATE_READ, - new ScalingExecutorBuilder(Names.REMOTE_STATE_READ, 1, boundedBy(4 * allocatedProcessors, 4, 32), TimeValue.timeValueMinutes(5)) + new FixedExecutorBuilder(settings, Names.REMOTE_STATE_READ, boundedBy(4 * allocatedProcessors, 4, 32), 120000) ); builders.put( Names.INDEX_SEARCHER, diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index f56cd146ce953..f80a29872a78d 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -521,38 +521,8 @@ private BoundTransportAddress createBoundTransportAddress(ProfileSettings profil throw new BindTransportException("Failed to resolve publish address", e); } - final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new 
BoundTransportAddress(transportBoundAddresses, publishAddress); - } - - // package private for tests - static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = profileSettings.publishPort; - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final Set ports = new HashSet<>(); - for (InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next(); - } - } - - if (publishPort < 0) { + final int publishPort = Transport.resolvePublishPort(profileSettings.publishPort, boundAddresses, publishInetAddress); + if (publishPort == -1) { String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; throw new BindTransportException( "Failed to auto-resolve publish port" @@ -568,7 +538,9 @@ static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { + if (publishPort < 0) { + for (InetSocketAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + if (publishPort < 0) { + final Set ports = new HashSet<>(); + for (InetSocketAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next(); + } + } + + return publishPort; + } + + static int resolveTransportPublishPort(int publishPort, List boundAddresses, InetAddress publishInetAddress) { + return Transport.resolvePublishPort( + publishPort, + boundAddresses.stream().map(TransportAddress::address).collect(Collectors.toList()), + publishInetAddress + ); + } + /** * A unidirectional connection to a {@link DiscoveryNode} * diff --git a/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java new file mode 100644 index 0000000000000..ea455d607f058 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/indices/replication/TransportSegmentReplicationStatsActionTests.java @@ -0,0 +1,595 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.replication; + +import org.opensearch.Version; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlock; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.block.ClusterBlocks; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.support.DefaultShardOperationFailedException; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationPressureService; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.SegmentReplicationState; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationTimer; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportService; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TransportSegmentReplicationStatsActionTests extends OpenSearchTestCase { + @Mock + private ClusterService clusterService; + @Mock + private TransportService transportService; + @Mock + private IndicesService indicesService; + @Mock + private SegmentReplicationTargetService targetService; + @Mock + private ActionFilters actionFilters; + @Mock + private IndexNameExpressionResolver indexNameExpressionResolver; + @Mock + private SegmentReplicationPressureService pressureService; + @Mock + private IndexShard indexShard; + @Mock + private IndexService indexService; + + private TransportSegmentReplicationStatsAction action; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this); + super.setUp(); + action = new TransportSegmentReplicationStatsAction( + clusterService, + transportService, + indicesService, + targetService, + actionFilters, + indexNameExpressionResolver, + pressureService + ); + } + + public void testShardReturnsAllTheShardsForTheIndex() { + SegmentReplicationStatsRequest segmentReplicationStatsRequest = mock(SegmentReplicationStatsRequest.class); + String[] concreteIndices = new String[] { "test-index" }; + ClusterState clusterState = mock(ClusterState.class); + RoutingTable routingTables 
= mock(RoutingTable.class); + ShardsIterator shardsIterator = mock(ShardIterator.class); + + when(clusterState.routingTable()).thenReturn(routingTables); + when(routingTables.allShardsIncludingRelocationTargets(any())).thenReturn(shardsIterator); + assertEquals(shardsIterator, action.shards(clusterState, segmentReplicationStatsRequest, concreteIndices)); + } + + public void testShardOperationWithPrimaryShard() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.primary()).thenReturn(true); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled()); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNotNull(response); + verify(pressureService).getStatsForShard(any()); + } + + public void testShardOperationWithReplicaShard() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + request.activeOnly(false); + SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.primary()).thenReturn(false); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled()); + when(targetService.getSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNotNull(response); + assertNull(response.getPrimaryStats()); + assertNotNull(response.getReplicaStats()); + verify(targetService).getSegmentReplicationState(shardId); + } + + public void testShardOperationWithReplicaShardActiveOnly() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + request.activeOnly(true); + SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.primary()).thenReturn(false); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepEnabled()); + when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNotNull(response); + assertNull(response.getPrimaryStats()); + assertNotNull(response.getReplicaStats()); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testComputeBytesRemainingToReplicateWhenCompletedAndOngoingStateNotNull() { + ShardRouting shardRouting = 
mock(ShardRouting.class); + SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class); + SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + ReplicationTimer replicationTimerCompleted = mock(ReplicationTimer.class); + ReplicationTimer replicationTimerOngoing = mock(ReplicationTimer.class); + long time1 = 10; + long time2 = 15; + ReplicationLuceneIndex replicationLuceneIndex = new ReplicationLuceneIndex(); + replicationLuceneIndex.addFileDetail("name1", 10, false); + replicationLuceneIndex.addFileDetail("name2", 15, false); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(targetService.getlatestCompletedEventSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState); + when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState); + when(completedSegmentReplicationState.getTimer()).thenReturn(replicationTimerCompleted); + when(onGoingSegmentReplicationState.getTimer()).thenReturn(replicationTimerOngoing); + when(replicationTimerOngoing.time()).thenReturn(time1); + when(replicationTimerCompleted.time()).thenReturn(time2); + when(onGoingSegmentReplicationState.getIndex()).thenReturn(replicationLuceneIndex); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(25, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(10, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(15, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testCalculateBytesRemainingToReplicateWhenNoCompletedState() { + ShardRouting shardRouting = mock(ShardRouting.class); + SegmentReplicationState onGoingSegmentReplicationState = mock(SegmentReplicationState.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + ReplicationTimer replicationTimerOngoing = mock(ReplicationTimer.class); + long time1 = 10; + ReplicationLuceneIndex replicationLuceneIndex = new ReplicationLuceneIndex(); + replicationLuceneIndex.addFileDetail("name1", 10, false); + replicationLuceneIndex.addFileDetail("name2", 15, false); + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(targetService.getOngoingEventSegmentReplicationState(shardId)).thenReturn(onGoingSegmentReplicationState); + when(onGoingSegmentReplicationState.getTimer()).thenReturn(replicationTimerOngoing); + when(replicationTimerOngoing.time()).thenReturn(time1); + when(onGoingSegmentReplicationState.getIndex()).thenReturn(replicationLuceneIndex); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(25, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(10, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(0, 
segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testCalculateBytesRemainingToReplicateWhenNoOnGoingState() { + ShardRouting shardRouting = mock(ShardRouting.class); + SegmentReplicationState completedSegmentReplicationState = mock(SegmentReplicationState.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + ReplicationTimer replicationTimerCompleted = mock(ReplicationTimer.class); + long time2 = 15; + + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + when(targetService.getlatestCompletedEventSegmentReplicationState(shardId)).thenReturn(completedSegmentReplicationState); + when(completedSegmentReplicationState.getTimer()).thenReturn(replicationTimerCompleted); + when(replicationTimerCompleted.time()).thenReturn(time2); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(0, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(0, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(15, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testCalculateBytesRemainingToReplicateWhenNoCompletedAndOngoingState() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + AllocationId allocationId = AllocationId.newInitializing(); + when(shardRouting.shardId()).thenReturn(shardId); + when(shardRouting.allocationId()).thenReturn(allocationId); + + SegmentReplicationShardStats segmentReplicationShardStats = action.computeSegmentReplicationShardStats(shardRouting); + + assertNotNull(segmentReplicationShardStats); + assertEquals(0, segmentReplicationShardStats.getBytesBehindCount()); + assertEquals(0, segmentReplicationShardStats.getCurrentReplicationLagMillis()); + assertEquals(0, segmentReplicationShardStats.getLastCompletedReplicationTimeMillis()); + + verify(targetService).getlatestCompletedEventSegmentReplicationState(shardId); + verify(targetService).getOngoingEventSegmentReplicationState(shardId); + } + + public void testNewResponseWhenAllReplicasReturnResponseCombinesTheResults() { + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>(); + String[] shards = { "0", "1" }; + request.shards(shards); + + int totalShards = 6; + int successfulShards = 6; + int failedShard = 0; + String allocIdOne = "allocIdOne"; + String allocIdTwo = "allocIdTwo"; + String allocIdThree = "allocIdThree"; + String allocIdFour = "allocIdFour"; + String allocIdFive = "allocIdFive"; + String allocIdSix = "allocIdSix"; + + ShardId shardId0 = mock(ShardId.class); + ShardRouting primary0 = mock(ShardRouting.class); + ShardRouting replica0 = mock(ShardRouting.class); + ShardRouting searchReplica0 = mock(ShardRouting.class); + + ShardId shardId1 = mock(ShardId.class); + ShardRouting primary1 = mock(ShardRouting.class); + ShardRouting replica1 = mock(ShardRouting.class); + 
ShardRouting searchReplica1 = mock(ShardRouting.class); + + when(shardId0.getId()).thenReturn(0); + when(shardId0.getIndexName()).thenReturn("test-index-1"); + when(primary0.shardId()).thenReturn(shardId0); + when(replica0.shardId()).thenReturn(shardId0); + when(searchReplica0.shardId()).thenReturn(shardId0); + + when(shardId1.getId()).thenReturn(1); + when(shardId1.getIndexName()).thenReturn("test-index-1"); + when(primary1.shardId()).thenReturn(shardId1); + when(replica1.shardId()).thenReturn(shardId1); + when(searchReplica1.shardId()).thenReturn(shardId1); + + AllocationId allocationIdOne = mock(AllocationId.class); + AllocationId allocationIdTwo = mock(AllocationId.class); + AllocationId allocationIdThree = mock(AllocationId.class); + AllocationId allocationIdFour = mock(AllocationId.class); + AllocationId allocationIdFive = mock(AllocationId.class); + AllocationId allocationIdSix = mock(AllocationId.class); + + when(allocationIdOne.getId()).thenReturn(allocIdOne); + when(allocationIdTwo.getId()).thenReturn(allocIdTwo); + when(allocationIdThree.getId()).thenReturn(allocIdThree); + when(allocationIdFour.getId()).thenReturn(allocIdFour); + when(allocationIdFive.getId()).thenReturn(allocIdFive); + when(allocationIdSix.getId()).thenReturn(allocIdSix); + when(primary0.allocationId()).thenReturn(allocationIdOne); + when(replica0.allocationId()).thenReturn(allocationIdTwo); + when(searchReplica0.allocationId()).thenReturn(allocationIdThree); + when(primary1.allocationId()).thenReturn(allocationIdFour); + when(replica1.allocationId()).thenReturn(allocationIdFive); + when(searchReplica1.allocationId()).thenReturn(allocationIdSix); + + when(primary0.isSearchOnly()).thenReturn(false); + when(replica0.isSearchOnly()).thenReturn(false); + when(searchReplica0.isSearchOnly()).thenReturn(true); + when(primary1.isSearchOnly()).thenReturn(false); + when(replica1.isSearchOnly()).thenReturn(false); + when(searchReplica1.isSearchOnly()).thenReturn(true); + + Set<SegmentReplicationShardStats> segmentReplicationShardStats0 = new HashSet<>(); + SegmentReplicationShardStats segmentReplicationShardStatsOfReplica0 = new SegmentReplicationShardStats(allocIdTwo, 0, 0, 0, 0, 0); + segmentReplicationShardStats0.add(segmentReplicationShardStatsOfReplica0); + + Set<SegmentReplicationShardStats> segmentReplicationShardStats1 = new HashSet<>(); + SegmentReplicationShardStats segmentReplicationShardStatsOfReplica1 = new SegmentReplicationShardStats(allocIdFive, 0, 0, 0, 0, 0); + segmentReplicationShardStats1.add(segmentReplicationShardStatsOfReplica1); + + SegmentReplicationPerGroupStats segmentReplicationPerGroupStats0 = new SegmentReplicationPerGroupStats( + shardId0, + segmentReplicationShardStats0, + 0 + ); + + SegmentReplicationPerGroupStats segmentReplicationPerGroupStats1 = new SegmentReplicationPerGroupStats( + shardId1, + segmentReplicationShardStats1, + 0 + ); + + SegmentReplicationState segmentReplicationState0 = mock(SegmentReplicationState.class); + SegmentReplicationState searchReplicaSegmentReplicationState0 = mock(SegmentReplicationState.class); + SegmentReplicationState segmentReplicationState1 = mock(SegmentReplicationState.class); + SegmentReplicationState searchReplicaSegmentReplicationState1 = mock(SegmentReplicationState.class); + + when(segmentReplicationState0.getShardRouting()).thenReturn(replica0); + when(searchReplicaSegmentReplicationState0.getShardRouting()).thenReturn(searchReplica0); + when(segmentReplicationState1.getShardRouting()).thenReturn(replica1); + when(searchReplicaSegmentReplicationState1.getShardRouting()).thenReturn(searchReplica1); + + 
List<SegmentReplicationShardStatsResponse> responses = List.of( + new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats0), + new SegmentReplicationShardStatsResponse(segmentReplicationState0), + new SegmentReplicationShardStatsResponse(searchReplicaSegmentReplicationState0), + new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats1), + new SegmentReplicationShardStatsResponse(segmentReplicationState1), + new SegmentReplicationShardStatsResponse(searchReplicaSegmentReplicationState1) + ); + + SegmentReplicationStatsResponse response = action.newResponse( + request, + totalShards, + successfulShards, + failedShard, + responses, + shardFailures, + ClusterState.EMPTY_STATE + ); + + List<SegmentReplicationPerGroupStats> responseStats = response.getReplicationStats().get("test-index-1"); + SegmentReplicationPerGroupStats primStats0 = responseStats.get(0); + Set<SegmentReplicationShardStats> replicaStats0 = primStats0.getReplicaStats(); + assertEquals(2, replicaStats0.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats0) { + if (replicaStat.getAllocationId().equals(allocIdTwo)) { + assertEquals(segmentReplicationState0, replicaStat.getCurrentReplicationState()); + } + + if (replicaStat.getAllocationId().equals(allocIdThree)) { + assertEquals(searchReplicaSegmentReplicationState0, replicaStat.getCurrentReplicationState()); + } + } + + SegmentReplicationPerGroupStats primStats1 = responseStats.get(1); + Set<SegmentReplicationShardStats> replicaStats1 = primStats1.getReplicaStats(); + assertEquals(2, replicaStats1.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats1) { + if (replicaStat.getAllocationId().equals(allocIdFive)) { + assertEquals(segmentReplicationState1, replicaStat.getCurrentReplicationState()); + } + + if (replicaStat.getAllocationId().equals(allocIdSix)) { + assertEquals(searchReplicaSegmentReplicationState1, replicaStat.getCurrentReplicationState()); + } + } + } + + public void testNewResponseWhenShardsToFetchEmptyAndResponsesContainsNull() { + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>(); + String[] shards = {}; + request.shards(shards); + + int totalShards = 3; + int successfulShards = 3; + int failedShard = 0; + String allocIdOne = "allocIdOne"; + String allocIdTwo = "allocIdTwo"; + ShardId shardIdOne = mock(ShardId.class); + ShardId shardIdTwo = mock(ShardId.class); + ShardId shardIdThree = mock(ShardId.class); + ShardRouting shardRoutingOne = mock(ShardRouting.class); + ShardRouting shardRoutingTwo = mock(ShardRouting.class); + ShardRouting shardRoutingThree = mock(ShardRouting.class); + when(shardIdOne.getId()).thenReturn(1); + when(shardIdTwo.getId()).thenReturn(2); + when(shardIdThree.getId()).thenReturn(3); + when(shardRoutingOne.shardId()).thenReturn(shardIdOne); + when(shardRoutingTwo.shardId()).thenReturn(shardIdTwo); + when(shardRoutingThree.shardId()).thenReturn(shardIdThree); + AllocationId allocationId = mock(AllocationId.class); + when(allocationId.getId()).thenReturn(allocIdOne); + when(shardRoutingTwo.allocationId()).thenReturn(allocationId); + when(shardIdOne.getIndexName()).thenReturn("test-index"); + + Set<SegmentReplicationShardStats> segmentReplicationShardStats = new HashSet<>(); + SegmentReplicationShardStats segmentReplicationShardStatsOfReplica = new SegmentReplicationShardStats(allocIdOne, 0, 0, 0, 0, 0); + segmentReplicationShardStats.add(segmentReplicationShardStatsOfReplica); + SegmentReplicationPerGroupStats segmentReplicationPerGroupStats = new SegmentReplicationPerGroupStats( + shardIdOne, + segmentReplicationShardStats, + 0 + ); + + SegmentReplicationState 
segmentReplicationState = mock(SegmentReplicationState.class); + SegmentReplicationShardStats segmentReplicationShardStatsFromSearchReplica = mock(SegmentReplicationShardStats.class); + when(segmentReplicationShardStatsFromSearchReplica.getAllocationId()).thenReturn("alloc2"); + when(segmentReplicationState.getShardRouting()).thenReturn(shardRoutingTwo); + + List<SegmentReplicationShardStatsResponse> responses = new ArrayList<>(); + responses.add(null); + responses.add(new SegmentReplicationShardStatsResponse(segmentReplicationPerGroupStats)); + responses.add(new SegmentReplicationShardStatsResponse(segmentReplicationState)); + + SegmentReplicationStatsResponse response = action.newResponse( + request, + totalShards, + successfulShards, + failedShard, + responses, + shardFailures, + ClusterState.EMPTY_STATE + ); + + List<SegmentReplicationPerGroupStats> responseStats = response.getReplicationStats().get("test-index"); + SegmentReplicationPerGroupStats primStats = responseStats.get(0); + Set<SegmentReplicationShardStats> segRpShardStatsSet = primStats.getReplicaStats(); + + for (SegmentReplicationShardStats segRpShardStats : segRpShardStatsSet) { + if (segRpShardStats.getAllocationId().equals(allocIdOne)) { + assertEquals(segmentReplicationState, segRpShardStats.getCurrentReplicationState()); + } + + if (segRpShardStats.getAllocationId().equals(allocIdTwo)) { + assertEquals(segmentReplicationShardStatsFromSearchReplica, segRpShardStats); + } + } + } + + public void testShardOperationWithSegRepDisabled() { + ShardRouting shardRouting = mock(ShardRouting.class); + ShardId shardId = new ShardId(new Index("test-index", "test-uuid"), 0); + SegmentReplicationStatsRequest request = new SegmentReplicationStatsRequest(); + + when(shardRouting.shardId()).thenReturn(shardId); + when(indicesService.indexServiceSafe(shardId.getIndex())).thenReturn(indexService); + when(indexService.getShard(shardId.id())).thenReturn(indexShard); + when(indexShard.indexSettings()).thenReturn(createIndexSettingsWithSegRepDisabled()); + + SegmentReplicationShardStatsResponse response = action.shardOperation(request, shardRouting); + + assertNull(response); + } + + public void testGlobalBlockCheck() { + ClusterBlock writeClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_WRITE) + ); + + ClusterBlock readClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_READ) + ); + + ClusterBlocks.Builder builder = ClusterBlocks.builder(); + builder.addGlobalBlock(writeClusterBlock); + ClusterState metadataWriteBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNull(action.checkGlobalBlock(metadataWriteBlockedState, new SegmentReplicationStatsRequest())); + + builder = ClusterBlocks.builder(); + builder.addGlobalBlock(readClusterBlock); + ClusterState metadataReadBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNotNull(action.checkGlobalBlock(metadataReadBlockedState, new SegmentReplicationStatsRequest())); + } + + public void testIndexBlockCheck() { + ClusterBlock writeClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_WRITE) + ); + + ClusterBlock readClusterBlock = new ClusterBlock( + 1, + "uuid", + "", + true, + true, + true, + RestStatus.OK, + EnumSet.of(ClusterBlockLevel.METADATA_READ) + ); + + String indexName = "test"; + ClusterBlocks.Builder builder = ClusterBlocks.builder(); + 
builder.addIndexBlock(indexName, writeClusterBlock); + ClusterState metadataWriteBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNull(action.checkRequestBlock(metadataWriteBlockedState, new SegmentReplicationStatsRequest(), new String[] { indexName })); + + builder = ClusterBlocks.builder(); + builder.addIndexBlock(indexName, readClusterBlock); + ClusterState metadataReadBlockedState = ClusterState.builder(ClusterState.EMPTY_STATE).blocks(builder).build(); + assertNotNull(action.checkRequestBlock(metadataReadBlockedState, new SegmentReplicationStatsRequest(), new String[] { indexName })); + } + + private IndexSettings createIndexSettingsWithSegRepEnabled() { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + + return new IndexSettings(IndexMetadata.builder("test").settings(settings).build(), settings); + } + + private IndexSettings createIndexSettingsWithSegRepDisabled() { + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + return new IndexSettings(IndexMetadata.builder("test").settings(settings).build(), settings); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java index 6a03a1f79bcde..a7f18aabf8436 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsBalancerBaseTestCase.java @@ -194,7 +194,7 @@ public AllocationService createRemoteCapableAllocationService() { } public AllocationService createRemoteCapableAllocationService(String excludeNodes) { - Settings settings = Settings.builder().put("cluster.routing.allocation.exclude.node_id", excludeNodes).build(); + Settings settings = Settings.builder().put("cluster.routing.allocation.exclude._id", excludeNodes).build(); return new MockAllocationService( randomAllocationDeciders(settings, EMPTY_CLUSTER_SETTINGS, random()), new TestGatewayAllocator(), diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java index e1c0a7eff1f6e..e55a9de160114 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/RemoteShardsRebalanceShardsTests.java @@ -25,25 +25,51 @@ public class RemoteShardsRebalanceShardsTests extends RemoteShardsBalancerBaseTe * Post rebalance primaries should be balanced across all the nodes. 
*/ public void testShardAllocationAndRebalance() { - int localOnlyNodes = 20; - int remoteCapableNodes = 40; - int localIndices = 40; - int remoteIndices = 80; + final int localOnlyNodes = 20; + final int remoteCapableNodes = 40; + final int halfRemoteCapableNodes = remoteCapableNodes / 2; + final int localIndices = 40; + final int remoteIndices = 80; ClusterState clusterState = createInitialCluster(localOnlyNodes, remoteCapableNodes, localIndices, remoteIndices); - AllocationService service = this.createRemoteCapableAllocationService(); + final StringBuilder excludeNodes = new StringBuilder(); + for (int i = 0; i < halfRemoteCapableNodes; i++) { + excludeNodes.append(getNodeId(i, true)); + if (i != (remoteCapableNodes / 2 - 1)) { + excludeNodes.append(", "); + } + } + AllocationService service = this.createRemoteCapableAllocationService(excludeNodes.toString()); clusterState = allocateShardsAndBalance(clusterState, service); RoutingNodes routingNodes = clusterState.getRoutingNodes(); RoutingAllocation allocation = getRoutingAllocation(clusterState, routingNodes); - final Map<String, Integer> nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true); - final Map<String, Integer> nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false); + Map<String, Integer> nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true); + Map<String, Integer> nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false); int avgPrimariesPerNode = getTotalShardCountAcrossNodes(nodePrimariesCounter) / remoteCapableNodes; - // Primary and replica are balanced post first reroute + // Primary and replica are balanced after first allocating unassigned + for (RoutingNode node : routingNodes) { + if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getNodePool(node))) { + if (Integer.parseInt(node.nodeId().split("-")[4]) < halfRemoteCapableNodes) { + assertEquals(0, (int) nodePrimariesCounter.getOrDefault(node.nodeId(), 0)); + } else { + assertEquals(avgPrimariesPerNode * 2, (int) nodePrimariesCounter.get(node.nodeId())); + } + assertTrue(nodeReplicaCounter.getOrDefault(node.nodeId(), 0) >= 0); + } + } + + // Remove exclude constraint and rebalance + service = this.createRemoteCapableAllocationService(); + clusterState = allocateShardsAndBalance(clusterState, service); + routingNodes = clusterState.getRoutingNodes(); + allocation = getRoutingAllocation(clusterState, routingNodes); + nodePrimariesCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, true); + nodeReplicaCounter = getShardCounterPerNodeForRemoteCapablePool(clusterState, allocation, false); for (RoutingNode node : routingNodes) { if (RoutingPool.REMOTE_CAPABLE.equals(RoutingPool.getNodePool(node))) { - assertInRange(nodePrimariesCounter.get(node.nodeId()), avgPrimariesPerNode, remoteCapableNodes - 1); - assertTrue(nodeReplicaCounter.get(node.nodeId()) >= 0); + assertEquals(avgPrimariesPerNode, (int) nodePrimariesCounter.get(node.nodeId())); + assertTrue(nodeReplicaCounter.getOrDefault(node.nodeId(), 0) >= 0); } } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index be07aa0d05e9f..e3684178a18ea 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -2354,6 +2354,14 @@ 
public void testReadLatestClusterStateFromCache() throws IOException { .getState(clusterState.getClusterName().value(), expectedManifest); assertEquals(stateFromCache.getMetadata(), state.getMetadata()); + ClusterState stateFromCache2 = remoteClusterStateService.getClusterStateForManifest( + clusterState.getClusterName().value(), + expectedManifest, + "nodeA", + true + ); + assertEquals(stateFromCache2.getMetadata(), state.getMetadata()); + final ClusterMetadataManifest notExistMetadata = ClusterMetadataManifest.builder() .indices(List.of()) .clusterTerm(1L) diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index c34f13041cb11..cd6beffa6e195 100644 --- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -59,6 +59,7 @@ import org.opensearch.test.rest.FakeRestRequest; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transport; import org.junit.After; import org.junit.Before; @@ -70,8 +71,6 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.http.AbstractHttpServerTransport.resolvePublishPort; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class AbstractHttpServerTransportTests extends OpenSearchTestCase { @@ -101,47 +100,40 @@ public void testHttpPublishPort() throws Exception { int boundPort = randomIntBetween(9000, 9100); int otherBoundPort = randomIntBetween(9200, 9300); - int publishPort = resolvePublishPort( - Settings.builder().put(HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.getKey(), 9080).build(), - randomAddresses(), - getByName("127.0.0.2") - ); + int publishPort = Transport.resolveTransportPublishPort(9080, randomAddresses(), getByName("127.0.0.2")); assertThat("Publish port should be explicitly set to 9080", publishPort, equalTo(9080)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort)); - final BindHttpException e = expectThrows( - BindHttpException.class, - () -> resolvePublishPort( - Settings.EMPTY, - asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), - getByName("127.0.0.3") - ) + publishPort = Transport.resolveTransportPublishPort( + -1, + asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), + getByName("127.0.0.3") ); - assertThat(e.getMessage(), containsString("Failed to auto-resolve http publish port")); + assertThat(publishPort, equalTo(-1)); - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be 
derived from matching wildcard address", publishPort, equalTo(boundPort)); if (NetworkUtils.SUPPORTS_V6) { - publishPort = resolvePublishPort( - Settings.EMPTY, + publishPort = Transport.resolveTransportPublishPort( + -1, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") ); @@ -293,6 +285,8 @@ public HttpStats stats() { + opaqueId + "\\]\\[" + (badRequest ? "BAD_REQUEST" : "OK") + + "\\]\\[" + + (badRequest ? "400" : "200") + "\\]\\[null\\]\\[0\\] sent response to \\[.*" ) ); diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java index 402ed1dbee98a..5603fe4e30f9f 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeKeywordDocValuesFormatTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; @@ -25,6 +26,7 @@ import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.network.InetAddresses; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; @@ -36,6 +38,8 @@ import org.opensearch.index.mapper.NumberFieldMapper; import java.io.IOException; +import java.net.InetAddress; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -65,12 +69,15 @@ public void testStarTreeKeywordDocValues() throws IOException { doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); iw.addDocument(doc); doc = new Document(); doc.add(new StringField("_id", "2", Field.Store.NO)); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); + iw.addDocument(doc); iw.flush(); iw.deleteDocuments(new Term("_id", "2")); @@ -80,12 +87,14 @@ public void testStarTreeKeywordDocValues() throws IOException { doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); iw.addDocument(doc); doc = new Document(); doc.add(new StringField("_id", "4", Field.Store.NO)); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new 
BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); iw.addDocument(doc); iw.flush(); iw.deleteDocuments(new Term("_id", "4")); @@ -166,6 +175,9 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { doc.add(new SortedSetDocValuesField("keyword2", new BytesRef(keyword2Value))); map.put(keyword1Value + "-" + keyword2Value, sndvValue + map.getOrDefault(keyword1Value + "-" + keyword2Value, 0)); + doc.add( + new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10." + i)))) + ); iw.addDocument(doc); documents.put(id, doc); } @@ -221,9 +233,7 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { SortedSetStarTreeValuesIterator k1 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( "keyword1" ); - SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( - "keyword2" - ); + SortedSetStarTreeValuesIterator k2 = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator("ip1"); for (StarTreeDocument starDoc : actualStarTreeDocuments) { String keyword1 = null; if (starDoc.dimensions[0] != null) { @@ -232,7 +242,11 @@ public void testStarTreeKeywordDocValuesWithDeletions() throws IOException { String keyword2 = null; if (starDoc.dimensions[1] != null) { - keyword2 = k2.lookupOrd(starDoc.dimensions[1]).utf8ToString(); + BytesRef encoded = k2.lookupOrd(starDoc.dimensions[1]); + InetAddress address = InetAddressPoint.decode( + Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length) + ); + keyword2 = InetAddresses.toAddrString(address); } double metric = (double) starDoc.metrics[0]; if (map.containsKey(keyword1 + "-" + keyword2)) { @@ -254,21 +268,28 @@ public void testStarKeywordDocValuesWithMissingDocs() throws IOException { Document doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); + iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); iw.addDocument(doc); iw.forceMerge(1); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); + iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); + iw.addDocument(doc); iw.forceMerge(1); iw.close(); @@ -340,11 +361,14 @@ public void testStarKeywordDocValuesWithMissingDocsInSegment() throws IOExceptio doc.add(new 
SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text1"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text2"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.10"))))); iw.addDocument(doc); doc = new Document(); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedSetDocValuesField("keyword1", new BytesRef("text11"))); doc.add(new SortedSetDocValuesField("keyword2", new BytesRef("text22"))); + doc.add(new SortedSetDocValuesField("ip1", new BytesRef(InetAddressPoint.encode(InetAddresses.forString("10.10.10.11"))))); + iw.addDocument(doc); iw.forceMerge(1); iw.close(); @@ -538,7 +562,7 @@ protected XContentBuilder getMapping() throws IOException { b.field("name", "keyword1"); b.endObject(); b.startObject(); - b.field("name", "keyword2"); + b.field("name", "ip1"); b.endObject(); b.endArray(); b.startArray("metrics"); @@ -566,6 +590,9 @@ protected XContentBuilder getMapping() throws IOException { b.startObject("keyword2"); b.field("type", "keyword"); b.endObject(); + b.startObject("ip1"); + b.field("type", "ip"); + b.endObject(); b.endObject(); }); } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java index 440268f1f803c..70cc20fe4a9f6 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java @@ -20,10 +20,11 @@ import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.KeywordDimension; +import org.opensearch.index.compositeindex.datacube.IpDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; @@ -426,7 +427,7 @@ public void testFlushFlowForKeywords() throws IOException { ); List metricsWithField = List.of(0, 1, 2, 3, 4, 5); - compositeField = getStarTreeFieldWithKeywordField(); + compositeField = getStarTreeFieldWithKeywordField(random().nextBoolean()); SortedSetStarTreeValuesIterator d1sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList, docsWithField)); SortedSetStarTreeValuesIterator d2sndv = new SortedSetStarTreeValuesIterator(getSortedSetMock(dimList2, docsWithField2)); SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator( @@ -531,9 +532,9 @@ private StarTreeField getStarTreeFieldWithMultipleMetrics() { return new StarTreeField("sf", dims, metrics, c); } - private StarTreeField getStarTreeFieldWithKeywordField() { - Dimension d1 = new KeywordDimension("field1"); - Dimension d2 = new KeywordDimension("field3"); + private 
StarTreeField getStarTreeFieldWithKeywordField(boolean isIp) { + Dimension d1 = isIp ? new IpDimension("field1") : new OrdinalDimension("field1"); + Dimension d2 = isIp ? new IpDimension("field3") : new OrdinalDimension("field3"); Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); Metric m2 = new Metric("field2", List.of(MetricStat.VALUE_COUNT)); Metric m3 = new Metric("field2", List.of(MetricStat.AVG)); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java index be16961e781db..74ecff04076b1 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java @@ -1831,7 +1831,7 @@ public void testMergeFlowWithKeywords() throws IOException { List<Long> metricsList2 = List.of(0L, 1L, 2L, 3L, 4L); List<Integer> metricsWithField2 = List.of(0, 1, 2, 3, 4); - compositeField = getStarTreeFieldWithKeywords(); + compositeField = getStarTreeFieldWithKeywords(random().nextBoolean()); StarTreeValues starTreeValues = getStarTreeValuesWithKeywords( getSortedSetMock(dimList, docsWithField), getSortedSetMock(dimList2, docsWithField2), diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java index 9c9beaea4f52c..cca987b6f9b16 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java @@ -32,10 +32,11 @@ import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit; import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.KeywordDimension; +import org.opensearch.index.compositeindex.datacube.IpDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; @@ -352,9 +353,9 @@ protected StarTreeMetadata getStarTreeMetadata( ); } - protected StarTreeField getStarTreeFieldWithKeywords() { - Dimension d1 = new KeywordDimension("field1"); - Dimension d2 = new KeywordDimension("field3"); + protected StarTreeField getStarTreeFieldWithKeywords(boolean ip) { + Dimension d1 = ip ? new IpDimension("field1") : new OrdinalDimension("field1"); + Dimension d2 = ip ? 
new IpDimension("field3") : new OrdinalDimension("field3"); Metric m1 = new Metric("field2", List.of(MetricStat.VALUE_COUNT, MetricStat.SUM)); List<Dimension> dims = List.of(d1, d2); List<Metric> metrics = List.of(m1); diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java index afd9e994ce3ae..7e6aa00c87290 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -21,6 +21,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.DocValueFormat; import java.io.IOException; @@ -397,6 +398,27 @@ public void testDeduplicationValue() throws IOException { assertEquals(new BytesRef("field.labels=3"), fieldValueAndPaths[4].binaryValue()); } + public void testFetchDocValues() throws IOException { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "flat_object"))); + { + // test valueWithPathField + MappedFieldType ft = mapperService.fieldType("field.name"); + DocValueFormat format = ft.docValueFormat(null, null); + String storedValue = "field.field.name=1234"; + + Object object = format.format(new BytesRef(storedValue)); + assertEquals("1234", object); + } + + { + // test valueField + MappedFieldType ft = mapperService.fieldType("field"); + Throwable throwable = assertThrows(IllegalArgumentException.class, () -> ft.docValueFormat(null, null)); + assertEquals("Field [field] of type [flat_object] does not support doc_value in root field", throwable.getMessage()); + } + + } + @Override protected void registerParameters(ParameterChecker checker) throws IOException { // In the future we will want to make sure parameter updates are covered. 
diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java index 38a6f13777f00..4160108342534 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -9,6 +9,7 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.FieldExistsQuery; @@ -24,6 +25,7 @@ import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; @@ -138,39 +140,273 @@ public void testRewriteValue() { assertEquals("field.bar=foo", searchValuesDocPath); } - public void testTermQuery() { + public void testTermQueryCaseInsensitive() { - FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); + assertEquals( + AutomatonQueries.caseInsensitiveTermQuery(new Term("field._valueAndPath", "field.bar=fOo")), + dynamicMappedFieldType.termQueryCaseInsensitive("fOo", null) + ); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + assertEquals( + AutomatonQueries.caseInsensitiveTermQuery(new Term("field._value", "fOo")), + ft.termQueryCaseInsensitive("fOo", null) + ); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("fOo"))); + + assertEquals(expected, ft.termQuery("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = AutomatonQueries.createAutomatonQuery( + new Term("field" + VALUE_SUFFIX, "field.fOo"), + AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = 
(FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = AutomatonQueries.createAutomatonQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "field.fOo"), + AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + + assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( "field", null, - true, - true + false, + false, + null, + Collections.emptyMap() + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.", + e.getMessage() + ); + } - // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field - String searchFieldName = (flatParentFieldType).directSubfield(); - String searchValues = (flatParentFieldType).rewriteValue("foo"); - assertEquals("foo", searchValues); - assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + public void testTermQuery() { - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( - "field.bar", - flatParentFieldType.name(), - flatParentFieldType.getValueFieldType(), - flatParentFieldType.getValueAndPathFieldType() - ); + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true); - // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field - String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); - String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); - assertEquals("field.bar=foo", searchValuesDocPath); - assertEquals(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), dynamicMappedFieldType.termQuery("foo", null)); + // when searching for "foo" in "field", the term query is directed to 
search "foo" in field._value field + String searchFieldName = (flatParentFieldType).directSubfield(); + String searchValues = (flatParentFieldType).rewriteValue("foo"); + assertEquals("foo", searchValues); + assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); + + // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field + String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); + String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); + assertEquals("field.bar=foo", searchValuesDocPath); + assertEquals( + new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), + dynamicMappedFieldType.termQuery("foo", null) + ); + + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_SUFFIX, new BytesRef("foo"))); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("foo"))); + + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = SortedSetDocValuesField.newSlowRangeQuery( + "field" + VALUE_SUFFIX, + new BytesRef("field.foo"), + new BytesRef("field.foo"), + true, + true + ); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = SortedSetDocValuesField.newSlowRangeQuery( + "field" + VALUE_AND_PATH_SUFFIX, + new BytesRef("field.foo"), + new BytesRef("field.foo"), + true, + true + ); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, 
mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( "field", null, false, - true, + false, null, Collections.emptyMap() ); @@ -178,7 +414,10 @@ public void testTermQuery() { IllegalArgumentException.class, () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.", + e.getMessage() + ); } public void testExistsQuery() { diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index f291b864beb59..d52426c67d256 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; @@ -60,6 +61,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; @@ -100,13 +102,52 @@ public void testIsFieldWithinQuery() throws IOException { ); } + public void testTermQueryCaseInsensitive() { + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = AutomatonQueries.caseInsensitiveTermQuery(new Term("field", BytesRefs.toBytesRef("foo"))); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Term term = new Term("field", "foo"); + + expected = AutomatonQueries.createAutomatonQuery( + term, + AutomatonQueries.toCaseInsensitiveString("foo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQueryCaseInsensitive("foo", null)); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + 
public void testTermQuery() { MappedFieldType ft = new KeywordFieldType("field"); - assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", null)); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expected = SortedSetDocValuesField.newSlowRangeQuery("field", new BytesRef("foo"), new BytesRef("foo"), true, true); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); } public void testTermQueryWithNormalizer() { diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java index 580f8cccc9af5..d9f0fd6657085 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java @@ -15,11 +15,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -28,6 +30,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.SourceFieldMatchQuery; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.search.MatchQuery; import org.junit.Before; @@ -391,7 +394,7 @@ public void testPhraseQuery() throws IOException { assertThat(q, is(expectedQuery)); Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); - assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); + assertThat(q4, is(new ConstantScoreQuery(new TermQuery(new Term("field", "singleton"))))); Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); expectedQuery = new SourceFieldMatchQuery( @@ -447,4 +450,22 @@ public void testPhraseQuery() throws IOException { ); assertThat(q6, is(expectedQuery)); } + + public void testTermQuery() throws Exception 
{ + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", textFieldName); + b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + Query q = new TermQueryBuilder("field", "foo").rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), q); + + q = new TermQueryBuilder("field", "foo").caseInsensitive(true).rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "foo"))), q); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java index 51234fa04ddc2..0170cdde8b21c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java @@ -8,7 +8,11 @@ package org.opensearch.index.mapper; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.TermQuery; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests { @@ -28,4 +32,18 @@ TextFieldMapper.TextFieldType createFieldType(boolean searchable) { ParametrizedFieldMapper.Parameter.metaParam().get() ); } + + @Override + public void testTermQuery() { + MappedFieldType ft = createFieldType(true); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), ft.termQuery("foo", null)); + assertEquals( + new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo"))), + ft.termQueryCaseInsensitive("fOo", null) + ); + + MappedFieldType unsearchable = createFieldType(false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); + assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 8ec34b3eb660c..684704ad65b0a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -111,7 +111,7 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException { () -> mapper.parse(source(b -> b.startArray("status").value(0).value(1).endArray())) ); assertEquals( - "object mapping for [_doc] with array for [status] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [_doc] with array for [status] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", ex.getMessage() ); ParsedDocument doc = mapper.parse(source(b -> b.startArray("size").value(0).value(1).endArray())); @@ -284,6 +284,33 @@ public void testValidStarTreeDateDims() throws IOException { } } + public void testValidStarTreeNestedFields() throws IOException { + MapperService mapperService = createMapperService(getMinMappingWithNestedField()); + Set 
<CompositeMappedFieldType> compositeFieldTypes = mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List<String> expectedDimensionFields = Arrays.asList("@timestamp_minute", "@timestamp_half-hour"); + assertEquals(expectedDimensionFields, dateDim.getSubDimensionNames()); + List<DateTimeUnitRounding> expectedTimeUnits = Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR), + DataCubeDateTimeUnit.HALF_HOUR_OF_DAY + ); + for (int i = 0; i < expectedTimeUnits.size(); i++) { + assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); + } + assertEquals("nested.status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("nested.status", starTreeFieldType.getMetrics().get(0).getField()); + List<MetricStat> expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + public void testInValidStarTreeMinDims() throws IOException { MapperParsingException ex = expectThrows( MapperParsingException.class, @@ -1047,6 +1074,56 @@ private XContentBuilder getMinMappingWith2StarTrees() throws IOException { }); } + private XContentBuilder getMinMappingWithNestedField() throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.endObject(); + b.startObject(); + b.field("name", "nested.status"); + b.endObject(); + b.endArray(); + + b.startArray("metrics"); + b.startObject(); + b.field("name", "nested.status"); + b.endObject(); + b.startObject(); + b.field("name", "metric_field"); + b.endObject(); + b.endArray(); + + b.endObject(); + b.endObject(); + + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("nested"); + b.startObject("properties"); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("metric_field"); + b.field("type", "integer"); + b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); + b.endObject(); + }); + } + private XContentBuilder getInvalidMapping( boolean singleDim, boolean invalidSkipDims, @@ -1085,6 +1162,9 @@ private XContentBuilder getInvalidMapping( b.startObject(); b.field("name", "keyword1"); b.endObject(); + b.startObject(); + b.field("name", "ip1"); + b.endObject(); } b.endArray(); b.startArray("metrics"); @@ -1117,7 +1197,7 @@ if (!invalidDimType) { b.field("type", "integer"); } else { - b.field("type", "ip"); + b.field("type", "wildcard"); } b.endObject(); b.startObject("metric_field"); @@ -1130,6 +1210,9 @@
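For readability, the mapping that getMinMappingWithNestedField() assembles above corresponds roughly to the JSON below (hand-written here; topMapping additionally wraps everything in a top-level _doc object). The point of the fixture is that both an ordered dimension and a metric may reference a field nested under an object via its dotted path:

// Approximate JSON equivalent of the XContentBuilder calls above, as a Java text block.
String nestedStarTreeMapping = """
    {
      "composite": {
        "startree": {
          "type": "star_tree",
          "config": {
            "ordered_dimensions": [ { "name": "@timestamp" }, { "name": "nested.status" } ],
            "metrics": [ { "name": "nested.status" }, { "name": "metric_field" } ]
          }
        }
      },
      "properties": {
        "@timestamp": { "type": "date" },
        "nested": { "properties": { "status": { "type": "integer" } } },
        "metric_field": { "type": "integer" },
        "keyword1": { "type": "keyword" }
      }
    }
    """;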
private XContentBuilder getInvalidMapping( b.startObject("keyword1"); b.field("type", "keyword"); b.endObject(); + b.startObject("ip1"); + b.field("type", "ip"); + b.endObject(); b.endObject(); }); } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 57a561bc8f2a3..4d85a3c491af8 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -12,6 +12,9 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; @@ -20,6 +23,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.CheckpointInfoResponse; @@ -32,6 +36,11 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.test.CorruptionUtils; import org.opensearch.test.junit.annotations.TestLogging; import org.hamcrest.MatcherAssert; @@ -41,6 +50,7 @@ import java.nio.channels.FileChannel; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -55,6 +65,8 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -541,6 +553,81 @@ public void onReplicationFailure( } } + public void testShallowCopySnapshotForClosedIndexSuccessful() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings)) { + final IndexShard primaryShard = shards.getPrimary(); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + shards.flush(); + shards.assertAllEqual(10); + + RepositoriesService repositoriesService = createRepositoriesService(); + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository("random"); + + doAnswer(invocation -> { + IndexShardSnapshotStatus snapshotStatus = invocation.getArgument(5); + long commitGeneration = invocation.getArgument(7); + long startTime = invocation.getArgument(8); + final Map indexFilesToFileLengthMap = invocation.getArgument(9); + ActionListener listener = invocation.getArgument(10); + if (indexFilesToFileLengthMap != null) { + 
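The doAnswer stub that begins above and continues below follows the usual Mockito recipe for methods that report their result through an ActionListener parameter rather than a return value: when(...).thenReturn(...) cannot express that, so the Answer pulls the listener out of the invocation, advances the IndexShardSnapshotStatus stage machine by hand, and completes the callback synchronously. In generic form (the method name and argument positions here are illustrative, not the real snapshotRemoteStoreIndexShard signature):

// Sketch of the stubbing pattern used above, on a hypothetical async mock method.
doAnswer(invocation -> {
    ActionListener<String> listener = invocation.getArgument(2); // fetch the callback parameter
    listener.onResponse("generation-1");                         // complete it without doing real work
    return null;                                                 // the stubbed method returns void
}).when(repository).someAsyncShardOperation(any(), any(), any());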
List fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet()); + long indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum(); + int indexTotalNumberOfFiles = fileNames.size(); + snapshotStatus.moveToStarted(startTime, 0, indexTotalNumberOfFiles, 0, indexTotalFileSize); + // Not performing actual snapshot, just modifying the state + snapshotStatus.moveToFinalize(commitGeneration); + snapshotStatus.moveToDone(System.currentTimeMillis(), snapshotStatus.generation()); + listener.onResponse(snapshotStatus.generation()); + return null; + } + listener.onResponse(snapshotStatus.generation()); + return null; + }).when(repository) + .snapshotRemoteStoreIndexShard(any(), any(), any(), any(), any(), any(), anyLong(), anyLong(), anyLong(), any(), any()); + + final SnapshotShardsService shardsService = getSnapshotShardsService( + primaryShard, + shards.getIndexMetadata(), + true, + repositoriesService + ); + final Snapshot snapshot1 = new Snapshot( + randomAlphaOfLength(10), + new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5)) + ); + + // Initialize the shallow copy snapshot + final ClusterState initState = addSnapshotIndex( + clusterService.state(), + snapshot1, + primaryShard, + SnapshotsInProgress.State.INIT, + true + ); + shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); + + // start the snapshot + shardsService.clusterChanged( + new ClusterChangedEvent( + "test", + addSnapshotIndex(clusterService.state(), snapshot1, primaryShard, SnapshotsInProgress.State.STARTED, true), + initState + ) + ); + + // Check the snapshot got completed successfully + assertBusy(() -> { + final IndexShardSnapshotStatus.Copy copy = shardsService.currentSnapshotShards(snapshot1) + .get(primaryShard.shardId) + .asCopy(); + final IndexShardSnapshotStatus.Stage stage = copy.getStage(); + assertEquals(IndexShardSnapshotStatus.Stage.DONE, stage); + }); + } + } + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { return new RemoteStoreReplicationSource(shard) { @Override diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 2311fc582616f..f4f94baabd7b0 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -68,6 +68,7 @@ import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfoTests; @@ -892,10 +893,21 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { replicateSegments(primaryShard, shards.getReplicas()); shards.assertAllEqual(10); - final SnapshotShardsService shardsService = getSnapshotShardsService(replicaShard); + final SnapshotShardsService shardsService = getSnapshotShardsService( + replicaShard, + shards.getIndexMetadata(), + false, + createRepositoriesService() + ); final Snapshot snapshot = new Snapshot(randomAlphaOfLength(10), new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5))); - final ClusterState initState = 
addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.INIT); + final ClusterState initState = addSnapshotIndex( + clusterService.state(), + snapshot, + replicaShard, + SnapshotsInProgress.State.INIT, + false + ); shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); CountDownLatch latch = new CountDownLatch(1); @@ -907,7 +919,7 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { shardsService.clusterChanged( new ClusterChangedEvent( "test", - addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED), + addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED, false), initState ) ); @@ -956,21 +968,30 @@ public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() th } } - private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { + protected SnapshotShardsService getSnapshotShardsService( + IndexShard indexShard, + IndexMetadata indexMetadata, + boolean closedIdx, + RepositoriesService repositoriesService + ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(threadPool); final IndicesService indicesService = mock(IndicesService.class); final IndexService indexService = mock(IndexService.class); when(indicesService.indexServiceSafe(any())).thenReturn(indexService); - when(indexService.getShardOrNull(anyInt())).thenReturn(replicaShard); - return new SnapshotShardsService(settings, clusterService, createRepositoriesService(), transportService, indicesService); + when(indexService.getShardOrNull(anyInt())).thenReturn(indexShard); + when(indexService.getMetadata()).thenReturn( + new IndexMetadata.Builder(indexMetadata).state(closedIdx ? 
IndexMetadata.State.CLOSE : IndexMetadata.State.OPEN).build() + ); + return new SnapshotShardsService(settings, clusterService, repositoriesService, transportService, indicesService); } - private ClusterState addSnapshotIndex( + protected ClusterState addSnapshotIndex( ClusterState state, Snapshot snapshot, IndexShard shard, - SnapshotsInProgress.State snapshotState + SnapshotsInProgress.State snapshotState, + boolean shallowCopySnapshot ) { final Map shardsBuilder = new HashMap<>(); ShardRouting shardRouting = shard.shardRouting; @@ -991,7 +1012,7 @@ private ClusterState addSnapshotIndex( null, SnapshotInfoTests.randomUserMetadata(), VersionUtils.randomVersion(random()), - false + shallowCopySnapshot ); return ClusterState.builder(state) .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(Collections.singletonList(entry))) diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index 12c7dc870c104..76294d85c64d4 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -44,6 +44,7 @@ import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; @@ -55,6 +56,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class PluginInfoTests extends OpenSearchTestCase { @@ -281,6 +283,30 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception { assertThat(e.getMessage(), containsString("property [classname] is missing")); } + public void testExtendedPluginsSingleOptionalExtension() throws IOException { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin", + "extended.plugins", + "foo;optional=true" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(true)); + } + public void testExtendedPluginsSingleExtension() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties( @@ -302,6 +328,7 @@ public void testExtendedPluginsSingleExtension() throws Exception { ); PluginInfo info = PluginInfo.readFromProperties(pluginDir); assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(false)); } public void testExtendedPluginsMultipleExtensions() throws Exception { diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index bd9ee33856f14..f5702fa1a7ade 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -361,7 +361,7 @@ public void testSortBundlesNoDeps() throws Exception { assertThat(sortedBundles, Matchers.contains(bundle1, bundle2, bundle3)); } - public void testSortBundlesMissingDep() throws Exception { + 
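PluginInfoTests above introduces the ';optional=true' suffix for extended.plugins entries, and the PluginsService tests around this note (the renamed required-dependency test continuing below, plus the new testSortBundlesMissingOptionalDep with its MockLogAppender expectation) verify that a missing optional dependency merely logs a WARN instead of failing bundle sorting. A sketch of parsing one such entry, assuming the semicolon-delimited key=value convention visible in the test data (the real parsing lives in PluginInfo.readFromProperties):

import java.util.Map;

final class ExtendedPluginEntrySketch {
    // "foo" -> ("foo", false); "foo;optional=true" -> ("foo", true)
    static Map.Entry<String, Boolean> parse(String entry) {
        String[] parts = entry.split(";");
        boolean optional = false;
        for (int i = 1; i < parts.length; i++) {
            String[] kv = parts[i].split("=", 2);
            if (kv.length == 2 && "optional".equals(kv[0].trim())) {
                optional = Boolean.parseBoolean(kv[1].trim());
            }
        }
        return Map.entry(parts[0].trim(), optional);
    }
}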
public void testSortBundlesMissingRequiredDep() throws Exception { Path pluginDir = createTempDir(); PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", "MyPlugin", Collections.singletonList("dne"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); @@ -372,6 +372,33 @@ public void testSortBundlesMissingDep() throws Exception { assertEquals("Missing plugin [dne], dependency of [foo]", e.getMessage()); } + public void testSortBundlesMissingOptionalDep() throws Exception { + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(PluginsService.class))) { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "[.test] warning", + "org.opensearch.plugins.PluginsService", + Level.WARN, + "Missing plugin [dne], dependency of [foo]" + ) + ); + Path pluginDir = createTempDir(); + PluginInfo info = new PluginInfo( + "foo", + "desc", + "1.0", + Version.CURRENT, + "1.8", + "MyPlugin", + Collections.singletonList("dne;optional=true"), + false + ); + PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); + PluginsService.sortBundles(Collections.singleton(bundle)); + mockLogAppender.assertAllExpectationsMatched(); + } + } + public void testSortBundlesCommonDep() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 4cd822c7d583b..1ec6d320762f2 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -774,7 +774,9 @@ public void snapshotRemoteStoreIndexShard( String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, long primaryTerm, + long commitGeneration, long startTime, + Map indexFilesToFileLengthMap, ActionListener listener ) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 12e83cbbadd5d..05f48eb9243af 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -28,18 +28,27 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.codec.composite.composite912.Composite912Codec; import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.mapper.MappedFieldType; import 
org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -49,14 +58,17 @@ import org.opensearch.search.aggregations.metrics.InternalSum; import org.opensearch.search.aggregations.metrics.InternalValueCount; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.aggregations.metrics.MinAggregationBuilder; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Random; @@ -69,6 +81,8 @@ import static org.opensearch.search.aggregations.AggregationBuilders.min; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class MetricAggregatorTests extends AggregatorTestCase { @@ -267,6 +281,110 @@ public void testStarTreeDocValues() throws IOException { ); } + CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + + QueryShardContext queryShardContext = queryShardContextMock( + indexSearcher, + mapperServiceMock(), + createIndexSettings(), + circuitBreakerService, + new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking() + ); + + MetricAggregatorFactory aggregatorFactory = mock(MetricAggregatorFactory.class); + when(aggregatorFactory.getSubFactories()).thenReturn(AggregatorFactories.EMPTY); + when(aggregatorFactory.getField()).thenReturn(FIELD_NAME); + when(aggregatorFactory.getMetricStat()).thenReturn(MetricStat.SUM); + + // Case when field and metric type in aggregation are fully supported by star tree. 
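The six cases exercised below probe when star-tree pre-computation may answer an aggregation. Read together, they imply an eligibility predicate along these lines, sketched with the accessors visible in this diff (getSubFactories, getField, getMetricStat, and Metric.getField()/getMetrics()); the real check sits in the star-tree query support code, not in this exact form:

// Sketch only: the factory must be a bare metric aggregation whose (field, stat)
// pair was materialized when the star tree was built.
static boolean canUseStarTree(AggregatorFactory factory, List<Metric> indexedMetrics) {
    if (!(factory instanceof MetricAggregatorFactory metricFactory)) {
        return false;                                              // non-metric factories never qualify
    }
    if (metricFactory.getSubFactories().getFactories().length != 0) {
        return false;                                              // sub-aggregations force the default path
    }
    return indexedMetrics.stream()                                 // (field, stat) must be pre-computed
        .anyMatch(m -> m.getField().equals(metricFactory.getField())
            && m.getMetrics().contains(metricFactory.getMetricStat()));
}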
+ testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + true + ); + + // Case when the field is not supported by star tree + SumAggregationBuilder invalidFieldSumAggBuilder = sum("_name").field("hello"); + testCase( + indexSearcher, + query, + queryBuilder, + invalidFieldSumAggBuilder, + starTree, + supportedDimensions, + Collections.emptyList(), + verifyAggregation(InternalSum::getValue), + invalidFieldSumAggBuilder.build(queryShardContext, null), + false // Invalid fields will return null StarTreeQueryContext which will not cause early termination by leaf collector + ); + + // Case when metric type in aggregation is not supported by star tree but the field is supported. + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when field is not present in supported metrics + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { mock(MetricAggregatorFactory.class) }); + when(aggregatorFactory.getSubFactories()).thenReturn(aggregatorFactories); + + // Case when sub aggregations are present + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when aggregation factory is not metric aggregation + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + mock(ValuesSourceAggregatorFactory.class), + false + ); + ir.close(); directory.close(); } @@ -287,6 +405,21 @@ private void testC CompositeIndexFieldInfo starTree, List supportedDimensions, BiConsumer verify + ) throws IOException { + testCase(searcher, query, queryBuilder, aggBuilder, starTree, supportedDimensions, Collections.emptyList(), verify, null, true); + } + + private void testCase( + IndexSearcher searcher, + Query query, + QueryBuilder queryBuilder, + T aggBuilder, + CompositeIndexFieldInfo starTree, + List supportedDimensions, + List supportedMetrics, + BiConsumer verify, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination ) throws IOException { V starTreeAggregation = searchAndReduceStarTree( createIndexSettings(), @@ -296,8 +429,11 @@ private void testC aggBuilder, starTree, supportedDimensions, + supportedMetrics, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); V expectedAggregation = searchAndReduceStarTree( @@ -308,8 +444,11 @@ private void testC aggBuilder, null, null, + null, 
DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); verify.accept(expectedAggregation, starTreeAggregation); diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index b03cb5ac7bb9d..c1cb19b9576e4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -87,7 +87,8 @@ public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOExcepti testStarTreeFilter(10, false); } - private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + private Directory createStarTreeIndex(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension, List docs) + throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); conf.setCodec(getCodec(maxLeafDoc, skipStarNodeCreationForSDVDimension)); @@ -95,7 +96,6 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); int totalDocs = 100; - List docs = new ArrayList<>(); for (int i = 0; i < totalDocs; i++) { Document doc = new Document(); doc.add(new SortedNumericDocValuesField(SNDV, i)); @@ -110,6 +110,15 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS } iw.forceMerge(1); iw.close(); + return directory; + } + + private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + List docs = new ArrayList<>(); + + Directory directory = createStarTreeIndex(maxLeafDoc, skipStarNodeCreationForSDVDimension, docs); + + int totalDocs = docs.size(); DirectoryReader ir = DirectoryReader.open(directory); initValuesSourceRegistry(); diff --git a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java index b4726bab50198..23c21648b1263 100644 --- a/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/opensearch/threadpool/ScalingThreadPoolTests.java @@ -156,7 +156,6 @@ private int expectedSize(final String threadPoolName, final int numberOfProcesso sizes.put(ThreadPool.Names.REMOTE_PURGE, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_REFRESH_RETRY, ThreadPool::halfAllocatedProcessors); sizes.put(ThreadPool.Names.REMOTE_RECOVERY, ThreadPool::twiceAllocatedProcessors); - sizes.put(ThreadPool.Names.REMOTE_STATE_READ, n -> ThreadPool.boundedBy(4 * n, 4, 32)); return sizes.get(threadPoolName).apply(numberOfProcessors); } diff --git a/server/src/test/java/org/opensearch/transport/PublishPortTests.java b/server/src/test/java/org/opensearch/transport/PublishPortTests.java index 6a41409f6f181..2e5a57c4cdd60 100644 --- a/server/src/test/java/org/opensearch/transport/PublishPortTests.java +++ b/server/src/test/java/org/opensearch/transport/PublishPortTests.java @@ -43,8 +43,6 @@ import static java.net.InetAddress.getByName; import static java.util.Arrays.asList; -import static org.opensearch.transport.TcpTransport.resolvePublishPort; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class PublishPortTests extends 
OpenSearchTestCase { @@ -73,48 +71,44 @@ public void testPublishPort() throws Exception { } - int publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(settings, profile), + int publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(settings, profile).publishPort, randomAddresses(), getByName("127.0.0.2") ); assertThat("Publish port should be explicitly set", publishPort, equalTo(useProfile ? 9080 : 9081)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matched address", publishPort, equalTo(boundPort)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("127.0.0.1", boundPort), address("127.0.0.2", boundPort)), getByName("127.0.0.3") ); assertThat("Publish port should be derived from unique port of bound addresses", publishPort, equalTo(boundPort)); - try { - resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), - asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), - getByName("127.0.0.3") - ); - fail("Expected BindTransportException as publish_port not specified and non-unique port of bound addresses"); - } catch (BindTransportException e) { - assertThat(e.getMessage(), containsString("Failed to auto-resolve publish port")); - } + int resPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, + asList(address("127.0.0.1", boundPort), address("127.0.0.2", otherBoundPort)), + getByName("127.0.0.3") + ); + assertThat("as publish_port not specified and non-unique port of bound addresses", resPort, equalTo(-1)); - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("127.0.0.1") ); assertThat("Publish port should be derived from matching wildcard address", publishPort, equalTo(boundPort)); if (NetworkUtils.SUPPORTS_V6) { - publishPort = resolvePublishPort( - new TcpTransport.ProfileSettings(baseSettings, profile), + publishPort = Transport.resolvePublishPort( + new TcpTransport.ProfileSettings(baseSettings, profile).publishPort, asList(address("0.0.0.0", boundPort), address("127.0.0.2", otherBoundPort)), getByName("::1") ); diff --git a/settings.gradle b/settings.gradle index 035fe69eda7e9..a24da40069b90 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.develocity" version "3.18.2" + id "com.gradle.develocity" version "3.19" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 4dd1a2787ee87..bb2b7ebafdf81 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -72,12 +72,12 @@ dependencies { api "org.eclipse.jetty:jetty-server:${versions.jetty}" api 
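The PublishPortTests rewrite above tracks an API change: resolvePublishPort now lives on Transport, takes the bare publish port value instead of a whole ProfileSettings, and signals "could not auto-resolve" by returning -1 rather than throwing BindTransportException, leaving the caller to decide whether that is fatal. The resolution order the assertions encode, as a sketch (not the actual Transport.resolvePublishPort source):

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

final class PublishPortSketch {
    static int resolve(int configuredPort, List<InetSocketAddress> bound, InetAddress publishAddress) {
        if (configuredPort >= 0) {
            return configuredPort;                    // explicit publish_port always wins (unset is -1)
        }
        for (InetSocketAddress address : bound) {     // port of a bound address matching the publish address
            if (address.getAddress().equals(publishAddress) || address.getAddress().isAnyLocalAddress()) {
                return address.getPort();
            }
        }
        Set<Integer> ports = bound.stream().map(InetSocketAddress::getPort).collect(Collectors.toSet());
        return ports.size() == 1 ? ports.iterator().next() : -1; // unique bound port, else the -1 sentinel
    }
}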
"org.eclipse.jetty.websocket:javax-websocket-server-impl:${versions.jetty}" api 'org.apache.zookeeper:zookeeper:3.9.3' - api "org.apache.commons:commons-text:1.12.0" + api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.12" - api "ch.qos.logback:logback-classic:1.5.12" + api "ch.qos.logback:logback-core:1.5.16" + api "ch.qos.logback:logback-classic:1.5.15" api "org.jboss.xnio:xnio-nio:3.8.16.Final" - api 'org.jline:jline:3.27.1' + api 'org.jline:jline:3.28.0' api 'org.apache.commons:commons-configuration2:2.11.0' api 'com.nimbusds:nimbus-jose-jwt:9.47' api ('org.apache.kerby:kerb-admin:2.1.0') { diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index a5dc13c334513..062ebd2051f6e 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -289,6 +289,10 @@ protected EngineConfigFactory getEngineConfigFactory(IndexSettings indexSettings return new EngineConfigFactory(indexSettings); } + public IndexMetadata getIndexMetadata() { + return indexMetadata; + } + public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName()).id(Integer.toString(docId.incrementAndGet())) diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index e1728c4476699..27142b298db52 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -93,6 +93,7 @@ import org.opensearch.index.cache.query.DisabledQueryCache; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -348,7 +349,9 @@ protected CountingAggregator createCountingAggregator( IndexSettings indexSettings, CompositeIndexFieldInfo starTree, List supportedDimensions, + List supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... fieldTypes ) throws IOException { SearchContext searchContext; @@ -360,7 +363,9 @@ protected CountingAggregator createCountingAggregator( queryBuilder, starTree, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); } else { @@ -389,7 +394,9 @@ protected SearchContext createSearchContextWithStarTreeContext( QueryBuilder queryBuilder, CompositeIndexFieldInfo starTree, List supportedDimensions, + List supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... 
fieldTypes ) throws IOException { SearchContext searchContext = createSearchContext( @@ -406,7 +413,12 @@ protected SearchContext createSearchContextWithStarTreeContext( AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); when(searchContext.aggregations()).thenReturn(searchContextAggregations); when(searchContextAggregations.factories()).thenReturn(aggregatorFactories); - when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + + if (aggregatorFactory != null) { + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { aggregatorFactory }); + } else { + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + } CompositeDataCubeFieldType compositeMappedFieldType = mock(CompositeDataCubeFieldType.class); when(compositeMappedFieldType.name()).thenReturn(starTree.getField()); @@ -414,6 +426,7 @@ protected SearchContext createSearchContextWithStarTreeContext( Set compositeFieldTypes = Set.of(compositeMappedFieldType); when((compositeMappedFieldType).getDimensions()).thenReturn(supportedDimensions); + when((compositeMappedFieldType).getMetrics()).thenReturn(supportedMetrics); MapperService mapperService = mock(MapperService.class); when(mapperService.getCompositeFieldTypes()).thenReturn(compositeFieldTypes); when(searchContext.mapperService()).thenReturn(mapperService); @@ -740,8 +753,11 @@ protected A searchAndReduc AggregationBuilder builder, CompositeIndexFieldInfo compositeIndexFieldInfo, List supportedDimensions, + List supportedMetrics, int maxBucket, boolean hasNested, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination, MappedFieldType... fieldTypes ) throws IOException { query = query.rewrite(searcher); @@ -764,7 +780,9 @@ protected A searchAndReduc indexSettings, compositeIndexFieldInfo, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); @@ -772,7 +790,7 @@ protected A searchAndReduc searcher.search(query, countingAggregator); countingAggregator.postCollection(); aggs.add(countingAggregator.buildTopLevel()); - if (compositeIndexFieldInfo != null) { + if (compositeIndexFieldInfo != null && assertCollectorEarlyTermination) { assertEquals(0, countingAggregator.collectCounter.get()); }
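The new assertCollectorEarlyTermination flag threaded through AggregatorTestCase above gates that final assertion: when a query is fully served from the star tree, the per-document leaf collector is never invoked, so the CountingAggregator's collectCounter stays at zero, while the ineligible cases leave the counter unchecked. The counting hook amounts to a wrapper of this shape (illustrative only; the actual CountingAggregator in AggregatorTestCase wraps the aggregator rather than the leaf collector):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.lucene.search.FilterLeafCollector;
import org.apache.lucene.search.LeafCollector;

final class CountingLeafCollector extends FilterLeafCollector {
    private final AtomicInteger collectCounter;

    CountingLeafCollector(LeafCollector in, AtomicInteger collectCounter) {
        super(in);
        this.collectCounter = collectCounter;
    }

    @Override
    public void collect(int doc) throws IOException {
        collectCounter.incrementAndGet(); // one increment per document handed to the collector
        super.collect(doc);               // delegate to the real collection logic
    }
}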