diff --git a/CHANGELOG.md b/CHANGELOG.md
index d38c59c319aa1..fc112cb97ada3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,7 +40,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Reader Writer Separation] Add searchOnly replica routing configuration ([#15410](https://github.com/opensearch-project/OpenSearch/pull/15410))
 - Add index creation using the context field ([#15290](https://github.com/opensearch-project/OpenSearch/pull/15290))
 - [Remote Publication] Add remote download stats ([#15291](https://github.com/opensearch-project/OpenSearch/pull/15291))
+- Add support to upload snapshot shard blobs with hashed prefix ([#15426](https://github.com/opensearch-project/OpenSearch/pull/15426))
 - Add canRemain method to TargetPoolAllocationDecider to move shards from local to remote pool for hot to warm tiering ([#15010](https://github.com/opensearch-project/OpenSearch/pull/15010))
+- Add support for pluggable deciders for concurrent search ([#15363](https://github.com/opensearch-project/OpenSearch/pull/15363))
+- Add support for comma-separated list of index names to be used with Snapshot Status API ([#15409](https://github.com/opensearch-project/OpenSearch/pull/15409))
 
 ### Dependencies
 - Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
index 93ffd7cade7c3..fef8f4ab3991e 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
@@ -230,6 +230,7 @@ public void testSnapshotsStatus() {
         Map<String, String> expectedParams = new HashMap<>();
         String repository = RequestConvertersTests.randomIndicesNames(1, 1)[0];
         String[] snapshots = RequestConvertersTests.randomIndicesNames(1, 5);
+        String[] indices = RequestConvertersTests.randomIndicesNames(1, 5);
         StringBuilder snapshotNames = new StringBuilder(snapshots[0]);
         for (int idx = 1; idx < snapshots.length; idx++) {
             snapshotNames.append(",").append(snapshots[idx]);
         }
@@ -237,8 +238,9 @@
         boolean ignoreUnavailable = randomBoolean();
         String endpoint = "/_snapshot/" + repository + "/" + snapshotNames.toString() + "/_status";

-        SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository, snapshots);
+        SnapshotsStatusRequest snapshotsStatusRequest = (new SnapshotsStatusRequest(repository, snapshots)).indices(indices);
         RequestConvertersTests.setRandomMasterTimeout(snapshotsStatusRequest, expectedParams);
+        snapshotsStatusRequest.ignoreUnavailable(ignoreUnavailable);
         expectedParams.put("ignore_unavailable", Boolean.toString(ignoreUnavailable));
diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
index e073db7276119..1b85a1e227252 100644
--- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
+++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
@@ -67,19 +67,11 @@ public void testUrlRepository() throws
Exception { logger.info("--> creating repository"); Path repositoryLocation = randomRepoPath(); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); - + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository("test-repo", FsRepository.TYPE, settings); createIndex("test-idx"); ensureGreen(); @@ -115,17 +107,10 @@ public void testUrlRepository() throws Exception { cluster().wipeIndices("test-idx"); logger.info("--> create read-only URL repository"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("url-repo") - .setType(URLRepository.TYPE) - .setSettings( - Settings.builder() - .put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString()) - .put("list_directories", randomBoolean()) - ) - ); + Settings.Builder settingsBuilder = Settings.builder() + .put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString()) + .put("list_directories", randomBoolean()); + createRepository("url-repo", URLRepository.TYPE, settingsBuilder); logger.info("--> restore index after deletion"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 176e60a667aef..7f32f09602164 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -38,7 +38,6 @@ import com.azure.storage.blob.models.BlobStorageException; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; @@ -47,6 +46,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.AfterClass; import java.net.HttpURLConnection; @@ -56,7 +56,6 @@ import reactor.core.scheduler.Schedulers; import static org.hamcrest.Matchers.blankOrNullString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { @@ -103,17 +102,11 @@ protected SecureSettings credentials() { @Override protected void createRepository(String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository(repoName) - .setType("azure") - 
.setSettings( - Settings.builder() - .put("container", System.getProperty("test.azure.container")) - .put("base_path", System.getProperty("test.azure.base")) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder() + .put("container", System.getProperty("test.azure.container")) + .put("base_path", System.getProperty("test.azure.base")); + + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "azure", settings); if (Strings.hasText(System.getProperty("test.azure.sas_token"))) { ensureSasTokenPermissions(); } diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index 1e11b1d111d8f..860b30fdef9ca 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -32,19 +32,18 @@ package org.opensearch.repositories.gcs; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Base64; import java.util.Collection; import static org.hamcrest.Matchers.blankOrNullString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; public class GoogleCloudStorageThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { @@ -84,16 +83,9 @@ protected SecureSettings credentials() { @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("gcs") - .setSettings( - Settings.builder() - .put("bucket", System.getProperty("test.google.bucket")) - .put("base_path", System.getProperty("test.google.base", "/")) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder() + .put("bucket", System.getProperty("test.google.bucket")) + .put("base_path", System.getProperty("test.google.base", "/")); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "gcs", settings); } } diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java index ab10691240649..60fdbea011a44 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java @@ -34,12 +34,12 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; 
import org.opensearch.common.settings.Settings; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Collection; @@ -61,20 +61,13 @@ protected SecureSettings credentials() { @Override protected void createRepository(String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository(repoName) - .setType("hdfs") - .setSettings( - Settings.builder() - .put("uri", "hdfs:///") - .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) - .put("path", "foo") - .put("chunk_size", randomIntBetween(100, 1000) + "k") - .put("compress", randomBoolean()) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder() + .put("uri", "hdfs:///") + .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "hdfs", settings); } // HDFS repository doesn't have precise cleanup stats so we only check whether or not any blobs were removed diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index ce456f26af3a4..130bbbf1d2198 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -35,7 +35,6 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.common.settings.Settings; @@ -45,6 +44,7 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.ThreadPool; @@ -63,21 +63,13 @@ protected Collection> getPlugins() { public void testSimpleWorkflow() { Client client = client(); - - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("hdfs") - .setSettings( - Settings.builder() - .put("uri", "hdfs:///") - .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) - .put("path", "foo") - .put("chunk_size", randomIntBetween(100, 1000) + "k") - .put("compress", randomBoolean()) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder() + .put("uri", "hdfs:///") + .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName()) + .put("path", "foo") + .put("chunk_size", randomIntBetween(100, 1000) + "k") + .put("compress", randomBoolean()); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "hdfs", settings); createIndex("test-idx-1"); createIndex("test-idx-2"); @@ -168,7 +160,7 @@ public void testSimpleWorkflow() { public void testMissingUri() { try { - 
client().admin().cluster().preparePutRepository("test-repo").setType("hdfs").setSettings(Settings.EMPTY).get(); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", Settings.builder()); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -178,12 +170,8 @@ public void testMissingUri() { public void testEmptyUri() { try { - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("hdfs") - .setSettings(Settings.builder().put("uri", "/path").build()) - .get(); + Settings.Builder settings = Settings.builder().put("uri", "/path"); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -193,12 +181,8 @@ public void testEmptyUri() { public void testNonHdfsUri() { try { - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("hdfs") - .setSettings(Settings.builder().put("uri", "file:///").build()) - .get(); + Settings.Builder settings = Settings.builder().put("uri", "file:///"); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -208,12 +192,8 @@ public void testNonHdfsUri() { public void testPathSpecifiedInHdfs() { try { - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("hdfs") - .setSettings(Settings.builder().put("uri", "hdfs:///some/path").build()) - .get(); + Settings.Builder settings = Settings.builder().put("uri", "hdfs:///some/path"); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -223,12 +203,8 @@ public void testPathSpecifiedInHdfs() { public void testMissingPath() { try { - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("hdfs") - .setSettings(Settings.builder().put("uri", "hdfs:///").build()) - .get(); + Settings.Builder settings = Settings.builder().put("uri", "hdfs:///"); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index f7a84864a8569..7db9a0d3ba790 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -33,7 +33,6 @@ import software.amazon.awssdk.services.s3.model.StorageClass; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -43,6 +42,7 @@ import org.opensearch.plugins.Plugin; import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchIntegTestCase; import 
org.junit.Before; import java.util.Collection; @@ -51,7 +51,6 @@ import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.blankOrNullString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { @@ -111,13 +110,7 @@ protected void createRepository(String repoName) { settings.put("storage_class", storageClass); } } - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("s3") - .setSettings(settings) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "s3", settings); } @Override diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 573a4f3f51a41..21017160d77e5 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -55,6 +55,7 @@ import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.rest.FakeRestRequest; @@ -68,7 +69,6 @@ import static org.opensearch.repositories.s3.S3ClientSettings.ACCESS_KEY_SETTING; import static org.opensearch.repositories.s3.S3ClientSettings.SECRET_KEY_SETTING; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -277,14 +277,8 @@ public void sendResponse(RestResponse response) { } private void createRepository(final String name, final Settings repositorySettings) { - assertAcked( - client().admin() - .cluster() - .preparePutRepository(name) - .setType(S3Repository.TYPE) - .setVerify(false) - .setSettings(repositorySettings) - ); + Settings.Builder settings = Settings.builder().put(repositorySettings); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), name, S3Repository.TYPE, false, settings); } /** diff --git a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java index 7a32b92d8aa75..e20e113d00e5a 100644 --- a/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/opensearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -33,6 +33,7 @@ package org.opensearch.upgrades; import org.opensearch.OpenSearchStatusException; +import com.sun.jna.StringArray; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; @@ -46,6 +47,7 @@ import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.settings.Settings; +import 
org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.json.JsonXContent; @@ -145,14 +147,14 @@ public void testCreateAndRestoreSnapshot() throws IOException { case STEP2_NEW_CLUSTER: case STEP4_NEW_CLUSTER: assertSnapshotStatusSuccessful(client, repoName, - snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new)); + snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new), Strings.EMPTY_ARRAY); break; case STEP1_OLD_CLUSTER: - assertSnapshotStatusSuccessful(client, repoName, "snapshot-" + TEST_STEP); + assertSnapshotStatusSuccessful(client, repoName, new String[] {"snapshot-" + TEST_STEP}, Strings.EMPTY_ARRAY); break; case STEP3_OLD_CLUSTER: assertSnapshotStatusSuccessful( - client, repoName, "snapshot-" + TEST_STEP, "snapshot-" + TestStep.STEP3_OLD_CLUSTER); + client, repoName, new String[] {"snapshot-" + TEST_STEP, "snapshot-" + TestStep.STEP3_OLD_CLUSTER}, Strings.EMPTY_ARRAY); break; } if (TEST_STEP == TestStep.STEP3_OLD_CLUSTER) { @@ -190,10 +192,10 @@ public void testReadOnlyRepo() throws IOException { break; } if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER || TEST_STEP == TestStep.STEP3_OLD_CLUSTER) { - assertSnapshotStatusSuccessful(client, repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER); + assertSnapshotStatusSuccessful(client, repoName, new String[] {"snapshot-" + TestStep.STEP1_OLD_CLUSTER}, Strings.EMPTY_ARRAY); } else { assertSnapshotStatusSuccessful(client, repoName, - "snapshot-" + TestStep.STEP1_OLD_CLUSTER, "snapshot-" + TestStep.STEP2_NEW_CLUSTER); + new String[] {"snapshot-" + TestStep.STEP1_OLD_CLUSTER, "snapshot-" + TestStep.STEP2_NEW_CLUSTER}, Strings.EMPTY_ARRAY); } if (TEST_STEP == TestStep.STEP3_OLD_CLUSTER) { ensureSnapshotRestoreWorks(repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER, shards); @@ -218,7 +220,7 @@ public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { // Every step creates one snapshot assertThat(snapshots, hasSize(TEST_STEP.ordinal() + 1)); assertSnapshotStatusSuccessful(client, repoName, - snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new)); + snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new), Strings.EMPTY_ARRAY); if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER) { ensureSnapshotRestoreWorks(repoName, "snapshot-" + TestStep.STEP1_OLD_CLUSTER, shards); } else { @@ -253,9 +255,9 @@ public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { } private static void assertSnapshotStatusSuccessful(RestHighLevelClient client, String repoName, - String... 
snapshots) throws IOException { + String[] snapshots, String[] indices) throws IOException { final SnapshotsStatusResponse statusResponse = client.snapshot() - .status(new SnapshotsStatusRequest(repoName, snapshots), RequestOptions.DEFAULT); + .status((new SnapshotsStatusRequest(repoName, snapshots)).indices(indices), RequestOptions.DEFAULT); for (SnapshotStatus status : statusResponse.getSnapshots()) { assertThat(status.getShardsStats().getFailedShards(), is(0)); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json index 1ac6042941013..354d3c35d2bda 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json @@ -40,6 +40,26 @@ "description":"A comma-separated list of snapshot names" } } + }, + { + "path":"/_snapshot/{repository}/{snapshot}/{index}/_status", + "methods":[ + "GET" + ], + "parts":{ + "repository":{ + "type":"string", + "description":"A repository name" + }, + "snapshot":{ + "type":"string", + "description":"A snapshot name" + }, + "index":{ + "type": "list", + "description":"A comma-separated list of index names" + } + } } ] }, @@ -58,7 +78,7 @@ }, "ignore_unavailable":{ "type":"boolean", - "description":"Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown" + "description":"Whether to ignore unavailable snapshots and indices, defaults to false which means a SnapshotMissingException or IndexNotFoundException is thrown" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml index 98abd58a54e4b..716b6fb51cb43 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml @@ -16,12 +16,15 @@ setup: properties: record: type: "flat_object" + order: + type: "integer" - do: index: index: flat_object_null_value id: 1 body: { - "record": null + "record": null, + "order" : 1 } - do: @@ -31,7 +34,8 @@ setup: body: { "record": { "name": null - } + }, + "order" : 2 } - do: @@ -43,7 +47,8 @@ setup: "name": null, "age":"5", "name1": null - } + }, + "order" : 3 } - do: @@ -60,7 +65,8 @@ setup: } } ] - } + }, + "order" : 4 } - do: @@ -77,7 +83,8 @@ setup: }, null ] - } + }, + "order" : 5 } - do: @@ -97,7 +104,8 @@ setup: } } ] - } + }, + "order" : 6 } - do: @@ -108,7 +116,8 @@ setup: "record": { "name": null, "age":"3" - } + }, + "order" : 7 } - do: @@ -119,7 +128,8 @@ setup: "record": { "age":"3", "name": null - } + }, + "order" : 8 } - do: @@ -133,7 +143,8 @@ setup: 3 ], "age": 4 - } + }, + "order" : 9 } - do: @@ -147,7 +158,8 @@ setup: null, 3 ] - } + }, + "order" : 10 } - do: @@ -157,7 +169,8 @@ setup: body: { "record": { "name": null - } + }, + "order": 11 } - do: @@ -171,7 +184,8 @@ setup: null ] } - } + }, + "order": 12 } - do: @@ -183,7 +197,8 @@ setup: "labels": [ null ] - } + }, + "order": 13 } - do: @@ -198,7 +213,8 @@ setup: null ] } - } + }, + "order": 14 } - do: @@ -211,7 +227,8 @@ setup: "labels": [ null ] - } + }, + "order": 15 } - do: @@ -224,7 +241,8 @@ setup: null ], "age": "4" - } + }, + "order": 16 } - do: @@ -239,7 +257,8 @@ setup: "dsdsdsd" ] } - } + }, + "order": 17 } - do: @@ -253,7 +272,8 @@ setup: "name2": null } } - } + }, + "order": 
18 } - do: @@ -271,7 +291,8 @@ setup: ] ] } - } + }, + "order": 19 } - do: @@ -299,7 +320,7 @@ teardown: - is_true: flat_object_null_value.mappings - match: { flat_object_null_value.mappings.properties.record.type: flat_object } # https://github.com/opensearch-project/OpenSearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test#length - - length: { flat_object_null_value.mappings.properties: 1 } + - length: { flat_object_null_value.mappings.properties: 2 } --- @@ -328,7 +349,8 @@ teardown: size: 30, query: { exists: { "field": "record" } - } + }, + sort: [{ order: asc}] } - length: { hits.hits: 12 } @@ -352,7 +374,8 @@ teardown: _source: true, query: { exists: { "field": "record.d" } - } + }, + sort: [{ order: asc}] } - length: { hits.hits: 3 } @@ -367,7 +390,8 @@ teardown: _source: true, query: { term: { record: "dsdsdsd" } - } + }, + sort: [{ order: asc}] } - length: { hits.hits: 2 } @@ -381,7 +405,8 @@ teardown: _source: true, query: { term: { record.name.name1: "dsdsdsd" } - } + }, + sort: [{ order: asc}] } - length: { hits.hits: 2 } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 36fe3748e9d10..6c0a156eb6752 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -55,13 +55,17 @@ public void testPutRepositoryWithBlocks() { logger.info("--> registering a repository is blocked when the cluster is read only"); try { setClusterReadOnly(true); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()); assertBlocked( - client().admin() - .cluster() - .preparePutRepository("test-repo-blocks") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", randomRepoPath())), + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client().admin().cluster(), + "test-repo-blocks", + "fs", + false, + settings, + null, + false + ), Metadata.CLUSTER_READ_ONLY_BLOCK ); } finally { @@ -69,25 +73,13 @@ public void testPutRepositoryWithBlocks() { } logger.info("--> registering a repository is allowed when the cluster is not read only"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo-blocks") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", randomRepoPath())) - ); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings); } public void testVerifyRepositoryWithBlocks() { - assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo-blocks") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", randomRepoPath())) - ); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings); // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. 
try { @@ -104,14 +96,8 @@ public void testVerifyRepositoryWithBlocks() { } public void testDeleteRepositoryWithBlocks() { - assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo-blocks") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", randomRepoPath())) - ); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings); logger.info("--> deleting a repository is blocked when the cluster is read only"); try { @@ -126,14 +112,8 @@ public void testDeleteRepositoryWithBlocks() { } public void testGetRepositoryWithBlocks() { - assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo-blocks") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", randomRepoPath())) - ); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings); // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. try { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index 78fb01b07b6b1..0f29f02b284a4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -80,13 +80,8 @@ protected void setUpRepository() throws Exception { logger.info("--> register a repository"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - ); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), REPOSITORY_NAME, "fs", settings); logger.info("--> verify the repository"); VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java index 009f5111078de..abce2fc878f27 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java @@ -224,7 +224,7 @@ protected void setLowPriorityUploadRate(String repoName, String value) throws Ex Settings.Builder settings = Settings.builder() .put("location", rmd.settings().get("location")) .put("max_remote_low_priority_upload_bytes_per_sec", value); - assertAcked(client().admin().cluster().preparePutRepository(repoName).setType(rmd.type()).setSettings(settings).get()); + createRepository(repoName, rmd.type(), settings); } public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 2d268a26a5755..ea45173cdbf7f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -365,13 +365,8 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti final String dataNodeName = internalCluster().startDataOnlyNode(); ensureStableCluster(3); - assertAcked( - client().admin() - .cluster() - .preparePutRepository("repo") - .setType(FsRepository.TYPE) - .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) - ); + Settings.Builder settings = Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean()); + createRepository("repo", FsRepository.TYPE, settings); final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster() .getCurrentClusterManagerNodeInstance(ClusterInfoService.class); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index 5eef7074e1dd6..3718dce538053 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -494,8 +494,7 @@ public void testRestoreSnapshotOverLimit() { repoSettings.put("location", randomRepoPath()); repoSettings.put("compress", randomBoolean()); repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); - - assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(repoSettings.build())); + createRepository("test-repo", "fs", repoSettings); int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 52c6c6801a3c2..8cfb710679137 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -8,19 +8,28 @@ package org.opensearch.index.mapper; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.Rounding; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; import org.opensearch.index.compositeindex.CompositeIndexSettings; import org.opensearch.index.compositeindex.datacube.DateDimension; import 
org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.IndicesService; +import org.opensearch.search.SearchHit; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; @@ -39,6 +48,10 @@ */ public class StarTreeMapperIT extends OpenSearchIntegTestCase { private static final String TEST_INDEX = "test"; + Settings settings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .build(); private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean keywordDim) { try { @@ -116,6 +129,12 @@ private static XContentBuilder createMaxDimTestMapping() { .startObject() .field("name", "dim2") .endObject() + .startObject() + .field("name", "dim3") + .endObject() + .startObject() + .field("name", "dim4") + .endObject() .endArray() .endObject() .endObject() @@ -132,6 +151,10 @@ private static XContentBuilder createMaxDimTestMapping() { .field("type", "integer") .field("doc_values", true) .endObject() + .startObject("dim4") + .field("type", "integer") + .field("doc_values", true) + .endObject() .endObject() .endObject(); } catch (IOException e) { @@ -223,6 +246,56 @@ private static XContentBuilder createUpdateTestMapping(boolean changeDim, boolea } } + private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, boolean isDuplicateMetric) { + XContentBuilder mapping = null; + try { + mapping = jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startArray("ordered_dimensions") + .startObject() + .field("name", "timestamp") + .endObject() + .startObject() + .field("name", "numeric_dv") + .endObject() + .startObject() + .field("name", isDuplicateDim ? "numeric_dv" : "numeric_dv1") // Duplicate dimension + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "numeric_dv") + .endObject() + .startObject() + .field("name", isDuplicateMetric ? 
"numeric_dv" : "numeric_dv1") // Duplicate metric + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("numeric_dv1") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + fail("Failed to create mapping: " + e.getMessage()); + } + return mapping; + } + private static String getDim(boolean hasDocValues, boolean isKeyword) { if (hasDocValues) { return "numeric"; @@ -244,7 +317,7 @@ public final void setupNodeSettings() { } public void testValidCompositeIndex() { - prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); for (IndicesService service : dataNodeInstances) { final Index index = resolveIndex("test"); @@ -285,8 +358,91 @@ public void testValidCompositeIndex() { } } + public void testCompositeIndexWithIndexNotSpecified() { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .build(); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get() + ); + assertEquals( + "Failed to parse mapping [_doc]: Set 'index.composite_index' as true as part of index settings to use star tree index", + ex.getMessage() + ); + } + + public void testCompositeIndexWithHigherTranslogFlushSize() { + Settings settings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(513, ByteSizeUnit.MB)) + .build(); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get() + ); + assertEquals("You can configure 'index.translog.flush_threshold_size' with upto '512mb' for composite index", ex.getMessage()); + } + + public void testCompositeIndexWithArraysInCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); + // Attempt to index a document with an array field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startArray("numeric_dv") + .value(10) + .value(20) + .value(30) + .endArray() + .endObject(); + + // Index the document and refresh + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [_doc] with array for [numeric_dv] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInNonCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); + // Attempt to index a document with an array 
field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startArray("numeric") + .value(10) + .value(20) + .value(30) + .endArray() + .endObject(); + + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + List values = (List) hit.getSourceAsMap().get("numeric"); + assertEquals(3, values.size()); + assertTrue(values.contains(10)); + assertTrue(values.contains(20)); + assertTrue(values.contains(30)); + } + public void testUpdateIndexWithAdditionOfStarTree() { - prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -296,7 +452,7 @@ public void testUpdateIndexWithAdditionOfStarTree() { } public void testUpdateIndexWithNewerStarTree() { - prepareCreate(TEST_INDEX).setMapping(createTestMappingWithoutStarTree(false, false, false)).get(); + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createTestMappingWithoutStarTree(false, false, false)).get(); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, @@ -309,7 +465,7 @@ public void testUpdateIndexWithNewerStarTree() { } public void testUpdateIndexWhenMappingIsDifferent() { - prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); // update some field in the mapping IllegalArgumentException ex = expectThrows( @@ -320,7 +476,7 @@ public void testUpdateIndexWhenMappingIsDifferent() { } public void testUpdateIndexWhenMappingIsSame() { - prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get(); + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); // update some field in the mapping AcknowledgedResponse putMappingResponse = client().admin() @@ -368,7 +524,7 @@ public void testUpdateIndexWhenMappingIsSame() { public void testInvalidDimCompositeIndex() { IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(true, false, false)).get() + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(true, false, false)).get() ); assertEquals( "Aggregations not supported for the dimension field [numeric] with field type [integer] as part of star tree field", @@ -379,8 +535,14 @@ public void testInvalidDimCompositeIndex() { public void testMaxDimsCompositeIndex() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> prepareCreate(TEST_INDEX).setMapping(createMaxDimTestMapping()) - 
.setSettings(Settings.builder().put(StarTreeIndexSettings.STAR_TREE_MAX_DIMENSIONS_SETTING.getKey(), 2)) + () -> prepareCreate(TEST_INDEX).setSettings(settings) + .setMapping(createMaxDimTestMapping()) + .setSettings( + Settings.builder() + .put(StarTreeIndexSettings.STAR_TREE_MAX_DIMENSIONS_SETTING.getKey(), 2) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + ) .get() ); assertEquals( @@ -389,11 +551,35 @@ public void testMaxDimsCompositeIndex() { ); } + public void testMaxMetricsCompositeIndex() { + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings) + .setMapping(createMaxDimTestMapping()) + .setSettings( + Settings.builder() + .put(StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_SETTING.getKey(), 4) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + ) + .get() + ); + assertEquals( + "Failed to parse mapping [_doc]: There cannot be more than [4] base metrics for star tree field [startree-1]", + ex.getMessage() + ); + } + public void testMaxCalendarIntervalsCompositeIndex() { MapperParsingException ex = expectThrows( MapperParsingException.class, () -> prepareCreate(TEST_INDEX).setMapping(createMaxDimTestMapping()) - .setSettings(Settings.builder().put(StarTreeIndexSettings.STAR_TREE_MAX_DATE_INTERVALS_SETTING.getKey(), 1)) + .setSettings( + Settings.builder() + .put(StarTreeIndexSettings.STAR_TREE_MAX_DATE_INTERVALS_SETTING.getKey(), 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + ) .get() ); assertEquals( @@ -405,7 +591,7 @@ public void testMaxCalendarIntervalsCompositeIndex() { public void testUnsupportedDim() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, true)).get() + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, true)).get() ); assertEquals( "Failed to parse mapping [_doc]: unsupported field type associated with dimension [keyword] as part of star tree field [startree-1]", @@ -416,7 +602,7 @@ public void testUnsupportedDim() { public void testInvalidMetric() { IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, true, false)).get() + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, true, false)).get() ); assertEquals( "Aggregations not supported for the metrics field [numeric] with field type [integer] as part of star tree field", @@ -424,6 +610,145 @@ public void testInvalidMetric() { ); } + public void testDuplicateDimensions() { + XContentBuilder finalMapping = getMappingWithDuplicateFields(true, false); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(finalMapping).setSettings(settings).get() + ); + assertEquals( + "Failed to parse mapping [_doc]: Duplicate dimension [numeric_dv] present as part star tree index field [startree-1]", + ex.getMessage() + ); + } + + public void 
testDuplicateMetrics() { + XContentBuilder finalMapping = getMappingWithDuplicateFields(false, true); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(finalMapping).setSettings(settings).get() + ); + assertEquals( + "Failed to parse mapping [_doc]: Duplicate metrics [numeric_dv] present as part star tree index field [startree-1]", + ex.getMessage() + ); + } + + public void testValidTranslogFlushThresholdSize() { + Settings indexSettings = Settings.builder() + .put(settings) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(256, ByteSizeUnit.MB)) + .build(); + + AcknowledgedResponse response = prepareCreate(TEST_INDEX).setSettings(indexSettings) + .setMapping(createMinimalTestMapping(false, false, false)) + .get(); + + assertTrue(response.isAcknowledged()); + } + + public void testInvalidTranslogFlushThresholdSize() { + Settings indexSettings = Settings.builder() + .put(settings) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1024, ByteSizeUnit.MB)) + .build(); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> prepareCreate(TEST_INDEX).setSettings(indexSettings).setMapping(createMinimalTestMapping(false, false, false)).get() + ); + + assertTrue( + ex.getMessage().contains("You can configure 'index.translog.flush_threshold_size' with upto '512mb' for composite index") + ); + } + + public void testTranslogFlushThresholdSizeWithDefaultCompositeSettingLow() { + Settings updatedSettings = Settings.builder() + .put(CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "130m") + .build(); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings(updatedSettings); + + client().admin().cluster().updateSettings(updateSettingsRequest).actionGet(); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get() + ); + + assertEquals("You can configure 'index.translog.flush_threshold_size' with upto '130mb' for composite index", ex.getMessage()); + } + + public void testUpdateTranslogFlushThresholdSize() { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); + + // Update to a valid value + AcknowledgedResponse validUpdateResponse = client().admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "256mb")) + .get(); + assertTrue(validUpdateResponse.isAcknowledged()); + + // Try to update to an invalid value + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "1024mb")) + .get() + ); + + assertTrue( + ex.getMessage().contains("You can configure 'index.translog.flush_threshold_size' with upto '512mb' for composite index") + ); + + // update cluster settings to higher value + Settings updatedSettings = Settings.builder() + .put(CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "1030m") + .build(); + + ClusterUpdateSettingsRequest updateSettingsRequest = 
new ClusterUpdateSettingsRequest().transientSettings(updatedSettings); + + client().admin().cluster().updateSettings(updateSettingsRequest).actionGet(); + + // update index threshold flush to higher value + validUpdateResponse = client().admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "1024mb")) + .get(); + assertTrue(validUpdateResponse.isAcknowledged()); + } + + public void testMinimumTranslogFlushThresholdSize() { + Settings indexSettings = Settings.builder() + .put(settings) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(56, ByteSizeUnit.BYTES)) + .build(); + + AcknowledgedResponse response = prepareCreate(TEST_INDEX).setSettings(indexSettings) + .setMapping(createMinimalTestMapping(false, false, false)) + .get(); + + assertTrue(response.isAcknowledged()); + } + + public void testBelowMinimumTranslogFlushThresholdSize() { + Settings indexSettings = Settings.builder() + .put(settings) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(55, ByteSizeUnit.BYTES)) + .build(); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> prepareCreate(TEST_INDEX).setSettings(indexSettings).setMapping(createMinimalTestMapping(false, false, false)).get() + ); + + assertEquals("failed to parse value [55b] for setting [index.translog.flush_threshold_size], must be >= [56b]", ex.getMessage()); + } + @After public final void cleanupNodeSettings() { assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index f46f413f4d23f..3ee506f58a9d7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -577,18 +577,12 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I // the other problem here why we can't corrupt segments.X files is that the snapshot flushes again before // it snapshots and that will write a new segments.X+1 file logger.info("--> creating repository"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath().toAbsolutePath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath().toAbsolutePath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository("test-repo", "fs", settings); + logger.info("--> snapshot"); final CreateSnapshotResponse createSnapshotResponse = client().admin() .cluster() @@ -761,18 +755,11 @@ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() thro // Create a snapshot repository. This repo is used to take a snapshot after // corrupting a file, which causes the node to notice the corrupt data and // close the shard. 
- assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath().toAbsolutePath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath().toAbsolutePath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository("test-repo", "fs", settings); client().prepareIndex("test").setSource("field", "value").execute(); indexingInFlight.await(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 06d2d2a90de87..0d3c8307c060f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java @@ -51,7 +51,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -271,14 +270,8 @@ public void testSpecifiedIndexUnavailableSnapshotRestore() throws Exception { createIndex("test1"); ensureGreen("test1"); waitForRelocation(); + createRepository("dummy-repo", "fs", Settings.builder().put("location", randomRepoPath())); - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); verify(snapshot("snap2", "test1", "test2"), true); @@ -391,13 +384,8 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { ensureGreen("foobar"); waitForRelocation(); - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + createRepository("dummy-repo", "fs", Settings.builder().put("location", randomRepoPath())); + client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index cf93a432d0371..68b29851c6c04 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -741,14 +741,7 @@ public void testSnapshotRecovery() throws Exception { String nodeA = internalCluster().startNode(); logger.info("--> create repository"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPO_NAME) 
- .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", false)) - .get() - ); + createRepository(REPO_NAME, "fs", Settings.builder().put("location", randomRepoPath()).put("compress", false)); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index e4e681a5433b5..17a9c3ddbe317 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -108,9 +108,7 @@ protected void setFailRate(String repoName, int value) throws ExecutionException Settings.Builder settings = Settings.builder() .put("location", rmd.settings().get("location")) .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); - assertAcked( - client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() - ); + createRepository(repoName, ReloadableFsRepository.TYPE, settings); } public void initDocRepToRemoteMigration() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java index 377bd9529ca7a..c701a8d92c336 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -21,7 +21,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; import static org.opensearch.node.remotestore.RemoteStoreNodeService.Direction.REMOTE_STORE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreMigrationSettingsUpdateIT extends RemoteStoreMigrationShardAllocationBaseTestCase { @@ -92,11 +91,7 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix String snapshotName = "test-snapshot"; String snapshotRepoName = "test-restore-snapshot-repo"; Path snapshotRepoNameAbsolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(snapshotRepoName) - .setType("fs") - .setSettings(Settings.builder().put("location", snapshotRepoNameAbsolutePath)) - ); + createRepository(snapshotRepoName, "fs", Settings.builder().put("location", snapshotRepoNameAbsolutePath)); logger.info("Create snapshot of non remote stored backed index"); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index 4d37b2a1feb88..7d29e5d328492 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -116,15 +116,10 @@ public void testNoShallowSnapshotInMixedMode() throws Exception { logger.info("Create shallow snapshot setting enabled repo"); String 
shallowSnapshotRepoName = "shallow-snapshot-repo-name"; Path shallowSnapshotRepoPath = randomRepoPath(); - assertAcked( - clusterAdmin().preparePutRepository(shallowSnapshotRepoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", shallowSnapshotRepoPath) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + createRepository(shallowSnapshotRepoName, "fs", settings); logger.info("Verify shallow snapshot creation"); final String snapshot1 = "snapshot1"; diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 6994b731d123c..56078a6ef8800 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -803,20 +803,14 @@ public void testCreateSnapshotV2() throws Exception { Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -867,20 +861,14 @@ public void testMixedSnapshotCreationWithV2RepositorySetting() throws Exception Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), 
false); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); + Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); createIndex(indexName1, indexSettings); @@ -901,20 +889,14 @@ public void testMixedSnapshotCreationWithV2RepositorySetting() throws Exception assertThat(snapshotInfo.getPinnedTimestamp(), equalTo(0L)); // enable shallow_snapshot_v2 - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); + indexDocuments(client, indexName1, 10); indexDocuments(client, indexName2, 20); @@ -948,20 +930,13 @@ public void testConcurrentSnapshotV2CreateOperation() throws InterruptedExceptio Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1031,20 +1006,13 @@ public void testCreateSnapshotV2WithRedIndex() throws Exception { Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + 
.put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1086,20 +1054,13 @@ public void testCreateSnapshotV2WithIndexingLoad() throws Exception { Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1161,20 +1122,13 @@ public void testCreateSnapshotV2WithShallowCopySettingDisabled() throws Exceptio Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1216,20 +1170,13 @@ public void testClusterManagerFailoverDuringSnapshotCreation() throws Exception Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - 
.put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1295,21 +1242,14 @@ public void testConcurrentV1SnapshotAndV2RepoSettingUpdate() throws Exception { String snapshotName1 = "test-create-snapshot-v1"; Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false) - ) - ); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1346,19 +1286,16 @@ public void testConcurrentV1SnapshotAndV2RepoSettingUpdate() throws Exception { assertThrows( IllegalStateException.class, - () -> client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - .get() + () -> createRepository( + snapshotRepoName, + FsRepository.TYPE, + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) ); } catch (Exception e) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index f83ae3e0ca820..ba06bb463e5a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -190,9 +190,7 @@ protected void setFailRate(String repoName, int value) throws ExecutionException Settings.Builder settings = Settings.builder() .put("location", rmd.settings().get("location")) .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); - assertAcked( - client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() - ); + createRepository(repoName, ReloadableFsRepository.TYPE, settings); } public Settings indexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 11260e0914dc5..6ec973090883b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -36,6 +36,8 @@ import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -476,14 +478,7 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { private Path registerCustomRepository() { Path path = randomRepoPath(); - assertAcked( - client().admin() - .cluster() - .preparePutRepository("custom-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", path).put("compress", false)) - .get() - ); + createRepository("custom-repo", "fs", Settings.builder().put("location", path).put("compress", false)); return path; } @@ -493,9 +488,15 @@ private void verifyRestoredRepositories(Path repoPath) { assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + Settings settings = repositoriesMetadata.repository("custom-repo").settings(); + PathType pathType = BlobStoreRepository.SHARD_PATH_TYPE.get(settings); assertEquals( - Settings.builder().put("location", repoPath).put("compress", false).build(), - repositoriesMetadata.repository("custom-repo").settings() + Settings.builder() + .put("location", repoPath) + .put("compress", false) + .put(BlobStoreRepository.SHARD_PATH_TYPE.getKey(), pathType) + .build(), + settings ); // repo cleanup post verification diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 08ce35f0911ac..9a8d3651160c0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ 
-511,9 +511,7 @@ public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Excepti List dataNodes = internalCluster().startDataOnlyNodes(2); Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); + createRepository("test-repo", "fs", Settings.builder().put("location", absolutePath)); logger.info("--> Create index and ingest 50 docs"); createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index b0827dcfe4892..4cbafde6417af 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -131,13 +131,15 @@ public void testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrict .get(0); Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); updatedSettings.remove("system_repository"); - - client.admin() - .cluster() - .preparePutRepository(repositoryMetadata.name()) - .setType(repositoryMetadata.type()) - .setSettings(updatedSettings) - .get(); + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client.admin().cluster(), + repositoryMetadata.name(), + repositoryMetadata.type(), + true, + updatedSettings, + null, + false + ).get(); ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); networkDisruption.stopDisrupting(); @@ -161,12 +163,7 @@ public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); updatedSettings.remove("system_repository"); - client.admin() - .cluster() - .preparePutRepository(repositoryMetadata.name()) - .setType(repositoryMetadata.type()) - .setSettings(updatedSettings) - .get(); + createRepository(repositoryMetadata.name(), repositoryMetadata.type(), updatedSettings); internalCluster().restartRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 195b2d18df7cc..8c7789846935d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -478,15 +478,7 @@ public void testRateLimitedRemoteDownloads() throws Exception { Settings.Builder settings = Settings.builder(); settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB); - - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType(ReloadableFsRepository.TYPE) - .setSettings(settings) - .get() - ); + createRepository(REPOSITORY_NAME, ReloadableFsRepository.TYPE, settings); for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); @@ -515,14 
+507,7 @@ public void testRateLimitedRemoteDownloads() throws Exception { // revert repo metadata to pass asserts on repo metadata vs. node attrs during teardown // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700 settings.remove("max_remote_download_bytes_per_sec"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType(ReloadableFsRepository.TYPE) - .setSettings(settings) - .get() - ); + createRepository(REPOSITORY_NAME, ReloadableFsRepository.TYPE, settings); for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index 3dfde6f472525..99cc58848a04a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -31,7 +31,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -119,19 +118,12 @@ public void testRateLimitedRemoteUploads() throws Exception { internalCluster().startNode(clusterSettings.build()); Client client = client(); logger.info("--> updating repository"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType(MockFsRepositoryPlugin.TYPE) - .setSettings( - Settings.builder() - .put("location", repositoryLocation) - .put("compress", compress) - .put("max_remote_upload_bytes_per_sec", "1kb") - .put("chunk_size", 100, ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES); + createRepository(REPOSITORY_NAME, MockFsRepositoryPlugin.TYPE, settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index b8415f4b41815..e4347fef744ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -45,7 +45,6 @@ import java.util.Collection; import java.util.Collections; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -70,9 +69,12 @@ public void testUpdateRepository() { .next(); final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); - - assertAcked( - 
client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + FsRepository.TYPE, + true, + repoSettings ); final GetRepositoriesResponse originalGetRepositoriesResponse = client.admin() @@ -91,8 +93,12 @@ public void testUpdateRepository() { final boolean updated = randomBoolean(); final String updatedRepositoryType = updated ? "mock" : FsRepository.TYPE; - assertAcked( - client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings).get() + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + updatedRepositoryType, + true, + repoSettings ); final GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() @@ -112,12 +118,8 @@ public void testUpdateRepository() { public void testSystemRepositoryCantBeCreated() { internalCluster(); final String repositoryName = "test-repo"; - final Client client = client(); final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath()); - assertThrows( - RepositoryException.class, - () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() - ); + assertThrows(RepositoryException.class, () -> createRepository(repositoryName, FsRepository.TYPE, repoSettings)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java index 9057ef900efbd..34075b78e98af 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -76,19 +76,11 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce final Path repoPath = randomRepoPath(); logger.info("--> creating repository {} at {}", repoName, repoPath); - - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", repoPath) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repoPath) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository(repoName, "fs", settings); final String indexName = randomName(); int docCount = iterations(10, 1000); @@ -112,14 +104,7 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce IOUtils.rm(deletedPath); } assertFalse(Files.exists(deletedPath)); - - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings(Settings.builder().put("location", repoPath).put("readonly", true)) - ); + createRepository(repoName, "fs", Settings.builder().put("location", repoPath).put("readonly", true)); final OpenSearchException exception = expectThrows( OpenSearchException.class, diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 
4051bee3e4e5c..01f3b434bc4dc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -57,7 +57,10 @@ import java.util.Collection; import java.util.List; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.global; import static org.opensearch.search.aggregations.AggregationBuilders.stats; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -81,8 +84,12 @@ public AggregationsIntegrationIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java index 1826dd69cd804..4ce74f8195da9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/CombiIT.java @@ -51,7 +51,10 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.missing; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -69,8 +72,12 @@ public CombiIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java index 302ec3116d187..2fcf09d6ebf50 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/EquivalenceIT.java @@ -72,7 +72,10 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.extendedStats; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -102,8 +105,12 @@ public EquivalenceIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java index b650855083eed..ca13b4de00156 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MetadataIT.java @@ -49,7 +49,10 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; @@ -65,8 +68,12 @@ public 
MetadataIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java index bdd16c7e74dc0..1310792e2f2e4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/MissingValueIT.java @@ -51,7 +51,10 @@ import java.util.Arrays; import java.util.Collection; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.cardinality; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.geoCentroid; @@ -73,8 +76,12 @@ public MissingValueIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java index 48fd06bac285b..7e1cae8e5b628 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/AvgBucketIT.java @@ -53,7 +53,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static 
org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -83,8 +86,12 @@ public AvgBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java index 1b22cf2018d96..204aaa764849f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketScriptIT.java @@ -63,7 +63,10 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.dateRange; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -96,8 +99,12 @@ public BucketScriptIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java index 
7dca1d0d79b1e..7677cfdac3e29 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSelectorIT.java @@ -62,7 +62,10 @@ import java.util.function.Function; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.bucketSelector; @@ -95,8 +98,12 @@ public BucketSelectorIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java index ffb607866935b..cd489e5b982e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/BucketSortIT.java @@ -58,7 +58,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -91,8 +94,12 @@ public BucketSortIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java index 8c89c1232ebb3..fc4c76f513f1d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -63,7 +63,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.PipelineAggregatorBuilders.derivative; @@ -89,8 +92,12 @@ public DateDerivativeIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java index f8def40ec003a..6fabbb32a4d15 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/DerivativeIT.java @@ -61,7 +61,10 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; 
import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.filters; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; @@ -106,8 +109,12 @@ public DerivativeIT(Settings dynamicSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index 1bd04cc13268f..06d46e80f9710 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -56,7 +56,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -86,8 +89,12 @@ public ExtendedStatsBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index f646c06aba556..7805e0bac4c0b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -66,7 +66,10 @@ import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.termQuery; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.filter; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; @@ -97,8 +100,12 @@ public MaxBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java index b92e086e0f20b..e1e8d8b035ae1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java @@ -53,7 +53,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -83,8 +86,12 @@ public MinBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + 
Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java index d35b80b7918fe..e481e48cf6188 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java @@ -61,7 +61,10 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -139,8 +142,12 @@ public MovAvgIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java index 29cb334bfcd00..592a151781048 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -56,7 +56,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static 
org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -86,8 +89,12 @@ public PercentilesBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java index 507bff51f0e39..2a412bc0f7720 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SerialDiffIT.java @@ -54,7 +54,10 @@ import java.util.Map; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.avg; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -104,8 +107,12 @@ public SerialDiffIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java index fbaf799871c8a..4e52e6e706324 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/StatsBucketIT.java @@ -53,7 +53,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -82,8 +85,12 @@ public StatsBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java index a5967124ff921..55652877d3f4b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/SumBucketIT.java @@ -53,7 +53,10 @@ import java.util.List; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.aggregations.AggregationBuilders.histogram; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.search.aggregations.AggregationBuilders.terms; @@ -83,8 +86,12 @@ public SumBucketIT(Settings staticSettings) { @ParametersFactory public static Collection parameters() { return Arrays.asList( - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, - new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_ALL).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() }, + new Object[] { + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_NONE).build() } ); } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index fbf2acf7b08a6..9c784e5c80fea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -72,6 +72,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.test.OpenSearchIntegTestCase.resolvePath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFileExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; @@ -140,18 +141,11 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { assertAcked(client.admin().cluster().prepareDeleteRepository(repoName)); logger.info("--> recreate repository"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", repo) - .put("compress", false) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repo) + .put("compress", false) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository(repoName, "fs", settings); startDeleteSnapshot(repoName, snapshot).get(); @@ -168,20 +162,12 @@ public void testConcurrentlyChangeRepositoryContentsInBwCMode() throws Exception Path repo = randomRepoPath(); final String repoName = "test-repo"; logger.info("--> creating repository at {}", repo.toAbsolutePath()); - assertAcked( - client.admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", repo) - .put("compress", false) - .put(BlobStoreRepository.ALLOW_CONCURRENT_MODIFICATION.getKey(), true) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); - + Settings.Builder settings = Settings.builder() + .put("location", repo) + .put("compress", false) + .put(BlobStoreRepository.ALLOW_CONCURRENT_MODIFICATION.getKey(), true) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository(repoName, "fs", settings); createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); indexRandom( @@ -472,7 +458,7 @@ public void testHandleSnapshotErrorWithBwCFormat() throws IOException, Execution // not break subsequent repository operations logger.info("--> move shard level metadata to new generation"); final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); + final Path shardPath = repoPath.resolve(resolvePath(indexId, "0")); final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); assertFileExists(initialShardMetaPath); Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1")); @@ -501,8 +487,9 @@ public void testRepairBrokenShardGenerations() throws Exception { logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation"); final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = 
repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); + final Path shardPath = repoPath.resolve(resolvePath(indexId, "0")); final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); + assertFileExists(initialShardMetaPath); Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + randomIntBetween(1, 1000))); @@ -564,9 +551,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { assertThat(indexIds.size(), equalTo(1)); final IndexId corruptedIndex = indexIds.get(indexName); - final Path shardIndexFile = repo.resolve("indices") - .resolve(corruptedIndex.getId()) - .resolve("0") + final Path shardIndexFile = repo.resolve(resolvePath(corruptedIndex, "0")) .resolve("index-" + repositoryData.shardGenerations().getShardGen(corruptedIndex, 0)); logger.info("--> truncating shard index file [{}]", shardIndexFile); @@ -641,7 +626,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> delete index metadata and shard metadata"); for (String index : indices) { - Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0"); + Path shardZero = repo.resolve(resolvePath(indexIds.get(index), "0")); if (randomBoolean()) { Files.delete( shardZero.resolve("index-" + getRepositoryData("test-repo").shardGenerations().getShardGen(indexIds.get(index), 0)) @@ -834,10 +819,9 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); logger.info("--> deleting shard level index file"); - final Path indicesPath = repo.resolve("indices"); for (IndexId indexId : getRepositoryData("test-repo").getIndices().values()) { final Path shardGen; - try (Stream shardFiles = Files.list(indicesPath.resolve(indexId.getId()).resolve("0"))) { + try (Stream shardFiles = Files.list(repo.resolve(resolvePath(indexId, "0")))) { shardGen = shardFiles.filter(file -> file.getFileName().toString().startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)) .findFirst() .orElseThrow(() -> new AssertionError("Failed to find shard index blob")); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 2b88dbc64e2af..5b581eed3a944 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -88,6 +88,7 @@ import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.TestCustomMetadata; @@ -761,18 +762,26 @@ public void testRegistrationFailure() { internalCluster().startNode(nonClusterManagerNode()); // Register mock repositories for (int i = 0; i < 5; i++) { - clusterAdmin().preparePutRepository("test-repo" + i) - .setType("mock") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .setVerify(false) - .get(); + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + clusterAdmin(), + 
"test-repo" + i, + "mock", + false, + Settings.builder().put("location", randomRepoPath()), + null, + false + ).get(); } logger.info("--> make sure that properly setup repository can be registered on all nodes"); - clusterAdmin().preparePutRepository("test-repo-0") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); - + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + clusterAdmin(), + "test-repo-0", + "fs", + true, + Settings.builder().put("location", randomRepoPath()), + null, + false + ).get(); } public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception { @@ -981,11 +990,7 @@ public void testSnapshotWithDateMath() { final String snapshotName = ""; logger.info("--> creating repository"); - assertAcked( - clusterAdmin().preparePutRepository(repo) - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) - ); + createRepository(repo, "fs", Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())); final String expression1 = nameExpressionResolver.resolveDateMathExpression(snapshotName); logger.info("--> creating date math snapshot"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java index 1c46e37dea93a..c96d4a2f079ee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java @@ -115,13 +115,15 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { secondCluster.startClusterManagerOnlyNode(); secondCluster.startDataOnlyNode(); - secondCluster.client() - .admin() - .cluster() - .preparePutRepository(repoNameOnSecondCluster) - .setType("fs") - .setSettings(Settings.builder().put("location", repoPath)) - .get(); + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + secondCluster.client().admin().cluster(), + repoNameOnSecondCluster, + "fs", + true, + Settings.builder().put("location", repoPath), + null, + false + ).get(); createIndexWithRandomDocs("test-idx-1", randomIntBetween(1, 100)); createFullSnapshot(repoNameOnFirstCluster, "snap-1"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java index 8e2580aba1745..e84de36df2fca 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -32,20 +32,28 @@ package org.opensearch.snapshots; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStage; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; +import 
org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import org.junit.Before; import java.nio.file.Path; +import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.snapshots.SnapshotsService.MAX_SHARDS_ALLOWED_IN_STATUS_API; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -192,6 +200,110 @@ public void testStatusAPICallInProgressShallowSnapshot() throws Exception { createSnapshotResponseActionFuture.actionGet(); } + public void testStatusAPICallForShallowV2Snapshot() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used for the test"); + Settings pinnedTimestampSettings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings); + internalCluster().startDataOnlyNodes(2, pinnedTimestampSettings); + + final String index1 = "remote-index-1"; + final String index2 = "remote-index-2"; + final String index3 = "remote-index-3"; + final String snapshotRepoName = "snapshot-repo-name"; + final String snapshot = "snapshot"; + + logger.info("Create repository for shallow V2 snapshots"); + Settings.Builder snapshotV2RepoSettings = snapshotRepoSettingsForShallowCopy().put( + BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), + Boolean.TRUE + ); + createRepository(snapshotRepoName, "fs", snapshotV2RepoSettings); + + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(index1, remoteStoreEnabledIndexSettings); + createIndex(index2, remoteStoreEnabledIndexSettings); + createIndex(index3, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("Indexing some data"); + for (int i = 0; i < 50; i++) { + index(index1, "_doc", Integer.toString(i), "foo", "bar" + i); + index(index2, "_doc", Integer.toString(i), "foo", "bar" + i); + index(index3, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + SnapshotInfo snapshotInfo = createFullSnapshot(snapshotRepoName, snapshot); + assertTrue(snapshotInfo.getPinnedTimestamp() > 0); // to assert creation of a shallow v2 snapshot + + logger.info("Set MAX_SHARDS_ALLOWED_IN_STATUS_API to a low value"); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey(), 2)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertBusy(() -> { + // without index filter + // although no. 
of shards in snapshot (3) is greater than the max value allowed in a status api call, the request does not fail + SnapshotStatus snapshotStatusWithoutIndexFilter = client().admin() + .cluster() + .prepareSnapshotStatus(snapshotRepoName) + .setSnapshots(snapshot) + .execute() + .actionGet() + .getSnapshots() + .get(0); + + assertShallowV2SnapshotStatus(snapshotStatusWithoutIndexFilter, false); + + // with index filter + SnapshotStatus snapshotStatusWithIndexFilter = client().admin() + .cluster() + .prepareSnapshotStatus(snapshotRepoName) + .setSnapshots(snapshot) + .setIndices(index1, index2) + .execute() + .actionGet() + .getSnapshots() + .get(0); + + assertShallowV2SnapshotStatus(snapshotStatusWithIndexFilter, true); + + }, 1, TimeUnit.MINUTES); + + } + + private void assertShallowV2SnapshotStatus(SnapshotStatus snapshotStatus, boolean hasIndexFilter) { + if (hasIndexFilter) { + assertEquals(0, snapshotStatus.getStats().getTotalSize()); + } else { + // TODO: after adding primary store size at the snapshot level, total size here should be > 0 + } + // assert that total and incremental values of file count and size_in_bytes are 0 at index and shard levels + assertEquals(0, snapshotStatus.getStats().getTotalFileCount()); + assertEquals(0, snapshotStatus.getStats().getIncrementalSize()); + assertEquals(0, snapshotStatus.getStats().getIncrementalFileCount()); + + for (Map.Entry entry : snapshotStatus.getIndices().entrySet()) { + // index level + SnapshotIndexStatus snapshotIndexStatus = entry.getValue(); + assertEquals(0, snapshotIndexStatus.getStats().getTotalSize()); + assertEquals(0, snapshotIndexStatus.getStats().getTotalFileCount()); + assertEquals(0, snapshotIndexStatus.getStats().getIncrementalSize()); + assertEquals(0, snapshotIndexStatus.getStats().getIncrementalFileCount()); + + for (SnapshotIndexShardStatus snapshotIndexShardStatus : snapshotStatus.getShards()) { + // shard level + assertEquals(0, snapshotIndexShardStatus.getStats().getTotalSize()); + assertEquals(0, snapshotIndexShardStatus.getStats().getTotalFileCount()); + assertEquals(0, snapshotIndexShardStatus.getStats().getIncrementalSize()); + assertEquals(0, snapshotIndexShardStatus.getStats().getIncrementalFileCount()); + assertEquals(SnapshotIndexShardStage.DONE, snapshotIndexShardStatus.getStage()); + } + } + } + private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotStatus, String indexName) { return snapshotStatus.getIndices().get(indexName).getShards().get(0); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index dd40c77ba918d..271fcf166139f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -33,6 +33,7 @@ package org.opensearch.snapshots; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.bulk.BulkRequest; @@ -56,7 +57,6 @@ import java.nio.file.Path; import java.util.List; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -110,19 +110,17 @@ public void testRepositoryCreation() throws Exception { assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue()); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue()); + RepositoryMetadata testRepo1Md = findRepository(repositoriesResponse.repositories(), "test-repo-1"); + logger.info("--> check that trying to create a repository with the same settings repeatedly does not update cluster state"); String beforeStateUuid = clusterStateResponse.getState().stateUUID(); - assertThat( - client.admin() - .cluster() - .preparePutRepository("test-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", location)) - .get() - .isAcknowledged(), - equalTo(true) - ); - assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + createRepository("test-repo-1", "fs", Settings.builder().put("location", location)); + repositoriesResponse = client.admin().cluster().prepareGetRepositories(randomFrom("_all", "*", "test-repo-*")).get(); + RepositoryMetadata testRepo1MdAfterUpdate = findRepository(repositoriesResponse.repositories(), "test-repo-1"); + + if (testRepo1Md.settings().equals(testRepo1MdAfterUpdate.settings())) { + assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + } logger.info("--> delete repository test-repo-1"); client.admin().cluster().prepareDeleteRepository("test-repo-1").get(); @@ -225,12 +223,7 @@ public void testMisconfiguredRepository() throws Exception { Path invalidRepoPath = createTempDir().toAbsolutePath(); String location = invalidRepoPath.toString(); try { - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", location)) - .get(); + createRepository("test-repo", "fs", Settings.builder().put("location", location)); fail("Shouldn't be here"); } catch (RepositoryException ex) { assertThat( @@ -242,33 +235,28 @@ public void testMisconfiguredRepository() throws Exception { public void testRepositoryAckTimeout() throws Exception { logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack"); - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("test-repo-1") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES) - ) - .setTimeout("0s") - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false)); + + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES); + PutRepositoryRequestBuilder requestBuilder = OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client().admin().cluster(), + "test-repo-1", + "fs", + true, + settings, + "0s", + false + ); + assertFalse(requestBuilder.get().isAcknowledged()); logger.info("--> creating repository test-repo-2 with standard timeout - should ack"); - putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("test-repo-2") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", 
randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + settings = Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES); + createRepository("test-repo-2", "fs", settings); logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack"); AcknowledgedResponse deleteRepositoryResponse = client().admin() @@ -292,25 +280,45 @@ public void testRepositoryVerification() throws Exception { Settings readonlySettings = Settings.builder().put(settings).put("readonly", true).build(); logger.info("--> creating repository that cannot write any files - should fail"); assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings), + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client.admin().cluster(), + "test-repo-1", + "mock", + true, + Settings.builder().put(settings), + null, + false + ), RepositoryVerificationException.class ); logger.info("--> creating read-only repository that cannot read any files - should fail"); assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings), + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client.admin().cluster(), + "test-repo-2", + "mock", + true, + Settings.builder().put(readonlySettings), + null, + false + ), RepositoryVerificationException.class ); logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings).setVerify(false)); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo-1", "mock", false, Settings.builder().put(settings)); logger.info("--> verifying repository"); assertRequestBuilderThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class); logger.info("--> creating read-only repository that cannot read any files, but suppress verification - should be acked"); - assertAcked( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings).setVerify(false) + OpenSearchIntegTestCase.putRepository( + client.admin().cluster(), + "test-repo-2", + "mock", + false, + Settings.builder().put(readonlySettings) ); logger.info("--> verifying repository"); @@ -320,12 +328,8 @@ public void testRepositoryVerification() throws Exception { logger.info("--> creating repository"); try { - client.admin() - .cluster() - .preparePutRepository("test-repo-1") - .setType("mock") - .setSettings(Settings.builder().put("location", location).put("localize_location", true)) - .get(); + Settings.Builder settingsBuilder = Settings.builder().put("location", location).put("localize_location", true); + createRepository("test-repo-1", "mock", settingsBuilder); fail("RepositoryVerificationException wasn't generated"); } catch (RepositoryVerificationException ex) { assertThat(ex.getMessage(), containsString("is not shared")); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java index 0bebe969b3f3e..4187ecdf5f283 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -59,7 +59,6 @@ import java.util.Map; import java.util.function.Function; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; public class RepositoryFilterUserMetadataIT extends OpenSearchIntegTestCase { @@ -72,17 +71,10 @@ protected Collection> nodePlugins() { public void testFilteredRepoMetadataIsUsed() { final String clusterManagerName = internalCluster().getClusterManagerName(); final String repoName = "test-repo"; - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType(MetadataFilteringPlugin.TYPE) - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put(MetadataFilteringPlugin.CLUSTER_MANAGER_SETTING_VALUE, clusterManagerName) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath()) + .put(MetadataFilteringPlugin.CLUSTER_MANAGER_SETTING_VALUE, clusterManagerName); + createRepository(repoName, MetadataFilteringPlugin.TYPE, settings); createIndex("test-idx"); final SnapshotInfo snapshotInfo = client().admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 75c3a0a23de37..2d48882e43739 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -82,6 +82,7 @@ import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import java.nio.channels.SeekableByteChannel; @@ -389,17 +390,11 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException disableRepoConsistencyCheck("This test uses a purposely broken repository so it would fail consistency checks"); logger.info("--> creating repository"); - assertAcked( - clusterAdmin().preparePutRepository("test-repo") - .setType("mock") - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put("random", randomAlphaOfLength(10)) - .put("random_control_io_exception_rate", 0.2) - ) - .setVerify(false) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAlphaOfLength(10)) + .put("random_control_io_exception_rate", 0.2); + OpenSearchIntegTestCase.putRepository(clusterAdmin(), "test-repo", "mock", false, settings); createIndexWithRandomDocs("test-idx", 100); @@ -690,11 +685,8 @@ private void unrestorableUseCase( assertAcked(client().admin().indices().prepareDelete(indexName)); // update the test repository - assertAcked( - clusterAdmin().preparePutRepository("test-repo") - .setType("mock") - .setSettings(Settings.builder().put("location", repositoryLocation).put(repositorySettings).build()) - ); + Settings.Builder settings = Settings.builder().put("location", repositoryLocation).put(repositorySettings); + OpenSearchIntegTestCase.putRepository(clusterAdmin(), "test-repo", "mock", settings); // attempt to restore the snapshot with the given settings 
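// Illustrative aside, not part of the patch: the repository-registration pattern these
// snapshot tests are being migrated to. createRepository(...) asserts the put request is
// acknowledged, while the OpenSearchIntegTestCase.putRepository(...) overload with the
// boolean argument suppresses verification, matching the calls used elsewhere in this
// diff. Repository names here are placeholders.
Settings.Builder exampleRepoSettings = Settings.builder()
    .put("location", randomRepoPath())
    .put("compress", randomBoolean());
createRepository("example-repo", "fs", exampleRepoSettings);
OpenSearchIntegTestCase.putRepository(clusterAdmin(), "example-unverified-repo", "mock", false, exampleRepoSettings);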
RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") @@ -1015,27 +1007,17 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { } logger.info("--> trying to move repository to another location"); + Settings.Builder settings = Settings.builder().put("location", repositoryLocation.resolve("test")); try { - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) - .get(); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "fs", settings); fail("shouldn't be able to replace in-use repository"); } catch (Exception ex) { logger.info("--> in-use repository replacement failed"); } logger.info("--> trying to create a repository with different name"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo-2") - .setVerify(false) // do not do verification itself as snapshot threads could be fully blocked - .setType("fs") - .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) - ); + Settings.Builder settingsBuilder = Settings.builder().put("location", repositoryLocation.resolve("test")); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo-2", "fs", false, settingsBuilder); logger.info("--> unblocking blocked node"); unblockNode("test-repo", blockedNode); @@ -1941,20 +1923,12 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { logger.info("--> creating repository"); final Path repoPath = randomRepoPath(); final Client client = client(); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("mock") - .setVerify(false) - .setSettings( - Settings.builder() - .put("location", repoPath) - .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f) - // test that we can take a snapshot after a failed one, even if a partial index-N was written - .put("random", randomAlphaOfLength(10)) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repoPath) + .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f) + // test that we can take a snapshot after a failed one, even if a partial index-N was written + .put("random", randomAlphaOfLength(10)); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "mock", false, settings); assertAcked( prepareCreate("test-idx").setSettings( @@ -2004,14 +1978,8 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { logger.info("--> creating repository"); final Path repoPath = randomRepoPath(); final Client client = client(); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", repoPath)) - ); + Settings.Builder settings = Settings.builder().put("location", repoPath); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "fs", false, settings); logger.info("--> creating random number of indices"); final int numIndices = randomIntBetween(1, 10); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index fb69209f7adda..5a043e69e9735 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -33,11 +33,14 @@ package org.opensearch.snapshots; import org.opensearch.Version; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStage; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; @@ -49,6 +52,9 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; @@ -59,9 +65,13 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.snapshots.SnapshotsService.MAX_SHARDS_ALLOWED_IN_STATUS_API; +import static org.opensearch.test.OpenSearchIntegTestCase.resolvePath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -200,11 +210,9 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { final SnapshotInfo snapshotInfo = createFullSnapshot("test-repo", "test-snap"); logger.info("--> delete shard-level snap-${uuid}.dat file for one shard in this snapshot to simulate concurrent delete"); - final String indexRepoId = getRepositoryData("test-repo").resolveIndexId(snapshotInfo.indices().get(0)).getId(); + IndexId indexId = getRepositoryData("test-repo").resolveIndexId(snapshotInfo.indices().get(0)); IOUtils.rm( - repoPath.resolve("indices") - .resolve(indexRepoId) - .resolve("0") + repoPath.resolve(resolvePath(indexId, "0")) .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); @@ -564,6 +572,194 @@ public void testGetSnapshotsRequest() throws Exception { waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60)); } + public void testSnapshotStatusApiFailureForTooManyShardsAcrossSnapshots() throws Exception { + String repositoryName = "test-repo"; + String index1 = "test-idx-1"; + String index2 = "test-idx-2"; + String index3 = "test-idx-3"; + createRepository(repositoryName, "fs"); + + logger.info("Create indices"); + createIndex(index1, index2, index3); + ensureGreen(); + + logger.info("Indexing some data"); + for (int i = 0; i < 10; i++) { + index(index1, "_doc", Integer.toString(i), "foo", "bar" + i); + index(index2, "_doc", Integer.toString(i), "foo", "baz" + i); + index(index3, "_doc", Integer.toString(i), "foo", "baz" + i); + } + 
refresh(); + String snapshot1 = "test-snap-1"; + String snapshot2 = "test-snap-2"; + createSnapshot(repositoryName, snapshot1, List.of(index1, index2, index3)); + createSnapshot(repositoryName, snapshot2, List.of(index1, index2)); + + logger.info("Set MAX_SHARDS_ALLOWED_IN_STATUS_API to a low value"); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey(), 2)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // across a single snapshot + assertBusy(() -> { + TooManyShardsInSnapshotsStatusException exception = expectThrows( + TooManyShardsInSnapshotsStatusException.class, + () -> client().admin().cluster().prepareSnapshotStatus(repositoryName).setSnapshots(snapshot1).execute().actionGet() + ); + assertEquals(exception.status(), RestStatus.REQUEST_ENTITY_TOO_LARGE); + assertTrue( + exception.getMessage().endsWith(" is more than the maximum allowed value of shard count [2] for snapshot status request") + ); + }, 1, TimeUnit.MINUTES); + + // across multiple snapshots + assertBusy(() -> { + TooManyShardsInSnapshotsStatusException exception = expectThrows( + TooManyShardsInSnapshotsStatusException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot1, snapshot2) + .execute() + .actionGet() + ); + assertEquals(exception.status(), RestStatus.REQUEST_ENTITY_TOO_LARGE); + assertTrue( + exception.getMessage().endsWith(" is more than the maximum allowed value of shard count [2] for snapshot status request") + ); + }, 1, TimeUnit.MINUTES); + + logger.info("Reset MAX_SHARDS_ALLOWED_IN_STATUS_API to default value"); + updateSettingsRequest.persistentSettings(Settings.builder().putNull(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey())); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + } + + public void testSnapshotStatusForIndexFilter() throws Exception { + String repositoryName = "test-repo"; + String index1 = "test-idx-1"; + String index2 = "test-idx-2"; + String index3 = "test-idx-3"; + createRepository(repositoryName, "fs"); + + logger.info("Create indices"); + createIndex(index1, index2, index3); + ensureGreen(); + + logger.info("Indexing some data"); + for (int i = 0; i < 10; i++) { + index(index1, "_doc", Integer.toString(i), "foo", "bar" + i); + index(index2, "_doc", Integer.toString(i), "foo", "baz" + i); + index(index3, "_doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + String snapshot = "test-snap-1"; + createSnapshot(repositoryName, snapshot, List.of(index1, index2, index3)); + + assertBusy(() -> { + SnapshotStatus snapshotsStatus = client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot) + .setIndices(index1, index2) + .get() + .getSnapshots() + .get(0); + Map snapshotIndexStatusMap = snapshotsStatus.getIndices(); + // Although the snapshot contains 3 indices, the response of status api call only contains results for 2 + assertEquals(snapshotIndexStatusMap.size(), 2); + assertEquals(snapshotIndexStatusMap.keySet(), Set.of(index1, index2)); + }, 1, TimeUnit.MINUTES); + } + + public void testSnapshotStatusFailuresWithIndexFilter() throws Exception { + String repositoryName = "test-repo"; + String index1 = "test-idx-1"; + String index2 = "test-idx-2"; + String index3 = "test-idx-3"; + createRepository(repositoryName, "fs"); + + 
logger.info("Create indices"); + createIndex(index1, index2, index3); + ensureGreen(); + + logger.info("Indexing some data"); + for (int i = 0; i < 10; i++) { + index(index1, "_doc", Integer.toString(i), "foo", "bar" + i); + index(index2, "_doc", Integer.toString(i), "foo", "baz" + i); + index(index3, "_doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + String snapshot1 = "test-snap-1"; + String snapshot2 = "test-snap-2"; + createSnapshot(repositoryName, snapshot1, List.of(index1, index2, index3)); + createSnapshot(repositoryName, snapshot2, List.of(index1)); + + assertBusy(() -> { + // failure due to passing index filter for multiple snapshots + ActionRequestValidationException ex = expectThrows( + ActionRequestValidationException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot1, snapshot2) + .setIndices(index1, index2, index3) + .execute() + .actionGet() + ); + String cause = "index list filter is supported only for a single snapshot"; + assertTrue(ex.getMessage().contains(cause)); + }, 1, TimeUnit.MINUTES); + + assertBusy(() -> { + // failure due to index not found in snapshot + IndexNotFoundException ex = expectThrows( + IndexNotFoundException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot2) + .setIndices(index1, index2, index3) + .execute() + .actionGet() + ); + assertEquals(ex.status(), RestStatus.NOT_FOUND); + String cause = String.format( + Locale.ROOT, + "indices [%s] missing in snapshot [%s] of repository [%s]", + String.join(", ", List.of(index2, index3)), + snapshot2, + repositoryName + ); + assertEquals(cause, ex.getCause().getMessage()); + + }, 1, TimeUnit.MINUTES); + + assertBusy(() -> { + // failure due to too many shards requested + logger.info("Set MAX_SHARDS_ALLOWED_IN_STATUS_API to a low value"); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey(), 2)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + TooManyShardsInSnapshotsStatusException ex = expectThrows( + TooManyShardsInSnapshotsStatusException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot1) + .setIndices(index1, index2, index3) + .execute() + .actionGet() + ); + assertEquals(ex.status(), RestStatus.REQUEST_ENTITY_TOO_LARGE); + assertTrue(ex.getMessage().endsWith(" is more than the maximum allowed value of shard count [2] for snapshot status request")); + + logger.info("Reset MAX_SHARDS_ALLOWED_IN_STATUS_API to default value"); + updateSettingsRequest.persistentSettings(Settings.builder().putNull(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey())); + assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + }, 2, TimeUnit.MINUTES); + } + private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotStatus, String indexName) { return snapshotStatus.getIndices().get(indexName).getShards().get(0); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java index 28b84655a2cc7..bb5cc89d4e1d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -8,7 +8,6 @@ package org.opensearch.snapshots; -import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.RepositoryException; @@ -19,7 +18,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SystemRepositoryIT extends AbstractSnapshotIntegTestCase { @@ -43,13 +41,8 @@ public void testRestrictedSettingsCantBeUpdated() { disableRepoConsistencyCheck("System repository is being used for the test"); internalCluster().startNode(); - final Client client = client(); final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); - - RepositoryException e = expectThrows( - RepositoryException.class, - () -> client.admin().cluster().preparePutRepository(systemRepoName).setType("mock").setSettings(repoSettings).get() - ); + RepositoryException e = expectThrows(RepositoryException.class, () -> createRepository(systemRepoName, "mock", repoSettings)); assertEquals( e.getMessage(), "[system-repo-name] trying to modify an unmodifiable attribute type of system " @@ -59,18 +52,8 @@ public void testRestrictedSettingsCantBeUpdated() { public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { disableRepoConsistencyCheck("System repository is being used for the test"); - internalCluster().startNode(); - final Client client = client(); final Settings.Builder repoSettings = Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); - - assertAcked( - client.admin() - .cluster() - .preparePutRepository(systemRepoName) - .setType(ReloadableFsRepository.TYPE) - .setSettings(repoSettings) - .get() - ); + createRepository(systemRepoName, ReloadableFsRepository.TYPE, repoSettings); } } diff --git a/server/src/main/java/org/opensearch/OpenSearchServerException.java b/server/src/main/java/org/opensearch/OpenSearchServerException.java index 2f36c284599c0..5fb064f2c9182 100644 --- a/server/src/main/java/org/opensearch/OpenSearchServerException.java +++ b/server/src/main/java/org/opensearch/OpenSearchServerException.java @@ -1188,6 +1188,14 @@ public static void registerExceptions() { V_2_17_0 ) ); + registerExceptionHandle( + new OpenSearchExceptionHandle( + org.opensearch.snapshots.TooManyShardsInSnapshotsStatusException.class, + org.opensearch.snapshots.TooManyShardsInSnapshotsStatusException::new, + 175, + V_2_17_0 + ) + ); registerExceptionHandle( new OpenSearchExceptionHandle( org.opensearch.cluster.block.IndexCreateBlockException.class, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index 061e73f1094b5..3d7fb5b6beb56 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; 
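// Illustrative aside, not part of the patch: client-side use of the index filter that the
// SnapshotsStatusRequest/Builder changes below introduce. Repository, snapshot, and index
// names are placeholders; the call shape mirrors the integration tests earlier in this
// diff. The filter is accepted for a single snapshot only, and indices missing from that
// snapshot surface as IndexNotFoundException unless ignoreUnavailable is set.
SnapshotStatus filteredStatus = client().admin()
    .cluster()
    .prepareSnapshotStatus("my-repo")
    .setSnapshots("my-snapshot")
    .setIndices("index-1", "index-2")
    .get()
    .getSnapshots()
    .get(0);
Map<String, SnapshotIndexStatus> perIndexStatus = filteredStatus.getIndices(); // only the filtered indices are present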
import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.annotation.PublicApi; @@ -54,6 +55,7 @@ public class SnapshotsStatusRequest extends ClusterManagerNodeRequesttrue to ignore unavailable snapshots, instead of throwing an exception. - * Defaults to false, which means unavailable snapshots cause an exception to be thrown. + * Returns the names of the indices. + * + * @return the names of indices + */ + public String[] indices() { + return this.indices; + } + + /** + * Sets the list of indices to be returned + * + * @return this request + */ + public SnapshotsStatusRequest indices(String[] indices) { + this.indices = indices; + return this; + } + + /** + * Set to true to ignore unavailable snapshots and indices, instead of throwing an exception. + * Defaults to false, which means unavailable snapshots and indices cause an exception to be thrown. * - * @param ignoreUnavailable whether to ignore unavailable snapshots + * @param ignoreUnavailable whether to ignore unavailable snapshots and indices * @return this request */ public SnapshotsStatusRequest ignoreUnavailable(boolean ignoreUnavailable) { @@ -158,9 +201,9 @@ public SnapshotsStatusRequest ignoreUnavailable(boolean ignoreUnavailable) { } /** - * Returns whether the request permits unavailable snapshots to be ignored. + * Returns whether the request permits unavailable snapshots and indices to be ignored. * - * @return true if the request will ignore unavailable snapshots, false if it will throw an exception on unavailable snapshots + * @return true if the request will ignore unavailable snapshots and indices, false if it will throw an exception on unavailable snapshots and indices */ public boolean ignoreUnavailable() { return ignoreUnavailable; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 9377eca60e353..6f0ac278d01c4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -96,10 +96,32 @@ public SnapshotsStatusRequestBuilder addSnapshots(String... snapshots) { } /** - * Set to true to ignore unavailable snapshots, instead of throwing an exception. - * Defaults to false, which means unavailable snapshots cause an exception to be thrown. + * Sets list of indices to return * - * @param ignoreUnavailable whether to ignore unavailable snapshots. + * @param indices list of indices + * @return this builder + */ + public SnapshotsStatusRequestBuilder setIndices(String... indices) { + request.indices(indices); + return this; + } + + /** + * Adds additional indices to the list of indices to return + * + * @param indices additional indices + * @return this builder + */ + public SnapshotsStatusRequestBuilder addIndices(String... indices) { + request.indices(ArrayUtils.concat(request.indices(), indices)); + return this; + } + + /** + * Set to true to ignore unavailable snapshots and indices, instead of throwing an exception. + * Defaults to false, which means unavailable snapshots and indices cause an exception to be thrown. + * + * @param ignoreUnavailable whether to ignore unavailable snapshots and indices. 
* @return this builder */ public SnapshotsStatusRequestBuilder setIgnoreUnavailable(boolean ignoreUnavailable) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 4fc2acb2caa51..f2a9b88f790c9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -52,6 +52,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.util.CollectionUtils; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -65,6 +66,7 @@ import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.snapshots.SnapshotState; import org.opensearch.snapshots.SnapshotsService; +import org.opensearch.snapshots.TooManyShardsInSnapshotsStatusException; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -81,6 +83,7 @@ import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; +import static org.opensearch.snapshots.SnapshotsService.MAX_SHARDS_ALLOWED_IN_STATUS_API; /** * Transport action for accessing snapshot status @@ -95,6 +98,8 @@ public class TransportSnapshotsStatusAction extends TransportClusterManagerNodeA private final TransportNodesSnapshotsStatus transportNodesSnapshotsStatus; + private long maximumAllowedShardCount; + @Inject public TransportSnapshotsStatusAction( TransportService transportService, @@ -314,38 +319,34 @@ private void loadRepositoryData( String repositoryName, ActionListener listener ) { - final Set requestedSnapshotNames = Sets.newHashSet(request.snapshots()); + maximumAllowedShardCount = clusterService.getClusterSettings().get(MAX_SHARDS_ALLOWED_IN_STATUS_API); final StepListener repositoryDataListener = new StepListener<>(); repositoriesService.getRepositoryData(repositoryName, repositoryDataListener); repositoryDataListener.whenComplete(repositoryData -> { - final Map matchedSnapshotIds = repositoryData.getSnapshotIds() - .stream() - .filter(s -> requestedSnapshotNames.contains(s.getName())) - .collect(Collectors.toMap(SnapshotId::getName, Function.identity())); - for (final String snapshotName : request.snapshots()) { - if (currentSnapshotNames.contains(snapshotName)) { - // we've already found this snapshot in the current snapshot entries, so skip over - continue; - } - SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName); - if (snapshotId == null) { - // neither in the current snapshot entries nor found in the repository - if (request.ignoreUnavailable()) { - // ignoring unavailable snapshots, so skip over - logger.debug( - "snapshot status request ignoring snapshot [{}], not found in repository [{}]", - snapshotName, - repositoryName - ); - continue; - } else { - throw new SnapshotMissingException(repositoryName, snapshotName); - } - } - SnapshotInfo snapshotInfo = snapshot(snapshotsInProgress, repositoryName, snapshotId); + Map snapshotsInfoMap = snapshotsInfo( + request, + repositoryName, + repositoryData, + snapshotsInProgress, + currentSnapshotNames + ); + for (Map.Entry entry : 
snapshotsInfoMap.entrySet()) { + SnapshotId snapshotId = entry.getKey(); + SnapshotInfo snapshotInfo = entry.getValue(); List shardStatusBuilder = new ArrayList<>(); if (snapshotInfo.state().completed()) { - Map shardStatuses = snapshotShards(repositoryName, repositoryData, snapshotInfo); + Map shardStatuses = snapshotShards( + request, + repositoryName, + repositoryData, + snapshotInfo + ); + boolean isShallowV2Snapshot = snapshotInfo.getPinnedTimestamp() > 0; + long initialSnapshotTotalSize = 0; + if (isShallowV2Snapshot && request.indices().length == 0) { + // TODO: add primary store size in bytes at the snapshot level + } + for (Map.Entry shardStatus : shardStatuses.entrySet()) { IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); @@ -406,6 +407,68 @@ private SnapshotInfo snapshot(SnapshotsInProgress snapshotsInProgress, String re return repositoriesService.repository(repositoryName).getSnapshotInfo(snapshotId); } + /** + * Returns snapshot info for finished snapshots + * @param request snapshot status request + * @param repositoryName repository name + * @param repositoryData repository data + * @param snapshotsInProgress currently running snapshots + * @param currentSnapshotNames list of names of currently running snapshots + * @return map of snapshot id to snapshot info + */ + private Map snapshotsInfo( + SnapshotsStatusRequest request, + String repositoryName, + RepositoryData repositoryData, + SnapshotsInProgress snapshotsInProgress, + Set currentSnapshotNames + ) { + final Set requestedSnapshotNames = Sets.newHashSet(request.snapshots()); + final Map snapshotsInfoMap = new HashMap<>(); + final Map matchedSnapshotIds = repositoryData.getSnapshotIds() + .stream() + .filter(s -> requestedSnapshotNames.contains(s.getName())) + .collect(Collectors.toMap(SnapshotId::getName, Function.identity())); + int totalShardsAcrossSnapshots = 0; + for (final String snapshotName : request.snapshots()) { + if (currentSnapshotNames.contains(snapshotName)) { + // we've already found this snapshot in the current snapshot entries, so skip over + continue; + } + SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName); + if (snapshotId == null) { + // neither in the current snapshot entries nor found in the repository + if (request.ignoreUnavailable()) { + // ignoring unavailable snapshots, so skip over + logger.debug( + "snapshot status request ignoring snapshot [{}], not found in repository [{}]", + snapshotName, + repositoryName + ); + continue; + } else { + throw new SnapshotMissingException(repositoryName, snapshotName); + } + } + SnapshotInfo snapshotInfo = snapshot(snapshotsInProgress, repositoryName, snapshotId); + boolean isV2Snapshot = snapshotInfo.getPinnedTimestamp() > 0; + if (isV2Snapshot == false && request.indices().length == 0) { + totalShardsAcrossSnapshots += snapshotInfo.totalShards(); + } + snapshotsInfoMap.put(snapshotId, snapshotInfo); + } + if (totalShardsAcrossSnapshots > maximumAllowedShardCount && request.indices().length == 0) { + String message = "Total shard count [" + + totalShardsAcrossSnapshots + + "] is more than the maximum allowed value of shard count [" + + maximumAllowedShardCount + + "] for snapshot status request"; + + throw new TooManyShardsInSnapshotsStatusException(repositoryName, message, request.snapshots()); + } + return unmodifiableMap(snapshotsInfoMap); + } + /** * Returns status of shards currently finished snapshots *

@@ -413,21 +476,65 @@ private SnapshotInfo snapshot(SnapshotsInProgress snapshotsInProgress, String re * {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it * returns similar information but for already finished snapshots. *

- * + * @param request snapshot status request * @param repositoryName repository name * @param snapshotInfo snapshot info * @return map of shard id to snapshot status */ private Map snapshotShards( + final SnapshotsStatusRequest request, final String repositoryName, final RepositoryData repositoryData, final SnapshotInfo snapshotInfo ) throws IOException { + final Set requestedIndexNames = Sets.newHashSet(request.indices()); + String snapshotName = snapshotInfo.snapshotId().getName(); + Set indices = Sets.newHashSet(snapshotInfo.indices()); + if (requestedIndexNames.isEmpty() == false) { + Set finalIndices = indices; + List indicesNotFound = requestedIndexNames.stream() + .filter(i -> finalIndices.contains(i) == false) + .collect(Collectors.toList()); + if (indicesNotFound.isEmpty() == false) { + handleIndexNotFound(String.join(", ", indicesNotFound), request, snapshotName, repositoryName); + } + indices = requestedIndexNames; + } + final Repository repository = repositoriesService.repository(repositoryName); - final Map shardStatus = new HashMap<>(); - for (String index : snapshotInfo.indices()) { + boolean isV2Snapshot = snapshotInfo.getPinnedTimestamp() > 0; + int totalShardsAcrossIndices = 0; + final Map indexMetadataMap = new HashMap<>(); + + for (String index : indices) { IndexId indexId = repositoryData.resolveIndexId(index); IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repositoryData, snapshotInfo.snapshotId(), indexId); + if (indexMetadata != null) { + if (requestedIndexNames.isEmpty() == false && isV2Snapshot == false) { + totalShardsAcrossIndices += indexMetadata.getNumberOfShards(); + } + indexMetadataMap.put(indexId, indexMetadata); + } else if (requestedIndexNames.isEmpty() == false) { + handleIndexNotFound(index, request, snapshotName, repositoryName); + } + } + + if (totalShardsAcrossIndices > maximumAllowedShardCount && requestedIndexNames.isEmpty() == false && isV2Snapshot == false) { + String message = "Total shard count [" + + totalShardsAcrossIndices + + "] across the requested indices [" + + requestedIndexNames.stream().collect(Collectors.joining(", ")) + + "] is more than the maximum allowed value of shard count [" + + maximumAllowedShardCount + + "] for snapshot status request"; + + throw new TooManyShardsInSnapshotsStatusException(repositoryName, message, snapshotName); + } + + final Map shardStatus = new HashMap<>(); + for (Map.Entry entry : indexMetadataMap.entrySet()) { + IndexId indexId = entry.getKey(); + IndexMetadata indexMetadata = entry.getValue(); if (indexMetadata != null) { int numberOfShards = indexMetadata.getNumberOfShards(); for (int i = 0; i < numberOfShards; i++) { @@ -447,7 +554,12 @@ private Map snapshotShards( // could not be taken due to partial being set to false. 
shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped"); } else { - shardSnapshotStatus = repository.getShardSnapshotStatus(snapshotInfo.snapshotId(), indexId, shardId); + // TODO: to be refactored later + if (isV2Snapshot) { + shardSnapshotStatus = IndexShardSnapshotStatus.newDone(0, 0, 0, 0, 0, 0, null); + } else { + shardSnapshotStatus = repository.getShardSnapshotStatus(snapshotInfo.snapshotId(), indexId, shardId); + } } shardStatus.put(shardId, shardSnapshotStatus); } @@ -457,6 +569,21 @@ private Map snapshotShards( return unmodifiableMap(shardStatus); } + private void handleIndexNotFound(String index, SnapshotsStatusRequest request, String snapshotName, String repositoryName) { + if (request.ignoreUnavailable()) { + // ignoring unavailable index + logger.debug( + "snapshot status request ignoring indices [{}], not found in snapshot[{}] in repository [{}]", + index, + snapshotName, + repositoryName + ); + } else { + String cause = "indices [" + index + "] missing in snapshot [" + snapshotName + "] of repository [" + repositoryName + "]"; + throw new IndexNotFoundException(index, new IllegalArgumentException(cause)); + } + } + private static SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 9764d840ecc64..0931ca8216556 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -82,6 +82,7 @@ import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; @@ -89,7 +90,9 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.compositeindex.CompositeIndexSettings; import org.opensearch.index.compositeindex.CompositeIndexValidator; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.mapper.DocumentMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; @@ -154,6 +157,7 @@ import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findContextTemplateName; import static org.opensearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteDataAttributePresent; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; @@ -1079,6 +1083,7 @@ static Settings aggregateIndexSettings( validateTranslogRetentionSettings(indexSettings); validateStoreTypeSettings(indexSettings); 
validateRefreshIntervalSettings(request.settings(), clusterSettings); + validateTranslogFlushIntervalSettingsForCompositeIndex(request.settings(), clusterSettings); validateTranslogDurabilitySettings(request.settings(), clusterSettings, settings); return indexSettings; } @@ -1766,6 +1771,71 @@ public static void validateTranslogRetentionSettings(Settings indexSettings) { } } + /** + * Validates {@code index.translog.flush_threshold_size} is equal or below the {@code indices.composite_index.translog.max_flush_threshold_size} + * for composite indices based on {{@code index.composite_index}} + * + * @param requestSettings settings passed in during index create/update request + * @param clusterSettings cluster setting + */ + public static void validateTranslogFlushIntervalSettingsForCompositeIndex(Settings requestSettings, ClusterSettings clusterSettings) { + if (StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.exists(requestSettings) == false + || requestSettings.get(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey()) == null) { + return; + } + ByteSizeValue translogFlushSize = INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.get(requestSettings); + ByteSizeValue compositeIndexMaxFlushSize = clusterSettings.get( + CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING + ); + if (translogFlushSize.compareTo(compositeIndexMaxFlushSize) > 0) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "You can configure '%s' with upto '%s' for composite index", + INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), + compositeIndexMaxFlushSize + ) + ); + } + } + + /** + * Validates {@code index.translog.flush_threshold_size} is equal or below the {@code indices.composite_index.translog.max_flush_threshold_size} + * for composite indices based on {{@code index.composite_index}} + * This is used during update index settings flow + * + * @param requestSettings settings passed in during index update request + * @param clusterSettings cluster setting + * @param indexSettings index settings + */ + public static Optional validateTranslogFlushIntervalSettingsForCompositeIndex( + Settings requestSettings, + ClusterSettings clusterSettings, + Settings indexSettings + ) { + if (INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.exists(requestSettings) == false + || requestSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey()) == null + || StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.exists(indexSettings) == false + || indexSettings.get(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey()) == null) { + return Optional.empty(); + } + ByteSizeValue translogFlushSize = INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.get(requestSettings); + ByteSizeValue compositeIndexMaxFlushSize = clusterSettings.get( + CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING + ); + if (translogFlushSize.compareTo(compositeIndexMaxFlushSize) > 0) { + return Optional.of( + String.format( + Locale.ROOT, + "You can configure '%s' with upto '%s' for composite index", + INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), + compositeIndexMaxFlushSize + ) + ); + } + return Optional.empty(); + } + /** * Validates {@code index.refresh_interval} is equal or below the {@code cluster.minimum.index.refresh_interval}. 
* diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index a7b9eba6dbc05..e4afc798cc64d 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -102,6 +102,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex; import static org.opensearch.common.util.concurrent.ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; @@ -1639,6 +1640,7 @@ private void validate(String name, @Nullable Settings settings, List ind // validate index refresh interval and translog durability settings validateRefreshIntervalSettings(settings, clusterService.getClusterSettings()); + validateTranslogFlushIntervalSettingsForCompositeIndex(settings, clusterService.getClusterSettings()); validateTranslogDurabilitySettingsInTemplate(settings, clusterService.getClusterSettings()); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 5d4cc6593dba5..7957a808970eb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -82,6 +82,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; +import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex; import static org.opensearch.cluster.metadata.MetadataIndexTemplateService.findComponentTemplate; import static org.opensearch.common.settings.AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX; import static org.opensearch.index.IndexSettings.same; @@ -221,6 +222,12 @@ public ClusterState execute(ClusterState currentState) { index.getName() ).ifPresent(validationErrors::add); } + validateTranslogFlushIntervalSettingsForCompositeIndex( + normalizedSettings, + clusterService.getClusterSettings(), + metadata.getSettings() + ).ifPresent(validationErrors::add); + } if (validationErrors.size() > 0) { diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index 4b23c0e3808a7..e9b83f7a82e18 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -792,6 +792,36 @@ public T get(Setting setting) { return setting.get(this.lastSettingsApplied, settings); } + /** + * Returns the value for the given setting if it is explicitly set, + * otherwise will return null instead of default 
value + **/ + public T getOrNull(Setting setting) { + if (setting.getProperties().contains(scope) == false) { + throw new SettingsException( + "settings scope doesn't match the setting scope [" + this.scope + "] not in [" + setting.getProperties() + "]" + ); + } + if (get(setting.getKey()) == null) { + throw new SettingsException("setting " + setting.getKey() + " has not been registered"); + } + if (setting.exists(lastSettingsApplied)) { + return setting.get(lastSettingsApplied); + } + if (setting.exists(settings)) { + return setting.get(settings); + } + if (setting.fallbackSetting != null) { + if (setting.fallbackSetting.exists(lastSettingsApplied)) { + return setting.fallbackSetting.get(lastSettingsApplied); + } + if (setting.fallbackSetting.exists(settings)) { + return setting.fallbackSetting.get(settings); + } + } + return null; + } + /** * Updates a target settings builder with new, updated or deleted settings from a given settings builder. *

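Aside (illustrative, not part of the patch): the AbstractScopedSettings#getOrNull helper added above differs from get() in that it never falls back to a setting's default value. A minimal sketch of that contrast, using the composite-index flush-threshold setting that this patch registers in ClusterSettings; the wrapper class and main() harness are hypothetical.

import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.common.unit.ByteSizeValue;
import org.opensearch.index.compositeindex.CompositeIndexSettings;

public class GetOrNullSketch {
    public static void main(String[] args) {
        // No explicit value is configured for the setting on this node.
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);

        // get() resolves the registered default (512mb for this setting)...
        ByteSizeValue resolved = clusterSettings.get(
            CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING
        );

        // ...while getOrNull() returns null because the value was never explicitly set
        // (it still consults fallback settings before giving up).
        ByteSizeValue explicitOnly = clusterSettings.getOrNull(
            CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING
        );

        assert resolved != null && explicitOnly == null;
    }
}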
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 09ecefcf56efb..3e4df323f0f27 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -637,6 +637,7 @@ public void apply(Settings value, Settings current, Settings previous) { HandshakingTransportAddressConnector.PROBE_CONNECT_TIMEOUT_SETTING, HandshakingTransportAddressConnector.PROBE_HANDSHAKE_TIMEOUT_SETTING, SnapshotsService.MAX_CONCURRENT_SNAPSHOT_OPERATIONS_SETTING, + SnapshotsService.MAX_SHARDS_ALLOWED_IN_STATUS_API, FsHealthService.ENABLED_SETTING, FsHealthService.REFRESH_INTERVAL_SETTING, FsHealthService.SLOW_PATH_LOGGING_THRESHOLD_SETTING, @@ -753,8 +754,9 @@ public void apply(Settings value, Settings current, Settings previous) { IoBasedAdmissionControllerSettings.INDEXING_IO_USAGE_LIMIT, // Concurrent segment search settings - SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, // deprecated SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, + SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE, RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, @@ -768,6 +770,10 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_LOOKBACK_INTERVAL, RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, + // Composite index settings + CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING, + CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, + SystemTemplatesService.SETTING_APPLICATION_BASED_CONFIGURATION_TEMPLATES_ENABLED, // WorkloadManagement settings diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 40ec1ec6c7794..bddbe963e8013 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -224,6 +224,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID, IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME, IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID, + IndexSettings.SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE, // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, @@ -238,7 +239,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING, // Settings for concurrent segment search - IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING, // deprecated + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MODE, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT, IndexSettings.ALLOW_DERIVED_FIELDS, @@ -249,6 +251,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { StarTreeIndexSettings.DEFAULT_METRICS_LIST, StarTreeIndexSettings.DEFAULT_DATE_INTERVALS, StarTreeIndexSettings.STAR_TREE_MAX_DATE_INTERVALS_SETTING, + StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_SETTING, + StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING, 
IndexSettings.INDEX_CONTEXT_CREATED_VERSION, IndexSettings.INDEX_CONTEXT_CURRENT_VERSION, diff --git a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java index d3d3304cb909a..cfe2bbb85bda4 100644 --- a/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/BatchRunnableExecutor.java @@ -61,6 +61,13 @@ public void run() { "Time taken to execute timed runnables in this cycle:[{}ms]", TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) ); + onComplete(); } + /** + * Callback method that is invoked after all {@link TimeoutAwareRunnable} instances in the batch have been processed. + * By default, this method does nothing, but it can be overridden by subclasses or modified in the implementation if + * there is a need to perform additional actions once the batch execution is completed. + */ + public void onComplete() {} } diff --git a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java index 0d6af943d39e0..2b6c5e3f5ae53 100644 --- a/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/BaseGatewayShardAllocator.java @@ -47,7 +47,6 @@ import org.opensearch.core.index.shard.ShardId; import java.util.ArrayList; -import java.util.HashSet; import java.util.List; import java.util.Set; @@ -82,23 +81,29 @@ public void allocateUnassigned( executeDecision(shardRouting, allocateUnassignedDecision, allocation, unassignedAllocationHandler); } - protected void allocateUnassignedBatchOnTimeout(List shardRoutings, RoutingAllocation allocation, boolean primary) { - Set shardIdsFromBatch = new HashSet<>(); - for (ShardRouting shardRouting : shardRoutings) { - ShardId shardId = shardRouting.shardId(); - shardIdsFromBatch.add(shardId); + protected void allocateUnassignedBatchOnTimeout(Set shardIds, RoutingAllocation allocation, boolean primary) { + if (shardIds.isEmpty()) { + return; } RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); while (iterator.hasNext()) { ShardRouting unassignedShard = iterator.next(); AllocateUnassignedDecision allocationDecision; - if (unassignedShard.primary() == primary && shardIdsFromBatch.contains(unassignedShard.shardId())) { + if (unassignedShard.primary() == primary && shardIds.contains(unassignedShard.shardId())) { + if (isResponsibleFor(unassignedShard) == false) { + continue; + } allocationDecision = AllocateUnassignedDecision.throttle(null); executeDecision(unassignedShard, allocationDecision, allocation, iterator); } } } + /** + * Is the allocator responsible for allocating the given {@link ShardRouting}? 
+ */ + protected abstract boolean isResponsibleFor(ShardRouting shardRouting); + protected void executeDecision( ShardRouting shardRouting, AllocateUnassignedDecision allocateUnassignedDecision, diff --git a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java index f41545cbdf9bf..dea7ca9a08edd 100644 --- a/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/PrimaryShardAllocator.java @@ -82,7 +82,7 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() // must be primary && shard.unassigned() // must be unassigned // only handle either an existing store or a snapshot recovery diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java index aaf0d696e1444..c30ee8479ac97 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardAllocator.java @@ -191,7 +191,7 @@ public void processExistingRecoveries(RoutingAllocation allocation) { /** * Is the allocator responsible for allocating the given {@link ShardRouting}? */ - protected static boolean isResponsibleFor(final ShardRouting shard) { + protected boolean isResponsibleFor(final ShardRouting shard) { return shard.primary() == false // must be a replica && shard.unassigned() // must be unassigned // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
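Aside (illustrative, not part of the patch): with isResponsibleFor now an instance method, the timeout path in BaseGatewayShardAllocator#allocateUnassignedBatchOnTimeout can ask the concrete allocator whether it owns a shard before throttling it. Below is a minimal subclass sketch under that contract; the class name and predicate are hypothetical, and the abstract makeAllocationDecision signature is assumed to match the existing BaseGatewayShardAllocator surface.

import org.apache.logging.log4j.Logger;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.opensearch.cluster.routing.allocation.RoutingAllocation;
import org.opensearch.gateway.BaseGatewayShardAllocator;

public class PrimaryOnlyAllocatorSketch extends BaseGatewayShardAllocator {

    @Override
    protected boolean isResponsibleFor(ShardRouting shard) {
        // Claim only unassigned primaries; on timeout, shards for which this returns
        // false are now skipped instead of being marked throttled.
        return shard.primary() && shard.unassigned();
    }

    @Override
    public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard, RoutingAllocation allocation, Logger logger) {
        // Out of scope for this sketch: a real allocator would inspect node stores here.
        return AllocateUnassignedDecision.NOT_TAKEN;
    }
}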
diff --git a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java index 0818b187271cb..020a543ac5fc5 100644 --- a/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ReplicaShardBatchAllocator.java @@ -173,7 +173,7 @@ private AllocateUnassignedDecision getUnassignedShardAllocationDecision( RoutingAllocation allocation, Supplier> nodeStoreFileMetaDataMapSupplier ) { - if (!isResponsibleFor(shardRouting)) { + if (isResponsibleFor(shardRouting) == false) { return AllocateUnassignedDecision.NOT_TAKEN; } Tuple> result = canBeAllocatedToAtLeastOneNode(shardRouting, allocation); diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 6c6b1126a78d6..d18304ea73ed0 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -277,17 +277,14 @@ protected BatchRunnableExecutor innerAllocateUnassignedBatch( } List runnables = new ArrayList<>(); if (primary) { + Set timedOutPrimaryShardIds = new HashSet<>(); batchIdToStartedShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) .forEach(shardsBatch -> runnables.add(new TimeoutAwareRunnable() { @Override public void onTimeout() { - primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout( - shardsBatch.getBatchedShardRoutings(), - allocation, - true - ); + timedOutPrimaryShardIds.addAll(shardsBatch.getBatchedShards()); } @Override @@ -295,15 +292,22 @@ public void run() { primaryBatchShardAllocator.allocateUnassignedBatch(shardsBatch.getBatchedShardRoutings(), allocation); } })); - return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout); + return new BatchRunnableExecutor(runnables, () -> primaryShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering oncomplete after timeout for [{}] primary shards", timedOutPrimaryShardIds.size()); + primaryBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutPrimaryShardIds, allocation, true); + } + }; } else { + Set timedOutReplicaShardIds = new HashSet<>(); batchIdToStoreShardBatch.values() .stream() .filter(batch -> batchesToAssign.contains(batch.batchId)) .forEach(batch -> runnables.add(new TimeoutAwareRunnable() { @Override public void onTimeout() { - replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(batch.getBatchedShardRoutings(), allocation, false); + timedOutReplicaShardIds.addAll(batch.getBatchedShards()); } @Override @@ -311,7 +315,13 @@ public void run() { replicaBatchShardAllocator.allocateUnassignedBatch(batch.getBatchedShardRoutings(), allocation); } })); - return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout); + return new BatchRunnableExecutor(runnables, () -> replicaShardsBatchGatewayAllocatorTimeout) { + @Override + public void onComplete() { + logger.trace("Triggering oncomplete after timeout for [{}] replica shards", timedOutReplicaShardIds.size()); + replicaBatchShardAllocator.allocateUnassignedBatchOnTimeout(timedOutReplicaShardIds, allocation, false); + } + }; } } @@ -846,11 +856,11 @@ public int getNumberOfStoreShardBatches() { return batchIdToStoreShardBatch.size(); } - private void 
setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatewayAllocatorTimeout) { + protected void setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatewayAllocatorTimeout) { this.primaryShardsBatchGatewayAllocatorTimeout = primaryShardsBatchGatewayAllocatorTimeout; } - private void setReplicaBatchAllocatorTimeout(TimeValue replicaShardsBatchGatewayAllocatorTimeout) { + protected void setReplicaBatchAllocatorTimeout(TimeValue replicaShardsBatchGatewayAllocatorTimeout) { this.replicaShardsBatchGatewayAllocatorTimeout = replicaShardsBatchGatewayAllocatorTimeout; } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 77e13c9c02ba3..8d8bf88bb82e4 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -49,6 +49,8 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.Translog; @@ -77,6 +79,9 @@ import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE; /** @@ -678,6 +683,14 @@ public static IndexMergePolicy fromString(String text) { Property.InternalIndex ); + public static final Setting SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE = new Setting<>( + "index.searchable_snapshot.shard_path_type", + PathType.FIXED.toString(), + PathType::parseString, + Property.IndexScope, + Property.InternalIndex + ); + public static final Setting DEFAULT_SEARCH_PIPELINE = new Setting<>( "index.search.default_pipeline", SearchPipelineService.NOOP_PIPELINE_ID, @@ -690,7 +703,26 @@ public static IndexMergePolicy fromString(String text) { "index.search.concurrent_segment_search.enabled", false, Property.IndexScope, - Property.Dynamic + Property.Dynamic, + Property.Deprecated + ); + + public static final Setting INDEX_CONCURRENT_SEGMENT_SEARCH_MODE = Setting.simpleString( + "index.search.concurrent_segment_search.mode", + CONCURRENT_SEGMENT_SEARCH_MODE_NONE, + value -> { + switch (value) { + case CONCURRENT_SEGMENT_SEARCH_MODE_ALL: + case CONCURRENT_SEGMENT_SEARCH_MODE_NONE: + case CONCURRENT_SEGMENT_SEARCH_MODE_AUTO: + // valid setting + break; + default: + throw new IllegalArgumentException("Setting value must be one of [all, none, auto]"); + } + }, + Property.Dynamic, + Property.IndexScope ); public static final Setting INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT = Setting.intSetting( @@ -880,6 +912,8 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private volatile 
double docIdFuzzySetFalsePositiveProbability; + private final boolean isCompositeIndex; + /** * Returns the default search fields for this index. */ @@ -1042,7 +1076,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); - + isCompositeIndex = scopedSettings.get(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING); scopedSettings.addSettingsUpdateConsumer( TieredMergePolicyProvider.INDEX_COMPOUND_FORMAT_SETTING, tieredMergePolicyProvider::setNoCFSRatio @@ -1287,6 +1321,10 @@ public int getNumberOfReplicas() { return settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, null); } + public boolean isCompositeIndex() { + return isCompositeIndex; + } + /** * Returns true if segment replication is enabled on the index. * diff --git a/server/src/main/java/org/opensearch/index/compositeindex/CompositeIndexSettings.java b/server/src/main/java/org/opensearch/index/compositeindex/CompositeIndexSettings.java index 014dd22426a10..a29e642d30f05 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/CompositeIndexSettings.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/CompositeIndexSettings.java @@ -13,6 +13,8 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; /** * Cluster level settings for composite indices @@ -37,12 +39,23 @@ public class CompositeIndexSettings { Setting.Property.Dynamic ); + /** + * This sets the max flush threshold size for composite index + */ + public static final Setting COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting( + "indices.composite_index.translog.max_flush_threshold_size", + new ByteSizeValue(512, ByteSizeUnit.MB), + new ByteSizeValue(128, ByteSizeUnit.MB), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + private volatile boolean starTreeIndexCreationEnabled; public CompositeIndexSettings(Settings settings, ClusterSettings clusterSettings) { this.starTreeIndexCreationEnabled = STAR_TREE_INDEX_ENABLED_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(STAR_TREE_INDEX_ENABLED_SETTING, this::starTreeIndexCreationEnabled); - } private void starTreeIndexCreationEnabled(boolean value) { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java index ce389a99b3626..e665831b83d93 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeIndexSettings.java @@ -26,6 +26,7 @@ public class StarTreeIndexSettings { public static int STAR_TREE_MAX_DIMENSIONS_DEFAULT = 10; + public static int STAR_TREE_MAX_BASE_METRICS_DEFAULT = 100; /** * This setting determines the max number of star tree fields that can be part of composite index mapping. For each * star tree field, we will generate associated star tree index. 
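Aside (illustrative, not part of the patch): the cap introduced above, indices.composite_index.translog.max_flush_threshold_size (512mb by default), is enforced by the new validateTranslogFlushIntervalSettingsForCompositeIndex check in MetadataCreateIndexService. A minimal sketch of how an offending create-index request is rejected; the setting keys and method come from this patch, the harness class is hypothetical.

import org.opensearch.cluster.metadata.MetadataCreateIndexService;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;

public class CompositeIndexFlushThresholdSketch {
    public static void main(String[] args) {
        ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);

        // Request settings for a composite index asking for a flush threshold above the 512mb default cap.
        Settings requestSettings = Settings.builder()
            .put("index.composite_index", true)
            .put("index.translog.flush_threshold_size", "1gb")
            .build();

        // Throws IllegalArgumentException, telling the user the maximum value allowed for
        // index.translog.flush_threshold_size on a composite index.
        MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings);
    }
}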
@@ -52,6 +53,19 @@ public class StarTreeIndexSettings { Setting.Property.Final ); + /** + * This setting determines the max number of dimensions that can be part of star tree index field. Number of + * dimensions and associated cardinality has direct effect of star tree index size and query performance. + */ + public static final Setting STAR_TREE_MAX_BASE_METRICS_SETTING = Setting.intSetting( + "index.composite_index.star_tree.field.max_base_metrics", + STAR_TREE_MAX_BASE_METRICS_DEFAULT, + 4, + 100, + Setting.Property.IndexScope, + Setting.Property.Final + ); + /** * This setting determines the max number of date intervals that can be part of star tree date field. */ @@ -108,4 +122,11 @@ public static Rounding.DateTimeUnit getTimeUnit(String expression) { } return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expression); } + + public static final Setting IS_COMPOSITE_INDEX_SETTING = Setting.boolSetting( + "index.composite_index", + false, + Setting.Property.IndexScope, + Setting.Property.Final + ); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index b03026d560dbf..50ff816695156 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -661,6 +661,17 @@ private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapp throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; + // block array values for composite index fields + if (context.indexSettings().isCompositeIndex() && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) { + throw new MapperParsingException( + String.format( + Locale.ROOT, + "object mapping for [%s] with array for [%s] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + mapper.name(), + arrayFieldName + ) + ); + } final String[] paths = splitAndValidatePath(lastFieldName); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.START_OBJECT) { diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index a20e3884d4bbc..a3f9d75cbb00a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -228,6 +228,7 @@ public enum MergeReason { private final BooleanSupplier idFieldDataEnabled; private volatile Set compositeMappedFieldTypes; + private volatile Set fieldsPartOfCompositeMappings; public MapperService( IndexSettings indexSettings, @@ -543,9 +544,18 @@ private synchronized Map internalMerge(DocumentMapper ma // initialize composite fields post merge this.compositeMappedFieldTypes = getCompositeFieldTypesFromMapper(); + buildCompositeFieldLookup(); return results; } + private void buildCompositeFieldLookup() { + Set fieldsPartOfCompositeMappings = new HashSet<>(); + for (CompositeMappedFieldType fieldType : compositeMappedFieldTypes) { + fieldsPartOfCompositeMappings.addAll(fieldType.fields()); + } + this.fieldsPartOfCompositeMappings = fieldsPartOfCompositeMappings; + } + private boolean assertSerialization(DocumentMapper mapper) { // capture the source now, it may change due to concurrent parsing final CompressedXContent mappingSource = mapper.mappingSource(); @@ -672,6 +682,10 @@ private Set 
getCompositeFieldTypesFromMapper() { return compositeMappedFieldTypes; } + public boolean isFieldPartOfCompositeIndex(String field) { + return fieldsPartOfCompositeMappings.contains(field); + } + public ObjectMapper getObjectMapper(String name) { return this.mapper == null ? null : this.mapper.objectMappers().get(name); } diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index 533e6ca73d737..dd984373fc9df 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -440,6 +440,15 @@ protected static void parseCompositeField( + " feature flag in the JVM options" ); } + if (StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.get(parserContext.getSettings()) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Set '%s' as true as part of index settings to use star tree index", + StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey() + ) + ); + } Iterator> iterator = compositeNode.entrySet().iterator(); if (compositeNode.size() > StarTreeIndexSettings.STAR_TREE_MAX_FIELDS_SETTING.get(parserContext.getSettings())) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index e52d6a621e4e8..17c27ef149e54 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -22,6 +22,7 @@ import org.opensearch.search.lookup.SearchLookup; import java.util.ArrayList; +import java.util.HashSet; import java.util.LinkedHashSet; import java.util.LinkedList; import java.util.List; @@ -155,8 +156,20 @@ private List buildDimensions(String fieldName, Map ma String.format(Locale.ROOT, "Atleast two dimensions are required to build star tree index field [%s]", fieldName) ); } + Set dimensionFieldNames = new HashSet<>(); for (Object dim : dimList) { - dimensions.add(getDimension(fieldName, dim, context)); + Dimension dimension = getDimension(fieldName, dim, context); + if (dimensionFieldNames.add(dimension.getField()) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Duplicate dimension [%s] present as part star tree index field [%s]", + dimension.getField(), + fieldName + ) + ); + } + dimensions.add(dimension); } } else { throw new MapperParsingException( @@ -223,6 +236,7 @@ private List buildMetrics(String fieldName, Map map, Map } if (metricsFromInput instanceof List) { List metricsList = (List) metricsFromInput; + Set metricFieldNames = new HashSet<>(); for (Object metric : metricsList) { Map metricMap = (Map) metric; String name = (String) XContentMapValues.extractValue(CompositeDataCubeFieldType.NAME, metricMap); @@ -232,7 +246,18 @@ private List buildMetrics(String fieldName, Map map, Map } metricMap.remove(CompositeDataCubeFieldType.NAME); if (objbuilder == null || objbuilder.mappersBuilders == null) { - metrics.add(getMetric(name, metricMap, context)); + Metric metricFromParser = getMetric(name, metricMap, context); + if (metricFieldNames.add(metricFromParser.getField()) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Duplicate metrics [%s] present as part star tree index field [%s]", + metricFromParser.getField(), + fieldName + ) + ); + } + metrics.add(metricFromParser); } else { 
Optional meticBuilder = findMapperBuilderByName(name, this.objbuilder.mappersBuilders); if (meticBuilder.isEmpty()) { @@ -243,7 +268,18 @@ private List buildMetrics(String fieldName, Map map, Map String.format(Locale.ROOT, "non-numeric field type is associated with star tree metric [%s]", this.name) ); } - metrics.add(getMetric(name, metricMap, context)); + Metric metricFromParser = getMetric(name, metricMap, context); + if (metricFieldNames.add(metricFromParser.getField()) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Duplicate metrics [%s] present as part star tree index field [%s]", + metricFromParser.getField(), + fieldName + ) + ); + } + metrics.add(metricFromParser); DocumentMapperParser.checkNoRemainingFields( metricMap, context.indexVersionCreated(), @@ -254,6 +290,32 @@ private List buildMetrics(String fieldName, Map map, Map } else { throw new MapperParsingException(String.format(Locale.ROOT, "unable to parse metrics for star tree field [%s]", this.name)); } + int numBaseMetrics = 0; + for (Metric metric : metrics) { + for (MetricStat metricStat : metric.getMetrics()) { + if (metricStat.isDerivedMetric() == false) { + numBaseMetrics++; + } + } + } + if (numBaseMetrics > context.getSettings() + .getAsInt( + StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_SETTING.getKey(), + StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_DEFAULT + )) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "There cannot be more than [%s] base metrics for star tree field [%s]", + context.getSettings() + .getAsInt( + StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_SETTING.getKey(), + StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_DEFAULT + ), + fieldName + ) + ); + } Metric docCountMetric = new Metric(DocCountFieldMapper.NAME, List.of(MetricStat.DOC_COUNT)); metrics.add(docCountMetric); return metrics; diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index d3c6fc9d1f3bf..03d841d13b7f7 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -227,7 +227,8 @@ private RemoteRestoreResult executeRestore( .build(); } - IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); + // This instance of IndexId is not related to Snapshot Restore. Hence, we are using the ctor without pathType. 
+ IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID(), IndexId.DEFAULT_SHARD_PATH_TYPE); if (metadataFromRemoteStore == false) { Map indexShardRoutingTableMap = currentState.routingTable() diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index 6118650f1924d..fb645e33b8fbd 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -213,7 +213,7 @@ public enum PathHashAlgorithm { @Override String hash(BasePathInput pathInput) { StringBuilder input = new StringBuilder(); - for (String path : pathInput.fixedSubPath().toArray()) { + for (String path : pathInput.hashPath().toArray()) { input.append(path); } long hash = FNV1a.hash64(input.toString()); @@ -228,7 +228,7 @@ String hash(BasePathInput pathInput) { @Override String hash(BasePathInput pathInput) { StringBuilder input = new StringBuilder(); - for (String path : pathInput.fixedSubPath().toArray()) { + for (String path : pathInput.hashPath().toArray()) { input.append(path); } long hash = FNV1a.hash64(input.toString()); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java index 05357aaf6ec72..843992004f23b 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -17,6 +17,7 @@ import org.opensearch.index.remote.RemoteStoreEnums.DataType; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.util.Objects; @@ -111,6 +112,10 @@ BlobPath fixedSubPath() { return BlobPath.cleanPath().add(indexUUID); } + BlobPath hashPath() { + return fixedSubPath(); + } + /** * Returns a new builder for {@link BasePathInput}. */ @@ -138,7 +143,7 @@ public T basePath(BlobPath basePath) { return self(); } - public Builder indexUUID(String indexUUID) { + public T indexUUID(String indexUUID) { this.indexUUID = indexUUID; return self(); } @@ -153,6 +158,65 @@ public BasePathInput build() { } } + /** + * A subclass of {@link PathInput} that represents the input required to generate a path + * for a shard in a snapshot. It includes the base path, index UUID, and shard ID. + * + * @opensearch.internal + */ + public static class SnapshotShardPathInput extends BasePathInput { + private final String shardId; + + public SnapshotShardPathInput(SnapshotShardPathInput.Builder builder) { + super(builder); + this.shardId = Objects.requireNonNull(builder.shardId); + } + + @Override + BlobPath fixedSubPath() { + return BlobPath.cleanPath().add(BlobStoreRepository.INDICES_DIR).add(super.fixedSubPath()).add(shardId); + } + + @Override + BlobPath hashPath() { + return BlobPath.cleanPath().add(shardId).add(indexUUID()); + } + + public String shardId() { + return shardId; + } + + /** + * Returns a new builder for {@link SnapshotShardPathInput}. + */ + public static SnapshotShardPathInput.Builder builder() { + return new SnapshotShardPathInput.Builder(); + } + + /** + * Builder for {@link SnapshotShardPathInput}. 
+ * + * @opensearch.internal + */ + public static class Builder extends BasePathInput.Builder { + private String shardId; + + public SnapshotShardPathInput.Builder shardId(String shardId) { + this.shardId = shardId; + return this; + } + + @Override + protected SnapshotShardPathInput.Builder self() { + return this; + } + + public SnapshotShardPathInput build() { + return new SnapshotShardPathInput(this); + } + } + } + /** * Wrapper class for the data aware path input required to generate path for remote store uploads. This input is * composed of the parent inputs, shard id, data category and data type. diff --git a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java index 177f0526e7571..e027e8b7cb3b1 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java @@ -11,14 +11,15 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.opensearch.index.snapshots.blobstore.IndexShardSnapshot; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.utils.TransferManager; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -74,10 +75,11 @@ private Future createRemoteSnapshotDirectoryFromSnapsho ShardPath localShardPath, BlobStoreRepository blobStoreRepository ) throws IOException { - final BlobPath blobPath = blobStoreRepository.basePath() - .add("indices") - .add(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings())) - .add(Integer.toString(localShardPath.getShardId().getId())); + // The below information like the snapshot generated indexId, shard_path_type and shardId are used for + // creating the shard BlobContainer. This information has been updated as per the hashed_prefix snapshots. 
+ String indexId = IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings()); + PathType pathType = IndexSettings.SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE.get(indexSettings.getSettings()); + int shardId = localShardPath.getShardId().getId(); final SnapshotId snapshotId = new SnapshotId( IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.get(indexSettings.getSettings()), IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.get(indexSettings.getSettings()) @@ -89,7 +91,12 @@ private Future createRemoteSnapshotDirectoryFromSnapsho // this trick is needed to bypass assertions in BlobStoreRepository::assertAllowableThreadPools in case of node restart and a remote // index restore is invoked return threadPool.executor(ThreadPool.Names.SNAPSHOT).submit(() -> { - final BlobContainer blobContainer = blobStoreRepository.blobStore().blobContainer(blobPath); + // shardContainer(IndexId, shardId) method uses the id and pathType information to generate the blobPath and + // hence the blobContainer. We have used a dummy name as it plays no relevance in the blobPath generation. + final BlobContainer blobContainer = blobStoreRepository.shardContainer( + new IndexId("DUMMY", indexId, pathType.getCode()), + shardId + ); final IndexShardSnapshot indexShardSnapshot = blobStoreRepository.loadShardSnapshot(blobContainer, snapshotId); assert indexShardSnapshot instanceof BlobStoreIndexShardSnapshot : "indexShardSnapshot should be an instance of BlobStoreIndexShardSnapshot"; diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index 4599aa32325c1..5dc2ad076d21c 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -69,19 +69,35 @@ public Translog newTranslog( assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository); - return new RemoteFsTranslog( - config, - translogUUID, - deletionPolicy, - globalCheckpointSupplier, - primaryTermSupplier, - persistedSequenceNumberConsumer, - blobStoreRepository, - threadPool, - startedPrimarySupplier, - remoteTranslogTransferTracker, - remoteStoreSettings - ); + if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { + return new RemoteFsTimestampAwareTranslog( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + blobStoreRepository, + threadPool, + startedPrimarySupplier, + remoteTranslogTransferTracker, + remoteStoreSettings + ); + } else { + return new RemoteFsTranslog( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + blobStoreRepository, + threadPool, + startedPrimarySupplier, + remoteTranslogTransferTracker, + remoteStoreSettings + ); + } } public Repository getRepository() { diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java new file mode 100644 index 0000000000000..0b134b3bddbec --- /dev/null +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -0,0 +1,377 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.translog; + +import org.apache.logging.log4j.Logger; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.logging.Loggers; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.index.translog.transfer.TranslogTransferManager; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.TreeSet; +import java.util.function.BooleanSupplier; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; + +/** + * A Translog implementation which syncs local FS with a remote store + * The current impl uploads translog , ckp and metadata to remote store + * for every sync, post syncing to disk. Post that, a new generation is + * created. This implementation is also aware of pinned timestamp and makes + * sure data against pinned timestamp is retained. + * + * @opensearch.internal + */ +public class RemoteFsTimestampAwareTranslog extends RemoteFsTranslog { + + private final Logger logger; + private final Map metadataFilePinnedTimestampMap; + // For metadata files, with no min generation in the name, we cache generation data to avoid multiple reads. 
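The cache mentioned in the comment above exists so that old-format metadata files, whose names do not encode the minimum generation, are read from the remote store at most once. Below is a self-contained sketch of that parse-from-name-or-read-once pattern; the filename layout and the readMetadata() stand-in are hypothetical, not the actual translog metadata format.

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Sketch: resolve the [minGeneration, maxGeneration] range of a metadata file from its
// name when possible, otherwise "read" the file once and remember the answer.
class MinMaxGenerationCacheSketch {
    private final Map<String, long[]> oldFormatCache = new HashMap<>();

    long[] minMaxGenerations(String metadataFile) {
        long[] fromName = tryParseFromName(metadataFile);
        if (fromName != null) {
            return fromName;
        }
        // computeIfAbsent ensures each old-format file is read at most once.
        return oldFormatCache.computeIfAbsent(metadataFile, this::readMetadata);
    }

    // Hypothetical "new format": metadata__<minGen>__<maxGen>; old-format names omit minGen.
    private long[] tryParseFromName(String name) {
        String[] parts = name.split("__");
        return parts.length == 3 ? new long[] { Long.parseLong(parts[1]), Long.parseLong(parts[2]) } : null;
    }

    // Stand-in for reading the metadata blob from the remote store.
    private long[] readMetadata(String name) {
        return new long[] { 0L, Long.parseLong(name.split("__")[1]) };
    }

    public static void main(String[] args) {
        MinMaxGenerationCacheSketch cache = new MinMaxGenerationCacheSketch();
        System.out.println(Arrays.toString(cache.minMaxGenerations("metadata__3__9"))); // parsed from name
        System.out.println(Arrays.toString(cache.minMaxGenerations("metadata__9")));    // read once, then cached
    }
}
```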
+ private final Map> oldFormatMetadataFileGenerationMap; + + public RemoteFsTimestampAwareTranslog( + TranslogConfig config, + String translogUUID, + TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, + LongSupplier primaryTermSupplier, + LongConsumer persistedSequenceNumberConsumer, + BlobStoreRepository blobStoreRepository, + ThreadPool threadPool, + BooleanSupplier startedPrimarySupplier, + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteStoreSettings remoteStoreSettings + ) throws IOException { + super( + config, + translogUUID, + deletionPolicy, + globalCheckpointSupplier, + primaryTermSupplier, + persistedSequenceNumberConsumer, + blobStoreRepository, + threadPool, + startedPrimarySupplier, + remoteTranslogTransferTracker, + remoteStoreSettings + ); + logger = Loggers.getLogger(getClass(), shardId); + this.metadataFilePinnedTimestampMap = new HashMap<>(); + this.oldFormatMetadataFileGenerationMap = new HashMap<>(); + } + + @Override + protected void onDelete() { + ClusterService.assertClusterOrClusterManagerStateThread(); + // clean up all remote translog files + try { + trimUnreferencedReaders(true, false); + } catch (IOException e) { + logger.error("Exception while deleting translog files from remote store", e); + } + } + + @Override + public void trimUnreferencedReaders() throws IOException { + trimUnreferencedReaders(false, true); + } + + // Visible for testing + protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) throws IOException { + if (trimLocal) { + // clean up local translog files and updates readers + super.trimUnreferencedReaders(); + } + + // Update file tracker to reflect local translog state + Optional minLiveGeneration = readers.stream().map(BaseTranslogReader::getGeneration).min(Long::compareTo); + if (minLiveGeneration.isPresent()) { + List staleFilesInTracker = new ArrayList<>(); + for (String file : fileTransferTracker.allUploaded()) { + if (file.endsWith(TRANSLOG_FILE_SUFFIX)) { + long generation = Translog.parseIdFromFileName(file); + if (generation < minLiveGeneration.get()) { + staleFilesInTracker.add(file); + staleFilesInTracker.add(Translog.getCommitCheckpointFileName(generation)); + } + } + fileTransferTracker.delete(staleFilesInTracker); + } + } + + // This is to ensure that after the permits are acquired during primary relocation, there are no further modification on remote + // store. + if (startedPrimarySupplier.getAsBoolean() == false || pauseSync.get()) { + return; + } + + // This is to fail fast and avoid listing md files un-necessarily. + if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { + logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + return; + } + + // Since remote generation deletion is async, this ensures that only one generation deletion happens at a time. + // Remote generations involves 2 async operations - 1) Delete translog generation files 2) Delete metadata files + // We try to acquire 2 permits and if we can not, we return from here itself. 
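The permit handling described in the comments above can be modeled with a small, self-contained sketch: a trim run starts only if it can take both permits at once, and each of the two asynchronous deletions returns one permit when it finishes, so overlapping deletion rounds are prevented. This is a simplified model of the guard, not the production code path.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

// Simplified model of the two-permit guard: both permits must be available to start,
// and each async deletion hands its permit back on completion.
class RemoteDeletionPermitsSketch {
    private static final int REMOTE_DELETION_PERMITS = 2;
    private final Semaphore permits = new Semaphore(REMOTE_DELETION_PERMITS);

    boolean tryTrim() {
        if (permits.tryAcquire(REMOTE_DELETION_PERMITS) == false) {
            return false; // a previous deletion round is still running
        }
        CompletableFuture.runAsync(this::deleteStaleGenerations).whenComplete((r, e) -> permits.release());
        CompletableFuture.runAsync(this::deleteStaleMetadataFiles).whenComplete((r, e) -> permits.release());
        return true;
    }

    private void deleteStaleGenerations() { /* stand-in for deleteGenerationAsync */ }

    private void deleteStaleMetadataFiles() { /* stand-in for deleteMetadataFilesAsync */ }

    public static void main(String[] args) throws InterruptedException {
        RemoteDeletionPermitsSketch sketch = new RemoteDeletionPermitsSketch();
        System.out.println("first trim started: " + sketch.tryTrim());
        Thread.sleep(100); // give the async tasks time to finish and return their permits
        System.out.println("second trim started: " + sketch.tryTrim());
    }
}
```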
+ if (remoteGenerationDeletionPermits.tryAcquire(REMOTE_DELETION_PERMITS) == false) { + return; + } + + ActionListener> listMetadataFilesListener = new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) { + List metadataFiles = blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList()); + + try { + if (metadataFiles.size() <= 1) { + logger.debug("No stale translog metadata files found"); + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + return; + } + + // Check last fetch status of pinned timestamps. If stale, return. + if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { + logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + return; + } + + List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles); + + // If index is not deleted, make sure to keep latest metadata file + if (indexDeleted == false) { + metadataFilesToBeDeleted.remove(metadataFiles.get(0)); + } + + if (metadataFilesToBeDeleted.isEmpty()) { + logger.debug("No metadata files to delete"); + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + return; + } + + logger.debug(() -> "metadataFilesToBeDeleted = " + metadataFilesToBeDeleted); + // For all the files that we are keeping, fetch min and max generations + List metadataFilesNotToBeDeleted = new ArrayList<>(metadataFiles); + metadataFilesNotToBeDeleted.removeAll(metadataFilesToBeDeleted); + + logger.debug(() -> "metadataFilesNotToBeDeleted = " + metadataFilesNotToBeDeleted); + Set generationsToBeDeleted = getGenerationsToBeDeleted( + metadataFilesNotToBeDeleted, + metadataFilesToBeDeleted, + indexDeleted + ); + + logger.debug(() -> "generationsToBeDeleted = " + generationsToBeDeleted); + if (generationsToBeDeleted.isEmpty() == false) { + // Delete stale generations + translogTransferManager.deleteGenerationAsync( + primaryTermSupplier.getAsLong(), + generationsToBeDeleted, + remoteGenerationDeletionPermits::release + ); + + // Delete stale metadata files + translogTransferManager.deleteMetadataFilesAsync( + metadataFilesToBeDeleted, + remoteGenerationDeletionPermits::release + ); + + // Update cache to keep only those metadata files that are not getting deleted + oldFormatMetadataFileGenerationMap.keySet().retainAll(metadataFilesNotToBeDeleted); + + // Delete stale primary terms + deleteStaleRemotePrimaryTerms(metadataFiles); + } else { + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + } + } catch (Exception e) { + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + } + } + + @Override + public void onFailure(Exception e) { + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + logger.error("Exception while listing translog metadata files", e); + } + }; + translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener); + } + + // Visible for testing + protected Set getGenerationsToBeDeleted( + List metadataFilesNotToBeDeleted, + List metadataFilesToBeDeleted, + boolean indexDeleted + ) throws IOException { + long maxGenerationToBeDeleted = Long.MAX_VALUE; + + if (indexDeleted == false) { + maxGenerationToBeDeleted = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); + } + + Set generationsFromMetadataFilesToBeDeleted = new HashSet<>(); + for (String mdFile : metadataFilesToBeDeleted) { + Tuple minMaxGen = 
getMinMaxTranslogGenerationFromMetadataFile(mdFile, translogTransferManager); + generationsFromMetadataFilesToBeDeleted.addAll( + LongStream.rangeClosed(minMaxGen.v1(), minMaxGen.v2()).boxed().collect(Collectors.toList()) + ); + } + + Map> metadataFileNotToBeDeletedGenerationMap = getGenerationForMetadataFiles(metadataFilesNotToBeDeleted); + TreeSet> pinnedGenerations = getOrderedPinnedMetadataGenerations(metadataFileNotToBeDeletedGenerationMap); + Set generationsToBeDeleted = new HashSet<>(); + for (long generation : generationsFromMetadataFilesToBeDeleted) { + // Check if the generation is not referred by metadata file matching pinned timestamps + if (generation <= maxGenerationToBeDeleted && isGenerationPinned(generation, pinnedGenerations) == false) { + generationsToBeDeleted.add(generation); + } + } + return generationsToBeDeleted; + } + + // Visible for testing + protected List getMetadataFilesToBeDeleted(List metadataFiles) { + Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + + // Keep files since last successful run of scheduler + List metadataFilesToBeDeleted = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( + metadataFiles, + file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[3]), + pinnedTimestampsState.v1() + ); + + logger.trace( + "metadataFiles.size = {}, metadataFilesToBeDeleted based on age based filtering = {}", + metadataFiles.size(), + metadataFilesToBeDeleted.size() + ); + + // Get md files matching pinned timestamps + Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( + metadataFilesToBeDeleted, + pinnedTimestampsState.v2(), + metadataFilePinnedTimestampMap, + file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[3]), + TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen + ); + + // Filter out metadata files matching pinned timestamps + metadataFilesToBeDeleted.removeAll(implicitLockedFiles); + + logger.trace( + "implicitLockedFiles.size = {}, metadataFilesToBeDeleted based on pinned timestamp filtering = {}", + implicitLockedFiles.size(), + metadataFilesToBeDeleted.size() + ); + + return metadataFilesToBeDeleted; + } + + // Visible for testing + protected boolean isGenerationPinned(long generation, TreeSet> pinnedGenerations) { + Tuple ceilingGenerationRange = pinnedGenerations.ceiling(new Tuple<>(generation, generation)); + if (ceilingGenerationRange != null && generation >= ceilingGenerationRange.v1() && generation <= ceilingGenerationRange.v2()) { + return true; + } + Tuple floorGenerationRange = pinnedGenerations.floor(new Tuple<>(generation, generation)); + if (floorGenerationRange != null && generation >= floorGenerationRange.v1() && generation <= floorGenerationRange.v2()) { + return true; + } + return false; + } + + private TreeSet> getOrderedPinnedMetadataGenerations(Map> metadataFileGenerationMap) { + TreeSet> pinnedGenerations = new TreeSet<>((o1, o2) -> { + if (Objects.equals(o1.v1(), o2.v1()) == false) { + return o1.v1().compareTo(o2.v1()); + } else { + return o1.v2().compareTo(o2.v2()); + } + }); + pinnedGenerations.addAll(metadataFileGenerationMap.values()); + return pinnedGenerations; + } + + // Visible for testing + protected Map> getGenerationForMetadataFiles(List metadataFiles) throws IOException { + Map> metadataFileGenerationMap = new HashMap<>(); + for (String metadataFile : metadataFiles) { + metadataFileGenerationMap.put(metadataFile, getMinMaxTranslogGenerationFromMetadataFile(metadataFile, translogTransferManager)); + } + return 
metadataFileGenerationMap; + } + + // Visible for testing + protected Tuple getMinMaxTranslogGenerationFromMetadataFile( + String metadataFile, + TranslogTransferManager translogTransferManager + ) throws IOException { + Tuple minMaxGenerationFromFileName = TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(metadataFile); + if (minMaxGenerationFromFileName != null) { + return minMaxGenerationFromFileName; + } else { + if (oldFormatMetadataFileGenerationMap.containsKey(metadataFile)) { + return oldFormatMetadataFileGenerationMap.get(metadataFile); + } else { + TranslogTransferMetadata metadata = translogTransferManager.readMetadata(metadataFile); + Tuple minMaxGenTuple = new Tuple<>(metadata.getMinTranslogGeneration(), metadata.getGeneration()); + oldFormatMetadataFileGenerationMap.put(metadataFile, minMaxGenTuple); + return minMaxGenTuple; + } + } + } + + /** + * This method must be called only after there are valid generations to delete in trimUnreferencedReaders as it ensures + * implicitly that minimum primary term in latest translog metadata in remote store is the current primary term. + *
+ * This will also delete all stale translog metadata files from remote except the latest basis the metadata file comparator. + */ + private void deleteStaleRemotePrimaryTerms(List metadataFiles) { + // The deletion of older translog files in remote store is on best-effort basis, there is a possibility that there + // are older files that are no longer needed and should be cleaned up. In here, we delete all files that are part + // of older primary term. + if (olderPrimaryCleaned.trySet(Boolean.TRUE)) { + if (metadataFiles.isEmpty()) { + logger.trace("No metadata is uploaded yet, returning from deleteStaleRemotePrimaryTerms"); + return; + } + Optional minPrimaryTerm = metadataFiles.stream() + .map(file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[1])) + .min(Long::compareTo); + // First we delete all stale primary terms folders from remote store + long minimumReferencedPrimaryTerm = minPrimaryTerm.get() - 1; + translogTransferManager.deletePrimaryTermsAsync(minimumReferencedPrimaryTerm); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index bad752d44ad93..242234aed9f9f 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -11,19 +11,15 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.SetOnce; -import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.concurrent.ReleasableLock; import org.opensearch.common.util.io.IOUtils; -import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.remote.RemoteStorePathStrategy; -import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.transfer.BlobStoreTransferService; @@ -34,7 +30,6 @@ import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; import org.opensearch.indices.RemoteStoreSettings; -import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; @@ -44,16 +39,11 @@ import java.nio.file.Files; import java.nio.file.NoSuchFileException; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -61,13 +51,10 @@ import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; import java.util.function.LongSupplier; -import java.util.stream.Collectors; -import 
java.util.stream.LongStream; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; -import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; /** * A Translog implementation which syncs local FS with a remote store @@ -80,36 +67,31 @@ public class RemoteFsTranslog extends Translog { private final Logger logger; - private final TranslogTransferManager translogTransferManager; - // This tracker keeps track of local tranlog files that are uploaded to remote store. - // Once tlog files are deleted from local, we remove them from tracker even if the files still exist in remote translog. - private final FileTransferTracker fileTransferTracker; - private final BooleanSupplier startedPrimarySupplier; + protected final TranslogTransferManager translogTransferManager; + protected final FileTransferTracker fileTransferTracker; + protected final BooleanSupplier startedPrimarySupplier; private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; private volatile long maxRemoteTranslogGenerationUploaded; private volatile long minSeqNoToKeep; // min generation referred by last uploaded translog - private volatile long minRemoteGenReferenced; + protected volatile long minRemoteGenReferenced; // clean up translog folder uploaded by previous primaries once - private final SetOnce olderPrimaryCleaned = new SetOnce<>(); + protected final SetOnce olderPrimaryCleaned = new SetOnce<>(); - private static final int REMOTE_DELETION_PERMITS = 2; + protected static final int REMOTE_DELETION_PERMITS = 2; private static final int DOWNLOAD_RETRIES = 2; // Semaphore used to allow only single remote generation to happen at a time - private final Semaphore remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS); + protected final Semaphore remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS); // These permits exist to allow any inflight background triggered upload. private static final int SYNC_PERMIT = 1; private final Semaphore syncPermit = new Semaphore(SYNC_PERMIT); - private final AtomicBoolean pauseSync = new AtomicBoolean(false); + protected final AtomicBoolean pauseSync = new AtomicBoolean(false); private final boolean isTranslogMetadataEnabled; - private final Map metadataFilePinnedTimestampMap; - // For metadata files, with no min generation in the name, we cache generation data to avoid multiple reads. 
- private final Map> oldFormatMetadataFileGenerationMap; public RemoteFsTranslog( TranslogConfig config, @@ -160,8 +142,6 @@ public RemoteFsTranslog( this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; fileTransferTracker = new FileTransferTracker(shardId, remoteTranslogTransferTracker); isTranslogMetadataEnabled = indexSettings().isTranslogMetadataEnabled(); - this.metadataFilePinnedTimestampMap = new HashMap<>(); - this.oldFormatMetadataFileGenerationMap = new HashMap<>(); this.translogTransferManager = buildTranslogTransferManager( blobStoreRepository, threadPool, @@ -617,31 +597,8 @@ protected Releasable drainSync() { @Override public void trimUnreferencedReaders() throws IOException { - trimUnreferencedReaders(false, true); - } - - // Visible for testing - protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) throws IOException { - if (trimLocal) { - // clean up local translog files and updates readers - super.trimUnreferencedReaders(); - } - - // Update file tracker to reflect local translog state - Optional minLiveGeneration = readers.stream().map(BaseTranslogReader::getGeneration).min(Long::compareTo); - if (minLiveGeneration.isPresent()) { - List staleFilesInTracker = new ArrayList<>(); - for (String file : fileTransferTracker.allUploaded()) { - if (file.endsWith(TRANSLOG_FILE_SUFFIX)) { - long generation = Translog.parseIdFromFileName(file); - if (generation < minLiveGeneration.get()) { - staleFilesInTracker.add(file); - staleFilesInTracker.add(Translog.getCommitCheckpointFileName(generation)); - } - } - fileTransferTracker.delete(staleFilesInTracker); - } - } + // clean up local translog files and updates readers + super.trimUnreferencedReaders(); // This is to ensure that after the permits are acquired during primary relocation, there are no further modification on remote // store. @@ -649,12 +606,6 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) return; } - // This is to fail fast and avoid listing md files un-necessarily. - if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { - logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); - return; - } - // Since remote generation deletion is async, this ensures that only one generation deletion happens at a time. // Remote generations involves 2 async operations - 1) Delete translog generation files 2) Delete metadata files // We try to acquire 2 permits and if we can not, we return from here itself. @@ -662,209 +613,34 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) return; } - ActionListener> listMetadataFilesListener = new ActionListener<>() { - @Override - public void onResponse(List blobMetadata) { - List metadataFiles = blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList()); - - try { - if (metadataFiles.size() <= 1) { - logger.debug("No stale translog metadata files found"); - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); - return; - } - - // Check last fetch status of pinned timestamps. If stale, return. 
- if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { - logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); - return; - } - - List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles); - - // If index is not deleted, make sure to keep latest metadata file - if (indexDeleted == false) { - metadataFilesToBeDeleted.remove(metadataFiles.get(0)); - } - - if (metadataFilesToBeDeleted.isEmpty()) { - logger.debug("No metadata files to delete"); - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); - return; - } - - logger.debug("metadataFilesToBeDeleted = {}", metadataFilesToBeDeleted); - // For all the files that we are keeping, fetch min and max generations - List metadataFilesNotToBeDeleted = new ArrayList<>(metadataFiles); - metadataFilesNotToBeDeleted.removeAll(metadataFilesToBeDeleted); - - logger.debug("metadataFilesNotToBeDeleted = {}", metadataFilesNotToBeDeleted); - Set generationsToBeDeleted = getGenerationsToBeDeleted( - metadataFilesNotToBeDeleted, - metadataFilesToBeDeleted, - indexDeleted - ); - - logger.debug("generationsToBeDeleted = {}", generationsToBeDeleted); - if (generationsToBeDeleted.isEmpty() == false) { - // Delete stale generations - translogTransferManager.deleteGenerationAsync( - primaryTermSupplier.getAsLong(), - generationsToBeDeleted, - remoteGenerationDeletionPermits::release - ); - - // Delete stale metadata files - translogTransferManager.deleteMetadataFilesAsync( - metadataFilesToBeDeleted, - remoteGenerationDeletionPermits::release - ); - - // Update cache to keep only those metadata files that are not getting deleted - oldFormatMetadataFileGenerationMap.keySet().retainAll(metadataFilesNotToBeDeleted); - - // Delete stale primary terms - deleteStaleRemotePrimaryTerms(metadataFiles); - } else { - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); - } - } catch (Exception e) { - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); - } - } - - @Override - public void onFailure(Exception e) { - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); - logger.error("Exception while listing translog metadata files", e); + // cleans up remote translog files not referenced in latest uploaded metadata. + // This enables us to restore translog from the metadata in case of failover or relocation. 
+ Set generationsToDelete = new HashSet<>(); + for (long generation = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); generation >= 0; generation--) { + if (fileTransferTracker.uploaded(Translog.getFilename(generation)) == false) { + break; } - }; - translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener); - } - - // Visible for testing - protected Set getGenerationsToBeDeleted( - List metadataFilesNotToBeDeleted, - List metadataFilesToBeDeleted, - boolean indexDeleted - ) throws IOException { - long maxGenerationToBeDeleted = Long.MAX_VALUE; - - if (indexDeleted == false) { - maxGenerationToBeDeleted = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); + generationsToDelete.add(generation); } - - Set generationsFromMetadataFilesToBeDeleted = new HashSet<>(); - for (String mdFile : metadataFilesToBeDeleted) { - Tuple minMaxGen = getMinMaxTranslogGenerationFromMetadataFile(mdFile, translogTransferManager); - generationsFromMetadataFilesToBeDeleted.addAll( - LongStream.rangeClosed(minMaxGen.v1(), minMaxGen.v2()).boxed().collect(Collectors.toList()) - ); - } - - Map> metadataFileNotToBeDeletedGenerationMap = getGenerationForMetadataFiles(metadataFilesNotToBeDeleted); - TreeSet> pinnedGenerations = getOrderedPinnedMetadataGenerations(metadataFileNotToBeDeletedGenerationMap); - Set generationsToBeDeleted = new HashSet<>(); - for (long generation : generationsFromMetadataFilesToBeDeleted) { - // Check if the generation is not referred by metadata file matching pinned timestamps - if (generation <= maxGenerationToBeDeleted && isGenerationPinned(generation, pinnedGenerations) == false) { - generationsToBeDeleted.add(generation); - } + if (generationsToDelete.isEmpty() == false) { + deleteRemoteGeneration(generationsToDelete); + translogTransferManager.deleteStaleTranslogMetadataFilesAsync(remoteGenerationDeletionPermits::release); + deleteStaleRemotePrimaryTerms(); + } else { + remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); } - return generationsToBeDeleted; } - // Visible for testing - protected List getMetadataFilesToBeDeleted(List metadataFiles) { - Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); - - // Keep files since last successful run of scheduler - List metadataFilesToBeDeleted = RemoteStoreUtils.filterOutMetadataFilesBasedOnAge( - metadataFiles, - file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[3]), - pinnedTimestampsState.v1() - ); - - logger.trace( - "metadataFiles.size = {}, metadataFilesToBeDeleted based on age based filtering = {}", - metadataFiles.size(), - metadataFilesToBeDeleted.size() - ); - - // Get md files matching pinned timestamps - Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( - metadataFilesToBeDeleted, - pinnedTimestampsState.v2(), - metadataFilePinnedTimestampMap, - file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[3]), - TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen - ); - - // Filter out metadata files matching pinned timestamps - metadataFilesToBeDeleted.removeAll(implicitLockedFiles); - - logger.trace( - "implicitLockedFiles.size = {}, metadataFilesToBeDeleted based on pinned timestamp filtering = {}", - implicitLockedFiles.size(), - metadataFilesToBeDeleted.size() + /** + * Deletes remote translog and metadata files asynchronously corresponding to the generations. + * @param generations generations to be deleted. 
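The loop above walks backwards from the oldest generation that must be retained and collects contiguous generations already uploaded to the remote store, stopping at the first gap. A self-contained sketch of that walk follows, with the file-transfer tracker replaced by a plain set of uploaded generations; the numbers are illustrative only.

```java
import java.util.HashSet;
import java.util.Set;

// Sketch: collect contiguous uploaded generations below the retention threshold;
// stop at the first gap, since anything older was never uploaded or is already gone.
class StaleGenerationWalkSketch {
    static Set<Long> generationsToDelete(long minRemoteGenReferenced, long extraKeep, Set<Long> uploadedGenerations) {
        Set<Long> toDelete = new HashSet<>();
        for (long generation = minRemoteGenReferenced - 1 - extraKeep; generation >= 0; generation--) {
            if (uploadedGenerations.contains(generation) == false) {
                break;
            }
            toDelete.add(generation);
        }
        return toDelete;
    }

    public static void main(String[] args) {
        Set<Long> uploaded = Set.of(2L, 3L, 4L, 5L, 6L, 7L);
        // With minRemoteGenReferenced=8 and extraKeep=2, generations 5 and below are candidates;
        // 2..5 are uploaded contiguously, and the walk stops at the gap below 2. Prints [2, 3, 4, 5].
        System.out.println(generationsToDelete(8, 2, uploaded));
    }
}
```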
+ */ + private void deleteRemoteGeneration(Set generations) { + translogTransferManager.deleteGenerationAsync( + primaryTermSupplier.getAsLong(), + generations, + remoteGenerationDeletionPermits::release ); - - return metadataFilesToBeDeleted; - } - - // Visible for testing - protected boolean isGenerationPinned(long generation, TreeSet> pinnedGenerations) { - Tuple ceilingGenerationRange = pinnedGenerations.ceiling(new Tuple<>(generation, generation)); - if (ceilingGenerationRange != null && generation >= ceilingGenerationRange.v1() && generation <= ceilingGenerationRange.v2()) { - return true; - } - Tuple floorGenerationRange = pinnedGenerations.floor(new Tuple<>(generation, generation)); - if (floorGenerationRange != null && generation >= floorGenerationRange.v1() && generation <= floorGenerationRange.v2()) { - return true; - } - return false; - } - - private TreeSet> getOrderedPinnedMetadataGenerations(Map> metadataFileGenerationMap) { - TreeSet> pinnedGenerations = new TreeSet<>((o1, o2) -> { - if (Objects.equals(o1.v1(), o2.v1()) == false) { - return o1.v1().compareTo(o2.v1()); - } else { - return o1.v2().compareTo(o2.v2()); - } - }); - pinnedGenerations.addAll(metadataFileGenerationMap.values()); - return pinnedGenerations; - } - - // Visible for testing - protected Map> getGenerationForMetadataFiles(List metadataFiles) throws IOException { - Map> metadataFileGenerationMap = new HashMap<>(); - for (String metadataFile : metadataFiles) { - metadataFileGenerationMap.put(metadataFile, getMinMaxTranslogGenerationFromMetadataFile(metadataFile, translogTransferManager)); - } - return metadataFileGenerationMap; - } - - // Visible for testing - protected Tuple getMinMaxTranslogGenerationFromMetadataFile( - String metadataFile, - TranslogTransferManager translogTransferManager - ) throws IOException { - Tuple minMaxGenerationFromFileName = TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(metadataFile); - if (minMaxGenerationFromFileName != null) { - return minMaxGenerationFromFileName; - } else { - if (oldFormatMetadataFileGenerationMap.containsKey(metadataFile)) { - return oldFormatMetadataFileGenerationMap.get(metadataFile); - } else { - TranslogTransferMetadata metadata = translogTransferManager.readMetadata(metadataFile); - Tuple minMaxGenTuple = new Tuple<>(metadata.getMinTranslogGeneration(), metadata.getGeneration()); - oldFormatMetadataFileGenerationMap.put(metadataFile, minMaxGenTuple); - return minMaxGenTuple; - } - } } /** @@ -873,20 +649,17 @@ protected Tuple getMinMaxTranslogGenerationFromMetadataFile( *
* This will also delete all stale translog metadata files from remote except the latest basis the metadata file comparator. */ - private void deleteStaleRemotePrimaryTerms(List metadataFiles) { + private void deleteStaleRemotePrimaryTerms() { // The deletion of older translog files in remote store is on best-effort basis, there is a possibility that there // are older files that are no longer needed and should be cleaned up. In here, we delete all files that are part // of older primary term. if (olderPrimaryCleaned.trySet(Boolean.TRUE)) { - if (metadataFiles.isEmpty()) { - logger.trace("No metadata is uploaded yet, returning from deleteStaleRemotePrimaryTerms"); + if (readers.isEmpty()) { + logger.trace("Translog reader list is empty, returning from deleteStaleRemotePrimaryTerms"); return; } - Optional minPrimaryTerm = metadataFiles.stream() - .map(file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[1])) - .min(Long::compareTo); // First we delete all stale primary terms folders from remote store - long minimumReferencedPrimaryTerm = minPrimaryTerm.get() - 1; + long minimumReferencedPrimaryTerm = readers.stream().map(BaseTranslogReader::getPrimaryTerm).min(Long::compare).get(); translogTransferManager.deletePrimaryTermsAsync(minimumReferencedPrimaryTerm); } } @@ -922,15 +695,7 @@ public static void cleanup( protected void onDelete() { ClusterService.assertClusterOrClusterManagerStateThread(); // clean up all remote translog files - if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { - try { - trimUnreferencedReaders(true, false); - } catch (IOException e) { - logger.error("Exception while deleting translog files from remote store", e); - } - } else { - translogTransferManager.delete(); - } + translogTransferManager.delete(); } // Visible for testing diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 9589d0f4e2e9a..6f84c0e1fbda1 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -233,6 +233,7 @@ import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; +import org.opensearch.search.deciders.ConcurrentSearchDecider; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.search.query.QueryPhase; @@ -1329,7 +1330,8 @@ protected Node( responseCollectorService, circuitBreakerService, searchModule.getIndexSearcherExecutor(threadPool), - taskResourceTrackingService + taskResourceTrackingService, + searchModule.getConcurrentSearchDeciders() ); final List> tasksExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) @@ -1976,7 +1978,8 @@ protected SearchService newSearchService( ResponseCollectorService responseCollectorService, CircuitBreakerService circuitBreakerService, Executor indexSearcherExecutor, - TaskResourceTrackingService taskResourceTrackingService + TaskResourceTrackingService taskResourceTrackingService, + Collection concurrentSearchDecidersList ) { return new SearchService( clusterService, @@ -1989,7 +1992,8 @@ protected SearchService newSearchService( responseCollectorService, circuitBreakerService, indexSearcherExecutor, - taskResourceTrackingService + taskResourceTrackingService, + concurrentSearchDecidersList ); } diff --git 
a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index a0f745a4270c4..efaefd530b34d 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -13,7 +13,6 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.node.Node; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -29,8 +28,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; - /** * This is an abstraction for validating and storing information specific to remote backed storage nodes. * @@ -202,7 +199,7 @@ private static boolean isRemoteRoutingTableAttributePresent(Settings settings) { } public static boolean isRemoteRoutingTableEnabled(Settings settings) { - return FeatureFlags.isEnabled(REMOTE_PUBLICATION_EXPERIMENTAL) && isRemoteRoutingTableAttributePresent(settings); + return isRemoteRoutingTableAttributePresent(settings); } public RepositoriesMetadata getRepositoriesMetadata() { diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index d14c99d9b765b..895e6ed2971d8 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.opensearch.common.CheckedFunction; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.lucene.search.function.ScoreFunction; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; @@ -64,6 +65,7 @@ import org.opensearch.search.aggregations.pipeline.MovAvgPipelineAggregator; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; +import org.opensearch.search.deciders.ConcurrentSearchDecider; import org.opensearch.search.fetch.FetchSubPhase; import org.opensearch.search.fetch.subphase.highlight.Highlighter; import org.opensearch.search.query.QueryPhaseSearcher; @@ -138,6 +140,15 @@ default Map getHighlighters() { return emptyMap(); } + /** + * Allows plugins to register custom decider for concurrent search + * @return A {@link ConcurrentSearchDecider} + */ + @ExperimentalApi + default ConcurrentSearchDecider getConcurrentSearchDecider() { + return null; + } + /** * The new {@link Suggester}s defined by this plugin. 
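The new extension point above defaults to null (no custom decider). A hypothetical sketch of a plugin supplying one is shown below; since ConcurrentSearchDecider's own methods are not part of this diff, the decider instance is injected rather than implemented here, and the constructor shape is illustrative only.

```java
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.SearchPlugin;
import org.opensearch.search.deciders.ConcurrentSearchDecider;

// Hypothetical plugin sketch: exposing a custom decider through the new extension point.
public class MyConcurrentSearchPlugin extends Plugin implements SearchPlugin {
    private final ConcurrentSearchDecider decider;

    public MyConcurrentSearchPlugin(ConcurrentSearchDecider decider) {
        this.decider = decider; // construction of the decider itself is outside the scope of this diff
    }

    @Override
    public ConcurrentSearchDecider getConcurrentSearchDecider() {
        return decider;
    }
}
```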
*/ diff --git a/server/src/main/java/org/opensearch/repositories/IndexId.java b/server/src/main/java/org/opensearch/repositories/IndexId.java index 87a0063e8c21b..238dffbb46bde 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexId.java +++ b/server/src/main/java/org/opensearch/repositories/IndexId.java @@ -32,6 +32,7 @@ package org.opensearch.repositories; +import org.opensearch.Version; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -40,6 +41,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteStoreEnums; import java.io.IOException; import java.util.Objects; @@ -51,23 +53,36 @@ */ @PublicApi(since = "1.0.0") public final class IndexId implements Writeable, ToXContentObject { - protected static final String NAME = "name"; - protected static final String ID = "id"; + static final String NAME = "name"; + static final String ID = "id"; + static final String SHARD_PATH_TYPE = "shard_path_type"; + public static final int DEFAULT_SHARD_PATH_TYPE = RemoteStoreEnums.PathType.FIXED.getCode(); private final String name; private final String id; + private final int shardPathType; private final int hashCode; + // Used for testing only public IndexId(final String name, final String id) { + this(name, id, DEFAULT_SHARD_PATH_TYPE); + } + + public IndexId(String name, String id, int shardPathType) { this.name = name; this.id = id; + this.shardPathType = shardPathType; this.hashCode = computeHashCode(); - } public IndexId(final StreamInput in) throws IOException { this.name = in.readString(); this.id = in.readString(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + this.shardPathType = in.readVInt(); + } else { + this.shardPathType = DEFAULT_SHARD_PATH_TYPE; + } this.hashCode = computeHashCode(); } @@ -93,9 +108,16 @@ public String getId() { return id; } + /** + * The storage path type in remote store for the indexes having the underlying index ids. 
+ */ + public int getShardPathType() { + return shardPathType; + } + @Override public String toString() { - return "[" + name + "/" + id + "]"; + return "[" + name + "/" + id + "/" + shardPathType + "]"; } @Override @@ -107,7 +129,7 @@ public boolean equals(Object o) { return false; } IndexId that = (IndexId) o; - return Objects.equals(name, that.name) && Objects.equals(id, that.id); + return Objects.equals(name, that.name) && Objects.equals(id, that.id) && Objects.equals(this.shardPathType, that.shardPathType); } @Override @@ -116,13 +138,16 @@ public int hashCode() { } private int computeHashCode() { - return Objects.hash(name, id); + return Objects.hash(name, id, shardPathType); } @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(id); + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeVInt(shardPathType); + } } @Override @@ -130,6 +155,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.startObject(); builder.field(NAME, name); builder.field(ID, id); + builder.field(SHARD_PATH_TYPE, shardPathType); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryData.java b/server/src/main/java/org/opensearch/repositories/RepositoryData.java index ddccc633e5d75..1eeb1d838f2ca 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryData.java @@ -519,7 +519,7 @@ public List resolveIndices(final List indices) { * @param indicesToResolve names of indices to resolve * @param inFlightIds name to index mapping for currently in-flight snapshots not yet in the repository data to fall back to */ - public List resolveNewIndices(List indicesToResolve, Map inFlightIds) { + public List resolveNewIndices(List indicesToResolve, Map inFlightIds, int pathType) { List snapshotIndices = new ArrayList<>(); for (String index : indicesToResolve) { IndexId indexId = indices.get(index); @@ -527,13 +527,17 @@ public List resolveNewIndices(List indicesToResolve, Map resolveNewIndices(List indicesToResolve, Map inFlightIds) { + return resolveNewIndices(indicesToResolve, inFlightIds, IndexId.DEFAULT_SHARD_PATH_TYPE); + } + private static final String SHARD_GENERATIONS = "shard_generations"; private static final String INDEX_METADATA_IDENTIFIERS = "index_metadata_identifiers"; private static final String INDEX_METADATA_LOOKUP = "index_metadata_lookup"; @@ -546,10 +550,16 @@ public List resolveNewIndices(List indicesToResolve, Map snapshotIds = indexSnapshots.get(indexId); assert snapshotIds != null; @@ -791,14 +804,20 @@ private static void parseIndices( final List snapshotIds = new ArrayList<>(); final List gens = new ArrayList<>(); + String id = null; + int pathType = IndexId.DEFAULT_SHARD_PATH_TYPE; IndexId indexId = null; + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { final String indexMetaFieldName = parser.currentName(); final XContentParser.Token currentToken = parser.nextToken(); switch (indexMetaFieldName) { case INDEX_ID: - indexId = new IndexId(indexName, parser.text()); + id = parser.text(); + break; + case IndexId.SHARD_PATH_TYPE: + pathType = parser.intValue(); break; case SNAPSHOTS: XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, currentToken, parser); @@ -821,7 +840,7 @@ private static void 
parseIndices( // different versions create or delete snapshot in the same repository. throw new OpenSearchParseException( "Detected a corrupted repository, index " - + indexId + + new IndexId(indexName, id, pathType) + " references an unknown snapshot uuid [" + uuid + "]" @@ -838,9 +857,10 @@ private static void parseIndices( break; } } - assert indexId != null; + assert id != null; + indexId = new IndexId(indexName, id, pathType); indexSnapshots.put(indexId, Collections.unmodifiableList(snapshotIds)); - indexLookup.put(indexId.getId(), indexId); + indexLookup.put(id, indexId); for (int i = 0; i < gens.size(); i++) { String parsedGen = gens.get(i); if (fixBrokenShardGens) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 5d5b0e91d8a6a..48e5448de5d2d 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -109,8 +109,11 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStorePathStrategy.BasePathInput; +import org.opensearch.index.remote.RemoteStorePathStrategy.SnapshotShardPathInput; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -146,6 +149,8 @@ import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotMissingException; +import org.opensearch.snapshots.SnapshotShardPaths; +import org.opensearch.snapshots.SnapshotShardPaths.ShardInfo; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.ThreadPool; @@ -157,6 +162,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -179,7 +186,6 @@ import java.util.stream.Stream; import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; -import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; @@ -225,6 +231,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final String UPLOADED_DATA_BLOB_PREFIX = "__"; + public static final String INDICES_DIR = "indices"; + /** * Prefix used for the identifiers of data blobs that were not actually written to the repository physically because their contents are * already stored in the metadata referencing them, i.e. 
in {@link BlobStoreIndexShardSnapshot} and @@ -270,6 +278,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public static final Setting SHALLOW_SNAPSHOT_V2 = Setting.boolSetting("shallow_snapshot_v2", false); + public static final Setting SHARD_PATH_TYPE = new Setting<>( + "shard_path_type", + PathType.FIXED.toString(), + PathType::parseString + ); + /** * Setting to set batch size of stale snapshot shard blobs that will be deleted by snapshot workers as part of snapshot deletion. * For optimal performance the value of the setting should be equal to or close to repository's max # of keys that can be deleted in single operation @@ -383,6 +397,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobStoreIndexShardSnapshots::fromXContent ); + public static final ConfigBlobStoreFormat SNAPSHOT_SHARD_PATHS_FORMAT = new ConfigBlobStoreFormat<>( + SnapshotShardPaths.FILE_NAME_FORMAT + ); + private volatile boolean readOnly; private final boolean isSystemRepository; @@ -393,6 +411,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce blobContainer = new SetOnce<>(); + private final SetOnce rootBlobContainer = new SetOnce<>(); + + private final SetOnce snapshotShardPathBlobContainer = new SetOnce<>(); + private final SetOnce blobStore = new SetOnce<>(); protected final ClusterService clusterService; @@ -787,6 +809,16 @@ BlobContainer getBlobContainer() { return blobContainer.get(); } + // package private, only use for testing + BlobContainer getRootBlobContainer() { + return rootBlobContainer.get(); + } + + // package private, only use for testing + public SetOnce getSnapshotShardPathBlobContainer() { + return snapshotShardPathBlobContainer; + } + // for test purposes only protected BlobStore getBlobStore() { return blobStore.get(); @@ -812,10 +844,47 @@ protected BlobContainer blobContainer() { } } } - return blobContainer; } + /** + * maintains single lazy instance of {@link BlobContainer} + */ + protected BlobContainer rootBlobContainer() { + assertSnapshotOrGenericThread(); + + BlobContainer rootBlobContainer = this.rootBlobContainer.get(); + if (rootBlobContainer == null) { + synchronized (lock) { + rootBlobContainer = this.rootBlobContainer.get(); + if (rootBlobContainer == null) { + rootBlobContainer = blobStore().blobContainer(BlobPath.cleanPath()); + this.rootBlobContainer.set(rootBlobContainer); + } + } + } + return rootBlobContainer; + } + + /** + * maintains single lazy instance of {@link BlobContainer} + */ + protected BlobContainer snapshotShardPathBlobContainer() { + assertSnapshotOrGenericThread(); + + BlobContainer snapshotShardPathBlobContainer = this.snapshotShardPathBlobContainer.get(); + if (snapshotShardPathBlobContainer == null) { + synchronized (lock) { + snapshotShardPathBlobContainer = this.snapshotShardPathBlobContainer.get(); + if (snapshotShardPathBlobContainer == null) { + snapshotShardPathBlobContainer = blobStore().blobContainer(basePath().add(SnapshotShardPaths.DIR)); + this.snapshotShardPathBlobContainer.set(snapshotShardPathBlobContainer); + } + } + } + return snapshotShardPathBlobContainer; + } + /** * Maintains single lazy instance of {@link BlobStore}. * Public for testing. @@ -1046,6 +1115,18 @@ private void doDeleteShardSnapshots( RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, ActionListener listener ) { + // We can create map of indexId to ShardInfo based on the old repository data. 
This is later used in cleanup + // of stale indexes in combination with Snapshot Shard Paths file + Map idToShardInfoMap = repositoryData.getIndices() + .values() + .stream() + .collect( + Collectors.toMap( + IndexId::getId, + indexId -> new ShardInfo(indexId, repositoryData.shardGenerations().getGens(indexId).size()) + ) + ); + if (SnapshotsService.useShardGenerations(repoMetaVersion)) { // First write the new shard state metadata (with the removed snapshot) and compute deletion targets final StepListener> writeShardMetaDataAndComputeDeletesStep = new StepListener<>(); @@ -1092,7 +1173,8 @@ private void doDeleteShardSnapshots( rootBlobs, updatedRepoData, remoteStoreLockManagerFactory, - afterCleanupsListener + afterCleanupsListener, + idToShardInfoMap ); asyncCleanupUnlinkedShardLevelBlobs( repositoryData, @@ -1123,7 +1205,8 @@ private void doDeleteShardSnapshots( rootBlobs, newRepoData, remoteStoreLockManagerFactory, - afterCleanupsListener + afterCleanupsListener, + idToShardInfoMap ); final StepListener> writeMetaAndComputeDeletesStep = new StepListener<>(); writeUpdatedShardMetaDataAndComputeDeletes( @@ -1148,13 +1231,25 @@ private void doDeleteShardSnapshots( } } + /** + * Cleans up the indices and data corresponding to all it's shards. + * + * @param deletedSnapshots list of snapshots being deleted + * @param foundIndices indices that are found at [base_path]/indices + * @param rootBlobs the blobs at the [base_path] + * @param updatedRepoData the new repository data after the deletion + * @param remoteStoreLockManagerFactory remote store lock manager factory used for shallow snapshots + * @param listener listener on deletion of the stale indices + * @param idToShardInfoMap map of indexId to ShardInfo + */ private void cleanupUnlinkedRootAndIndicesBlobs( Collection deletedSnapshots, Map foundIndices, Map rootBlobs, RepositoryData updatedRepoData, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - ActionListener listener + ActionListener listener, + Map idToShardInfoMap ) { cleanupStaleBlobs( deletedSnapshots, @@ -1162,7 +1257,8 @@ private void cleanupUnlinkedRootAndIndicesBlobs( rootBlobs, updatedRepoData, remoteStoreLockManagerFactory, - ActionListener.map(listener, ignored -> null) + ActionListener.map(listener, ignored -> null), + idToShardInfoMap ); } @@ -1173,7 +1269,7 @@ private void asyncCleanupUnlinkedShardLevelBlobs( RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, ActionListener listener ) { - final List filesToDelete = resolveFilesToDelete(oldRepositoryData, snapshotIds, deleteResults); + final List> filesToDelete = resolveFilesToDelete(oldRepositoryData, snapshotIds, deleteResults); if (filesToDelete.isEmpty()) { listener.onResponse(null); return; @@ -1181,10 +1277,10 @@ private void asyncCleanupUnlinkedShardLevelBlobs( try { AtomicInteger counter = new AtomicInteger(); - Collection> subList = filesToDelete.stream() + Collection>> subList = filesToDelete.stream() .collect(Collectors.groupingBy(it -> counter.getAndIncrement() / maxShardBlobDeleteBatch)) .values(); - final BlockingQueue> staleFilesToDeleteInBatch = new LinkedBlockingQueue<>(subList); + final BlockingQueue>> staleFilesToDeleteInBatch = new LinkedBlockingQueue<>(subList); final GroupedActionListener groupedListener = new GroupedActionListener<>( ActionListener.wrap(r -> { listener.onResponse(null); }, listener::onFailure), @@ -1287,57 +1383,67 @@ protected void releaseRemoteStoreLockAndCleanup( // When remoteStoreLockManagerFactory is non-null, while deleting the files, lock files are 
also released before deletion of respective // shallow-snap-UUID files. And if it is null, we just delete the stale shard blobs. private void executeStaleShardDelete( - BlockingQueue> staleFilesToDeleteInBatch, + BlockingQueue>> staleFilesToDeleteInBatch, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, GroupedActionListener listener ) throws InterruptedException { - List filesToDelete = staleFilesToDeleteInBatch.poll(0L, TimeUnit.MILLISECONDS); - if (filesToDelete != null) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { - try { - // filtering files for which remote store lock release and cleanup succeeded, - // remaining files for which it failed will be retried in next snapshot delete run. - List eligibleFilesToDelete = new ArrayList<>(); - for (String fileToDelete : filesToDelete) { - if (fileToDelete.contains(SHALLOW_SNAPSHOT_PREFIX)) { - String[] fileToDeletePath = fileToDelete.split("/"); - String indexId = fileToDeletePath[1]; - String shardId = fileToDeletePath[2]; - String shallowSnapBlob = fileToDeletePath[3]; - String snapshotUUID = extractShallowSnapshotUUID(shallowSnapBlob).orElseThrow(); - BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); - try { - releaseRemoteStoreLockAndCleanup(shardId, snapshotUUID, shardContainer, remoteStoreLockManagerFactory); - eligibleFilesToDelete.add(fileToDelete); - } catch (Exception e) { - logger.error( - "Failed to release lock or cleanup shard for indexID {}, shardID {} " + "and snapshot {}", - indexId, - shardId, - snapshotUUID - ); - } - } else { - eligibleFilesToDelete.add(fileToDelete); + List> filesToDelete = staleFilesToDeleteInBatch.poll(0L, TimeUnit.MILLISECONDS); + if (filesToDelete == null) { + return; + } + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { + try { + // filtering files for which remote store lock release and cleanup succeeded, + // remaining files for which it failed will be retried in next snapshot delete run. 
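A standalone sketch (plain Java, hypothetical values) of the batching idiom used by asyncCleanupUnlinkedShardLevelBlobs above: an AtomicInteger counter plus groupingBy on counter / maxShardBlobDeleteBatch splits the delete list into fixed-size batches, which executeStaleShardDelete then drains from a BlockingQueue on the SNAPSHOT threadpool.

import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class DeleteBatchingExample {
    public static void main(String[] args) {
        int maxShardBlobDeleteBatch = 3; // hypothetical batch size
        List<String> filesToDelete = IntStream.range(0, 8)
            .mapToObj(i -> "blob-" + i)
            .collect(Collectors.toList());
        AtomicInteger counter = new AtomicInteger();
        // Every consecutive run of maxShardBlobDeleteBatch files lands under the same group key.
        Collection<List<String>> batches = filesToDelete.stream()
            .collect(Collectors.groupingBy(it -> counter.getAndIncrement() / maxShardBlobDeleteBatch))
            .values();
        // Three batches of sizes 3, 3 and 2.
        batches.forEach(System.out::println);
    }
}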
+ List eligibleFilesToDelete = new ArrayList<>(); + for (Tuple fileToDelete : filesToDelete) { + BlobPath blobPath = fileToDelete.v1(); + String blobName = fileToDelete.v2(); + boolean deleteBlob = false; + if (blobName.startsWith(SHALLOW_SNAPSHOT_PREFIX)) { + String snapshotUUID = extractShallowSnapshotUUID(blobName).orElseThrow(); + String[] parts = blobPath.toArray(); + // For fixed, the parts would look like [,"indices","",""] + // For hashed_prefix, the parts would look like ["j01010001010",,"indices","",""] + // For hashed_infix, the parts would look like [,"j01010001010","indices","",""] + int partLength = parts.length; + String indexId = parts[partLength - 2]; + String shardId = parts[partLength - 1]; + BlobContainer shardContainer = blobStore().blobContainer(blobPath); + try { + releaseRemoteStoreLockAndCleanup(shardId, snapshotUUID, shardContainer, remoteStoreLockManagerFactory); + deleteBlob = true; + } catch (Exception e) { + logger.error( + "Failed to release lock or cleanup shard for indexID {}, shardID {} and snapshot {}", + indexId, + shardId, + snapshotUUID + ); } + } else { + deleteBlob = true; + } + if (deleteBlob) { + eligibleFilesToDelete.add(blobPath.buildAsString() + blobName); } - // Deleting the shard blobs - deleteFromContainer(blobContainer(), eligibleFilesToDelete); - l.onResponse(null); - } catch (Exception e) { - logger.warn( - () -> new ParameterizedMessage( - "[{}] Failed to delete following blobs during snapshot delete : {}", - metadata.name(), - filesToDelete - ), - e - ); - l.onFailure(e); } - executeStaleShardDelete(staleFilesToDeleteInBatch, remoteStoreLockManagerFactory, listener); - })); - } + // Deleting the shard blobs + deleteFromContainer(rootBlobContainer(), eligibleFilesToDelete); + l.onResponse(null); + } catch (Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "[{}] Failed to delete following blobs during snapshot delete : {}", + metadata.name(), + filesToDelete + ), + e + ); + l.onFailure(e); + } + executeStaleShardDelete(staleFilesToDeleteInBatch, remoteStoreLockManagerFactory, listener); + })); } // updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up. @@ -1479,26 +1585,30 @@ public void onFailure(Exception ex) { } } - private List resolveFilesToDelete( + /** + * Resolves the list of files that should be deleted during a snapshot deletion operation. + * This method combines files to be deleted from shard-level metadata and index-level metadata. 
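A standalone sketch (plain Java, hypothetical path segments) of the layout comment above: whichever path type produced the blob path, the index id and shard id are always the last two segments, which is why partLength - 2 and partLength - 1 are read.

public class ShardPathSegmentsExample {
    public static void main(String[] args) {
        // FIXED:         [<base_path>, "indices", <index-id>, <shard-id>]
        // HASHED_PREFIX: [<hash>, <base_path>, "indices", <index-id>, <shard-id>]
        // HASHED_INFIX:  [<base_path>, <hash>, "indices", <index-id>, <shard-id>]
        String[][] samples = {
            { "base", "indices", "idx-uuid", "0" },
            { "A0j1010001010", "base", "indices", "idx-uuid", "0" },
            { "base", "A0j1010001010", "indices", "idx-uuid", "0" }
        };
        for (String[] parts : samples) {
            String indexId = parts[parts.length - 2];
            String shardId = parts[parts.length - 1];
            System.out.println("indexId=" + indexId + ", shardId=" + shardId);
        }
    }
}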
+ * + * @param oldRepositoryData The repository data before the snapshot deletion + * @param snapshotIds The IDs of the snapshots being deleted + * @param deleteResults The results of removing snapshots from shard-level metadata + * @return A list of tuples, each containing a blob path and the name of a blob to be deleted + */ + private List> resolveFilesToDelete( RepositoryData oldRepositoryData, Collection snapshotIds, Collection deleteResults ) { - final String basePath = basePath().buildAsString(); - final int basePathLen = basePath.length(); final Map> indexMetaGenerations = oldRepositoryData.indexMetaDataToRemoveAfterRemovingSnapshots( snapshotIds ); return Stream.concat(deleteResults.stream().flatMap(shardResult -> { - final String shardPath = shardContainer(shardResult.indexId, shardResult.shardId).path().buildAsString(); - return shardResult.blobsToDelete.stream().map(blob -> shardPath + blob); + final BlobPath shardPath = shardPath(shardResult.indexId, shardResult.shardId); + return shardResult.blobsToDelete.stream().map(blob -> Tuple.tuple(shardPath, blob)); }), indexMetaGenerations.entrySet().stream().flatMap(entry -> { - final String indexContainerPath = indexContainer(entry.getKey()).path().buildAsString(); - return entry.getValue().stream().map(id -> indexContainerPath + INDEX_METADATA_FORMAT.blobName(id)); - })).map(absolutePath -> { - assert absolutePath.startsWith(basePath); - return absolutePath.substring(basePathLen); - }).collect(Collectors.toList()); + final BlobPath indexPath = indexPath(entry.getKey()); + return entry.getValue().stream().map(id -> Tuple.tuple(indexPath, INDEX_METADATA_FORMAT.blobName(id))); + })).collect(Collectors.toList()); } /** @@ -1513,6 +1623,7 @@ private List resolveFilesToDelete( * @param rootBlobs all blobs found directly under the repository root * @param newRepoData new repository data that was just written * @param remoteStoreLockManagerFactory RemoteStoreLockManagerFactory to be used for cleaning up remote store lock files. + * @param idToShardInfoMap map of indexId to ShardInfo * @param listener listener to invoke with the combined {@link DeleteResult} of all blobs removed in this operation */ private void cleanupStaleBlobs( @@ -1521,7 +1632,8 @@ private void cleanupStaleBlobs( Map rootBlobs, RepositoryData newRepoData, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - ActionListener listener + ActionListener listener, + Map idToShardInfoMap ) { final GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { DeleteResult deleteResult = DeleteResult.ZERO; @@ -1546,10 +1658,27 @@ private void cleanupStaleBlobs( if (foundIndices.keySet().equals(survivingIndexIds)) { groupedListener.onResponse(DeleteResult.ZERO); } else { - cleanupStaleIndices(foundIndices, survivingIndexIds, remoteStoreLockManagerFactory, groupedListener); + Map snapshotShardPaths = getSnapshotShardPaths(); + cleanupStaleIndices( + foundIndices, + survivingIndexIds, + remoteStoreLockManagerFactory, + groupedListener, + snapshotShardPaths, + idToShardInfoMap + ); } } + private Map getSnapshotShardPaths() { + try { + return snapshotShardPathBlobContainer().listBlobs(); + } catch (IOException ex) { + logger.warn(new ParameterizedMessage("Repository [{}] Failed to get the snapshot shard paths", metadata.name()), ex); + } + return Collections.emptyMap(); + } + /** * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the * repository. 
If remoteStoreLockManagerFactory is not null, remote store lock files are released when deleting the respective @@ -1602,7 +1731,8 @@ public void cleanup( rootBlobs, repositoryData, remoteStoreLockManagerFactory, - ActionListener.map(listener, RepositoryCleanupResult::new) + ActionListener.map(listener, RepositoryCleanupResult::new), + Collections.emptyMap() ), listener::onFailure ) @@ -1690,11 +1820,13 @@ private List cleanupStaleRootFiles( return Collections.emptyList(); } - private void cleanupStaleIndices( + void cleanupStaleIndices( Map foundIndices, Set survivingIndexIds, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - GroupedActionListener listener + GroupedActionListener listener, + Map snapshotShardPaths, + Map idToShardInfoMap ) { final GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { DeleteResult deleteResult = DeleteResult.ZERO; @@ -1718,7 +1850,13 @@ private void cleanupStaleIndices( foundIndices.size() - survivingIndexIds.size() ); for (int i = 0; i < workers; ++i) { - executeOneStaleIndexDelete(staleIndicesToDelete, remoteStoreLockManagerFactory, groupedListener); + executeOneStaleIndexDelete( + staleIndicesToDelete, + remoteStoreLockManagerFactory, + groupedListener, + snapshotShardPaths, + idToShardInfoMap + ); } } catch (Exception e) { // TODO: We shouldn't be blanket catching and suppressing all exceptions here and instead handle them safely upstream. @@ -1738,58 +1876,219 @@ private static boolean isIndexPresent(ClusterService clusterService, String inde return false; } + /** + * Executes the deletion of a single stale index. + * + * @param staleIndicesToDelete Queue of stale indices to delete + * @param remoteStoreLockManagerFactory Factory for creating remote store lock managers + * @param listener Listener for grouped delete actions + * @param snapshotShardPaths Map of snapshot shard paths and their metadata + * @param idToShardInfoMap Map of indexId to ShardInfo + * @throws InterruptedException if the thread is interrupted while waiting + */ private void executeOneStaleIndexDelete( BlockingQueue> staleIndicesToDelete, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - GroupedActionListener listener + GroupedActionListener listener, + Map snapshotShardPaths, + Map idToShardInfoMap ) throws InterruptedException { Map.Entry indexEntry = staleIndicesToDelete.poll(0L, TimeUnit.MILLISECONDS); - if (indexEntry != null) { - final String indexSnId = indexEntry.getKey(); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.supply(listener, () -> { - DeleteResult deleteResult = DeleteResult.ZERO; - try { - logger.debug("[{}] Found stale index [{}]. 
Cleaning it up", metadata.name(), indexSnId); - if (remoteStoreLockManagerFactory != null) { - final Map shardBlobs = indexEntry.getValue().children(); - for (Map.Entry shardBlob : shardBlobs.entrySet()) { - for (String blob : shardBlob.getValue().listBlobs().keySet()) { - final Optional snapshotUUID = extractShallowSnapshotUUID(blob); - if (snapshotUUID.isPresent()) { - releaseRemoteStoreLockAndCleanup( - shardBlob.getKey(), - snapshotUUID.get(), - shardBlob.getValue(), - remoteStoreLockManagerFactory - ); - } - } - } - } - // Deleting the index folder - deleteResult = indexEntry.getValue().delete(); - logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); - } catch (IOException e) { - logger.warn( - () -> new ParameterizedMessage( - "[{}] index {} is no longer part of any snapshots in the repository, " - + "but failed to clean up their index folders", - metadata.name(), - indexSnId - ), - e - ); - } catch (Exception e) { - assert false : e; - logger.warn(new ParameterizedMessage("[{}] Exception during single stale index delete", metadata.name()), e); + if (indexEntry == null) { + return; + } + final String indexSnId = indexEntry.getKey(); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.supply(listener, () -> { + try { + logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); + List matchingShardPaths = findMatchingShardPaths(indexSnId, snapshotShardPaths); + Optional highestGenShardPaths = findHighestGenerationShardPaths(matchingShardPaths); + + // The shardInfo can be null for 1) snapshots that pre-dates the hashed prefix snapshots. + // 2) Snapshot shard paths file upload failed + // In such cases, we fallback to fixed_path for cleanup of the data. + ShardInfo shardInfo = getShardInfo(highestGenShardPaths, idToShardInfoMap, indexSnId); + + if (remoteStoreLockManagerFactory != null) { + cleanupRemoteStoreLocks(indexEntry, shardInfo, remoteStoreLockManagerFactory); } - executeOneStaleIndexDelete(staleIndicesToDelete, remoteStoreLockManagerFactory, listener); + // Deletes the shard level data for the underlying index based on the shardInfo that was obtained above. + DeleteResult deleteResult = deleteShardData(shardInfo); + + // If there are matchingShardPaths, then we delete them after we have deleted the shard data. + deleteResult = deleteResult.add(cleanUpStaleSnapshotShardPathsFile(matchingShardPaths, snapshotShardPaths)); + + // Finally, we delete the [base_path]/indexId folder + deleteResult = deleteResult.add(indexEntry.getValue().delete()); // Deleting the index folder + logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); return deleteResult; - })); + } catch (IOException e) { + logger.warn( + () -> new ParameterizedMessage( + "[{}] index {} is no longer part of any snapshots in the repository, " + + "but failed to clean up their index folders", + metadata.name(), + indexSnId + ), + e + ); + return DeleteResult.ZERO; + } catch (Exception e) { + assert false : e; + logger.warn(new ParameterizedMessage("[{}] Exception during single stale index delete", metadata.name()), e); + return DeleteResult.ZERO; + } finally { + executeOneStaleIndexDelete( + staleIndicesToDelete, + remoteStoreLockManagerFactory, + listener, + snapshotShardPaths, + idToShardInfoMap + ); + } + })); + } + + /** + * Finds and returns a list of shard paths that match the given index ID. 
+ * + * @param indexId The ID of the index to match + * @param snapshotShardPaths Map of snapshot shard paths and their metadata + * @return List of matching shard paths + */ + private List findMatchingShardPaths(String indexId, Map snapshotShardPaths) { + return snapshotShardPaths.keySet().stream().filter(s -> s.startsWith(indexId)).collect(Collectors.toList()); + } + + /** + * Finds the shard path with the highest generation number from the given list of matching shard paths. + * + * @param matchingShardPaths List of shard paths that match a specific criteria + * @return An Optional containing the shard path with the highest generation number, or empty if the list is empty + */ + private Optional findHighestGenerationShardPaths(List matchingShardPaths) { + return matchingShardPaths.stream() + .map(s -> s.split("\\" + SnapshotShardPaths.DELIMITER)) + .sorted((a, b) -> Integer.parseInt(b[2]) - Integer.parseInt(a[2])) + .map(parts -> String.join(SnapshotShardPaths.DELIMITER, parts)) + .findFirst(); + } + + /** + * Cleans up remote store locks for a given index entry. + * + * @param indexEntry The index entry containing the blob container + * @param shardInfo ShardInfo for the IndexId being cleaned up + * @param remoteStoreLockManagerFactory Factory for creating remote store lock managers + * @throws IOException If an I/O error occurs during the cleanup process + */ + private void cleanupRemoteStoreLocks( + Map.Entry indexEntry, + ShardInfo shardInfo, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory + ) throws IOException { + if (shardInfo == null) { + releaseRemoteStoreLocksAndCleanup(indexEntry.getValue().children(), remoteStoreLockManagerFactory); + } else { + Map shardContainers = new HashMap<>(shardInfo.getShardCount()); + for (int i = 0; i < shardInfo.getShardCount(); i++) { + shardContainers.put(String.valueOf(i), shardContainer(shardInfo.getIndexId(), i)); + } + releaseRemoteStoreLocksAndCleanup(shardContainers, remoteStoreLockManagerFactory); } } + /** + * Releases remote store locks and performs cleanup for each shard blob. + * + * @param shardBlobs Map of shard IDs to their corresponding BlobContainers + * @param remoteStoreLockManagerFactory Factory for creating remote store lock managers + * @throws IOException If an I/O error occurs during the release and cleanup process + */ + void releaseRemoteStoreLocksAndCleanup( + Map shardBlobs, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory + ) throws IOException { + for (Map.Entry shardBlob : shardBlobs.entrySet()) { + for (String blob : shardBlob.getValue().listBlobs().keySet()) { + final Optional snapshotUUID = extractShallowSnapshotUUID(blob); + if (snapshotUUID.isPresent()) { + releaseRemoteStoreLockAndCleanup( + shardBlob.getKey(), + snapshotUUID.get(), + shardBlob.getValue(), + remoteStoreLockManagerFactory + ); + } + } + } + } + + /** + * Deletes shard data for the provided ShardInfo object. + * + * @param shardInfo The ShardInfo object containing information about the shards to be deleted. + * @return A DeleteResult object representing the result of the deletion operation. + * @throws IOException If an I/O error occurs during the deletion process. 
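A standalone sketch (plain Java, hypothetical blob names) of how the two helpers above cooperate: findMatchingShardPaths keeps the snapshot_shard_paths files whose name starts with the stale index id, and findHighestGenerationShardPaths then picks the one with the largest shard count, the third dot-delimited token of the file name.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;

public class ShardPathsSelectionExample {
    public static void main(String[] args) {
        List<String> snapshotShardPathFiles = List.of(
            "idxA.logs.3.1.0",      // older file, written when idxA had 3 shards
            "idxA.logs.5.1.0",      // newer file, written when idxA grew to 5 shards
            "idxB.metrics.2.1.0"
        );
        // findMatchingShardPaths equivalent: prefix match on the index id.
        List<String> matching = snapshotShardPathFiles.stream()
            .filter(name -> name.startsWith("idxA"))
            .collect(Collectors.toList());
        // findHighestGenerationShardPaths equivalent: max by the shard-count token.
        Optional<String> highest = matching.stream()
            .max(Comparator.comparingInt((String name) -> Integer.parseInt(name.split("\\.")[2])));
        System.out.println(highest.orElse("<none>"));   // idxA.logs.5.1.0
    }
}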
+ */ + private DeleteResult deleteShardData(ShardInfo shardInfo) throws IOException { + // If the provided ShardInfo is null, return a zero DeleteResult + if (shardInfo == null) { + return DeleteResult.ZERO; + } + + // Initialize the DeleteResult with zero values + DeleteResult deleteResult = DeleteResult.ZERO; + + // Iterate over the shards and delete each shard's data + for (int i = 0; i < shardInfo.getShardCount(); i++) { + // Call the delete method on the shardContainer and accumulate the result + deleteResult = deleteResult.add(shardContainer(shardInfo.getIndexId(), i).delete()); + } + + // Return the accumulated DeleteResult + return deleteResult; + } + + /** + * Retrieves the ShardInfo object based on the provided highest generation shard paths, + * index ID, and the mapping of index IDs to ShardInfo objects. + * + * @param highestGenShardPaths The optional highest generation shard path. + * @param idToShardInfoMap A map containing index IDs and their corresponding ShardInfo objects. + * @param indexId The index ID for which the ShardInfo object is needed. + * @return The ShardInfo object with the highest shard count, or null if no ShardInfo is available. + */ + private ShardInfo getShardInfo(Optional highestGenShardPaths, Map idToShardInfoMap, String indexId) { + // Extract the ShardInfo object from the highest generation shard path, if present + ShardInfo shardInfoFromPath = highestGenShardPaths.map(SnapshotShardPaths::parseShardPath).orElse(null); + + // Retrieve the ShardInfo object from the idToShardInfoMap using the indexId + ShardInfo shardInfoFromMap = idToShardInfoMap.get(indexId); + + // If shardInfoFromPath is null, return shardInfoFromMap (which could also be null) + if (shardInfoFromPath == null) { + return shardInfoFromMap; + } + + // If shardInfoFromMap is null, return shardInfoFromPath (which could also be null) + if (shardInfoFromMap == null) { + return shardInfoFromPath; + } + + // If both shardInfoFromPath and shardInfoFromMap are non-null, + // return the ShardInfo object with the higher shard count + return shardInfoFromPath.getShardCount() >= shardInfoFromMap.getShardCount() ? shardInfoFromPath : shardInfoFromMap; + } + + private DeleteResult cleanUpStaleSnapshotShardPathsFile(List matchingShardPaths, Map snapshotShardPaths) + throws IOException { + deleteFromContainer(snapshotShardPathBlobContainer(), matchingShardPaths); + long totalBytes = matchingShardPaths.stream().mapToLong(s -> snapshotShardPaths.get(s).length()).sum(); + return new DeleteResult(matchingShardPaths.size(), totalBytes); + } + @Override public void finalizeSnapshot( final ShardGenerations shardGenerations, @@ -1864,6 +2163,10 @@ public void finalizeSnapshot( indexMetas, indexMetaIdentifiers ); + // The snapshot shards path would be uploaded for new index ids or index ids where the shard gen count (a.k.a + // number_of_shards) has increased. + Set updatedIndexIds = writeNewIndexShardPaths(existingRepositoryData, updatedRepositoryData, snapshotId); + cleanupRedundantSnapshotShardPaths(updatedIndexIds); writeIndexGen( updatedRepositoryData, repositoryStateId, @@ -1926,21 +2229,130 @@ public void finalizeSnapshot( }, onUpdateFailure); } + /** + * This method cleans up the redundant snapshot shard paths file for index ids where the number of shards has increased + * on account of new indexes by same index name being snapshotted that exists already in the repository's snapshots. 
+ */ + private void cleanupRedundantSnapshotShardPaths(Set updatedShardPathsIndexIds) { + Set updatedIndexIds = updatedShardPathsIndexIds.stream() + .map(s -> s.split("\\" + SnapshotShardPaths.DELIMITER)[0]) + .collect(Collectors.toSet()); + Set indexIdShardPaths = getSnapshotShardPaths().keySet(); + List staleShardPaths = indexIdShardPaths.stream().filter(s -> updatedShardPathsIndexIds.contains(s) == false).filter(s -> { + String indexId = s.split("\\" + SnapshotShardPaths.DELIMITER)[0]; + return updatedIndexIds.contains(indexId); + }).collect(Collectors.toList()); + try { + deleteFromContainer(snapshotShardPathBlobContainer(), staleShardPaths); + } catch (IOException e) { + logger.warn( + new ParameterizedMessage( + "Repository [{}] Exception during snapshot stale index deletion {}", + metadata.name(), + staleShardPaths + ), + e + ); + } + } + + private Set writeNewIndexShardPaths( + RepositoryData existingRepositoryData, + RepositoryData updatedRepositoryData, + SnapshotId snapshotId + ) { + Set updatedIndexIds = new HashSet<>(); + Set indicesToUpdate = new HashSet<>(updatedRepositoryData.getIndices().values()); + for (IndexId indexId : indicesToUpdate) { + if (indexId.getShardPathType() == PathType.FIXED.getCode()) { + continue; + } + int oldShardCount = existingRepositoryData.shardGenerations().getGens(indexId).size(); + int newShardCount = updatedRepositoryData.shardGenerations().getGens(indexId).size(); + if (newShardCount > oldShardCount) { + String shardPathsBlobName = writeIndexShardPaths(indexId, snapshotId, newShardCount); + if (Objects.nonNull(shardPathsBlobName)) { + updatedIndexIds.add(shardPathsBlobName); + } + } + } + return updatedIndexIds; + } + + String writeIndexShardPaths(IndexId indexId, SnapshotId snapshotId, int shardCount) { + try { + List paths = getShardPaths(indexId, shardCount); + int pathType = indexId.getShardPathType(); + int pathHashAlgorithm = FNV_1A_COMPOSITE_1.getCode(); + String blobName = String.join( + SnapshotShardPaths.DELIMITER, + indexId.getId(), + indexId.getName(), + String.valueOf(shardCount), + String.valueOf(pathType), + String.valueOf(pathHashAlgorithm) + ); + SnapshotShardPaths shardPaths = new SnapshotShardPaths( + paths, + indexId.getId(), + indexId.getName(), + shardCount, + PathType.fromCode(pathType), + PathHashAlgorithm.fromCode(pathHashAlgorithm) + ); + SNAPSHOT_SHARD_PATHS_FORMAT.write(shardPaths, snapshotShardPathBlobContainer(), blobName); + logShardPathsOperationSuccess(indexId, snapshotId); + return blobName; + } catch (IOException e) { + logShardPathsOperationWarning(indexId, snapshotId, e); + } + return null; + } + + private List getShardPaths(IndexId indexId, int shardCount) { + List paths = new ArrayList<>(); + for (int shardId = 0; shardId < shardCount; shardId++) { + BlobPath shardPath = shardPath(indexId, shardId); + paths.add(shardPath.buildAsString()); + } + return paths; + } + + private void logShardPathsOperationSuccess(IndexId indexId, SnapshotId snapshotId) { + logger.trace( + () -> new ParameterizedMessage( + "Repository [{}] successfully wrote shard paths for index [{}] in snapshot [{}]", + metadata.name(), + indexId.getName(), + snapshotId.getName() + ) + ); + } + + private void logShardPathsOperationWarning(IndexId indexId, SnapshotId snapshotId, @Nullable Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "Repository [{}] Failed to write shard paths for index [{}] in snapshot [{}]", + metadata.name(), + indexId.getName(), + snapshotId.getName() + ), + e + ); + } + // Delete all old shard gen 
blobs that aren't referenced any longer as a result from moving to updated repository data private void cleanupOldShardGens(RepositoryData existingRepositoryData, RepositoryData updatedRepositoryData) { final List toDelete = new ArrayList<>(); - final int prefixPathLen = basePath().buildAsString().length(); updatedRepositoryData.shardGenerations() .obsoleteShardGenerations(existingRepositoryData.shardGenerations()) .forEach( (indexId, gens) -> gens.forEach( - (shardId, oldGen) -> toDelete.add( - shardContainer(indexId, shardId).path().buildAsString().substring(prefixPathLen) + INDEX_FILE_PREFIX + oldGen - ) + (shardId, oldGen) -> toDelete.add(shardPath(indexId, shardId).buildAsString() + INDEX_FILE_PREFIX + oldGen) ) ); try { - deleteFromContainer(blobContainer(), toDelete); + deleteFromContainer(rootBlobContainer(), toDelete); } catch (Exception e) { logger.warn("Failed to clean up old shard generation blobs", e); } @@ -1987,11 +2399,15 @@ private void deleteFromContainer(BlobContainer container, List blobs) th } private BlobPath indicesPath() { - return basePath().add("indices"); + return basePath().add(INDICES_DIR); } private BlobContainer indexContainer(IndexId indexId) { - return blobStore().blobContainer(indicesPath().add(indexId.getId())); + return blobStore().blobContainer(indexPath(indexId)); + } + + private BlobPath indexPath(IndexId indexId) { + return indicesPath().add(indexId.getId()); } private BlobContainer shardContainer(IndexId indexId, ShardId shardId) { @@ -1999,7 +2415,17 @@ private BlobContainer shardContainer(IndexId indexId, ShardId shardId) { } public BlobContainer shardContainer(IndexId indexId, int shardId) { - return blobStore().blobContainer(indicesPath().add(indexId.getId()).add(Integer.toString(shardId))); + return blobStore().blobContainer(shardPath(indexId, shardId)); + } + + private BlobPath shardPath(IndexId indexId, int shardId) { + PathType pathType = PathType.fromCode(indexId.getShardPathType()); + SnapshotShardPathInput shardPathInput = new SnapshotShardPathInput.Builder().basePath(basePath()) + .indexUUID(indexId.getId()) + .shardId(String.valueOf(shardId)) + .build(); + PathHashAlgorithm pathHashAlgorithm = pathType != PathType.FIXED ? 
FNV_1A_COMPOSITE_1 : null; + return pathType.path(shardPathInput, pathHashAlgorithm); } /** @@ -2086,7 +2512,7 @@ private BlobContainer testContainer(String seed) { BlobPath testBlobPath; if (prefixModeVerification == true) { BasePathInput pathInput = BasePathInput.builder().basePath(basePath()).indexUUID(seed).build(); - testBlobPath = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + testBlobPath = PathType.HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); } else { testBlobPath = basePath(); } @@ -2174,11 +2600,12 @@ private void doGetRepositoryData(ActionListener listener) { loaded = repositoryDataFromCachedEntry(cached); } else { loaded = getRepositoryData(genToLoad); + Version minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); // We can cache serialized in the most recent version here without regard to the actual repository metadata version // since we're only caching the information that we just wrote and thus won't accidentally cache any information that // isn't safe cacheRepositoryData( - BytesReference.bytes(loaded.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)), + BytesReference.bytes(loaded.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT, minNodeVersion)), genToLoad ); } @@ -2569,8 +2996,9 @@ public void onFailure(Exception e) { } final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen); logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob); + Version minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); final BytesReference serializedRepoData = BytesReference.bytes( - newRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), version) + newRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), version, minNodeVersion) ); writeAtomic(blobContainer(), indexBlob, serializedRepoData, true); maybeWriteIndexLatest(newGen); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java index 8127bf8c2a2a2..630048c61785d 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java @@ -76,4 +76,8 @@ public void writeAsyncWithUrgentPriority(T obj, BlobContainer blobContainer, Str ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); } } + + public void write(T obj, BlobContainer blobContainer, String name) throws IOException { + write(obj, blobContainer, name, new NoneCompressor(), ToXContent.EMPTY_PARAMS, XContentType.JSON, null, null); + } } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index b2d4fff14a7f1..154ff5b0aaec1 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -61,6 +61,7 @@ public class RestSnapshotsStatusAction extends BaseRestHandler { public List routes() { return unmodifiableList( asList( + new Route(GET, "/_snapshot/{repository}/{snapshot}/{index}/_status"), new Route(GET, "/_snapshot/{repository}/{snapshot}/_status"), new Route(GET, "/_snapshot/{repository}/_status"), new Route(GET, 
"/_snapshot/_status") @@ -80,7 +81,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) { snapshots = Strings.EMPTY_ARRAY; } - SnapshotsStatusRequest snapshotsStatusRequest = snapshotsStatusRequest(repository).snapshots(snapshots); + String[] indices = request.paramAsStringArray("index", Strings.EMPTY_ARRAY); + SnapshotsStatusRequest snapshotsStatusRequest = snapshotsStatusRequest(repository).snapshots(snapshots).indices(indices); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); snapshotsStatusRequest.clusterManagerNodeTimeout( diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 647af7ce0256a..4576921b8426e 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -32,6 +32,8 @@ package org.opensearch.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -70,6 +72,9 @@ import org.opensearch.search.aggregations.SearchContextAggregations; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.collapse.CollapseContext; +import org.opensearch.search.deciders.ConcurrentSearchDecider; +import org.opensearch.search.deciders.ConcurrentSearchDecision; +import org.opensearch.search.deciders.ConcurrentSearchVisitor; import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.FetchSearchResult; @@ -98,16 +103,23 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executor; import java.util.function.Function; import java.util.function.LongSupplier; +import java.util.stream.Collectors; import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_NONE; import static org.opensearch.search.SearchService.MAX_AGGREGATION_REWRITE_FILTERS; /** @@ -117,11 +129,14 @@ */ final class DefaultSearchContext extends SearchContext { + private static final Logger logger = LogManager.getLogger(DefaultSearchContext.class); + private final ReaderContext readerContext; private final Engine.Searcher engineSearcher; private final ShardSearchRequest request; private final SearchShardTarget shardTarget; private final LongSupplier relativeTimeSupplier; + private final Collection concurrentSearchDeciders; private SearchType searchType; private final BigArrays bigArrays; private final IndexShard indexShard; @@ -187,7 +202,7 @@ final class DefaultSearchContext extends 
SearchContext { private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; private final Function requestToAggReduceContextBuilder; - private final boolean concurrentSearchSettingsEnabled; + private final String concurrentSearchMode; private final SetOnce requestShouldUseConcurrentSearch = new SetOnce<>(); private final int maxAggRewriteFilters; private final int cardinalityAggregationPruningThreshold; @@ -205,7 +220,8 @@ final class DefaultSearchContext extends SearchContext { Version minNodeVersion, boolean validate, Executor executor, - Function requestToAggReduceContextBuilder + Function requestToAggReduceContextBuilder, + Collection concurrentSearchDeciders ) throws IOException { this.readerContext = readerContext; this.request = request; @@ -221,14 +237,15 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); this.clusterService = clusterService; this.engineSearcher = readerContext.acquireSearcher("search"); - this.concurrentSearchSettingsEnabled = evaluateConcurrentSegmentSearchSettings(executor); + this.concurrentSearchMode = evaluateConcurrentSearchMode(executor); this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(), lowLevelCancellation, - concurrentSearchSettingsEnabled ? executor : null, + concurrentSearchMode.equals(CONCURRENT_SEGMENT_SEARCH_MODE_AUTO) + || concurrentSearchMode.equals(CONCURRENT_SEGMENT_SEARCH_MODE_ALL) ? executor : null, this ); this.relativeTimeSupplier = relativeTimeSupplier; @@ -247,6 +264,7 @@ final class DefaultSearchContext extends SearchContext { this.maxAggRewriteFilters = evaluateFilterRewriteSetting(); this.cardinalityAggregationPruningThreshold = evaluateCardinalityAggregationPruningThreshold(); + this.concurrentSearchDeciders = concurrentSearchDeciders; } @Override @@ -902,11 +920,68 @@ public Profilers getProfilers() { @Override public boolean shouldUseConcurrentSearch() { assert requestShouldUseConcurrentSearch.get() != null : "requestShouldUseConcurrentSearch must be set"; - return concurrentSearchSettingsEnabled && Boolean.TRUE.equals(requestShouldUseConcurrentSearch.get()); + assert concurrentSearchMode != null : "concurrentSearchMode must be set"; + return (concurrentSearchMode.equals(CONCURRENT_SEGMENT_SEARCH_MODE_AUTO) + || concurrentSearchMode.equals(CONCURRENT_SEGMENT_SEARCH_MODE_ALL)) + && Boolean.TRUE.equals(requestShouldUseConcurrentSearch.get()); + } + + private boolean evaluateAutoMode() { + + // filter out deciders that want to opt-out of decision-making + final Set filteredDeciders = concurrentSearchDeciders.stream() + .filter(concurrentSearchDecider -> concurrentSearchDecider.canEvaluateForIndex(indexService.getIndexSettings())) + .collect(Collectors.toSet()); + // evaluate based on concurrent search query visitor + if (filteredDeciders.size() > 0) { + ConcurrentSearchVisitor concurrentSearchVisitor = new ConcurrentSearchVisitor( + filteredDeciders, + indexService.getIndexSettings() + ); + if (request().source() != null && request().source().query() != null) { + QueryBuilder queryBuilder = request().source().query(); + queryBuilder.visit(concurrentSearchVisitor); + } + } + + final List decisions = new ArrayList<>(); + for (ConcurrentSearchDecider decider : filteredDeciders) { + ConcurrentSearchDecision decision = decider.getConcurrentSearchDecision(); + if (decision != null) { + if (logger.isDebugEnabled()) { + 
logger.debug("concurrent search decision from plugin decider [{}]", decision.toString()); + } + decisions.add(decision); + } + } + + final ConcurrentSearchDecision pluginDecision = ConcurrentSearchDecision.getCompositeDecision(decisions); + if (pluginDecision.getDecisionStatus().equals(ConcurrentSearchDecision.DecisionStatus.NO_OP)) { + // plugins don't have preference, decide based on whether request has aggregations or not. + if (aggregations() != null) { + if (logger.isDebugEnabled()) { + logger.debug("request has supported aggregations, using concurrent search"); + } + return true; + + } else { + if (logger.isDebugEnabled()) { + logger.debug("request does not have aggregations, not using concurrent search"); + } + return false; + } + + } else { + if (logger.isDebugEnabled()) { + logger.debug("concurrent search decision from plugins [{}]", pluginDecision.toString()); + } + return pluginDecision.getDecisionStatus() == ConcurrentSearchDecision.DecisionStatus.YES; + } + } /** - * Evaluate if parsed request supports concurrent segment search + * Evaluate if request should use concurrent search based on request and concurrent search deciders */ public void evaluateRequestShouldUseConcurrentSearch() { if (sort != null && sort.isSortOnTimeSeriesField()) { @@ -917,6 +992,8 @@ && aggregations().factories() != null requestShouldUseConcurrentSearch.set(false); } else if (terminateAfter != DEFAULT_TERMINATE_AFTER) { requestShouldUseConcurrentSearch.set(false); + } else if (concurrentSearchMode.equals(CONCURRENT_SEGMENT_SEARCH_MODE_AUTO)) { + requestShouldUseConcurrentSearch.set(evaluateAutoMode()); } else { requestShouldUseConcurrentSearch.set(true); } @@ -964,26 +1041,40 @@ public BucketCollectorProcessor bucketCollectorProcessor() { } /** - * Evaluate based on cluster and index settings if concurrent segment search should be used for this request context - * @return true: use concurrent search - * false: otherwise + * Evaluate the concurrentSearchMode based on cluster and index settings if concurrent segment search + * should be used for this request context + * If the cluster.search.concurrent_segment_search.mode setting + * is not explicitly set, the evaluation falls back to the + * cluster.search.concurrent_segment_search.enabled boolean setting + * which will evaluate to true or false. This is then evaluated to "all" or "none" respectively + * @return one of "none", "auto", "all" */ - private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { + private String evaluateConcurrentSearchMode(Executor concurrentSearchExecutor) { // Do not use concurrent segment search for system indices or throttled requests. 
See: // https://github.com/opensearch-project/OpenSearch/issues/12951 if (indexShard.isSystem() || indexShard.indexSettings().isSearchThrottled()) { - return false; + return CONCURRENT_SEGMENT_SEARCH_MODE_NONE; } + if ((clusterService != null) && concurrentSearchExecutor != null) { + String concurrentSearchMode = indexService.getIndexSettings() + .getSettings() + .get( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), + clusterService.getClusterSettings().getOrNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE) + ); + if (concurrentSearchMode != null) { + return concurrentSearchMode; + } - if ((clusterService != null) && (concurrentSearchExecutor != null)) { + // mode setting not set, fallback to concurrent_segment_search.enabled setting return indexService.getIndexSettings() .getSettings() .getAsBoolean( IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); + ) ? CONCURRENT_SEGMENT_SEARCH_MODE_ALL : CONCURRENT_SEGMENT_SEARCH_MODE_NONE; } - return false; + return CONCURRENT_SEGMENT_SEARCH_MODE_NONE; } @Override diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 24f81104f0a0e..e9ed02828b971 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -255,6 +255,7 @@ import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregator; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; +import org.opensearch.search.deciders.ConcurrentSearchDecider; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.FetchSubPhase; import org.opensearch.search.fetch.subphase.ExplainPhase; @@ -298,6 +299,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Optional; @@ -332,6 +334,8 @@ public class SearchModule { private final QueryPhaseSearcher queryPhaseSearcher; private final SearchPlugin.ExecutorServiceProvider indexSearcherExecutorProvider; + private final Collection concurrentSearchDeciders; + /** * Constructs a new SearchModule object *
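A simplified standalone sketch (plain Java, hypothetical inputs) of the precedence implemented by evaluateConcurrentSearchMode above, leaving out the system-index, search-throttled and missing-executor guards that force "none" first: an explicit index-level mode wins, then the cluster-level mode, and only if neither is set does the deprecated boolean concurrent_segment_search.enabled setting map to "all" or "none".

public class ConcurrentSearchModeExample {
    static String resolveMode(String indexMode, String clusterMode, boolean legacyEnabled) {
        String mode = indexMode != null ? indexMode : clusterMode;
        if (mode != null) {
            return mode;                              // "all", "none" or "auto"
        }
        return legacyEnabled ? "all" : "none";        // fallback to the deprecated boolean setting
    }

    public static void main(String[] args) {
        System.out.println(resolveMode(null, "auto", false));   // auto
        System.out.println(resolveMode("none", "auto", true));  // none (index setting wins)
        System.out.println(resolveMode(null, null, true));      // all  (legacy boolean fallback)
    }
}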

@@ -360,6 +364,25 @@ public SearchModule(Settings settings, List plugins) { queryPhaseSearcher = registerQueryPhaseSearcher(plugins); indexSearcherExecutorProvider = registerIndexSearcherExecutorProvider(plugins); namedWriteables.addAll(SortValue.namedWriteables()); + concurrentSearchDeciders = registerConcurrentSearchDeciders(plugins); + } + + private Collection registerConcurrentSearchDeciders(List plugins) { + List concurrentSearchDeciders = new ArrayList<>(); + for (SearchPlugin plugin : plugins) { + ConcurrentSearchDecider decider = plugin.getConcurrentSearchDecider(); + if (decider != null) { + concurrentSearchDeciders.add(decider); + } + } + return concurrentSearchDeciders; + } + + /** + * Returns the concurrent search deciders that the plugins have registered + */ + public Collection getConcurrentSearchDeciders() { + return concurrentSearchDeciders; } public List getNamedWriteables() { diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index e6e19c0b9582f..e33a47fe8e178 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -105,6 +105,7 @@ import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.collapse.CollapseContext; +import org.opensearch.search.deciders.ConcurrentSearchDecider; import org.opensearch.search.dfs.DfsPhase; import org.opensearch.search.dfs.DfsSearchResult; import org.opensearch.search.fetch.FetchPhase; @@ -147,6 +148,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -257,6 +259,34 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv "search.concurrent_segment_search.enabled", false, Property.Dynamic, + Property.NodeScope, + Property.Deprecated + ); + + // Allow concurrent segment search for all requests + public static final String CONCURRENT_SEGMENT_SEARCH_MODE_ALL = "all"; + + // Disallow concurrent search for all requests + public static final String CONCURRENT_SEGMENT_SEARCH_MODE_NONE = "none"; + + // Make decision for concurrent search based on concurrent search deciders + public static final String CONCURRENT_SEGMENT_SEARCH_MODE_AUTO = "auto"; + + public static final Setting CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE = Setting.simpleString( + "search.concurrent_segment_search.mode", + CONCURRENT_SEGMENT_SEARCH_MODE_NONE, + value -> { + switch (value) { + case CONCURRENT_SEGMENT_SEARCH_MODE_ALL: + case CONCURRENT_SEGMENT_SEARCH_MODE_NONE: + case CONCURRENT_SEGMENT_SEARCH_MODE_AUTO: + // valid setting + break; + default: + throw new IllegalArgumentException("Setting value must be one of [all, none, auto]"); + } + }, + Property.Dynamic, Property.NodeScope ); @@ -328,6 +358,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final QueryPhase queryPhase; private final FetchPhase fetchPhase; + private final Collection concurrentSearchDeciders; private volatile long defaultKeepAlive; @@ -372,7 +403,8 @@ public SearchService( ResponseCollectorService responseCollectorService, CircuitBreakerService circuitBreakerService, Executor indexSearcherExecutor, - TaskResourceTrackingService taskResourceTrackingService + TaskResourceTrackingService taskResourceTrackingService, + Collection 
concurrentSearchDeciders ) { Settings settings = clusterService.getSettings(); this.threadPool = threadPool; @@ -427,6 +459,8 @@ public SearchService( allowDerivedField = CLUSTER_ALLOW_DERIVED_FIELD_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_ALLOW_DERIVED_FIELD_SETTING, this::setAllowDerivedField); + + this.concurrentSearchDeciders = concurrentSearchDeciders; } private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { @@ -1126,7 +1160,8 @@ private DefaultSearchContext createSearchContext(ReaderContext reader, ShardSear clusterService.state().nodes().getMinNodeVersion(), validate, indexSearcherExecutor, - this::aggReduceContextBuilder + this::aggReduceContextBuilder, + concurrentSearchDeciders ); // we clone the query shard context here just for rewriting otherwise we // might end up with incorrect state since we are using now() or script services diff --git a/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchDecider.java b/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchDecider.java new file mode 100644 index 0000000000000..9c588bb45b4ec --- /dev/null +++ b/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchDecider.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.deciders; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.query.QueryBuilder; + +/** + * {@link ConcurrentSearchDecider} allows pluggable way to evaluate if a query in the search request + * can use concurrent segment search using the passed in queryBuilders from query tree and index settings + * on a per shard request basis. + * Implementations can also opt out of the evaluation process for certain indices based on the index settings. + * For all the deciders which can evaluate query tree for an index, its evaluateForQuery method + * will be called for each node in the query tree. After traversing of the query tree is completed, the final + * decision from the deciders will be obtained using {@link ConcurrentSearchDecider#getConcurrentSearchDecision} + */ +@ExperimentalApi +public abstract class ConcurrentSearchDecider { + + /** + * Evaluate for the passed in queryBuilder node in the query tree of the search request + * if concurrent segment search can be used. + * This method will be called for each of the query builder node in the query tree of the request. + */ + public abstract void evaluateForQuery(QueryBuilder queryBuilder, IndexSettings indexSettings); + + /** + * Provides a way for deciders to opt out of decision-making process for certain requests based on + * index settings. 
+ * Return true if interested in decision making for index, + * false, otherwise + */ + public abstract boolean canEvaluateForIndex(IndexSettings indexSettings); + + /** + * Provide the final decision for concurrent search based on all evaluations + * Plugins may need to maintain internal state of evaluations to provide a final decision + * If decision is null, then it is ignored + * @return ConcurrentSearchDecision + */ + public abstract ConcurrentSearchDecision getConcurrentSearchDecision(); + +} diff --git a/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchDecision.java b/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchDecision.java new file mode 100644 index 0000000000000..2a30413eff9c8 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchDecision.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.deciders; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.Collection; + +/** + * This Class defines the decisions that a {@link ConcurrentSearchDecider#getConcurrentSearchDecision} can return. + * + */ +@ExperimentalApi +public class ConcurrentSearchDecision { + + final private DecisionStatus decisionStatus; + final private String decisionReason; + + public ConcurrentSearchDecision(DecisionStatus decisionStatus, String decisionReason) { + this.decisionStatus = decisionStatus; + this.decisionReason = decisionReason; + } + + public DecisionStatus getDecisionStatus() { + return decisionStatus; + } + + public String getDecisionReason() { + return decisionReason; + } + + /** + * This enum contains the decision status for concurrent search. 
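A hypothetical decider sketch built against the ConcurrentSearchDecider and ConcurrentSearchDecision contracts introduced here; the class names and the "tenant_id" field are invented for illustration. It votes NO when the query tree contains a term query on that field, stays NO_OP otherwise, and is exposed through SearchPlugin#getConcurrentSearchDecider, which is how SearchModule collects deciders above.

import org.opensearch.index.IndexSettings;
import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.TermQueryBuilder;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.SearchPlugin;
import org.opensearch.search.deciders.ConcurrentSearchDecider;
import org.opensearch.search.deciders.ConcurrentSearchDecision;

public class TenantAwareDecider extends ConcurrentSearchDecider {
    private volatile ConcurrentSearchDecision decision = new ConcurrentSearchDecision(
        ConcurrentSearchDecision.DecisionStatus.NO_OP,
        "no preference"
    );

    @Override
    public void evaluateForQuery(QueryBuilder queryBuilder, IndexSettings indexSettings) {
        // Called once per node of the query tree via ConcurrentSearchVisitor.
        if (queryBuilder instanceof TermQueryBuilder
            && "tenant_id".equals(((TermQueryBuilder) queryBuilder).fieldName())) {
            decision = new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO, "tenant-scoped query");
        }
    }

    @Override
    public boolean canEvaluateForIndex(IndexSettings indexSettings) {
        return true; // participate for every index
    }

    @Override
    public ConcurrentSearchDecision getConcurrentSearchDecision() {
        return decision;
    }
}

// Hypothetical plugin wiring: the decider is handed to SearchModule via SearchPlugin#getConcurrentSearchDecider.
class TenantAwareSearchPlugin extends Plugin implements SearchPlugin {
    @Override
    public ConcurrentSearchDecider getConcurrentSearchDecider() {
        return new TenantAwareDecider();
    }
}

In auto mode, a NO from any decider vetoes concurrent search for the request via ConcurrentSearchDecision.getCompositeDecision, a YES overrides NO_OP votes, and an all-NO_OP outcome falls back to whether the request carries aggregations.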
+ */ + @ExperimentalApi + public enum DecisionStatus { + YES(0), // use concurrent search + NO(1), // don't use concurrent search + NO_OP(2); // no preference + + private final int id; + + DecisionStatus(int id) { + this.id = id; + } + } + + @Override + public String toString() { + return "ConcurrentSearchDecision{" + "decisionStatus=" + decisionStatus + ", decisionReason='" + decisionReason + '\'' + '}'; + } + + /** + * Combine a collection of {@link ConcurrentSearchDecision} to return final {@link ConcurrentSearchDecision} + * The decisions are combined as: + * NO_OP AND NO_OP results in NO_OP + * NO_OP AND YES results in YES + * NO_OP AND NO results in NO + */ + public static ConcurrentSearchDecision getCompositeDecision(Collection allDecisions) { + + DecisionStatus finalDecisionStatus = DecisionStatus.NO_OP; + for (ConcurrentSearchDecision decision : allDecisions) { + switch (decision.decisionStatus) { + case YES: + finalDecisionStatus = DecisionStatus.YES; + break; + case NO: + finalDecisionStatus = DecisionStatus.NO; + return new ConcurrentSearchDecision( + finalDecisionStatus, + "composite decision evaluated to false due to " + decision.decisionReason + ); + case NO_OP: + // NOOP doesn't change the final decision + break; + } + } + return new ConcurrentSearchDecision(finalDecisionStatus, "composite decision result"); + } + +} diff --git a/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchVisitor.java b/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchVisitor.java new file mode 100644 index 0000000000000..12ba1b2a9cc5f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/deciders/ConcurrentSearchVisitor.java @@ -0,0 +1,46 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.deciders; + +import org.apache.lucene.search.BooleanClause; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilderVisitor; + +import java.util.Objects; +import java.util.Set; + +/** + * Class to traverse the QueryBuilder tree and invoke the + * {@link ConcurrentSearchDecider#evaluateForQuery} at each node of the query tree + */ +@ExperimentalApi +public class ConcurrentSearchVisitor implements QueryBuilderVisitor { + + private final Set deciders; + private final IndexSettings indexSettings; + + public ConcurrentSearchVisitor(Set concurrentSearchVisitorDeciders, IndexSettings idxSettings) { + Objects.requireNonNull(concurrentSearchVisitorDeciders, "Concurrent search deciders cannot be null"); + deciders = concurrentSearchVisitorDeciders; + indexSettings = idxSettings; + } + + @Override + public void accept(QueryBuilder qb) { + // for each of the deciders, invoke evaluateForQuery using the current query builder and index settings. 
+ deciders.forEach(concurrentSearchDecider -> { concurrentSearchDecider.evaluateForQuery(qb, indexSettings); }); + } + + @Override + public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { + return this; + } +} diff --git a/server/src/main/java/org/opensearch/search/deciders/package-info.java b/server/src/main/java/org/opensearch/search/deciders/package-info.java new file mode 100644 index 0000000000000..4ef26f7be9cfc --- /dev/null +++ b/server/src/main/java/org/opensearch/search/deciders/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes assist in deciding + * whether to run a search request using concurrent search or not. + */ + +package org.opensearch.search.deciders; diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index f102289160b71..08c30ea503a6d 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -87,6 +87,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -1345,6 +1346,7 @@ private static IndexMetadata addSnapshotToIndexSettings(IndexMetadata metadata, .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey(), snapshot.getSnapshotId().getUUID()) .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), snapshot.getSnapshotId().getName()) .put(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.getKey(), indexId.getId()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE.getKey(), PathType.fromCode(indexId.getShardPathType())) .build(); return IndexMetadata.builder(metadata).settings(newSettings).build(); } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java new file mode 100644 index 0000000000000..88af14e2232f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.snapshots; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.repositories.IndexId; + +import java.io.IOException; +import java.util.List; + +/** + * Snapshot Shard path information. 
+ * + * @opensearch.internal + */ +public class SnapshotShardPaths implements ToXContent { + + public static final String DIR = "snapshot_shard_paths"; + + public static final String DELIMITER = "."; + + public static final String FILE_NAME_FORMAT = "%s"; + + private static final String PATHS_FIELD = "paths"; + private static final String INDEX_ID_FIELD = "indexId"; + private static final String INDEX_NAME_FIELD = "indexName"; + private static final String NUMBER_OF_SHARDS_FIELD = "number_of_shards"; + private static final String SHARD_PATH_TYPE_FIELD = "shard_path_type"; + private static final String SHARD_PATH_HASH_ALGORITHM_FIELD = "shard_path_hash_algorithm"; + + private final List paths; + private final String indexId; + private final String indexName; + private final int numberOfShards; + private final PathType shardPathType; + private final PathHashAlgorithm shardPathHashAlgorithm; + + public SnapshotShardPaths( + List paths, + String indexId, + String indexName, + int numberOfShards, + PathType shardPathType, + PathHashAlgorithm shardPathHashAlgorithm + ) { + assert !paths.isEmpty() : "paths must not be empty"; + assert indexId != null && !indexId.isEmpty() : "indexId must not be empty"; + assert indexName != null && !indexName.isEmpty() : "indexName must not be empty"; + assert numberOfShards > 0 : "numberOfShards must be > 0"; + assert shardPathType != null : "shardPathType must not be null"; + assert shardPathHashAlgorithm != null : "shardPathHashAlgorithm must not be null"; + + this.paths = paths; + this.indexId = indexId; + this.indexName = indexName; + this.numberOfShards = numberOfShards; + this.shardPathType = shardPathType; + this.shardPathHashAlgorithm = shardPathHashAlgorithm; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(INDEX_ID_FIELD, indexId); + builder.field(INDEX_NAME_FIELD, indexName); + builder.field(NUMBER_OF_SHARDS_FIELD, numberOfShards); + builder.field(SHARD_PATH_TYPE_FIELD, shardPathType.getCode()); + builder.field(SHARD_PATH_HASH_ALGORITHM_FIELD, shardPathHashAlgorithm.getCode()); + builder.startArray(PATHS_FIELD); + for (String path : paths) { + builder.value(path); + } + builder.endArray(); + return builder; + } + + public static SnapshotShardPaths fromXContent(XContentParser ignored) { + throw new UnsupportedOperationException("SnapshotShardPaths.fromXContent() is not supported"); + } + + /** + * Parses a shard path string and extracts relevant shard information. + * + * @param shardPath The shard path string to parse. Expected format is: + * [index_id]#[index_name]#[shard_count]#[path_type_code]#[path_hash_algorithm_code] + * @return A {@link ShardInfo} object containing the parsed index ID and shard count. + * @throws IllegalArgumentException if the shard path format is invalid or cannot be parsed. + */ + public static ShardInfo parseShardPath(String shardPath) { + String[] parts = shardPath.split("\\" + SnapshotShardPaths.DELIMITER); + if (parts.length != 5) { + throw new IllegalArgumentException("Invalid shard path format: " + shardPath); + } + try { + IndexId indexId = new IndexId(parts[1], parts[0], Integer.parseInt(parts[3])); + int shardCount = Integer.parseInt(parts[2]); + return new ShardInfo(indexId, shardCount); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid shard path format: " + shardPath, e); + } + } + + /** + * Represents parsed information from a shard path. 
+ * This class encapsulates the index ID and shard count extracted from a shard path string. + */ + public static class ShardInfo { + /** The ID of the index associated with this shard. */ + private final IndexId indexId; + + /** The total number of shards for this index. */ + private final int shardCount; + + /** + * Constructs a new ShardInfo instance. + * + * @param indexId The ID of the index associated with this shard. + * @param shardCount The total number of shards for this index. + */ + public ShardInfo(IndexId indexId, int shardCount) { + this.indexId = indexId; + this.shardCount = shardCount; + } + + public IndexId getIndexId() { + return indexId; + } + + public int getShardCount() { + return shardCount; + } + } +} diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index f3afbd53f7703..998fa0161550c 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -138,6 +138,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SHALLOW_SNAPSHOT_V2; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SHARD_PATH_TYPE; import static org.opensearch.snapshots.SnapshotUtils.validateSnapshotsBackingAnyIndex; /** @@ -225,6 +226,18 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus Setting.Property.Dynamic ); + /** + * Setting to specify the maximum number of shards that can be included in the result for the snapshot status + * API call. Note that it does not apply to V2-shallow snapshots. + */ + public static final Setting MAX_SHARDS_ALLOWED_IN_STATUS_API = Setting.intSetting( + "snapshot.max_shards_allowed_in_status_api", + 200000, + 1, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + private static final String SNAPSHOT_PINNED_TIMESTAMP_DELIMITER = "__"; private volatile int maxConcurrentOperations; @@ -515,9 +528,13 @@ public ClusterState execute(ClusterState currentState) { logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); + int pathType = clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.CURRENT) + ? 
SHARD_PATH_TYPE.get(repository.getMetadata().settings()).getCode() + : IndexId.DEFAULT_SHARD_PATH_TYPE; final List indexIds = repositoryData.resolveNewIndices( indices, - getInFlightIndexIds(runningSnapshots, repositoryName) + getInFlightIndexIds(runningSnapshots, repositoryName), + pathType ); final Version version = minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null); final Map shards = shards( @@ -656,7 +673,8 @@ public void createSnapshotV2(final CreateSnapshotRequest request, final ActionLi final List indexIds = repositoryData.resolveNewIndices( indices, - getInFlightIndexIds(runningSnapshots, repositoryName) + getInFlightIndexIds(runningSnapshots, repositoryName), + IndexId.DEFAULT_SHARD_PATH_TYPE ); final Version version = minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null); final ShardGenerations shardGenerations = buildShardsGenerationFromRepositoryData( @@ -1336,7 +1354,14 @@ public ClusterState execute(ClusterState currentState) { assert entry.shards().isEmpty(); hadAbortedInitializations = true; } else { - final List indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap()); + int pathType = clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.CURRENT) + ? SHARD_PATH_TYPE.get(repository.getMetadata().settings()).getCode() + : IndexId.DEFAULT_SHARD_PATH_TYPE; + final List indexIds = repositoryData.resolveNewIndices( + indices, + Collections.emptyMap(), + pathType + ); // Replace the snapshot that was just initialized final Map shards = shards( snapshots, diff --git a/server/src/main/java/org/opensearch/snapshots/TooManyShardsInSnapshotsStatusException.java b/server/src/main/java/org/opensearch/snapshots/TooManyShardsInSnapshotsStatusException.java new file mode 100644 index 0000000000000..1689b3e4941ec --- /dev/null +++ b/server/src/main/java/org/opensearch/snapshots/TooManyShardsInSnapshotsStatusException.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.snapshots; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.rest.RestStatus; + +import java.io.IOException; + +/** + * Thrown if the number of shards across the requested resources (snapshot(s) or the index/indices of a particular snapshot) + * breaches the limit of snapshot.max_shards_allowed_in_status_api cluster setting + * + * @opensearch.internal + */ +public class TooManyShardsInSnapshotsStatusException extends SnapshotException { + + public TooManyShardsInSnapshotsStatusException( + final String repositoryName, + final SnapshotId snapshotId, + final String message, + final Throwable cause + ) { + super(repositoryName, snapshotId, message, cause); + } + + public TooManyShardsInSnapshotsStatusException(final String repositoryName, final String message, String... snapshotName) { + super(repositoryName, String.join(", ", snapshotName), message); + } + + public TooManyShardsInSnapshotsStatusException(StreamInput in) throws IOException { + super(in); + } + + @Override + public RestStatus status() { + return RestStatus.REQUEST_ENTITY_TOO_LARGE; + } +} diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 131617742e3a4..ba7edd3198436 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -117,6 +117,7 @@ import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInProgressException; import org.opensearch.snapshots.SnapshotInUseDeletionException; +import org.opensearch.snapshots.TooManyShardsInSnapshotsStatusException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.transport.ActionNotFoundTransportException; @@ -898,6 +899,7 @@ public void testIds() { ids.put(170, SearchPipelineProcessingException.class); ids.put(171, CryptoRegistryException.class); ids.put(174, InvalidIndexContextException.class); + ids.put(175, TooManyShardsInSnapshotsStatusException.class); ids.put(10001, IndexCreateBlockException.class); Map, Integer> reverse = new HashMap<>(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 6d44700a8ce54..c81ad933a8757 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -58,6 +58,8 @@ import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; +import org.opensearch.index.compositeindex.CompositeIndexSettings; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperParsingException; import org.opensearch.index.mapper.MapperService; @@ -2424,6 +2426,19 @@ public void testAsyncTranslogDurabilityBlocked() { assertThat(throwables.get(0), instanceOf(IllegalArgumentException.class)); } + public void testMaxTranslogFlushSizeWithCompositeIndex() { + Settings clusterSettings = Settings.builder() + .put(CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "130m") + .build(); + PutRequest request = new 
PutRequest("test", "test_replicas"); + request.patterns(singletonList("test_shards_wait*")); + Settings.Builder settingsBuilder = builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), "true") + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "131m"); + request.settings(settingsBuilder.build()); + List throwables = putTemplate(xContentRegistry(), request, clusterSettings); + assertThat(throwables.get(0), instanceOf(IllegalArgumentException.class)); + } + private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { return putTemplate(xContentRegistry, request, Settings.EMPTY); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java new file mode 100644 index 0000000000000..f54b6cdf1b152 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.compositeindex.CompositeIndexSettings; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Optional; + +/** + * Tests for translog flush interval settings update with and without composite index + */ +public class TranslogFlushIntervalSettingsTests extends OpenSearchTestCase { + + Settings settings = Settings.builder() + .put(CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "130mb") + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + public void testValidSettings() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "50mb") + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + + // This should not throw an exception + MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings); + } + + public void testDefaultTranslogFlushSetting() { + Settings requestSettings = Settings.builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true).build(); + + // This should throw an exception since the default translog flush threshold size exceeds the 130mb composite index limit + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings) + ); + assertEquals("You can configure 'index.translog.flush_threshold_size' with upto '130mb' for composite index", ex.getMessage()); + } + + public void testMissingCompositeIndexSetting() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "50mb") + .build(); + + // This should not throw an exception + MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings); + } + + public void testNullTranslogFlushSetting() { + Settings requestSettings = Settings.builder() + .putNull(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey()) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + + // Removing the explicit value falls back to the default flush threshold size, which exceeds the 130mb composite index limit, so this should throw + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings) + ); + assertEquals("You can configure 'index.translog.flush_threshold_size' with upto '130mb' for composite index", ex.getMessage()); + } + + public void testExceedingMaxFlushSize() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "150mb") + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings) + ); + assertEquals("You can configure 'index.translog.flush_threshold_size' with upto '130mb' for composite index", ex.getMessage()); + } + + public void testEqualToMaxFlushSize() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "100mb") + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + + // This should not throw an exception + MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex(requestSettings, clusterSettings); + } + + public void testUpdateIndexThresholdFlushSize() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "100mb") + .build(); + + Settings indexSettings = Settings.builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true).build(); + + // This should not throw an exception + assertTrue( + MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex( + requestSettings, + clusterSettings, + indexSettings + ).isEmpty() + ); + } + + public void testUpdateFlushSizeAboveThresholdWithCompositeIndex() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "131mb") + .build(); + + Settings indexSettings = Settings.builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true).build(); + + Optional err = MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex( + requestSettings, + clusterSettings, + indexSettings + ); + assertTrue(err.isPresent()); + assertEquals("You can configure 'index.translog.flush_threshold_size' with upto '130mb' for composite index", err.get()); + } + + public void testUpdateFlushSizeAboveThresholdWithoutCompositeIndex() { + Settings requestSettings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "131mb") + .build(); + + Settings indexSettings = Settings.builder().build(); + + // This should not throw an exception + assertTrue( + MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex( + requestSettings, + clusterSettings, + indexSettings + ).isEmpty() + ); + } +} diff --git a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java index c6eb1843d05e1..7dfea69729b4e 100644 ---
a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java @@ -61,6 +61,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.startsWith; @@ -1462,4 +1465,106 @@ public List getListValue(final List value) { ); } + public void testGetOrNullWhenSettingIsNotSet() { + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); + AbstractScopedSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(testSetting, testSetting2)) + ); + clusterSettings.registerSetting(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE); + clusterSettings.registerSetting(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING); + assertNull(clusterSettings.getOrNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)); + + } + + public void testGetOrNullWhenSettingIsSet() { + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); + AbstractScopedSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(testSetting, testSetting2)) + ); + clusterSettings.registerSetting(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE); + clusterSettings.applySettings( + Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO).build() + ); + assertEquals(clusterSettings.getOrNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE), "auto"); + + } + + public void testGetOrNullWhenSettingIsSetInNodeSettings() { + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); + Settings concurrentSearchModeSetting = Settings.builder() + .put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), CONCURRENT_SEGMENT_SEARCH_MODE_AUTO) + .build(); + AbstractScopedSettings clusterSettings = new ClusterSettings( + concurrentSearchModeSetting, + new HashSet<>(Arrays.asList(testSetting, testSetting2, CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE)) + ); + assertEquals(clusterSettings.getOrNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE), "auto"); + + } + + public void testGetOrNullWhenSettingScopeDoesntMatch() { + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); + AbstractScopedSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(testSetting, testSetting2)) + ); + Setting failedSetting = Setting.intSetting("foo.bar.scope.fail", 1, Property.Dynamic, Property.IndexScope); + clusterSettings.registerSetting(failedSetting); + try { + clusterSettings.getOrNull(failedSetting); + fail("setting scope doesn't match"); + } catch (SettingsException ex) { + assertEquals("settings scope 
doesn't match the setting scope [NodeScope] not in [[Dynamic, IndexScope]]", ex.getMessage()); + } + + } + + public void testGetOrNullWhenSettingIsNotRegistered() { + Setting testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); + Setting testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope); + AbstractScopedSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + new HashSet<>(Arrays.asList(testSetting, testSetting2)) + ); + Setting failedSetting = Setting.intSetting("foo.bar.register.fail", 1, Property.Dynamic, Property.NodeScope); + + try { + clusterSettings.getOrNull(failedSetting); + fail("setting is not registered"); + } catch (SettingsException ex) { + assertEquals("setting foo.bar.register.fail has not been registered", ex.getMessage()); + } + + } + + public void testGetOrNullWhenSettingIsRegisteredWithFallback() { + Setting fallbackSetting = Setting.intSetting("foo.bar", 10, Property.Dynamic, Property.NodeScope); + AbstractScopedSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(fallbackSetting))); + clusterSettings.registerSetting(fallbackSetting); + clusterSettings.applySettings(Settings.builder().put(fallbackSetting.getKey(), 100).build()); + Setting settingWithFallback = Setting.intSetting("foo.fallback", fallbackSetting, 1, Property.Dynamic, Property.NodeScope); + clusterSettings.registerSetting(settingWithFallback); + + assertEquals(clusterSettings.getOrNull(settingWithFallback), Integer.valueOf(100)); + + } + + public void testGetOrNullWhenSettingIsRegisteredNodeSettingFallback() { + Setting fallbackSetting = Setting.intSetting("foo.bar", 10, Property.Dynamic, Property.NodeScope); + Settings settings = Settings.builder().put(fallbackSetting.getKey(), 100).build(); + AbstractScopedSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(Arrays.asList(fallbackSetting))); + + Setting settingWithFallback = Setting.intSetting("foo.fallback", fallbackSetting, 1, Property.Dynamic, Property.NodeScope); + clusterSettings.registerSetting(settingWithFallback); + + assertEquals(clusterSettings.getOrNull(settingWithFallback), Integer.valueOf(100)); + + } + } diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index 17c6587c9f4c7..28f1df70fce09 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -290,20 +290,27 @@ public void testConcurrentSegmentSearchClusterSettings() { Settings settings = Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue).build(); SettingsModule settingsModule = new SettingsModule(settings); assertEquals(settingValue, SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settingsModule.getSettings())); + assertSettingDeprecationsAndWarnings(new Setting[] { SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING }); } public void testConcurrentSegmentSearchIndexSettings() { Settings.Builder target = Settings.builder().put(Settings.EMPTY); Settings.Builder update = Settings.builder(); - + boolean settingValue = randomBoolean(); SettingsModule module = new SettingsModule(Settings.EMPTY); IndexScopedSettings indexScopedSettings = module.getIndexScopedSettings(); indexScopedSettings.updateDynamicSettings( - 
Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build(), + Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue).build(), target, update, "node" ); + // apply the setting update + module.getIndexScopedSettings() + .applySettings(Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), settingValue).build()); + // assert value + assertEquals(settingValue, module.getIndexScopedSettings().get(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING)); + assertSettingDeprecationsAndWarnings(new Setting[] { IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING }); } public void testMaxSliceCountClusterSettingsForConcurrentSearch() { diff --git a/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java b/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java index 269f89faec54d..2f63ae43d0ded 100644 --- a/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java +++ b/server/src/test/java/org/opensearch/common/util/BatchRunnableExecutorTests.java @@ -15,6 +15,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.function.Supplier; import static org.mockito.Mockito.atMost; @@ -42,7 +43,13 @@ public void setupRunnables() { public void testRunWithoutTimeout() { setupRunnables(); timeoutSupplier = () -> TimeValue.timeValueSeconds(1); - BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier); + CountDownLatch countDownLatch = new CountDownLatch(1); + BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier) { + @Override + public void onComplete() { + countDownLatch.countDown(); + } + }; executor.run(); verify(runnable1, times(1)).run(); verify(runnable2, times(1)).run(); @@ -50,12 +57,19 @@ public void testRunWithoutTimeout() { verify(runnable1, never()).onTimeout(); verify(runnable2, never()).onTimeout(); verify(runnable3, never()).onTimeout(); + assertEquals(0, countDownLatch.getCount()); } public void testRunWithTimeout() { setupRunnables(); timeoutSupplier = () -> TimeValue.timeValueNanos(1); - BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier); + CountDownLatch countDownLatch = new CountDownLatch(1); + BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier) { + @Override + public void onComplete() { + countDownLatch.countDown(); + } + }; executor.run(); verify(runnable1, times(1)).onTimeout(); verify(runnable2, times(1)).onTimeout(); @@ -63,12 +77,19 @@ public void testRunWithTimeout() { verify(runnable1, never()).run(); verify(runnable2, never()).run(); verify(runnable3, never()).run(); + assertEquals(0, countDownLatch.getCount()); } public void testRunWithPartialTimeout() { setupRunnables(); timeoutSupplier = () -> TimeValue.timeValueMillis(50); - BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier); + CountDownLatch countDownLatch = new CountDownLatch(1); + BatchRunnableExecutor executor = new BatchRunnableExecutor(runnableList, timeoutSupplier) { + @Override + public void onComplete() { + countDownLatch.countDown(); + } + }; doAnswer(invocation -> { Thread.sleep(100); return null; @@ -81,11 +102,18 @@ public void testRunWithPartialTimeout() { verify(runnable3, atMost(1)).onTimeout(); verify(runnable2, atMost(1)).onTimeout(); verify(runnable3, 
atMost(1)).onTimeout(); + assertEquals(0, countDownLatch.getCount()); } public void testRunWithEmptyRunnableList() { setupRunnables(); - BatchRunnableExecutor executor = new BatchRunnableExecutor(Collections.emptyList(), timeoutSupplier); + CountDownLatch countDownLatch = new CountDownLatch(1); + BatchRunnableExecutor executor = new BatchRunnableExecutor(Collections.emptyList(), timeoutSupplier) { + @Override + public void onComplete() { + countDownLatch.countDown(); + } + }; executor.run(); verify(runnable1, never()).onTimeout(); verify(runnable2, never()).onTimeout(); @@ -93,5 +121,6 @@ public void testRunWithEmptyRunnableList() { verify(runnable1, never()).run(); verify(runnable2, never()).run(); verify(runnable3, never()).run(); + assertEquals(1, countDownLatch.getCount()); } } diff --git a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java index 1596a0b566b28..c7eae77d6deba 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayAllocatorTests.java @@ -32,6 +32,7 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BatchRunnableExecutor; import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.shard.ShardId; @@ -45,6 +46,8 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static org.opensearch.gateway.ShardsBatchGatewayAllocator.PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING; @@ -423,6 +426,24 @@ public void testReplicaAllocatorTimeout() { assertEquals(-1, REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(build).getMillis()); } + public void testCollectTimedOutShards() throws InterruptedException { + createIndexAndUpdateClusterState(2, 5, 2); + CountDownLatch latch = new CountDownLatch(10); + testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(latch); + testShardsBatchGatewayAllocator.setPrimaryBatchAllocatorTimeout(TimeValue.ZERO); + testShardsBatchGatewayAllocator.setReplicaBatchAllocatorTimeout(TimeValue.ZERO); + BatchRunnableExecutor executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, true); + executor.run(); + assertTrue(latch.await(1, TimeUnit.MINUTES)); + latch = new CountDownLatch(10); + testShardsBatchGatewayAllocator = new TestShardBatchGatewayAllocator(latch); + testShardsBatchGatewayAllocator.setPrimaryBatchAllocatorTimeout(TimeValue.ZERO); + testShardsBatchGatewayAllocator.setReplicaBatchAllocatorTimeout(TimeValue.ZERO); + executor = testShardsBatchGatewayAllocator.allocateAllUnassignedShards(testAllocation, false); + executor.run(); + assertTrue(latch.await(1, TimeUnit.MINUTES)); + } + private void createIndexAndUpdateClusterState(int count, int numberOfShards, int numberOfReplicas) { if (count == 0) return; Metadata.Builder metadata = Metadata.builder(); diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java index 270cf465d0f80..2edde8281b11a 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java +++ 
b/server/src/test/java/org/opensearch/gateway/PrimaryShardBatchAllocatorTests.java @@ -41,7 +41,6 @@ import org.junit.Before; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -52,6 +51,7 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.routing.UnassignedInfo.Reason.CLUSTER_RECOVERED; +import static org.opensearch.cluster.routing.UnassignedInfo.Reason.INDEX_CREATED; public class PrimaryShardBatchAllocatorTests extends OpenSearchAllocationTestCase { @@ -264,8 +264,9 @@ public void testAllocateUnassignedBatchOnTimeoutWithMatchingPrimaryShards() { final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); ShardRouting shardRouting = routingAllocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); - List shardRoutings = Arrays.asList(shardRouting); - batchAllocator.allocateUnassignedBatchOnTimeout(shardRoutings, routingAllocation, true); + Set shardIds = new HashSet<>(); + shardIds.add(shardRouting.shardId()); + batchAllocator.allocateUnassignedBatchOnTimeout(shardIds, routingAllocation, true); List ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); assertEquals(1, ignoredShards.size()); @@ -277,30 +278,25 @@ public void testAllocateUnassignedBatchOnTimeoutWithNoMatchingPrimaryShards() { AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); setUpShards(1); final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); - List shardRoutings = new ArrayList<>(); - batchAllocator.allocateUnassignedBatchOnTimeout(shardRoutings, routingAllocation, true); + batchAllocator.allocateUnassignedBatchOnTimeout(new HashSet<>(), routingAllocation, true); List ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); assertEquals(0, ignoredShards.size()); } - public void testAllocateUnassignedBatchOnTimeoutWithNonPrimaryShards() { + public void testAllocateUnassignedBatchOnTimeoutSkipIgnoringNewPrimaryShards() { ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); AllocationDeciders allocationDeciders = randomAllocationDeciders(Settings.builder().build(), clusterSettings, random()); setUpShards(1); - final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, CLUSTER_RECOVERED, "allocId-0"); + final RoutingAllocation routingAllocation = routingAllocationWithOnePrimary(allocationDeciders, INDEX_CREATED); + ShardRouting shardRouting = routingAllocation.routingTable().getIndicesRouting().get("test").shard(shardId.id()).primaryShard(); - ShardRouting shardRouting = routingAllocation.routingTable() - .getIndicesRouting() - .get("test") - .shard(shardId.id()) - .replicaShards() - .get(0); - List shardRoutings = Arrays.asList(shardRouting); - batchAllocator.allocateUnassignedBatchOnTimeout(shardRoutings, routingAllocation, false); + Set shardIds = new HashSet<>(); + shardIds.add(shardRouting.shardId()); + batchAllocator.allocateUnassignedBatchOnTimeout(shardIds, routingAllocation, true); List ignoredShards = routingAllocation.routingNodes().unassigned().ignored(); - assertEquals(1, ignoredShards.size()); + assertEquals(0, ignoredShards.size()); } private RoutingAllocation routingAllocationWithOnePrimary( diff --git 
a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java index 435fd78be2bcd..988723e023a2a 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/ReplicaShardBatchAllocatorTests.java @@ -720,9 +720,9 @@ public void testAllocateUnassignedBatchThrottlingAllocationDeciderIsHonoured() t public void testAllocateUnassignedBatchOnTimeoutWithUnassignedReplicaShard() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); - List shards = new ArrayList<>(); + Set shards = new HashSet<>(); while (iterator.hasNext()) { - shards.add(iterator.next()); + shards.add(iterator.next().shardId()); } testBatchAllocator.allocateUnassignedBatchOnTimeout(shards, allocation, false); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -736,9 +736,27 @@ public void testAllocateUnassignedBatchOnTimeoutWithUnassignedReplicaShard() { public void testAllocateUnassignedBatchOnTimeoutWithAlreadyRecoveringReplicaShard() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); - List shards = new ArrayList<>(); + Set shards = new HashSet<>(); while (iterator.hasNext()) { - shards.add(iterator.next()); + shards.add(iterator.next().shardId()); + } + testBatchAllocator.allocateUnassignedBatchOnTimeout(shards, allocation, false); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0)); + } + + public void testAllocateUnassignedBatchOnTimeoutSkipIgnoringNewReplicaShards() { + RoutingAllocation allocation = onePrimaryOnNode1And1Replica( + yesAllocationDeciders(), + Settings.EMPTY, + UnassignedInfo.Reason.INDEX_CREATED + ); + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + Set shards = new HashSet<>(); + while (iterator.hasNext()) { + ShardRouting sr = iterator.next(); + if (sr.primary() == false) { + shards.add(sr.shardId()); + } } testBatchAllocator.allocateUnassignedBatchOnTimeout(shards, allocation, false); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(0)); diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 63a9c1eaba977..4d301c1e6a4f4 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -150,6 +150,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteRoutingTableEnabled; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.equalTo; import static 
org.hamcrest.Matchers.is; @@ -459,8 +460,8 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { assertThat(manifest.getStateUUID(), is(expectedManifest.getStateUUID())); assertThat(manifest.getPreviousClusterUUID(), is(expectedManifest.getPreviousClusterUUID())); - assertEquals(7, actionListenerArgumentCaptor.getAllValues().size()); - assertEquals(7, writeContextArgumentCaptor.getAllValues().size()); + assertEquals(8, actionListenerArgumentCaptor.getAllValues().size()); + assertEquals(8, writeContextArgumentCaptor.getAllValues().size()); byte[] writtenBytes = capturedWriteContext.get("metadata") .getStreamProvider(Integer.MAX_VALUE) @@ -696,7 +697,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { eq(false), eq(Collections.emptyMap()), eq(false), - eq(Collections.emptyList()), + anyList(), Mockito.any(StringKeyDiffProvider.class) ); @@ -717,7 +718,7 @@ public void testWriteIncrementalMetadataSuccess() throws IOException { assertThat(manifest.getTemplatesMetadata(), notNullValue()); assertThat(manifest.getCoordinationMetadata(), notNullValue()); assertThat(manifest.getCustomMetadataMap().size(), is(2)); - assertThat(manifest.getIndicesRouting().size(), is(0)); + assertThat(manifest.getIndicesRouting().size(), is(1)); } public void testWriteIncrementalMetadataSuccessWhenPublicationEnabled() throws IOException { @@ -2608,7 +2609,7 @@ public void testRemoteStateUploadStats() throws IOException { } public void testRemoteRoutingTableNotInitializedWhenDisabled() { - if (publicationEnabled) { + if (isRemoteRoutingTableEnabled(settings)) { assertTrue(remoteClusterStateService.getRemoteRoutingTableService() instanceof InternalRemoteRoutingTableService); } else { assertTrue(remoteClusterStateService.getRemoteRoutingTableService() instanceof NoopRemoteRoutingTableService); diff --git a/server/src/test/java/org/opensearch/index/codec/composite99/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite99/datacube/startree/StarTreeDocValuesFormatTests.java index fa492e1adec0a..4bbefeba0845b 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite99/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite99/datacube/startree/StarTreeDocValuesFormatTests.java @@ -33,14 +33,18 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexSettings; import org.opensearch.index.MapperTestUtils; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.codec.composite.composite99.Composite99Codec; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils; import org.opensearch.index.compositeindex.datacube.startree.aggregators.numerictype.StarTreeNumericType; import 
org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; @@ -213,20 +217,19 @@ private XContentBuilder topMapping(CheckedConsumer } private void createMapperService(XContentBuilder builder) throws IOException { - IndexMetadata indexMetadata = IndexMetadata.builder("test") - .settings( - Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - ) - .putMapping(builder.toString()) + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); + IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).putMapping(builder.toString()).build(); IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); mapperService = MapperTestUtils.newMapperServiceWithHelperAnalyzer( new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), createTempDir(), - Settings.EMPTY, + settings, indicesModule, "test" ); diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index 504bc622ec12e..ff0533de4fe8d 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -37,7 +37,11 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.mapper.ObjectMapper.Dynamic; import org.opensearch.plugins.Plugin; @@ -544,7 +548,11 @@ public void testCompositeFields() throws Exception { final Settings starTreeEnabledSettings = Settings.builder().put(STAR_TREE_INDEX, "true").build(); FeatureFlags.initializeFeatureFlags(starTreeEnabledSettings); - DocumentMapper documentMapper = createIndex("test").mapperService() + Settings settings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .build(); + DocumentMapper documentMapper = createIndex("test", settings).mapperService() .documentMapperParser() .parse("tweet", new CompressedXContent(mapping)); diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 81454b210d6be..e06d9889ec905 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -13,6 +13,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import 
org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.compositeindex.CompositeIndexSettings; import org.opensearch.index.compositeindex.CompositeIndexValidator; @@ -24,6 +26,7 @@ import org.opensearch.index.compositeindex.datacube.ReadDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.junit.After; import org.junit.Before; @@ -35,6 +38,9 @@ import java.util.List; import java.util.Set; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; +import static org.opensearch.index.compositeindex.CompositeIndexSettings.COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING; import static org.hamcrest.Matchers.containsString; /** @@ -52,7 +58,17 @@ public void teardown() { FeatureFlags.initializeFeatureFlags(Settings.EMPTY); } + @Override + protected Settings getIndexSettings() { + return Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .put(SETTINGS) + .build(); + } + public void testValidStarTree() throws IOException { + MapperService mapperService = createMapperService(getExpandedMappingWithJustAvg("status", "size")); Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); for (CompositeMappedFieldType type : compositeFieldTypes) { @@ -82,6 +98,40 @@ public void testValidStarTree() throws IOException { } } + public void testCompositeIndexWithArraysInCompositeField() throws IOException { + DocumentMapper mapper = createDocumentMapper(getExpandedMappingWithJustAvg("status", "status")); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> mapper.parse(source(b -> b.startArray("status").value(0).value(1).endArray())) + ); + assertEquals( + "object mapping for [_doc] with array for [status] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + ParsedDocument doc = mapper.parse(source(b -> b.startArray("size").value(0).value(1).endArray())); + // 1 intPoint , 1 SNDV field for each value , so 4 in total + assertEquals(4, doc.rootDoc().getFields("size").length); + } + + public void testValidValueForFlushTresholdSizeWithoutCompositeIndex() { + Settings settings = Settings.builder() + .put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "256mb") + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), false) + .build(); + + assertEquals(new ByteSizeValue(256, ByteSizeUnit.MB), INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.get(settings)); + } + + public void testValidValueForCompositeIndex() { + Settings settings = Settings.builder() + .put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "256mb") + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "512mb") + .build(); + + assertEquals(new ByteSizeValue(256, ByteSizeUnit.MB), INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.get(settings)); + } + public void testMetricsWithJustSum() throws IOException { MapperService mapperService = 
createMapperService(getExpandedMappingWithJustSum("status", "size")); Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); @@ -291,6 +341,24 @@ public void testInvalidSingleDim() { ); } + public void testDuplicateDimensions() { + XContentBuilder finalMapping = getMappingWithDuplicateFields(true, false); + MapperParsingException ex = expectThrows(MapperParsingException.class, () -> createMapperService(finalMapping)); + assertEquals( + "Failed to parse mapping [_doc]: Duplicate dimension [numeric_dv] present as part star tree index field [startree-1]", + ex.getMessage() + ); + } + + public void testDuplicateMetrics() { + XContentBuilder finalMapping = getMappingWithDuplicateFields(false, true); + MapperParsingException ex = expectThrows(MapperParsingException.class, () -> createMapperService(finalMapping)); + assertEquals( + "Failed to parse mapping [_doc]: Duplicate metrics [numeric_dv] present as part star tree index field [startree-1]", + ex.getMessage() + ); + } + public void testMetric() { List m1 = new ArrayList<>(); m1.add(MetricStat.MAX); @@ -507,6 +575,56 @@ private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) }); } + private XContentBuilder getMappingWithDuplicateFields(boolean isDuplicateDim, boolean isDuplicateMetric) { + XContentBuilder mapping = null; + try { + mapping = jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startArray("ordered_dimensions") + .startObject() + .field("name", "timestamp") + .endObject() + .startObject() + .field("name", "numeric_dv") + .endObject() + .startObject() + .field("name", isDuplicateDim ? "numeric_dv" : "numeric_dv1") // Duplicate dimension + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "numeric_dv") + .endObject() + .startObject() + .field("name", isDuplicateMetric ? 
"numeric_dv" : "numeric_dv1") // Duplicate metric + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("numeric_dv1") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + fail("Failed to create mapping: " + e.getMessage()); + } + return mapping; + } + private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) throws IOException { return topMapping(b -> { b.startObject("composite"); diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index c3f52f3976414..e1110f51ecd3f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -14,6 +14,7 @@ import org.opensearch.index.remote.RemoteStoreEnums.DataType; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; +import org.opensearch.index.remote.RemoteStorePathStrategy.SnapshotShardPathInput; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -597,6 +598,47 @@ public void testGeneratePathForHashedInfixType() { assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); } + public void testGeneratePathForSnapshotShardPathInput() { + BlobPath blobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String indexUUID = "dsdkjsu8832njn"; + String shardId = "10"; + SnapshotShardPathInput pathInput = SnapshotShardPathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .build(); + + // FIXED PATH + BlobPath result = FIXED.path(pathInput, null); + String expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/indices/dsdkjsu8832njn/10/"; + String actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_PREFIX - FNV_1A_COMPOSITE_1 + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + expected = "_11001000010110/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_PREFIX - FNV_1A_BASE64 + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + expected = "_yFiSl_VGGM/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_INFIX - FNV_1A_COMPOSITE_1 + result = HASHED_INFIX.path(pathInput, FNV_1A_COMPOSITE_1); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/_11001000010110/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_INFIX - FNV_1A_BASE64 + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + expected = 
"xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/_yFiSl_VGGM/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + } + private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java index 217ffe804573e..e4c64e16fb5be 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java @@ -82,6 +82,46 @@ public void testFixedSubPath() { .dataType(DATA) .build(); assertEquals(BlobPath.cleanPath().add(INDEX_UUID).add(SHARD_ID).add(TRANSLOG.getName()).add(DATA.getName()), input2.fixedSubPath()); + } + + public void testSnapshotShardPathInput() { + assertThrows(NullPointerException.class, () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().build()); + assertThrows( + NullPointerException.class, + () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().basePath(BASE_PATH).build() + ); + assertThrows( + NullPointerException.class, + () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().indexUUID(INDEX_UUID).build() + ); + assertThrows(NullPointerException.class, () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().shardId(SHARD_ID).build()); + + RemoteStorePathStrategy.SnapshotShardPathInput input = RemoteStorePathStrategy.SnapshotShardPathInput.builder() + .basePath(BASE_PATH) + .indexUUID(INDEX_UUID) + .shardId(SHARD_ID) + .build(); + assertEquals(BASE_PATH, input.basePath()); + assertEquals(INDEX_UUID, input.indexUUID()); + assertEquals(SHARD_ID, input.shardId()); + } + + public void testSnapshotShardPathInputFixedSubPath() { + RemoteStorePathStrategy.SnapshotShardPathInput input = RemoteStorePathStrategy.SnapshotShardPathInput.builder() + .basePath(BASE_PATH) + .indexUUID(INDEX_UUID) + .shardId(SHARD_ID) + .build(); + assertEquals(BlobPath.cleanPath().add("indices").add(INDEX_UUID).add(SHARD_ID), input.fixedSubPath()); + } + public void testSnapshotShardPathInputHashPath() { + RemoteStorePathStrategy.SnapshotShardPathInput input = RemoteStorePathStrategy.SnapshotShardPathInput.builder() + .basePath(BASE_PATH) + .indexUUID(INDEX_UUID) + .shardId(SHARD_ID) + .build(); + assertEquals(BlobPath.cleanPath().add(SHARD_ID).add(INDEX_UUID), input.hashPath()); } + } diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogWithPinnedTimestampTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java similarity index 93% rename from server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogWithPinnedTimestampTests.java rename to server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java index 386dde4dffc48..1f82dd9d7e641 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogWithPinnedTimestampTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java @@ -29,6 +29,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.translog.transfer.TranslogTransferManager; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import 
org.opensearch.index.translog.transfer.TranslogUploadFailedException; import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.node.Node; @@ -42,6 +43,7 @@ import org.junit.Before; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -60,14 +62,13 @@ import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED; -import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @LuceneTestCase.SuppressFileSystems("ExtrasFS") -public class RemoteFsTranslogWithPinnedTimestampTests extends RemoteFsTranslogTests { +public class RemoteFsTimestampAwareTranslogTests extends RemoteFsTranslogTests { Runnable updatePinnedTimstampTask; BlobContainer blobContainer; @@ -125,6 +126,48 @@ public void setUp() throws Exception { remoteStorePinnedTimestampServiceSpy.start(); } + @Override + protected RemoteFsTranslog createTranslogInstance( + TranslogConfig translogConfig, + String translogUUID, + TranslogDeletionPolicy deletionPolicy + ) throws IOException { + return new RemoteFsTimestampAwareTranslog( + translogConfig, + translogUUID, + deletionPolicy, + () -> globalCheckpoint.get(), + primaryTerm::get, + getPersistedSeqNoConsumer(), + repository, + threadPool, + primaryMode::get, + new RemoteTranslogTransferTracker(shardId, 10), + DefaultRemoteStoreSettings.INSTANCE + ); + } + + @Override + public void testSyncUpAlwaysFailure() throws IOException { + int translogOperations = randomIntBetween(1, 20); + int count = 0; + fail.failAlways(); + for (int op = 0; op < translogOperations; op++) { + translog.add( + new Translog.Index(String.valueOf(op), count, primaryTerm.get(), Integer.toString(count).getBytes(StandardCharsets.UTF_8)) + ); + try { + translog.sync(); + fail("io exception expected"); + } catch (TranslogUploadFailedException e) { + assertTrue("at least one operation pending", translog.syncNeeded()); + } + } + assertTrue(translog.isOpen()); + fail.failNever(); + translog.sync(); + } + public void testGetMinMaxTranslogGenerationFromFilename() throws Exception { RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); ArrayList ops = new ArrayList<>(); @@ -198,7 +241,7 @@ public void testIndexDeletionWithNoPinnedTimestampNoRecentMdFiles() throws Excep assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); updatePinnedTimstampTask.run(); - translog.trimUnreferencedReaders(true, false); + ((RemoteFsTimestampAwareTranslog) translog).trimUnreferencedReaders(true, false); assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); @@ -221,7 +264,7 @@ public void testIndexDeletionWithNoPinnedTimestampButRecentFiles() throws Except addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 4, primaryTerm.get(), new byte[] { 1 })); updatePinnedTimstampTask.run(); - translog.trimUnreferencedReaders(true, false); + ((RemoteFsTimestampAwareTranslog) translog).trimUnreferencedReaders(true, false); assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); 
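
// Illustrative sketch only, not the production PathType implementation: it encodes the
// layout that the expected strings in testGeneratePathForSnapshotShardPathInput above
// assert. The fixed sub-path of a snapshot shard is "indices/<indexUUID>/<shardId>/"
// appended to the repository base path; HASHED_PREFIX places the FNV-1a hash of the
// hash path (shardId followed by indexUUID, per testSnapshotShardPathInputHashPath)
// before the base path, while HASHED_INFIX places it between the base path and the
// fixed sub-path. Helper names are hypothetical and basePath is assumed to end with "/".
static String fixedSnapshotShardPath(String basePath, String indexUUID, String shardId) {
    return basePath + "indices/" + indexUUID + "/" + shardId + "/";
}

static String hashedPrefixSnapshotShardPath(String hash, String basePath, String indexUUID, String shardId) {
    // e.g. "_11001000010110" + "/" + "xjsdhj/.../89jdij/" + "indices/dsdkjsu8832njn/10/"
    return hash + "/" + fixedSnapshotShardPath(basePath, indexUUID, shardId);
}
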
assertBusy(() -> { @@ -567,7 +610,11 @@ public void testGetGenerationsToBeDeletedEmptyMetadataFilesNotToBeDeleted() thro // 27 to 42 "metadata__9223372036438563903__9223372036854775765__9223370311919910403__31__9223372036854775780__1" ); - Set generations = translog.getGenerationsToBeDeleted(metadataFilesNotToBeDeleted, metadataFilesToBeDeleted, true); + Set generations = ((RemoteFsTimestampAwareTranslog) translog).getGenerationsToBeDeleted( + metadataFilesNotToBeDeleted, + metadataFilesToBeDeleted, + true + ); Set md1Generations = LongStream.rangeClosed(4, 7).boxed().collect(Collectors.toSet()); Set md2Generations = LongStream.rangeClosed(17, 37).boxed().collect(Collectors.toSet()); Set md3Generations = LongStream.rangeClosed(27, 42).boxed().collect(Collectors.toSet()); @@ -598,7 +645,11 @@ public void testGetGenerationsToBeDeleted() throws IOException { // 27 to 42 "metadata__9223372036438563903__9223372036854775765__9223370311919910403__31__9223372036854775780__1" ); - Set generations = translog.getGenerationsToBeDeleted(metadataFilesNotToBeDeleted, metadataFilesToBeDeleted, true); + Set generations = ((RemoteFsTimestampAwareTranslog) translog).getGenerationsToBeDeleted( + metadataFilesNotToBeDeleted, + metadataFilesToBeDeleted, + true + ); Set md1Generations = LongStream.rangeClosed(5, 7).boxed().collect(Collectors.toSet()); Set md2Generations = LongStream.rangeClosed(17, 25).boxed().collect(Collectors.toSet()); Set md3Generations = LongStream.rangeClosed(31, 41).boxed().collect(Collectors.toSet()); @@ -621,7 +672,7 @@ public void testGetMetadataFilesToBeDeletedNoExclusion() { "metadata__9223372036438563903__9223372036854775701__9223370311919910403__31__9223372036854775701__1" ); - assertEquals(metadataFiles, translog.getMetadataFilesToBeDeleted(metadataFiles)); + assertEquals(metadataFiles, ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles)); } public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() { @@ -637,7 +688,7 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() { "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" ); - List metadataFilesToBeDeleted = translog.getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles); assertEquals(1, metadataFilesToBeDeleted.size()); assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); } @@ -659,7 +710,7 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnPinningOnly() throws "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" ); - List metadataFilesToBeDeleted = translog.getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles); assertEquals(2, metadataFilesToBeDeleted.size()); assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(1)); @@ -682,7 +733,7 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeAndPinning() throw "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" ); - List metadataFilesToBeDeleted = translog.getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles); assertEquals(1, 
metadataFilesToBeDeleted.size()); assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(0)); } @@ -707,6 +758,8 @@ public void testIsGenerationPinned() { pinnedGenerations.add(new Tuple<>(142L, 180L)); pinnedGenerations.add(new Tuple<>(4L, 9L)); + RemoteFsTimestampAwareTranslog translog = (RemoteFsTimestampAwareTranslog) this.translog; + assertFalse(translog.isGenerationPinned(3, pinnedGenerations)); assertFalse(translog.isGenerationPinned(10, pinnedGenerations)); assertFalse(translog.isGenerationPinned(141, pinnedGenerations)); @@ -724,6 +777,8 @@ public void testIsGenerationPinned() { public void testGetMinMaxTranslogGenerationFromMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = mock(TranslogTransferManager.class); + RemoteFsTimestampAwareTranslog translog = (RemoteFsTimestampAwareTranslog) this.translog; + // Fetch generations directly from the filename assertEquals( new Tuple<>(701L, 1008L), diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 163340e8ec7d5..339d876274557 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -118,11 +118,11 @@ public class RemoteFsTranslogTests extends OpenSearchTestCase { protected final ShardId shardId = new ShardId("index", "_na_", 1); protected RemoteFsTranslog translog; - private AtomicLong globalCheckpoint; + protected AtomicLong globalCheckpoint; protected Path translogDir; // A default primary term is used by translog instances created in this test. protected final AtomicLong primaryTerm = new AtomicLong(); - private final AtomicBoolean primaryMode = new AtomicBoolean(true); + protected final AtomicBoolean primaryMode = new AtomicBoolean(true); private final AtomicReference persistedSeqNoConsumer = new AtomicReference<>(); protected ThreadPool threadPool; protected final static String METADATA_DIR = "metadata"; @@ -136,7 +136,7 @@ public class RemoteFsTranslogTests extends OpenSearchTestCase { TestTranslog.SlowDownWriteSwitch slowDown; - private LongConsumer getPersistedSeqNoConsumer() { + protected LongConsumer getPersistedSeqNoConsumer() { return seqNo -> { final LongConsumer consumer = persistedSeqNoConsumer.get(); if (consumer != null) { @@ -167,7 +167,7 @@ public void tearDown() throws Exception { } } - private RemoteFsTranslog create(Path path) throws IOException { + protected RemoteFsTranslog create(Path path) throws IOException { final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); return create(path, createRepository(), translogUUID, 0); } @@ -179,6 +179,14 @@ private RemoteFsTranslog create(Path path, BlobStoreRepository repository, Strin final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); threadPool = new TestThreadPool(getClass().getName()); blobStoreTransferService = new BlobStoreTransferService(repository.blobStore(), threadPool); + return createTranslogInstance(translogConfig, translogUUID, deletionPolicy); + } + + protected RemoteFsTranslog createTranslogInstance( + TranslogConfig translogConfig, + String translogUUID, + TranslogDeletionPolicy deletionPolicy + ) throws IOException { return new RemoteFsTranslog( translogConfig, translogUUID, @@ -671,13 +679,13 @@ public void testSimpleOperationsUpload() throws 
Exception { assertThat(snapshot.totalOperations(), equalTo(ops.size())); } - assertEquals(2, translog.allUploaded().size()); + assertEquals(4, translog.allUploaded().size()); addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 1, primaryTerm.get(), new byte[] { 1 })); - assertEquals(4, translog.allUploaded().size()); + assertEquals(6, translog.allUploaded().size()); translog.rollGeneration(); - assertEquals(4, translog.allUploaded().size()); + assertEquals(6, translog.allUploaded().size()); Set mdFiles = blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)); assertEquals(2, mdFiles.size()); @@ -736,7 +744,7 @@ public void testSimpleOperationsUpload() throws Exception { assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertBusy(() -> { - assertEquals(2, translog.allUploaded().size()); + assertEquals(4, translog.allUploaded().size()); assertEquals( 4, blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() @@ -755,7 +763,7 @@ public void testSimpleOperationsUpload() throws Exception { assertEquals(1, translog.readers.size()); assertEquals(1, translog.stats().estimatedNumberOfOperations()); assertBusy(() -> { - assertEquals(2, translog.allUploaded().size()); + assertEquals(4, translog.allUploaded().size()); assertEquals( 4, blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() @@ -774,7 +782,7 @@ public void testMetadataFileDeletion() throws Exception { assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); } - assertBusy(() -> assertEquals(2, translog.allUploaded().size())); + assertBusy(() -> assertEquals(4, translog.allUploaded().size())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); int moreDocs = randomIntBetween(3, 10); logger.info("numDocs={} moreDocs={}", numDocs, moreDocs); @@ -872,7 +880,7 @@ public void testDrainSync() throws Exception { assertBusy(() -> assertEquals(0, latch.getCount())); assertEquals(0, translog.availablePermits()); slowDown.setSleepSeconds(0); - assertEquals(4, translog.allUploaded().size()); + assertEquals(6, translog.allUploaded().size()); assertEquals(2, translog.readers.size()); Set mdFiles = blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)); @@ -881,7 +889,7 @@ public void testDrainSync() throws Exception { translog.trimUnreferencedReaders(); assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); - assertEquals(2, translog.allUploaded().size()); + assertEquals(6, translog.allUploaded().size()); assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); // Case 4 - After drainSync, if an upload is an attempted, we do not upload to remote store. 
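
// Hedged sketch of the check exercised by testIsGenerationPinned above, assuming each
// Tuple<Long, Long> in pinnedGenerations is an inclusive (min, max) range of pinned
// translog generations; the asserted values are consistent with that reading (3, 10
// and 141 fall outside the ranges (4, 9) and (142, 180)). Not the production code.
static boolean isPinnedSketch(long generation, List<Tuple<Long, Long>> pinnedRanges) {
    for (Tuple<Long, Long> range : pinnedRanges) {
        if (generation >= range.v1() && generation <= range.v2()) {
            return true;
        }
    }
    return false;
}
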
@@ -891,21 +899,21 @@ public void testDrainSync() throws Exception { new Translog.Index(String.valueOf(2), 2, primaryTerm.get(), new byte[] { 1 }) ); assertEquals(1, translog.readers.size()); - assertEquals(2, translog.allUploaded().size()); + assertEquals(6, translog.allUploaded().size()); assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); // Refill the permits back Releasables.close(releasable); addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(3), 3, primaryTerm.get(), new byte[] { 1 })); assertEquals(2, translog.readers.size()); - assertEquals(4, translog.allUploaded().size()); + assertEquals(8, translog.allUploaded().size()); assertEquals(3, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); translog.setMinSeqNoToKeep(3); translog.trimUnreferencedReaders(); assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); - assertBusy(() -> assertEquals(2, translog.allUploaded().size())); + assertBusy(() -> assertEquals(4, translog.allUploaded().size())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); } diff --git a/server/src/test/java/org/opensearch/repositories/IndexIdTests.java b/server/src/test/java/org/opensearch/repositories/IndexIdTests.java index 2b927b3b40115..3b719d287aa9b 100644 --- a/server/src/test/java/org/opensearch/repositories/IndexIdTests.java +++ b/server/src/test/java/org/opensearch/repositories/IndexIdTests.java @@ -32,6 +32,7 @@ package org.opensearch.repositories; +import org.opensearch.Version; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.json.JsonXContent; @@ -39,6 +40,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -52,51 +54,84 @@ public void testEqualsAndHashCode() { // assert equals and hashcode String name = randomAlphaOfLength(8); String id = UUIDs.randomBase64UUID(); - IndexId indexId1 = new IndexId(name, id); - IndexId indexId2 = new IndexId(name, id); + int shardPathType = randomIntBetween(0, 2); + IndexId indexId1 = new IndexId(name, id, shardPathType); + IndexId indexId2 = new IndexId(name, id, shardPathType); assertEquals(indexId1, indexId2); assertEquals(indexId1.hashCode(), indexId2.hashCode()); // assert equals when using index name for id id = name; - indexId1 = new IndexId(name, id); - indexId2 = new IndexId(name, id); + indexId1 = new IndexId(name, id, shardPathType); + indexId2 = new IndexId(name, id, shardPathType); assertEquals(indexId1, indexId2); assertEquals(indexId1.hashCode(), indexId2.hashCode()); - // assert not equals when name or id differ - indexId2 = new IndexId(randomAlphaOfLength(8), id); + // assert not equals when name, id, or shardPathType differ + indexId2 = new IndexId(randomAlphaOfLength(8), id, shardPathType); assertNotEquals(indexId1, indexId2); assertNotEquals(indexId1.hashCode(), indexId2.hashCode()); - indexId2 = new IndexId(name, UUIDs.randomBase64UUID()); + indexId2 = new IndexId(name, UUIDs.randomBase64UUID(), shardPathType); assertNotEquals(indexId1, indexId2); assertNotEquals(indexId1.hashCode(), indexId2.hashCode()); + int newShardPathType = 
randomIntBetween(0, 2); + indexId2 = new IndexId(name, id, newShardPathType); + if (shardPathType == newShardPathType) { + assertEquals(indexId1, indexId2); + assertEquals(indexId1.hashCode(), indexId2.hashCode()); + } else { + assertNotEquals(indexId1, indexId2); + assertNotEquals(indexId1.hashCode(), indexId2.hashCode()); + } } public void testSerialization() throws IOException { - IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); + IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID(), randomIntBetween(0, 2)); BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(Version.CURRENT); indexId.writeTo(out); assertEquals(indexId, new IndexId(out.bytes().streamInput())); } public void testXContent() throws IOException { - IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); + String name = randomAlphaOfLength(8); + String id = UUIDs.randomBase64UUID(); + int shardPathType = randomIntBetween(0, 2); + IndexId indexId = new IndexId(name, id, shardPathType); XContentBuilder builder = JsonXContent.contentBuilder(); indexId.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - String name = null; - String id = null; + String parsedName = null; + String parsedId = null; + int parsedShardPathType = -1; while (parser.nextToken() != XContentParser.Token.END_OBJECT) { final String currentFieldName = parser.currentName(); parser.nextToken(); - if (currentFieldName.equals(IndexId.NAME)) { - name = parser.text(); - } else if (currentFieldName.equals(IndexId.ID)) { - id = parser.text(); + switch (currentFieldName) { + case IndexId.NAME: + parsedName = parser.text(); + break; + case IndexId.ID: + parsedId = parser.text(); + break; + case IndexId.SHARD_PATH_TYPE: + parsedShardPathType = parser.intValue(); + break; } } - assertNotNull(name); - assertNotNull(id); - assertEquals(indexId, new IndexId(name, id)); + parser.close(); + assertNotNull(parsedName); + assertNotNull(parsedId); + assertNotEquals(-1, parsedShardPathType); + assertEquals(name, parsedName); + assertEquals(id, parsedId); + assertEquals(shardPathType, parsedShardPathType); + } + + public void testDefaultShardPathType() { + String name = randomAlphaOfLength(8); + String id = UUIDs.randomBase64UUID(); + IndexId indexId = new IndexId(name, id); + assertEquals(IndexId.DEFAULT_SHARD_PATH_TYPE, indexId.getShardPathType()); + assertEquals(RemoteStoreEnums.PathType.FIXED.getCode(), IndexId.DEFAULT_SHARD_PATH_TYPE); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java index a5bfca6892013..e43335246deb3 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java @@ -42,12 +42,14 @@ import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import 
java.util.HashMap; @@ -59,6 +61,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.repositories.RepositoryData.EMPTY_REPO_GEN; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -70,7 +74,7 @@ public class RepositoryDataTests extends OpenSearchTestCase { public void testEqualsAndHashCode() { - RepositoryData repositoryData1 = generateRandomRepoData(); + RepositoryData repositoryData1 = generateRandomRepoData(FIXED.getCode()); RepositoryData repositoryData2 = repositoryData1.copy(); assertEquals(repositoryData1, repositoryData2); assertEquals(repositoryData1.hashCode(), repositoryData2.hashCode()); @@ -332,7 +336,7 @@ public void testIndexThatReferenceANullSnapshot() throws IOException { ); assertThat( e.getMessage(), - equalTo("Detected a corrupted repository, " + "index [docs/_id] references an unknown snapshot uuid [null]") + equalTo("Detected a corrupted repository, " + "index [docs/_id/0] references an unknown snapshot uuid [null]") ); } } @@ -403,11 +407,95 @@ public void testIndexMetaDataToRemoveAfterRemovingSnapshotWithSharing() { assertEquals(newRepoData.indexMetaDataToRemoveAfterRemovingSnapshots(Collections.singleton(otherSnapshotId)), removeFromOther); } + public void testResolveNewIndices() { + // Test case 1: All indices are new + List indicesToResolve = Arrays.asList("index1", "index2", "index3"); + Map inFlightIds = Collections.emptyMap(); + int pathType = randomIntBetween(0, 2); + List resolvedIndices = RepositoryData.EMPTY.resolveNewIndices(indicesToResolve, inFlightIds, pathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + assertTrue(indicesToResolve.contains(indexId.getName())); + assertNotNull(indexId.getId()); + assertEquals(pathType, indexId.getShardPathType()); + } + + // Test case 2: Some indices are existing, some are new + RepositoryData repositoryData = generateRandomRepoData(); + Map existingIndices = repositoryData.getIndices(); + List existingIndexNames = new ArrayList<>(existingIndices.keySet()); + List newIndexNames = Arrays.asList("newIndex1", "newIndex2"); + indicesToResolve = new ArrayList<>(existingIndexNames); + indicesToResolve.addAll(newIndexNames); + pathType = randomIntBetween(0, 2); + resolvedIndices = repositoryData.resolveNewIndices(indicesToResolve, Collections.emptyMap(), pathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + if (existingIndexNames.contains(indexId.getName())) { + assertEquals(existingIndices.get(indexId.getName()), indexId); + } else { + assertTrue(newIndexNames.contains(indexId.getName())); + assertNotNull(indexId.getId()); + assertEquals(pathType, indexId.getShardPathType()); + } + } + + // Test case 3: Some indices are in-flight + Map inFlightIndexIds = new HashMap<>(); + for (String indexName : newIndexNames) { + inFlightIndexIds.put(indexName, new IndexId(indexName, UUIDs.randomBase64UUID(), pathType)); + } + resolvedIndices = repositoryData.resolveNewIndices(indicesToResolve, inFlightIndexIds, pathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + if (existingIndexNames.contains(indexId.getName())) { + assertEquals(existingIndices.get(indexId.getName()), 
indexId); + } else if (newIndexNames.contains(indexId.getName())) { + assertEquals(inFlightIndexIds.get(indexId.getName()), indexId); + } else { + fail("Unexpected index: " + indexId.getName()); + } + } + } + + public void testResolveNewIndicesWithDifferentPathType() { + // Generate repository data with a fixed path type + int existingPathType = PathType.FIXED.getCode(); + RepositoryData repositoryData = generateRandomRepoData(existingPathType); + Map existingIndices = repositoryData.getIndices(); + + // Create a list of existing and new index names + List existingIndexNames = new ArrayList<>(existingIndices.keySet()); + List newIndexNames = Arrays.asList("newIndex1", "newIndex2"); + List indicesToResolve = new ArrayList<>(existingIndexNames); + indicesToResolve.addAll(newIndexNames); + + // Use a different path type for new indices + int newPathType = HASHED_PREFIX.getCode(); + + List resolvedIndices = repositoryData.resolveNewIndices(indicesToResolve, Collections.emptyMap(), newPathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + if (existingIndexNames.contains(indexId.getName())) { + assertEquals(existingIndices.get(indexId.getName()), indexId); + assertEquals(existingPathType, indexId.getShardPathType()); + } else { + assertTrue(newIndexNames.contains(indexId.getName())); + assertNotNull(indexId.getId()); + assertEquals(newPathType, indexId.getShardPathType()); + } + } + } + public static RepositoryData generateRandomRepoData() { + return generateRandomRepoData(randomFrom(PathType.values()).getCode()); + } + + public static RepositoryData generateRandomRepoData(int pathType) { final int numIndices = randomIntBetween(1, 30); final List indices = new ArrayList<>(numIndices); for (int i = 0; i < numIndices; i++) { - indices.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())); + indices.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID(), pathType)); } final int numSnapshots = randomIntBetween(1, 30); RepositoryData repositoryData = RepositoryData.EMPTY; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 29ffb94ce8bf4..958a499ada167 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -9,7 +9,6 @@ package org.opensearch.repositories.blobstore; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; @@ -99,25 +98,15 @@ protected void assertSnapshotOrGenericThread() { } protected void createRepository(Client client, String repoName) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder().put(node().settings()).put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())); + 
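
// Hedged summary of the resolution order asserted by the testResolveNewIndices cases
// above, written as an illustrative helper rather than the production RepositoryData
// logic: an index name already known to the repository keeps its existing IndexId (and
// therefore its original shard path type), an in-flight IndexId is reused as-is, and
// only genuinely new indices get a fresh UUID with the requested path type. Per
// testDefaultShardPathType, new IndexId(name, id) without an explicit path type
// defaults to PathType.FIXED.
static List<IndexId> resolveNewIndicesSketch(
    List<String> indicesToResolve,
    Map<String, IndexId> existingIndices,
    Map<String, IndexId> inFlightIds,
    int pathType
) {
    List<IndexId> resolved = new ArrayList<>();
    for (String name : indicesToResolve) {
        if (existingIndices.containsKey(name)) {
            resolved.add(existingIndices.get(name));   // existing index keeps its IndexId and path type
        } else if (inFlightIds.containsKey(name)) {
            resolved.add(inFlightIds.get(name));       // in-flight id is reused unchanged
        } else {
            resolved.add(new IndexId(name, UUIDs.randomBase64UUID(), pathType));  // genuinely new index
        }
    }
    return resolved;
}
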
OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repoName, REPO_TYPE, settings); } protected void createRepository(Client client, String repoName, Settings repoSettings) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings(repoSettings) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settingsBuilder = Settings.builder().put(repoSettings); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repoName, REPO_TYPE, settingsBuilder); } protected void updateRepository(Client client, String repoName, Settings repoSettings) { diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 6b550cbc60b29..fcc0c5198894f 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -34,19 +34,28 @@ import org.opensearch.Version; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.RepositoryPlugin; @@ -55,9 +64,12 @@ import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotShardPaths; +import org.opensearch.snapshots.SnapshotShardPaths.ShardInfo; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchIntegTestCase; @@ -65,15 +77,30 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.stream.Collectors; import static org.opensearch.repositories.RepositoryDataTests.generateRandomRepoData; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * Tests for the {@link BlobStoreRepository} and its subclasses. @@ -114,13 +141,8 @@ public void testRetrieveSnapshots() throws Exception { final String repositoryName = "test-repo"; logger.info("--> creating repository"); - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings(Settings.builder().put(node().settings()).put("location", location)) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder().put(node().settings()).put("location", location); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings); logger.info("--> creating an index and indexing documents"); final String indexName = "test-idx"; @@ -239,20 +261,13 @@ public void testBadChunksize() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final String repositoryName = "test-repo"; - + Settings.Builder settings = Settings.builder() + .put(node().settings()) + .put("location", location) + .put("chunk_size", randomLongBetween(-10, 0), ByteSizeUnit.BYTES); expectThrows( RepositoryException.class, - () -> client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder() - .put(node().settings()) - .put("location", location) - .put("chunk_size", randomLongBetween(-10, 0), ByteSizeUnit.BYTES) - ) - .get() + () -> OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings) ); } @@ -260,18 +275,11 @@ public void testPrefixModeVerification() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final String repositoryName = "test-repo"; - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder() - .put(node().settings()) - .put("location", location) - .put(BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(), true) - ) - .get(); - assertTrue(putRepositoryResponse.isAcknowledged()); + Settings.Builder settings = Settings.builder() + .put(node().settings()) + .put("location", location) + .put(BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(), true); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); @@ -305,13 +313,8 @@ private 
BlobStoreRepository setupRepo() { final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final String repositoryName = "test-repo"; - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings(Settings.builder().put(node().settings()).put("location", location)) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder().put(node().settings()).put("location", location); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); @@ -371,4 +374,258 @@ public void testRemoteStoreShardCleanupTask() { remoteStoreShardCleanupTask.run(); assertFalse(executed1.get()); } + + public void testParseShardPath() { + RepositoryData repoData = generateRandomRepoData(); + IndexId indexId = repoData.getIndices().values().iterator().next(); + int shardCount = repoData.shardGenerations().getGens(indexId).size(); + + String shardPath = String.join( + SnapshotShardPaths.DELIMITER, + indexId.getId(), + indexId.getName(), + String.valueOf(shardCount), + String.valueOf(indexId.getShardPathType()), + "1" + ); + ShardInfo shardInfo = SnapshotShardPaths.parseShardPath(shardPath); + + assertEquals(shardInfo.getIndexId(), indexId); + assertEquals(shardInfo.getShardCount(), shardCount); + } + + public void testWriteAndReadShardPaths() throws Exception { + BlobStoreRepository repository = setupRepo(); + RepositoryData repoData = generateRandomRepoData(); + SnapshotId snapshotId = repoData.getSnapshotIds().iterator().next(); + + Set writtenShardPaths = new HashSet<>(); + for (IndexId indexId : repoData.getIndices().values()) { + if (indexId.getShardPathType() != IndexId.DEFAULT_SHARD_PATH_TYPE) { + String shardPathBlobName = repository.writeIndexShardPaths(indexId, snapshotId, indexId.getShardPathType()); + writtenShardPaths.add(shardPathBlobName); + } + } + + // Read shard paths and verify + Map shardPathBlobs = repository.snapshotShardPathBlobContainer().listBlobs(); + + // Create sets for comparison + Set expectedPaths = new HashSet<>(writtenShardPaths); + Set actualPaths = new HashSet<>(shardPathBlobs.keySet()); + + // Remove known extra files - "extra0" file is added by the ExtrasFS, which is part of Lucene's test framework + actualPaths.remove("extra0"); + + // Check if all expected paths are present in the actual paths + assertTrue("All expected paths should be present", actualPaths.containsAll(expectedPaths)); + + // Check if there are any unexpected additional paths + Set unexpectedPaths = new HashSet<>(actualPaths); + unexpectedPaths.removeAll(expectedPaths); + if (!unexpectedPaths.isEmpty()) { + logger.warn("Unexpected additional paths found: " + unexpectedPaths); + } + + assertEquals("Expected and actual paths should match after removing known extra files", expectedPaths, actualPaths); + + for (String shardPathBlobName : expectedPaths) { + SnapshotShardPaths.ShardInfo shardInfo = SnapshotShardPaths.parseShardPath(shardPathBlobName); + IndexId indexId = repoData.getIndices().get(shardInfo.getIndexId().getName()); + assertNotNull("IndexId should not be null", indexId); + assertEquals("Index ID should match", shardInfo.getIndexId().getId(), indexId.getId()); + assertEquals("Shard path 
type should match", shardInfo.getIndexId().getShardPathType(), indexId.getShardPathType()); + String[] parts = shardPathBlobName.split("\\" + SnapshotShardPaths.DELIMITER); + assertEquals( + "Path hash algorithm should be FNV_1A_COMPOSITE_1", + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1, + RemoteStoreEnums.PathHashAlgorithm.fromCode(Integer.parseInt(parts[4])) + ); + } + } + + public void testCleanupStaleIndices() throws Exception { + // Mock the BlobStoreRepository + BlobStoreRepository repository = mock(BlobStoreRepository.class); + + // Mock BlobContainer for stale index + BlobContainer staleIndexContainer = mock(BlobContainer.class); + when(staleIndexContainer.delete()).thenReturn(new DeleteResult(1, 100L)); + + // Mock BlobContainer for current index + BlobContainer currentIndexContainer = mock(BlobContainer.class); + + Map foundIndices = new HashMap<>(); + foundIndices.put("stale-index", staleIndexContainer); + foundIndices.put("current-index", currentIndexContainer); + + Set survivingIndexIds = new HashSet<>(); + survivingIndexIds.add("current-index"); + + // Create a mock RemoteStoreLockManagerFactory + RemoteStoreLockManagerFactory mockRemoteStoreLockManagerFactory = mock(RemoteStoreLockManagerFactory.class); + RemoteStoreLockManager mockLockManager = mock(RemoteStoreLockManager.class); + when(mockRemoteStoreLockManagerFactory.newLockManager(anyString(), anyString(), anyString(), any())).thenReturn(mockLockManager); + + // Create mock snapshot shard paths + Map mockSnapshotShardPaths = new HashMap<>(); + String validShardPath = "stale-index-id#stale-index#1#0#1"; + mockSnapshotShardPaths.put(validShardPath, mock(BlobMetadata.class)); + + // Mock snapshotShardPathBlobContainer + BlobContainer mockSnapshotShardPathBlobContainer = mock(BlobContainer.class); + when(mockSnapshotShardPathBlobContainer.delete()).thenReturn(new DeleteResult(1, 50L)); + when(repository.snapshotShardPathBlobContainer()).thenReturn(mockSnapshotShardPathBlobContainer); + + // Mock the cleanupStaleIndices method to call our test implementation + doAnswer(invocation -> { + Map indices = invocation.getArgument(0); + Set surviving = invocation.getArgument(1); + GroupedActionListener listener = invocation.getArgument(3); + + // Simulate the cleanup process + DeleteResult result = DeleteResult.ZERO; + for (Map.Entry entry : indices.entrySet()) { + if (!surviving.contains(entry.getKey())) { + result = result.add(entry.getValue().delete()); + } + } + result = result.add(mockSnapshotShardPathBlobContainer.delete()); + + listener.onResponse(result); + return null; + }).when(repository).cleanupStaleIndices(any(), any(), any(), any(), any(), anyMap()); + + AtomicReference> resultReference = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + + GroupedActionListener listener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { + resultReference.set(deleteResults); + latch.countDown(); + }, e -> { + logger.error("Error in cleanupStaleIndices", e); + latch.countDown(); + }), 1); + + // Call the method we're testing + repository.cleanupStaleIndices( + foundIndices, + survivingIndexIds, + mockRemoteStoreLockManagerFactory, + listener, + mockSnapshotShardPaths, + Collections.emptyMap() + ); + + assertTrue("Cleanup did not complete within the expected time", latch.await(30, TimeUnit.SECONDS)); + + Collection results = resultReference.get(); + assertNotNull("DeleteResult collection should not be null", results); + assertFalse("DeleteResult collection should not be empty", 
results.isEmpty()); + + DeleteResult combinedResult = results.stream().reduce(DeleteResult.ZERO, DeleteResult::add); + + assertTrue("Bytes deleted should be greater than 0", combinedResult.bytesDeleted() > 0); + assertTrue("Blobs deleted should be greater than 0", combinedResult.blobsDeleted() > 0); + + // Verify that the stale index was processed for deletion + verify(staleIndexContainer, times(1)).delete(); + + // Verify that the current index was not processed for deletion + verify(currentIndexContainer, never()).delete(); + + // Verify that snapshot shard paths were considered in the cleanup process + verify(mockSnapshotShardPathBlobContainer, times(1)).delete(); + + // Verify the total number of bytes and blobs deleted + assertEquals("Total bytes deleted should be 150", 150L, combinedResult.bytesDeleted()); + assertEquals("Total blobs deleted should be 2", 2, combinedResult.blobsDeleted()); + } + + public void testGetMetadata() { + BlobStoreRepository repository = setupRepo(); + RepositoryMetadata metadata = repository.getMetadata(); + assertNotNull(metadata); + assertEquals(metadata.name(), "test-repo"); + assertEquals(metadata.type(), REPO_TYPE); + repository.close(); + } + + public void testGetNamedXContentRegistry() { + BlobStoreRepository repository = setupRepo(); + NamedXContentRegistry registry = repository.getNamedXContentRegistry(); + assertNotNull(registry); + repository.close(); + } + + public void testGetCompressor() { + BlobStoreRepository repository = setupRepo(); + Compressor compressor = repository.getCompressor(); + assertNotNull(compressor); + repository.close(); + } + + public void testGetStats() { + BlobStoreRepository repository = setupRepo(); + RepositoryStats stats = repository.stats(); + assertNotNull(stats); + repository.close(); + } + + public void testGetSnapshotThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getSnapshotThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetRestoreThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getRestoreThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetRemoteUploadThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getRemoteUploadThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetLowPriorityRemoteUploadThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getLowPriorityRemoteUploadThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetRemoteDownloadThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getRemoteDownloadThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testIsReadOnly() { + BlobStoreRepository repository = setupRepo(); + assertFalse(repository.isReadOnly()); + repository.close(); + } + + public void testIsSystemRepository() { + BlobStoreRepository repository = setupRepo(); + assertFalse(repository.isSystemRepository()); + repository.close(); + } + + public void testGetRestrictedSystemRepositorySettings() { + BlobStoreRepository repository = setupRepo(); + List> settings = repository.getRestrictedSystemRepositorySettings(); + assertNotNull(settings); + 
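
// Hedged note on the snapshot shard path blob names used by testParseShardPath,
// testWriteAndReadShardPaths and testCleanupStaleIndices above: the name joins five
// fields with SnapshotShardPaths.DELIMITER ("#" in the literal used by
// testCleanupStaleIndices), in the order those tests assert:
//   <indexUUID>#<indexName>#<shardCount>#<shardPathType>#<pathHashAlgorithm>
// Splitting the literal from testCleanupStaleIndices apart for illustration:
String[] parts = "stale-index-id#stale-index#1#0#1".split("#");
int shardCount = Integer.parseInt(parts[2]);        // one shard
int shardPathType = Integer.parseInt(parts[3]);     // shard path type code, see IndexId.getShardPathType()
int hashAlgorithmCode = Integer.parseInt(parts[4]); // per testWriteAndReadShardPaths this resolves to FNV_1A_COMPOSITE_1
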
assertTrue(settings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(settings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(settings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + repository.close(); + } } diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index a1a808c9faa9b..491a0377ab32e 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -51,6 +51,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; @@ -67,9 +68,16 @@ import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.AbstractQueryBuilder; +import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ParsedQuery; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.search.aggregations.SearchContextAggregations; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.deciders.ConcurrentSearchDecider; +import org.opensearch.search.deciders.ConcurrentSearchDecision; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.LegacyReaderContext; import org.opensearch.search.internal.PitReaderContext; @@ -84,8 +92,10 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -220,7 +230,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); contextWithoutScroll.from(300); contextWithoutScroll.close(); @@ -263,7 +274,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); context1.from(300); exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false)); @@ -334,7 +346,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); SliceBuilder sliceBuilder = mock(SliceBuilder.class); @@ -374,7 +387,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false); @@ -410,7 +424,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); context4.sliceBuilder(new SliceBuilder(1, 2)).parsedQuery(parsedQuery).preProcess(false); Query query1 = context4.query(); @@ -441,7 +456,8 @@ protected 
Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); int numSlicesForPit = maxSlicesPerPit + randomIntBetween(1, 100); when(sliceBuilder.getMax()).thenReturn(numSlicesForPit); @@ -547,7 +563,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); assertThat(context.searcher().hasCancellations(), is(false)); context.searcher().addQueryCancellation(() -> {}); @@ -643,6 +660,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { final ClusterService clusterService = mock(ClusterService.class); final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING); + // clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE); clusterSettings.applySettings( Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() ); @@ -651,7 +669,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { readerContext, shardSearchRequest, target, - null, + clusterService, bigArrays, null, null, @@ -660,7 +678,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); // Case1: if sort is on timestamp field, non-concurrent path is used @@ -685,7 +704,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); context.sort( new SortAndFormats(new Sort(new SortField("test2", SortField.Type.INT)), new DocValueFormat[] { DocValueFormat.RAW }) @@ -712,7 +732,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); context.evaluateRequestShouldUseConcurrentSearch(); if (executor == null) { @@ -744,7 +765,8 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); context.evaluateRequestShouldUseConcurrentSearch(); assertFalse(context.shouldUseConcurrentSearch()); @@ -772,12 +794,371 @@ protected Engine.Searcher acquireSearcherInternal(String source) { Version.CURRENT, false, executor, - null + null, + Collections.emptyList() ); context.evaluateRequestShouldUseConcurrentSearch(); assertFalse(context.shouldUseConcurrentSearch()); assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + if (clusterService.getClusterSettings().get(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)) { + assertSettingDeprecationsAndWarnings(new Setting[] { SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING }); + } + + // shutdown the threadpool + threadPool.shutdown(); + } + } + + public void testSearchPathEvaluationWithConcurrentSearchModeAsAuto() throws Exception { + ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); + when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); + ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); + when(shardSearchRequest.shardId()).thenReturn(shardId); + + ThreadPool threadPool = new TestThreadPool(this.getClass().getName()); + IndexShard indexShard = mock(IndexShard.class); + QueryCachingPolicy queryCachingPolicy = 
mock(QueryCachingPolicy.class); + when(indexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(indexShard.getThreadPool()).thenReturn(threadPool); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .build(); + + IndexService indexService = mock(IndexService.class); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class), anyBoolean())).thenReturn( + queryShardContext + ); + + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + when(indexService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); + + BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + + IndexShard systemIndexShard = mock(IndexShard.class); + when(systemIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(systemIndexShard.getThreadPool()).thenReturn(threadPool); + when(systemIndexShard.isSystem()).thenReturn(true); + + IndexShard throttledIndexShard = mock(IndexShard.class); + when(throttledIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(throttledIndexShard.getThreadPool()).thenReturn(threadPool); + IndexSettings throttledIndexSettings = new IndexSettings( + indexMetadata, + Settings.builder().put(INDEX_SEARCH_THROTTLED.getKey(), true).build() + ); + when(throttledIndexShard.indexSettings()).thenReturn(throttledIndexSettings); + + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + + final Supplier searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { + @Override + protected void doClose() {} + + @Override + protected Engine.Searcher acquireSearcherInternal(String source) { + try { + IndexReader reader = w.getReader(); + return new Engine.Searcher( + "test", + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + reader + ); + } catch (IOException exc) { + throw new AssertionError(exc); + } + } + }; + + SearchShardTarget target = new SearchShardTarget("node", shardId, null, OriginalIndices.NONE); + ReaderContext readerContext = new ReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + + final ClusterService clusterService = mock(ClusterService.class); + final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING); + clusterSettings.registerSetting(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE); + clusterSettings.applySettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), "auto").build() + ); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + when(clusterService.getSettings()).thenReturn(settings); + + DefaultSearchContext context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + 
executor, + null, + Collections.emptyList() + ); + + // Case1: if there is no agg in the query, non-concurrent path is used + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case2: if unsupported agg present, non-concurrent path is used + SearchContextAggregations mockAggregations = mock(SearchContextAggregations.class); + when(mockAggregations.factories()).thenReturn(mock(AggregatorFactories.class)); + when(mockAggregations.factories().allFactoriesSupportConcurrentSearch()).thenReturn(false); + when(mockAggregations.multiBucketConsumer()).thenReturn(mock(MultiBucketConsumerService.MultiBucketConsumer.class)); + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + Collections.emptyList() + ); + + // add unsupported agg operation + context.aggregations(mockAggregations); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertFalse(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case3: if supported agg present, concurrent path is used + + // set agg operation to be supported + when(mockAggregations.factories().allFactoriesSupportConcurrentSearch()).thenReturn(true); + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + Collections.emptyList() + ); + // create a supported agg operation + context.aggregations(mockAggregations); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertTrue(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case4: multiple deciders are registered and all of them opt out of decision-making + // with supported agg query so concurrent path is used + + ConcurrentSearchDecider decider1 = mock(ConcurrentSearchDecider.class); + when(decider1.canEvaluateForIndex(any())).thenReturn(false); + ConcurrentSearchDecider decider2 = mock(ConcurrentSearchDecider.class); + when(decider2.canEvaluateForIndex(any())).thenReturn(false); + + Collection<ConcurrentSearchDecider> concurrentSearchDeciders = new ArrayList<>(); + concurrentSearchDeciders.add(decider1); + concurrentSearchDeciders.add(decider2); + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + concurrentSearchDeciders + ); + // create a supported agg operation + context.aggregations(mockAggregations); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertTrue(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case5: multiple deciders are registered and one of them returns ConcurrentSearchDecision.DecisionStatus.NO + // use non-concurrent path even if query contains
supported agg + when(decider1.canEvaluateForIndex(any())).thenReturn(true); + when(decider1.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO, "disable concurrent search") + ); + when(decider2.canEvaluateForIndex(any())).thenReturn(false); + + concurrentSearchDeciders.clear(); + concurrentSearchDeciders.add(decider1); + concurrentSearchDeciders.add(decider2); + + // create a source so that query tree is parsed by visitor + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + BoolQueryBuilder queryBuilder = new BoolQueryBuilder(); + sourceBuilder.query(queryBuilder); + when(shardSearchRequest.source()).thenReturn(sourceBuilder); + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + concurrentSearchDeciders + ); + + // create a supported agg operation + context.aggregations(mockAggregations); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertFalse(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case6: multiple deciders are registered and first decider returns ConcurrentSearchDecision.DecisionStatus.YES + // while second decider returns ConcurrentSearchDecision.DecisionStatus.NO + // use non-concurrent path even if query contains supported agg + when(decider1.canEvaluateForIndex(any())).thenReturn(true); + when(decider1.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.YES, "enable concurrent search") + ); + when(decider2.canEvaluateForIndex(any())).thenReturn(true); + when(decider2.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO, "disable concurrent search") + ); + + concurrentSearchDeciders.clear(); + concurrentSearchDeciders.add(decider1); + concurrentSearchDeciders.add(decider2); + + // create a source so that query tree is parsed by visitor + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + concurrentSearchDeciders + ); + + // create a supported agg operation + context.aggregations(mockAggregations); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertFalse(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case7: multiple deciders are registered and all return ConcurrentSearchDecision.DecisionStatus.NO_OP + // but un-supported agg query is present, use non-concurrent path + when(decider1.canEvaluateForIndex(any())).thenReturn(true); + when(decider1.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO_OP, "noop") + ); + when(decider2.canEvaluateForIndex(any())).thenReturn(true); + when(decider2.getConcurrentSearchDecision()).thenReturn( + new ConcurrentSearchDecision(ConcurrentSearchDecision.DecisionStatus.NO_OP, "noop") + ); + + when(mockAggregations.factories().allFactoriesSupportConcurrentSearch()).thenReturn(false); + + 
concurrentSearchDeciders.clear(); + concurrentSearchDeciders.add(decider1); + concurrentSearchDeciders.add(decider2); + + // create a source so that query tree is parsed by visitor + + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + clusterService, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null, + concurrentSearchDeciders + ); + + // create a supported agg operation + context.aggregations(mockAggregations); + context.evaluateRequestShouldUseConcurrentSearch(); + if (executor == null) { + assertFalse(context.shouldUseConcurrentSearch()); + } else { + assertFalse(context.shouldUseConcurrentSearch()); + } + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // shutdown the threadpool threadPool.shutdown(); } diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 4b57ee9aec106..b3483b76dee1c 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -70,6 +70,7 @@ import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.aggregations.support.ValuesSourceType; +import org.opensearch.search.deciders.ConcurrentSearchDecider; import org.opensearch.search.fetch.FetchSubPhase; import org.opensearch.search.fetch.subphase.ExplainPhase; import org.opensearch.search.fetch.subphase.highlight.CustomHighlighter; @@ -507,6 +508,73 @@ public Optional getIndexSearcherExecutorProvider() { expectThrows(IllegalStateException.class, () -> new SearchModule(Settings.EMPTY, searchPlugins)); } + public void testRegisterConcurrentSearchDecidersNoExternalPlugins() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); + assertEquals(searchModule.getConcurrentSearchDeciders().size(), 0); + } + + public void testRegisterConcurrentSearchDecidersExternalPluginsWithNoDeciders() { + SearchPlugin plugin1 = new SearchPlugin() { + @Override + public Optional getIndexSearcherExecutorProvider() { + return Optional.of(mock(ExecutorServiceProvider.class)); + } + }; + SearchPlugin plugin2 = new SearchPlugin() { + }; + + List searchPlugins = new ArrayList<>(); + searchPlugins.add(plugin1); + searchPlugins.add(plugin2); + SearchModule searchModule = new SearchModule(Settings.EMPTY, searchPlugins); + + assertEquals(searchModule.getConcurrentSearchDeciders().size(), 0); + } + + public void testRegisterConcurrentSearchDecidersExternalPluginsWithDeciders() { + SearchPlugin pluginDecider1 = new SearchPlugin() { + @Override + public Optional getIndexSearcherExecutorProvider() { + return Optional.of(mock(ExecutorServiceProvider.class)); + } + + @Override + public ConcurrentSearchDecider getConcurrentSearchDecider() { + return mock(ConcurrentSearchDecider.class); + } + }; + + SearchPlugin pluginDecider2 = new SearchPlugin() { + @Override + public ConcurrentSearchDecider getConcurrentSearchDecider() { + return mock(ConcurrentSearchDecider.class); + } + }; + + List searchPlugins = new ArrayList<>(); + searchPlugins.add(pluginDecider1); + searchPlugins.add(pluginDecider2); + + SearchModule searchModule = new SearchModule(Settings.EMPTY, searchPlugins); + assertEquals(searchModule.getConcurrentSearchDeciders().size(), 2); + } + + public void 
testRegisterConcurrentSearchDecidersPluginWithNullDecider() { + SearchPlugin pluginWithNullDecider = new SearchPlugin() { + @Override + public ConcurrentSearchDecider getConcurrentSearchDecider() { + return null; + } + }; + + List searchPlugins = new ArrayList<>(); + searchPlugins.add(pluginWithNullDecider); + SearchModule searchModule = new SearchModule(Settings.EMPTY, searchPlugins); + // null decider is filtered out, so 0 deciders + assertEquals(searchModule.getConcurrentSearchDeciders().size(), 0); + + } + private static final String[] NON_DEPRECATED_QUERIES = new String[] { "bool", "boosting", diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 8fb546d2592d9..b6aa033540710 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -56,6 +56,7 @@ import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; @@ -92,8 +93,10 @@ import org.opensearch.script.Script; import org.opensearch.script.ScriptType; import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.MultiBucketConsumerService; +import org.opensearch.search.aggregations.SearchContextAggregations; import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.opensearch.search.aggregations.support.ValueType; @@ -140,6 +143,8 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class SearchServiceTests extends OpenSearchSingleNodeTestCase { @@ -1308,17 +1313,40 @@ public void testCreateSearchContext() throws IOException { * index and cluster settings. */ public void testConcurrentSegmentSearchSearchContext() throws IOException { - Boolean[][] scenarios = { - // cluster setting, index setting, concurrent search enabled? - { null, null, false }, - { null, false, false }, - { null, true, true }, - { true, null, true }, - { true, false, false }, - { true, true, true }, - { false, null, false }, - { false, false, false }, - { false, true, true } }; + Object[][] scenarios = { + // cluster setting, index setting, cluster mode setting, concurrent search enabled?, concurrent search executor null? 
+ { null, null, null, false, true }, + { null, false, null, false, true }, + { null, true, null, true, false }, + { true, null, null, true, false }, + { true, false, null, false, true }, + { true, true, null, true, false }, + { false, null, null, false, true }, + { false, false, null, false, true }, + { false, true, null, true, false }, + + // Adding cases with mode set to "none" + { null, null, "none", false, true }, + { true, true, "none", false, true }, + { false, false, "none", false, true }, + { true, false, "none", false, true }, + { false, true, "none", false, true }, + + // Adding cases with mode set to "all" + { null, null, "all", true, false }, + { true, true, "all", true, false }, + { false, false, "all", true, false }, + { true, false, "all", true, false }, + { false, true, "all", true, false }, + + // Adding cases with mode set to "auto" + // auto mode concurrent search is false since request has no aggregation + // however concurrentSearchExecutor will not be null + { null, null, "auto", false, false }, + { true, true, "auto", false, false }, + { false, false, "auto", false, false }, + { true, false, "auto", false, false }, + { false, true, "auto", false, false } }; String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); IndexService indexService = createIndex(index); @@ -1340,10 +1368,12 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { Strings.EMPTY_ARRAY ); - for (Boolean[] scenario : scenarios) { - Boolean clusterSetting = scenario[0]; - Boolean indexSetting = scenario[1]; - Boolean concurrentSearchEnabled = scenario[2]; + for (Object[] scenario : scenarios) { + Boolean clusterSetting = (Boolean) scenario[0]; + Boolean indexSetting = (Boolean) scenario[1]; + String mode = (String) scenario[2]; + Boolean concurrentSearchEnabled = (Boolean) scenario[3]; + Boolean concurrentSearchExecutorNull = (Boolean) scenario[4]; if (clusterSetting == null) { client().admin() @@ -1375,6 +1405,21 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get(); } + // update mode + if (mode == null) { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey())) + .get(); + } else { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), mode)) + .get(); + } + try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { assertEquals( clusterSetting, @@ -1395,10 +1440,22 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .get() .getSetting(index, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) ); + + assertEquals( + mode, + client().admin() + .cluster() + .prepareState() + .get() + .getState() + .getMetadata() + .transientSettings() + .get(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), null) + ); searchContext.evaluateRequestShouldUseConcurrentSearch(); assertEquals(concurrentSearchEnabled, searchContext.shouldUseConcurrentSearch()); - // verify executor nullability with concurrent search enabled/disabled - if (concurrentSearchEnabled) { + // verify executor nullability + if (!concurrentSearchExecutorNull) { assertNotNull(searchContext.searcher().getExecutor()); } else { assertNull(searchContext.searcher().getExecutor()); @@ -1409,8 +1466,133 @@ public void 
testConcurrentSegmentSearchSearchContext() throws IOException { client().admin() .cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey())) + .setTransientSettings( + Settings.builder() + .putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) + .putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey()) + ) .get(); + assertSettingDeprecationsAndWarnings(new Setting[] { IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING }); + } + + public void testConcurrentSegmentSearchWithRandomizedModeSettings() throws IOException { + + String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + ShardId shardId = new ShardId(indexService.index(), 0); + long nowInMillis = System.currentTimeMillis(); + String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10); + SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(randomBoolean()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + nowInMillis, + clusterAlias, + Strings.EMPTY_ARRAY + ); + + String[] modeSettings = { "all", "auto", "none", null }; + + // Randomize both index and cluster settings + String clusterMode = randomFrom(modeSettings); + String indexMode = randomFrom(modeSettings); + + // default to false in case mode setting is not set + boolean concurrentSearchEnabled = false; + boolean nullExecutor = true; + + boolean aggregationSupportsConcurrent = randomBoolean(); + + if (indexMode != null) { + concurrentSearchEnabled = !indexMode.equals("none") && aggregationSupportsConcurrent; + nullExecutor = indexMode.equals("none"); + } else if (clusterMode != null) { + concurrentSearchEnabled = !clusterMode.equals("none") && aggregationSupportsConcurrent; + nullExecutor = clusterMode.equals("none"); + } + + // Set the cluster setting for mode + if (clusterMode == null) { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey())) + .get(); + } else { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), clusterMode)) + .get(); + } + + // Set the index setting for mode + if (indexMode == null) { + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().putNull(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MODE.getKey())) + .get(); + } else { + client().admin() + .indices() + .prepareUpdateSettings(index) + .setSettings(Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MODE.getKey(), indexMode)) + .get(); + } + + try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { + assertEquals( + clusterMode, + client().admin() + .cluster() + .prepareState() + .get() + .getState() + .getMetadata() + .transientSettings() + .get(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey()) + ); + assertEquals( + indexMode, + client().admin() + .indices() + .prepareGetSettings(index) + .get() + .getSetting(index, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MODE.getKey()) + 
); + SearchContextAggregations mockAggregations = mock(SearchContextAggregations.class); + when(mockAggregations.factories()).thenReturn(mock(AggregatorFactories.class)); + when(mockAggregations.factories().allFactoriesSupportConcurrentSearch()).thenReturn(aggregationSupportsConcurrent); + + // set the aggregations for context + searchContext.aggregations(mockAggregations); + + searchContext.evaluateRequestShouldUseConcurrentSearch(); + // check concurrentSearchenabled based on mode and supportedAggregation is computed correctly + assertEquals(concurrentSearchEnabled, searchContext.shouldUseConcurrentSearch()); + + // Verify executor nullability based on mode + if (!nullExecutor) { + assertNotNull(searchContext.searcher().getExecutor()); + } else { + assertNull(searchContext.searcher().getExecutor()); + } + } + + // Cleanup + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.getKey())) + .get(); + } /** diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index e27223cea0778..9571bea53ef53 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -229,6 +229,7 @@ import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1501,12 +1502,8 @@ private RepositoryData getRepositoryData(Repository repository) { private StepListener createRepoAndIndex(String repoName, String index, int shards) { final StepListener createRepositoryListener = new StepListener<>(); - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType(FsRepository.TYPE) - .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) - .execute(createRepositoryListener); + Settings.Builder settings = Settings.builder().put("location", randomAlphaOfLength(10)); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, FsRepository.TYPE, settings, createRepositoryListener); final StepListener createIndexResponseStepListener = new StepListener<>(); @@ -2287,7 +2284,8 @@ public void onFailure(final Exception e) { responseCollectorService, new NoneCircuitBreakerService(), null, - new TaskResourceTrackingService(settings, clusterSettings, threadPool) + new TaskResourceTrackingService(settings, clusterSettings, threadPool), + Collections.emptyList() ); SearchPhaseController searchPhaseController = new SearchPhaseController( writableRegistry(), diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotShardPathsTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotShardPathsTests.java new file mode 100644 index 0000000000000..15eb70913eb88 --- /dev/null +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotShardPathsTests.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.snapshots; + +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class SnapshotShardPathsTests extends OpenSearchTestCase { + + public void testToXContent() throws IOException { + List paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + SnapshotShardPaths snapshotShardPaths = new SnapshotShardPaths( + paths, + indexId, + indexName, + numberOfShards, + shardPathType, + shardPathHashAlgorithm + ); + + BytesReference bytes = XContentHelper.toXContent(snapshotShardPaths, XContentType.JSON, false); + String expectedJson = + "{\"indexId\":\"index-id\",\"indexName\":\"index-name\",\"number_of_shards\":5,\"shard_path_type\":1,\"shard_path_hash_algorithm\":0,\"paths\":[\"/path/to/shard/1\",\"/path/to/shard/2\",\"/path/to/shard/3\"]}"; + assertEquals(expectedJson, bytes.utf8ToString()); + } + + public void testMissingPaths() { + List paths = Collections.emptyList(); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.FIXED; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("paths must not be empty")); + } + + public void testMissingIndexId() { + List paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = ""; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("indexId must not be empty")); + } + + public void testMissingIndexName() { + List paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = ""; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("indexName must not be empty")); + } + + public void testMissingNumberOfShards() { + List paths = 
Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 0; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("numberOfShards must be > 0")); + } + + public void testMissingShardPathType() { + List paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = null; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("shardPathType must not be null")); + } + + public void testMissingShardPathHashAlgorithm() { + List paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = null; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertEquals("shardPathHashAlgorithm must not be null", exception.getMessage()); + } + + public void testFromXContent() { + UnsupportedOperationException exception = expectThrows( + UnsupportedOperationException.class, + () -> SnapshotShardPaths.fromXContent(null) + ); + assertEquals("SnapshotShardPaths.fromXContent() is not supported", exception.getMessage()); + } +} diff --git a/test/framework/src/main/java/org/opensearch/node/MockNode.java b/test/framework/src/main/java/org/opensearch/node/MockNode.java index ecaee1ccc59b8..09df9b85320f0 100644 --- a/test/framework/src/main/java/org/opensearch/node/MockNode.java +++ b/test/framework/src/main/java/org/opensearch/node/MockNode.java @@ -57,6 +57,7 @@ import org.opensearch.script.ScriptService; import org.opensearch.search.MockSearchService; import org.opensearch.search.SearchService; +import org.opensearch.search.deciders.ConcurrentSearchDecider; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.query.QueryPhase; import org.opensearch.tasks.TaskResourceTrackingService; @@ -156,7 +157,8 @@ protected SearchService newSearchService( ResponseCollectorService responseCollectorService, CircuitBreakerService circuitBreakerService, Executor indexSearcherExecutor, - TaskResourceTrackingService taskResourceTrackingService + TaskResourceTrackingService taskResourceTrackingService, + Collection concurrentSearchDecidersList ) { if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) { return super.newSearchService( @@ -170,7 +172,8 @@ protected SearchService newSearchService( responseCollectorService, circuitBreakerService, indexSearcherExecutor, - 
taskResourceTrackingService + taskResourceTrackingService, + concurrentSearchDecidersList ); } return new MockSearchService( diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index 01ca3aed54e6f..6001ce369b228 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -142,7 +142,7 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex } assertIndexUUIDs(repository, repositoryData); assertSnapshotUUIDs(repository, repositoryData); - assertShardIndexGenerations(blobContainer, repositoryData); + assertShardIndexGenerations(repository, repositoryData); return null; } catch (AssertionError e) { return e; @@ -166,14 +166,12 @@ private static void assertIndexGenerations(BlobContainer repoRoot, long latestGe assertTrue(indexGenerations.length <= 2); } - private static void assertShardIndexGenerations(BlobContainer repoRoot, RepositoryData repositoryData) throws IOException { + private static void assertShardIndexGenerations(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException { final ShardGenerations shardGenerations = repositoryData.shardGenerations(); - final BlobContainer indicesContainer = repoRoot.children().get("indices"); for (IndexId index : shardGenerations.indices()) { final List gens = shardGenerations.getGens(index); if (gens.isEmpty() == false) { - final BlobContainer indexContainer = indicesContainer.children().get(index.getId()); - final Map shardContainers = indexContainer.children(); + final Map shardContainers = getShardContainers(index, repository, repositoryData); for (int i = 0; i < gens.size(); i++) { final String generation = gens.get(i); assertThat(generation, not(ShardGenerations.DELETED_SHARD_GEN)); @@ -190,6 +188,20 @@ private static void assertShardIndexGenerations(BlobContainer repoRoot, Reposito } } + private static Map getShardContainers( + IndexId indexId, + BlobStoreRepository repository, + RepositoryData repositoryData + ) { + final Map shardContainers = new HashMap<>(); + int shardCount = repositoryData.shardGenerations().getGens(indexId).size(); + for (int i = 0; i < shardCount; i++) { + final BlobContainer shardContainer = repository.shardContainer(indexId, i); + shardContainers.put(String.valueOf(i), shardContainer); + } + return shardContainers; + } + private static void assertIndexUUIDs(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException { final List expectedIndexUUIDs = repositoryData.getIndices() .values() diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java index 507a100c94e0d..e812589d64aa4 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java @@ -110,9 +110,7 @@ protected final String createRepository(final String name, final Settings settin final boolean verify = randomBoolean(); logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); - assertAcked( - 
client().admin().cluster().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings) - ); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), name, repositoryType(), verify, Settings.builder().put(settings)); internalCluster().getDataOrClusterManagerNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); diff --git a/test/framework/src/main/java/org/opensearch/search/MockSearchService.java b/test/framework/src/main/java/org/opensearch/search/MockSearchService.java index 6c9ace06c8219..28e202e783c4e 100644 --- a/test/framework/src/main/java/org/opensearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/opensearch/search/MockSearchService.java @@ -45,6 +45,7 @@ import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.threadpool.ThreadPool; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -111,7 +112,8 @@ public MockSearchService( null, circuitBreakerService, indexSearcherExecutor, - taskResourceTrackingService + taskResourceTrackingService, + Collections.emptyList() ); } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index c0265393ca7bb..16d439f706af3 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -104,7 +104,6 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -382,16 +381,6 @@ public void unblockNode(final String repository, final String node) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, node).repository(repository)).unblock(); } - protected void createRepository(String repoName, String type, Settings.Builder settings) { - logger.info("--> creating repository [{}] [{}]", repoName, type); - assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); - } - - protected void updateRepository(String repoName, String type, Settings.Builder settings) { - logger.info("--> updating repository [{}] [{}]", repoName, type); - assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); - } - protected void createRepository(String repoName, String type, Path location) { createRepository(repoName, type, Settings.builder().put("location", location)); } diff --git a/test/framework/src/main/java/org/opensearch/test/NotEqualMessageBuilder.java b/test/framework/src/main/java/org/opensearch/test/NotEqualMessageBuilder.java index a70e3f15a4bf5..9524b76f255a6 100644 --- a/test/framework/src/main/java/org/opensearch/test/NotEqualMessageBuilder.java +++ b/test/framework/src/main/java/org/opensearch/test/NotEqualMessageBuilder.java @@ -181,18 +181,9 @@ public void compare(String field, boolean hadKey, @Nullable Object actual, Objec field(field, "same [" + expected + "]"); return; } - field( - field, - "expected " - + expected.getClass().getSimpleName() - 
+ " [" - + expected - + "] but was " - + actual.getClass().getSimpleName() - + " [" - + actual - + "]" - ); + String expectedClass = expected == null ? "null object" : expected.getClass().getSimpleName(); + String actualClass = actual == null ? "null object" : actual.getClass().getSimpleName(); + field(field, "expected " + expectedClass + " [" + expected + "] but was " + actualClass + " [" + actual + "]"); } private void indent() { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index cb7854d326db1..46954135bc619 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; @@ -70,6 +71,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.ClusterAdminClient; @@ -99,6 +101,7 @@ import org.opensearch.cluster.service.applicationtemplates.TestSystemTemplatesRepositoryPlugin; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.network.NetworkModule; @@ -143,7 +146,9 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; +import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; @@ -159,6 +164,7 @@ import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.repositories.fs.ReloadableFsRepository; @@ -225,6 +231,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2582,6 +2589,85 @@ protected long getLatestSegmentInfoVersion(IndexShard shard) { } } + protected void createRepository(String repoName, String type, Settings.Builder settings, String timeout) { + logger.info("--> creating repository [{}] [{}]", repoName, type); + putRepository(clusterAdmin(), repoName, type, timeout, settings); + } + + protected void createRepository(String repoName, String type, Settings.Builder settings) { + logger.info("--> creating repository [{}] [{}]", repoName, type); + putRepository(clusterAdmin(), repoName, type, null, settings); + } + + protected void updateRepository(String repoName, String type, Settings.Builder settings) { + logger.info("--> updating repository [{}] [{}]", repoName, type); + putRepository(clusterAdmin(), repoName, type, null, settings); + } + + public static void putRepository(ClusterAdminClient adminClient, String repoName, String type, Settings.Builder settings) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, true, settings, null, false)); + } + + public static void putRepository( + ClusterAdminClient adminClient, + String repoName, + String type, + String timeout, + Settings.Builder settings + ) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, true, settings, timeout, false)); + } + + public static void putRepository( + ClusterAdminClient adminClient, + String repoName, + String type, + boolean verify, + Settings.Builder settings + ) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, verify, settings, null, false)); + } + + public static void putRepositoryWithNoSettingOverrides( + ClusterAdminClient adminClient, + String repoName, + String type, + boolean verify, + Settings.Builder settings + ) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, verify, settings, null, true)); + } + + public static void putRepository( + ClusterAdminClient adminClient, + String repoName, + String type, + Settings.Builder settings, + ActionListener listener + ) { + putRepositoryRequestBuilder(adminClient, repoName, type, true, settings, null, false).execute(listener); + } + + public static PutRepositoryRequestBuilder putRepositoryRequestBuilder( + ClusterAdminClient adminClient, + String repoName, + String type, + boolean verify, + Settings.Builder settings, + String timeout, + boolean finalSettings + ) { + PutRepositoryRequestBuilder builder = adminClient.preparePutRepository(repoName).setType(type).setVerify(verify); + if (timeout != null) { + builder.setTimeout(timeout); + } + if (finalSettings == false) { + settings.put(BlobStoreRepository.SHARD_PATH_TYPE.getKey(), randomFrom(PathType.values())); + } + builder.setSettings(settings); + return builder; + } + public static Settings remoteStoreClusterSettings(String name, Path path) { return remoteStoreClusterSettings(name, path, name, path); } @@ -2823,4 +2909,15 @@ private static Settings buildRemoteStoreNodeAttributes( return settings.build(); } + public static String resolvePath(IndexId indexId, String shardId) { + PathType pathType = PathType.fromCode(indexId.getShardPathType()); + RemoteStorePathStrategy.SnapshotShardPathInput shardPathInput = new RemoteStorePathStrategy.SnapshotShardPathInput.Builder() + .basePath(BlobPath.cleanPath()) + .indexUUID(indexId.getId()) + 
.shardId(shardId) + .build(); + RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm = pathType != PathType.FIXED ? FNV_1A_COMPOSITE_1 : null; + BlobPath blobPath = pathType.path(shardPathInput, pathHashAlgorithm); + return blobPath.buildAsString(); + } } diff --git a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java index 23316adf6a2d7..ea0331cbfb9a3 100644 --- a/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/ParameterizedOpenSearchIntegTestCase.java @@ -10,7 +10,10 @@ import org.opensearch.common.settings.Settings; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_ALL; +import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_MODE_AUTO; /** * Base class for running the tests with parameterization of the settings. @@ -35,7 +38,9 @@ abstract class ParameterizedOpenSearchIntegTestCase extends OpenSearchIntegTestC // This method shouldn't be called in setupSuiteScopeCluster(). Only call this method inside single test. public void indexRandomForConcurrentSearch(String... indices) throws InterruptedException { - if (CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings)) { + if (CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.get(settings) + || CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.get(settings).equals(CONCURRENT_SEGMENT_SEARCH_MODE_AUTO) + || CLUSTER_CONCURRENT_SEGMENT_SEARCH_MODE.get(settings).equals(CONCURRENT_SEGMENT_SEARCH_MODE_ALL)) { indexRandomForMultipleSlices(indices); } } diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java index 0eb4bb6935bac..156b1d7c620e6 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestShardBatchGatewayAllocator.java @@ -29,13 +29,20 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; public class TestShardBatchGatewayAllocator extends ShardsBatchGatewayAllocator { + CountDownLatch latch; + public TestShardBatchGatewayAllocator() { } + public TestShardBatchGatewayAllocator(CountDownLatch latch) { + this.latch = latch; + } + public TestShardBatchGatewayAllocator(long maxBatchSize) { super(maxBatchSize); } @@ -83,6 +90,13 @@ protected AsyncShardFetch.FetchResult(foundShards, shardsToIgnoreNodes); } + + @Override + protected void allocateUnassignedBatchOnTimeout(Set shardIds, RoutingAllocation allocation, boolean primary) { + for (int i = 0; i < shardIds.size(); i++) { + latch.countDown(); + } + } }; ReplicaShardBatchAllocator replicaBatchShardAllocator = new ReplicaShardBatchAllocator() { @@ -100,6 +114,13 @@ protected AsyncShardFetch.FetchResult shardIds, RoutingAllocation allocation, boolean primary) { + for (int i = 0; i < shardIds.size(); i++) { + latch.countDown(); + } + } }; @Override
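
A note on the concurrent-search decider cases above (Case4 through Case7): the sketch below restates in plain Java how a plugin-supplied decider appears to take part in the decision, based only on the behaviour these tests assert. It is an illustration, not the server implementation: the names SearchPlugin#getConcurrentSearchDecider, ConcurrentSearchDecider#canEvaluateForIndex, ConcurrentSearchDecider#getConcurrentSearchDecision and ConcurrentSearchDecision.DecisionStatus are taken from the diff, while the stand-in types, the indexSettings parameter type and the combining helper are assumptions made for readability.

// Hypothetical stand-ins for the org.opensearch.search.deciders types exercised by the tests above.
enum DecisionStatus { YES, NO, NO_OP }

final class Decision {
    final DecisionStatus status;
    final String reason;
    Decision(DecisionStatus status, String reason) {
        this.status = status;
        this.reason = reason;
    }
}

interface Decider {
    // The tests stub canEvaluateForIndex(any()); the parameter type here is an assumption.
    boolean canEvaluateForIndex(Object indexSettings);
    Decision getConcurrentSearchDecision();
}

final class ConcurrentSearchDecisionSketch {
    // Combines decider output the way Case4 through Case7 expect:
    //  - deciders that cannot evaluate the index are skipped (Case4),
    //  - a single NO disables concurrent search even when another decider says YES (Case5, Case6),
    //  - if every participating decider is NO_OP, fall back to the request-level check,
    //    e.g. whether all aggregation factories support concurrent search (Case7).
    // The YES-only path is not exercised by these tests; treating it as an opt-in is an assumption.
    static boolean shouldUseConcurrentSearch(
        java.util.Collection<Decider> deciders,
        Object indexSettings,
        boolean requestSupportsConcurrentSearch
    ) {
        boolean sawYes = false;
        for (Decider decider : deciders) {
            if (decider.canEvaluateForIndex(indexSettings) == false) {
                continue; // decider opted out of decision-making for this index
            }
            DecisionStatus status = decider.getConcurrentSearchDecision().status;
            if (status == DecisionStatus.NO) {
                return false; // NO always wins
            }
            if (status == DecisionStatus.YES) {
                sawYes = true;
            }
        }
        return sawYes || requestSupportsConcurrentSearch;
    }
}

The "all", "none" and "auto" rows added to the SearchServiceTests scenarios follow the same shape: "none" forces the non-concurrent path with a null executor, "all" forces the concurrent path, and "auto" keeps the executor available but lets the per-request evaluation above (deciders plus aggregation support) decide whether the concurrent path is actually used.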