diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 5587b8e5784c5..17bb5a7df9b21 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -41,3 +41,4 @@ BWC_VERSION:
   - "2.17.1"
   - "2.17.2"
   - "2.18.0"
+  - "2.19.0"
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8d67ed755fa31..f5dc69a9ec290 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -26,6 +26,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add _list/shards API as paginated alternate to _cat/shards ([#14641](https://github.com/opensearch-project/OpenSearch/pull/14641))
 - Latency and Memory allocation improvements to Multi Term Aggregation queries ([#14993](https://github.com/opensearch-project/OpenSearch/pull/14993))
 - Flat object field use IndexOrDocValuesQuery to optimize query ([#14383](https://github.com/opensearch-project/OpenSearch/issues/14383))
+- Add support for renaming aliases during snapshot restore ([#16292](https://github.com/opensearch-project/OpenSearch/pull/16292))
+- Add method to return dynamic SecureTransportParameters from SecureTransportSettingsProvider interface ([#16387](https://github.com/opensearch-project/OpenSearch/pull/16387))
+- URI path filtering support in cluster stats API ([#15938](https://github.com/opensearch-project/OpenSearch/pull/15938))
+- [Star Tree - Search] Add support for metric aggregations with/without term query ([#15289](https://github.com/opensearch-project/OpenSearch/pull/15289))
+- Add support for restoring from snapshot with search replicas ([#16111](https://github.com/opensearch-project/OpenSearch/pull/16111))
 
 ### Dependencies
 - Bump `com.azure:azure-identity` from 1.13.0 to 1.13.2 ([#15578](https://github.com/opensearch-project/OpenSearch/pull/15578))
@@ -33,7 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.apache.logging.log4j:log4j-core` from 2.23.1 to 2.24.1 ([#15858](https://github.com/opensearch-project/OpenSearch/pull/15858), [#16134](https://github.com/opensearch-project/OpenSearch/pull/16134))
 - Bump `peter-evans/create-pull-request` from 6 to 7 ([#15863](https://github.com/opensearch-project/OpenSearch/pull/15863))
 - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.9.1 to 11.19.1 ([#15862](https://github.com/opensearch-project/OpenSearch/pull/15862))
-- Bump `com.microsoft.azure:msal4j` from 1.17.0 to 1.17.1 ([#15945](https://github.com/opensearch-project/OpenSearch/pull/15945))
+- Bump `com.microsoft.azure:msal4j` from 1.17.0 to 1.17.2 ([#15945](https://github.com/opensearch-project/OpenSearch/pull/15945), [#16406](https://github.com/opensearch-project/OpenSearch/pull/16406))
 - Bump `ch.qos.logback:logback-core` from 1.5.6 to 1.5.10 ([#15946](https://github.com/opensearch-project/OpenSearch/pull/15946), [#16307](https://github.com/opensearch-project/OpenSearch/pull/16307))
 - Update protobuf from 3.25.4 to 3.25.5 ([#16011](https://github.com/opensearch-project/OpenSearch/pull/16011))
 - Bump `org.roaringbitmap:RoaringBitmap` from 1.2.1 to 1.3.0 ([#16040](https://github.com/opensearch-project/OpenSearch/pull/16040))
@@ -64,6 +69,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Code cleanup: Remove ApproximateIndexOrDocValuesQuery ([#16273](https://github.com/opensearch-project/OpenSearch/pull/16273))
 - Optimise clone operation for incremental full cluster snapshots ([#16296](https://github.com/opensearch-project/OpenSearch/pull/16296))
 - Update last seen cluster state in the commit phase ([#16215](https://github.com/opensearch-project/OpenSearch/pull/16215))
+- Make multiple settings dynamic for tuning on larger clusters ([#16347](https://github.com/opensearch-project/OpenSearch/pull/16347))
 
 ### Deprecated
 
@@ -81,12 +87,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Streaming bulk request hangs ([#16158](https://github.com/opensearch-project/OpenSearch/pull/16158))
 - Fix warnings from SLF4J on startup when repository-s3 is installed ([#16194](https://github.com/opensearch-project/OpenSearch/pull/16194))
 - Fix protobuf-java leak through client library dependencies ([#16254](https://github.com/opensearch-project/OpenSearch/pull/16254))
+- Fix get index settings API doesn't show `number_of_routing_shards` setting when it was explicitly set ([#16294](https://github.com/opensearch-project/OpenSearch/pull/16294))
 - Fix multi-search with template doesn't return status code ([#16265](https://github.com/opensearch-project/OpenSearch/pull/16265))
 - [Streaming Indexing] Fix intermittent 'The bulk request must be terminated by a newline [\n]' failures [#16337](https://github.com/opensearch-project/OpenSearch/pull/16337))
 - Fix wrong default value when setting `index.number_of_routing_shards` to null on index creation ([#16331](https://github.com/opensearch-project/OpenSearch/pull/16331))
-- [Workload Management] Make query groups persistent across process restarts [#16370](https://github.com/opensearch-project/OpenSearch/pull/16370)
-
+- [Workload Management] Make query groups persistent across process restarts ([#16370](https://github.com/opensearch-project/OpenSearch/pull/16370))
+- [Workload Management] Enhance rejection mechanism in workload management ([#16417](https://github.com/opensearch-project/OpenSearch/pull/16417))
 - Fix inefficient Stream API call chains ending with count() ([#15386](https://github.com/opensearch-project/OpenSearch/pull/15386))
+- Fix array hashCode calculation in ResyncReplicationRequest ([#16378](https://github.com/opensearch-project/OpenSearch/pull/16378))
+- Fix missing fields in task index mapping to ensure proper task result storage ([#16201](https://github.com/opensearch-project/OpenSearch/pull/16201))
+- Fix typo super->sb in method toString() of RemoteStoreNodeAttribute ([#15362](https://github.com/opensearch-project/OpenSearch/pull/15362))
+- [Workload Management] Fixing Create/Update QueryGroup TransportActions to execute from non-cluster manager nodes ([#16422](https://github.com/opensearch-project/OpenSearch/pull/16422))
+- Fix flaky test in `testApproximateRangeWithSizeOverDefault` by adjusting totalHits assertion logic ([#16434](https://github.com/opensearch-project/OpenSearch/pull/16434#pullrequestreview-2386999409))
+- Revert changes to upload remote state manifest using minimum codec version ([#16403](https://github.com/opensearch-project/OpenSearch/pull/16403))
 
 ### Security
 
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
index 9365f1c732229..439d0de39584d 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
@@ -77,9 +77,9 @@ import java.util.stream.Stream;
 
 public class DistroTestPlugin implements Plugin<Project> {
 
-    private static final String SYSTEM_JDK_VERSION = "23+37";
+    private static final String SYSTEM_JDK_VERSION = "23.0.1+11";
     private static final String SYSTEM_JDK_VENDOR = "adoptium";
-    private static final String GRADLE_JDK_VERSION = "23+37";
+    private static final String GRADLE_JDK_VERSION = "23.0.1+11";
     private static final String GRADLE_JDK_VENDOR = "adoptium";
 
     // all distributions used by distro tests. this is temporary until tests are per distribution
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 5740c124910b9..f9a8bee5783b1 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -2,7 +2,7 @@ opensearch = 3.0.0
 lucene = 9.12.0
 
 bundled_jdk_vendor = adoptium
-bundled_jdk = 23+37
+bundled_jdk = 23.0.1+11
 
 # optional dependencies
 spatial4j = 0.7
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 6b1f6898eec9b..4d685e3bc654a 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -113,6 +113,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_2_17_2 = new Version(2170299, org.apache.lucene.util.Version.LUCENE_9_11_1);
     public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0);
+    public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_0);
     public static final Version CURRENT = V_3_0_0;
 
     public static Version fromId(int id) {
diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java
index 977121346dcc3..90a9194d3cfd7 100644
--- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java
+++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java
@@ -142,7 +142,9 @@ public SSLServerChannelInitializer(String name) {
         protected void initChannel(Channel ch) throws Exception {
             super.initChannel(ch);
 
-            final boolean dualModeEnabled = NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings);
+            final boolean dualModeEnabled = secureTransportSettingsProvider.parameters(settings)
+                .map(SecureTransportSettingsProvider.SecureTransportParameters::dualModeEnabled)
+                .orElse(false);
             if (dualModeEnabled) {
                 logger.info("SSL Dual mode enabled, using port unification handler");
                 final ChannelHandler portUnificationHandler = new DualModeSslHandler(
@@ -258,7 +260,9 @@ protected class SSLClientChannelInitializer extends Netty4Transport.ClientChannelInitializer {
 
         public SSLClientChannelInitializer(DiscoveryNode node) {
             this.node = node;
-            final boolean dualModeEnabled = NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings);
+            final boolean dualModeEnabled = secureTransportSettingsProvider.parameters(settings)
+                .map(SecureTransportSettingsProvider.SecureTransportParameters::dualModeEnabled)
+                .orElse(false);
             hostnameVerificationEnabled = NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION.get(settings);
             hostnameVerificationResolveHostName = NetworkModule.TRANSPORT_SSL_ENFORCE_HOSTNAME_VERIFICATION_RESOLVE_HOST_NAME.get(settings);
 
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index d7eebe70ec303..3d6b3264d3f60 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -61,7 +61,7 @@ dependencies {
   // Start of transitive dependencies for azure-identity
   api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
   api "net.java.dev.jna:jna-platform:${versions.jna}"
-  api 'com.microsoft.azure:msal4j:1.17.1'
+  api 'com.microsoft.azure:msal4j:1.17.2'
   api 'com.nimbusds:oauth2-oidc-sdk:11.19.1'
   api 'com.nimbusds:nimbus-jose-jwt:9.41.1'
   api 'com.nimbusds:content-type:2.3'
diff --git a/plugins/repository-azure/licenses/msal4j-1.17.1.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.1.jar.sha1
deleted file mode 100644
index 46c14e819b630..0000000000000
--- a/plugins/repository-azure/licenses/msal4j-1.17.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4eb31a9919d9b103c548af7e37e6f9d9f6e46dbc
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1
new file mode 100644
index 0000000000000..b5219ee17e9fa
--- /dev/null
+++ b/plugins/repository-azure/licenses/msal4j-1.17.2.jar.sha1
@@ -0,0 +1 @@
+a6211e3d71d0388929babaa0ff0951b30d001852
\ No newline at end of file
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
index d92283391dd3b..1ce04faa7ccc1 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/CreateQueryGroupRequest.java
@@ -8,8 +8,8 @@
 
 package org.opensearch.plugin.wlm.action;
 
-import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
 import org.opensearch.cluster.metadata.QueryGroup;
 import org.opensearch.common.UUIDs;
 import org.opensearch.core.common.io.stream.StreamInput;
@@ -33,7 +33,7 @@
  *
  * @opensearch.experimental
  */
-public class CreateQueryGroupRequest extends ActionRequest {
+public class CreateQueryGroupRequest extends ClusterManagerNodeRequest<CreateQueryGroupRequest> {
     private final QueryGroup queryGroup;
 
     /**
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
index 190ff17261bb4..dff9c429d63b0 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportCreateQueryGroupAction.java
@@ -9,43 +9,82 @@
 package org.opensearch.plugin.wlm.action;
 
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
-import org.opensearch.tasks.Task;
+import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
 
+import java.io.IOException;
+
+import static org.opensearch.threadpool.ThreadPool.Names.SAME;
+
 /**
  * Transport action to create QueryGroup
  *
  * @opensearch.experimental
  */
-public class TransportCreateQueryGroupAction extends HandledTransportAction<CreateQueryGroupRequest, CreateQueryGroupResponse> {
+public class TransportCreateQueryGroupAction extends TransportClusterManagerNodeAction<CreateQueryGroupRequest, CreateQueryGroupResponse> {
 
     private final QueryGroupPersistenceService queryGroupPersistenceService;
 
     /**
      * Constructor for TransportCreateQueryGroupAction
      *
-     * @param actionName - action name
+     * @param threadPool - {@link ThreadPool} object
      * @param transportService - a {@link TransportService} object
      * @param actionFilters - a {@link ActionFilters} object
+     * @param indexNameExpressionResolver - {@link IndexNameExpressionResolver} object
      * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object
      */
     @Inject
     public TransportCreateQueryGroupAction(
-        String actionName,
+        ThreadPool threadPool,
         TransportService transportService,
         ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
         QueryGroupPersistenceService queryGroupPersistenceService
     ) {
-        super(CreateQueryGroupAction.NAME, transportService, actionFilters, CreateQueryGroupRequest::new);
+        super(
+            CreateQueryGroupAction.NAME,
+            transportService,
+            queryGroupPersistenceService.getClusterService(),
+            threadPool,
+            actionFilters,
+            CreateQueryGroupRequest::new,
+            indexNameExpressionResolver
+        );
         this.queryGroupPersistenceService = queryGroupPersistenceService;
     }
 
     @Override
-    protected void doExecute(Task task, CreateQueryGroupRequest request, ActionListener<CreateQueryGroupResponse> listener) {
+    protected void clusterManagerOperation(
+        CreateQueryGroupRequest request,
+        ClusterState clusterState,
+        ActionListener<CreateQueryGroupResponse> listener
+    ) {
         queryGroupPersistenceService.persistInClusterStateMetadata(request.getQueryGroup(), listener);
     }
+
+    @Override
+    protected String executor() {
+        return SAME;
+    }
+
+    @Override
+    protected CreateQueryGroupResponse read(StreamInput in) throws IOException {
+        return new CreateQueryGroupResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(CreateQueryGroupRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+
 }
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java
index a6aa2da8fdc08..09a0da7086b36 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportUpdateQueryGroupAction.java
@@ -9,43 +9,81 @@
 package org.opensearch.plugin.wlm.action;
 
 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.HandledTransportAction;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.common.inject.Inject;
 import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.plugin.wlm.service.QueryGroupPersistenceService;
-import org.opensearch.tasks.Task;
+import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.transport.TransportService;
 
+import java.io.IOException;
+
+import static org.opensearch.threadpool.ThreadPool.Names.SAME;
+
 /**
  * Transport action to update QueryGroup
  *
  * @opensearch.experimental
  */
-public class TransportUpdateQueryGroupAction extends HandledTransportAction<UpdateQueryGroupRequest, UpdateQueryGroupResponse> {
+public class TransportUpdateQueryGroupAction extends TransportClusterManagerNodeAction<UpdateQueryGroupRequest, UpdateQueryGroupResponse> {
 
     private final QueryGroupPersistenceService queryGroupPersistenceService;
 
     /**
      * Constructor for TransportUpdateQueryGroupAction
      *
-     * @param actionName - action name
+     * @param threadPool - {@link ThreadPool} object
      * @param transportService - a {@link TransportService} object
      * @param actionFilters - a {@link ActionFilters} object
+     * @param indexNameExpressionResolver - {@link IndexNameExpressionResolver} object
      * @param queryGroupPersistenceService - a {@link QueryGroupPersistenceService} object
      */
     @Inject
     public TransportUpdateQueryGroupAction(
-        String actionName,
+        ThreadPool threadPool,
         TransportService transportService,
         ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
         QueryGroupPersistenceService queryGroupPersistenceService
    ) {
-        super(UpdateQueryGroupAction.NAME, transportService, actionFilters, UpdateQueryGroupRequest::new);
+        super(
+            UpdateQueryGroupAction.NAME,
+            transportService,
+            queryGroupPersistenceService.getClusterService(),
+            threadPool,
+            actionFilters,
+            UpdateQueryGroupRequest::new,
+            indexNameExpressionResolver
+        );
         this.queryGroupPersistenceService = queryGroupPersistenceService;
     }
 
     @Override
-    protected void doExecute(Task task, UpdateQueryGroupRequest request, ActionListener<UpdateQueryGroupResponse> listener) {
+    protected void clusterManagerOperation(
+        UpdateQueryGroupRequest request,
+        ClusterState clusterState,
+        ActionListener<UpdateQueryGroupResponse> listener
+    ) {
         queryGroupPersistenceService.updateInClusterStateMetadata(request, listener);
     }
+
+    @Override
+    protected String executor() {
+        return SAME;
+    }
+
+    @Override
+    protected UpdateQueryGroupResponse read(StreamInput in) throws IOException {
+        return new UpdateQueryGroupResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(UpdateQueryGroupRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
 }
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java
index 048b599f095fd..18af58289be13 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/UpdateQueryGroupRequest.java
@@ -8,8 +8,8 @@
 
 package org.opensearch.plugin.wlm.action;
 
-import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestValidationException;
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
 import org.opensearch.cluster.metadata.QueryGroup;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -23,7 +23,7 @@
  *
  * @opensearch.experimental
  */
-public class UpdateQueryGroupRequest extends ActionRequest {
+public class UpdateQueryGroupRequest extends ClusterManagerNodeRequest<UpdateQueryGroupRequest> {
     private final String name;
     private final MutableQueryGroupFragment mutableQueryGroupFragment;
 
diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/ClusterStatsIT.java
b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/ClusterStatsIT.java new file mode 100644 index 0000000000000..004bbd56bc526 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/ClusterStatsIT.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.upgrades; + +import org.opensearch.Version; +import org.opensearch.client.Request; +import org.opensearch.client.Response; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class ClusterStatsIT extends AbstractRollingTestCase { + + private final List nodeStatsMetrics = List.of("os", "process", "jvm", "fs", "plugins", "ingest", "network_types", "discovery_types", "packaging_types"); + + private final List indicesStatsMetrics = List.of("shards", "docs", "store", "fielddata", "query_cache", "completion", "segments", "analysis", "mappings"); + + public void testClusterStats() throws IOException { + Response response = client().performRequest(new Request("GET", "/_cluster/stats")); + validateClusterStatsWithFilterResponse(response, nodeStatsMetrics, indicesStatsMetrics); + if (AbstractRollingTestCase.UPGRADE_FROM_VERSION.onOrAfter(Version.V_2_18_0) || ( + CLUSTER_TYPE == ClusterType.UPGRADED && Version.CURRENT.onOrAfter(Version.V_2_18_0))) { + response = client().performRequest(new Request("GET", "/_cluster/stats/os/nodes/_all")); + validateClusterStatsWithFilterResponse(response, List.of("os"), Collections.emptyList()); + response = client().performRequest(new Request("GET", "/_cluster/stats/indices/mappings/nodes/_all")); + validateClusterStatsWithFilterResponse(response, Collections.emptyList(), List.of("mappings")); + response = client().performRequest(new Request("GET", "/_cluster/stats/os,indices/mappings/nodes/_all")); + validateClusterStatsWithFilterResponse(response, List.of("os"), List.of("mappings")); + } + } + + private void validateClusterStatsWithFilterResponse(Response response, List requestedNodesStatsMetrics, List requestedIndicesStatsMetrics) throws IOException { + assertEquals(200, response.getStatusLine().getStatusCode()); + Map entity = entityAsMap(response); + if (requestedNodesStatsMetrics != null && !requestedNodesStatsMetrics.isEmpty()) { + assertTrue(entity.containsKey("nodes")); + Map nodesStats = (Map) entity.get("nodes"); + for (String metric : nodeStatsMetrics) { + if (requestedNodesStatsMetrics.contains(metric)) { + assertTrue(nodesStats.containsKey(metric)); + } else { + assertFalse(nodesStats.containsKey(metric)); + } + } + } + + if (requestedIndicesStatsMetrics != null && !requestedIndicesStatsMetrics.isEmpty()) { + assertTrue(entity.containsKey("indices")); + Map indicesStats = (Map) entity.get("indices"); + for (String metric : indicesStatsMetrics) { + if (requestedIndicesStatsMetrics.contains(metric)) { + assertTrue(indicesStats.containsKey(metric)); + } else { + assertFalse(indicesStats.containsKey(metric)); + } + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/40_number_of_routing_shards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/40_number_of_routing_shards.yml new file mode 100644 index 0000000000000..3fb392d6db134 --- /dev/null +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_settings/40_number_of_routing_shards.yml @@ -0,0 +1,41 @@ +--- +setup: + - do: + indices.create: + body: + settings: + index: + number_of_routing_shards: 4 + number_of_shards: 2 + number_of_replicas: 1 + index: test-index + + - do: + indices.create: + body: + settings: + index: + number_of_shards: 2 + number_of_replicas: 1 + index: test-index1 + +--- +Test retrieval of number_routing_shards settings: + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0.0" # TODO: change it to 2.18.0 after backport to 2.x branch + - do: + indices.get_settings: + flat_settings: true + index: test-index + # show `index.number_of_routing_shards` if it was explicitly set when creating + - match: + test-index.settings.index\.number_of_routing_shards: "4" + + - do: + indices.get_settings: + flat_settings: true + index: test-index1 + # do not show `index.number_of_routing_shards` if it was not explicitly set when creating + - match: + test-index1.settings.index\.number_of_routing_shards: null diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java index c7d75108883dd..4d8c80954cd0a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/TasksIT.java @@ -58,9 +58,14 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.io.Streams; import org.opensearch.core.action.ActionListener; import org.opensearch.core.tasks.TaskId; +import org.opensearch.core.tasks.resourcetracker.TaskResourceStats; +import org.opensearch.core.tasks.resourcetracker.TaskResourceUsage; +import org.opensearch.core.tasks.resourcetracker.TaskThreadUsage; import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.mapper.StrictDynamicMappingException; import org.opensearch.index.query.QueryBuilders; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.tasks.Task; @@ -73,11 +78,17 @@ import org.opensearch.transport.ReceiveTimeoutTransportException; import org.opensearch.transport.TransportService; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; @@ -103,6 +114,8 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.spy; /** * Integration tests for task management API @@ -112,6 +125,26 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, minNumDataNodes = 2) public class TasksIT extends AbstractTasksIT { + protected final TaskInfo taskInfo = new TaskInfo( + new TaskId("fake", 1), + "test_type", + "test_action", + "test_description", + null, + 0L, + 1L, + false, + 
false, + TaskId.EMPTY_TASK_ID, + Collections.emptyMap(), + new TaskResourceStats(new HashMap<>() { + { + put("dummy-type1", new TaskResourceUsage(10, 20)); + } + }, new TaskThreadUsage(30, 40)), + 2L + ); + public void testTaskCounts() { // Run only on data nodes ListTasksResponse response = client().admin() @@ -879,46 +912,77 @@ public void testNodeNotFoundButTaskFound() throws Exception { // Save a fake task that looks like it is from a node that isn't part of the cluster CyclicBarrier b = new CyclicBarrier(2); TaskResultsService resultsService = internalCluster().getInstance(TaskResultsService.class); - resultsService.storeResult( - new TaskResult( - new TaskInfo( - new TaskId("fake", 1), - "test", - "test", - "", - null, - 0, - 0, - false, - false, - TaskId.EMPTY_TASK_ID, - Collections.emptyMap(), - null - ), - new RuntimeException("test") - ), - new ActionListener() { + resultsService.storeResult(new TaskResult(taskInfo, new RuntimeException("test")), new ActionListener() { + @Override + public void onResponse(Void response) { + try { + b.await(); + } catch (InterruptedException | BrokenBarrierException e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }); + b.await(); + + // Now we can find it! + GetTaskResponse response = expectFinishedTask(new TaskId("fake:1")); + TaskResult taskResult = response.getTask(); + TaskInfo task = taskResult.getTask(); + + assertEquals("fake", task.getTaskId().getNodeId()); + assertEquals(1, task.getTaskId().getId()); + assertEquals("test_type", task.getType()); + assertEquals("test_action", task.getAction()); + assertEquals("test_description", task.getDescription()); + assertEquals(0L, task.getStartTime()); + assertEquals(1L, task.getRunningTimeNanos()); + assertFalse(task.isCancellable()); + assertFalse(task.isCancelled()); + assertEquals(TaskId.EMPTY_TASK_ID, task.getParentTaskId()); + assertEquals(1, task.getResourceStats().getResourceUsageInfo().size()); + assertEquals(30, task.getResourceStats().getThreadUsage().getThreadExecutions()); + assertEquals(40, task.getResourceStats().getThreadUsage().getActiveThreads()); + assertEquals(Long.valueOf(2L), task.getCancellationStartTime()); + + assertNotNull(taskResult.getError()); + assertNull(taskResult.getResponse()); + } + + public void testStoreTaskResultFailsDueToMissingIndexMappingFields() throws IOException { + // given + TaskResultsService resultsService = spy(internalCluster().getInstance(TaskResultsService.class)); + + InputStream mockInputStream = getClass().getResourceAsStream("/org/opensearch/tasks/missing-fields-task-index-mapping.json"); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Streams.copy(mockInputStream, out); + String mockJsonString = out.toString(StandardCharsets.UTF_8.name()); + + // when & then + doReturn(mockJsonString).when(resultsService).taskResultIndexMapping(); + + CompletionException thrown = assertThrows(CompletionException.class, () -> { + CompletableFuture future = new CompletableFuture<>(); + + resultsService.storeResult(new TaskResult(taskInfo, new RuntimeException("test")), new ActionListener() { @Override public void onResponse(Void response) { - try { - b.await(); - } catch (InterruptedException | BrokenBarrierException e) { - onFailure(e); - } + future.complete(null); } @Override public void onFailure(Exception e) { - throw new RuntimeException(e); + future.completeExceptionally(e); } - } - ); - b.await(); + }); - // Now we can find it! 
- GetTaskResponse response = expectFinishedTask(new TaskId("fake:1")); - assertEquals("test", response.getTask().getTask().getAction()); - assertNotNull(response.getTask().getError()); - assertNull(response.getTask().getResponse()); + future.join(); + }); + + assertTrue(thrown.getCause() instanceof StrictDynamicMappingException); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index f23cdbb50b37a..5f00ba35c7b69 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -37,6 +37,9 @@ import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.IndexMetric; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.Metric; +import org.opensearch.action.index.IndexRequest; import org.opensearch.client.Client; import org.opensearch.client.Requests; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -44,6 +47,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.monitor.os.OsStats; import org.opensearch.node.NodeRoleSettings; import org.opensearch.test.OpenSearchIntegTestCase; @@ -230,6 +234,7 @@ public void testIndicesShardStatsWithoutNodeLevelAggregations() { } public void testIndicesShardStatsWithNodeLevelAggregations() { + internalCluster().startNode(); ensureGreen(); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(true).get(); @@ -317,6 +322,53 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte assertEquals(msg, OsStats.calculatePercentage(free, total), response.nodesStats.getOs().getMem().getFreePercent()); } + public void testValuesSmokeScreenWithNodeStatsAndIndicesStatsMetricsFilter() throws IOException, ExecutionException, + InterruptedException { + internalCluster().startNodes(randomIntBetween(1, 3)); + index("test1", "type", "1", "f", "f"); + + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .computeAllMetrics(false) + .requestMetrics(Set.of(Metric.values())) + .indexMetrics(Set.of(IndexMetric.values())) + .get(); + String msg = response.toString(); + assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000 + assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L)); + + assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), Matchers.greaterThan(0L)); + assertThat(msg, response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0)); + + assertThat(msg, response.nodesStats.getVersions().size(), Matchers.greaterThan(0)); + assertThat(msg, response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true)); + assertThat(msg, response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0)); + + assertThat(msg, 
response.nodesStats.getProcess().count, Matchers.greaterThan(0)); + // 0 happens when not supported on platform + assertThat(msg, response.nodesStats.getProcess().getAvgOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(0L)); + // these can be -1 if not supported on platform + assertThat(msg, response.nodesStats.getProcess().getMinOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L)); + assertThat(msg, response.nodesStats.getProcess().getMaxOpenFileDescriptors(), Matchers.greaterThanOrEqualTo(-1L)); + + NodesStatsResponse nodesStatsResponse = client().admin().cluster().prepareNodesStats().addMetric(OS.metricName()).get(); + long total = 0; + long free = 0; + long used = 0; + for (NodeStats nodeStats : nodesStatsResponse.getNodes()) { + total += nodeStats.getOs().getMem().getTotal().getBytes(); + free += nodeStats.getOs().getMem().getFree().getBytes(); + used += nodeStats.getOs().getMem().getUsed().getBytes(); + } + assertEquals(msg, free, response.nodesStats.getOs().getMem().getFree().getBytes()); + assertEquals(msg, total, response.nodesStats.getOs().getMem().getTotal().getBytes()); + assertEquals(msg, used, response.nodesStats.getOs().getMem().getUsed().getBytes()); + assertEquals(msg, OsStats.calculatePercentage(used, total), response.nodesStats.getOs().getMem().getUsedPercent()); + assertEquals(msg, OsStats.calculatePercentage(free, total), response.nodesStats.getOs().getMem().getFreePercent()); + } + public void testAllocatedProcessors() throws Exception { // start one node with 7 processors. internalCluster().startNode(Settings.builder().put(OpenSearchExecutors.NODE_PROCESSORS_SETTING.getKey(), 7).build()); @@ -384,6 +436,43 @@ public void testFieldTypes() { } } + public void testFieldTypesWithMappingsFilter() { + internalCluster().startNode(); + ensureGreen(); + ClusterStatsResponse response = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .computeAllMetrics(randomBoolean()) + .requestMetrics(Set.of(Metric.INDICES)) + .indexMetrics(Set.of(IndexMetric.MAPPINGS)) + .get(); + assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); + assertTrue(response.getIndicesStats().getMappings().getFieldTypeStats().isEmpty()); + + client().admin().indices().prepareCreate("test1").setMapping("{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}").get(); + client().admin() + .indices() + .prepareCreate("test2") + .setMapping( + "{\"properties\":{\"foo\":{\"type\": \"keyword\"},\"bar\":{\"properties\":{\"baz\":{\"type\":\"keyword\"}," + + "\"eggplant\":{\"type\":\"integer\"}}}}}" + ) + .get(); + response = client().admin().cluster().prepareClusterStats().useAggregatedNodeLevelResponses(randomBoolean()).get(); + assertThat(response.getIndicesStats().getMappings().getFieldTypeStats().size(), equalTo(3)); + Set stats = response.getIndicesStats().getMappings().getFieldTypeStats(); + for (IndexFeatureStats stat : stats) { + if (stat.getName().equals("integer")) { + assertThat(stat.getCount(), greaterThanOrEqualTo(1)); + } else if (stat.getName().equals("keyword")) { + assertThat(stat.getCount(), greaterThanOrEqualTo(3)); + } else if (stat.getName().equals("object")) { + assertThat(stat.getCount(), greaterThanOrEqualTo(1)); + } + } + } + public void testNodeRolesWithMasterLegacySettings() throws ExecutionException, InterruptedException { int total = 1; Settings legacyMasterSettings = Settings.builder() @@ -505,6 +594,293 @@ public void testNodeRolesWithDataNodeLegacySettings() throws ExecutionException, 
assertEquals(expectedNodesRoles, Set.of(getNodeRoles(client, 0), getNodeRoles(client, 1))); } + public void testClusterStatsWithNodeMetricsFilter() { + internalCluster().startNode(); + ensureGreen(); + + client().admin().indices().prepareCreate("test1").setMapping("{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}").get(); + + ClusterStatsRequestBuilder clusterStatsRequestBuilder = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()); + assertTrue(clusterStatsRequestBuilder.request().computeAllMetrics()); + + ClusterStatsResponse response = clusterStatsRequestBuilder.get(); + assertNotNull(response); + assertNotNull(response.getNodesStats()); + assertNotNull(response.getIndicesStats()); + + ClusterStatsResponse statsResponseWithAllNodeStatsMetrics = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .requestMetrics(ClusterStatsNodes.NODE_STATS_METRICS) + .computeAllMetrics(false) + .get(); + assertNotNull(statsResponseWithAllNodeStatsMetrics); + assertNotNull(statsResponseWithAllNodeStatsMetrics.getNodesStats()); + assertNull(statsResponseWithAllNodeStatsMetrics.getIndicesStats()); + validateNodeStatsOutput(ClusterStatsNodes.NODE_STATS_METRICS, statsResponseWithAllNodeStatsMetrics); + assertEquals( + response.getNodesStats().getCounts().getTotal(), + statsResponseWithAllNodeStatsMetrics.getNodesStats().getCounts().getTotal() + ); + assertEquals( + response.getNodesStats().getCounts().getRoles(), + statsResponseWithAllNodeStatsMetrics.getNodesStats().getCounts().getRoles() + ); + assertEquals(response.getNodesStats().getVersions(), statsResponseWithAllNodeStatsMetrics.getNodesStats().getVersions()); + assertEquals(response.getNodesStats().getPlugins(), statsResponseWithAllNodeStatsMetrics.getNodesStats().getPlugins()); + } + + public void testClusterStatsWithIndicesOnlyMetricsFilter() { + internalCluster().startNode(); + ensureGreen(); + + client().admin().indices().prepareCreate("test1").setMapping("{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}").get(); + + ClusterStatsRequestBuilder clusterStatsRequestBuilder = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()); + assertTrue(clusterStatsRequestBuilder.request().computeAllMetrics()); + + ClusterStatsResponse response = clusterStatsRequestBuilder.get(); + assertNotNull(response); + assertNotNull(response.getNodesStats()); + assertNotNull(response.getIndicesStats()); + + ClusterStatsResponse statsResponseWithIndicesRequestMetrics = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .requestMetrics(Set.of(Metric.INDICES)) + .indexMetrics(Set.of(IndexMetric.values())) + .computeAllMetrics(false) + .get(); + assertNotNull(statsResponseWithIndicesRequestMetrics); + assertNull(statsResponseWithIndicesRequestMetrics.getNodesStats()); + assertNotNull(statsResponseWithIndicesRequestMetrics.getIndicesStats()); + validateIndicesStatsOutput(Set.of(IndexMetric.values()), statsResponseWithIndicesRequestMetrics); + } + + public void testClusterStatsWithSelectiveNodeMetricAndIndexMetricsFilter() { + internalCluster().startNode(); + ensureGreen(); + + client().admin().indices().prepareCreate("test1").setMapping("{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}").get(); + IndexRequest indexRequest = new IndexRequest("test1").id("doc_id").source(Map.of("test_type", "metrics_filter")); + client().index(indexRequest); + + 
ClusterStatsRequestBuilder clusterStatsRequestBuilder = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()); + assertTrue(clusterStatsRequestBuilder.request().computeAllMetrics()); + + ClusterStatsResponse response = clusterStatsRequestBuilder.get(); + assertNotNull(response); + assertNotNull(response.getNodesStats()); + assertNotNull(response.getIndicesStats()); + + ClusterStatsResponse statsResponseWithAllIndicesMetrics = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .requestMetrics(Set.of(Metric.OS, Metric.FS, Metric.INDICES)) + .indexMetrics(Set.of(IndexMetric.FIELDDATA, IndexMetric.SHARDS, IndexMetric.SEGMENTS, IndexMetric.DOCS, IndexMetric.STORE)) + .computeAllMetrics(false) + .get(); + assertNotNull(statsResponseWithAllIndicesMetrics); + assertNotNull(statsResponseWithAllIndicesMetrics.getNodesStats()); + assertNotNull(statsResponseWithAllIndicesMetrics.getIndicesStats()); + validateNodeStatsOutput(Set.of(Metric.FS, Metric.OS), statsResponseWithAllIndicesMetrics); + validateIndicesStatsOutput( + Set.of(IndexMetric.FIELDDATA, IndexMetric.SHARDS, IndexMetric.SEGMENTS, IndexMetric.DOCS, IndexMetric.STORE), + statsResponseWithAllIndicesMetrics + ); + assertEquals(response.getIndicesStats().getFieldData(), statsResponseWithAllIndicesMetrics.getIndicesStats().getFieldData()); + assertEquals(response.getIndicesStats().getIndexCount(), statsResponseWithAllIndicesMetrics.getIndicesStats().getIndexCount()); + assertEquals( + response.getIndicesStats().getShards().getTotal(), + statsResponseWithAllIndicesMetrics.getIndicesStats().getShards().getTotal() + ); + assertEquals( + response.getIndicesStats().getShards().getPrimaries(), + statsResponseWithAllIndicesMetrics.getIndicesStats().getShards().getPrimaries() + ); + } + + public void testClusterStatsWithMappingsAndAnalysisStatsIndexMetricsFilter() { + internalCluster().startNode(); + ensureGreen(); + + client().admin().indices().prepareCreate("test1").setMapping("{\"properties\":{\"foo\":{\"type\": \"keyword\"}}}").get(); + IndexRequest indexRequest = new IndexRequest("test1").id("doc_id").source(Map.of("test_type", "metrics_filter")); + client().index(indexRequest); + + ClusterStatsRequestBuilder clusterStatsRequestBuilder = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()); + assertTrue(clusterStatsRequestBuilder.request().computeAllMetrics()); + + ClusterStatsResponse response = clusterStatsRequestBuilder.get(); + assertNotNull(response); + assertNotNull(response.getNodesStats()); + assertNotNull(response.getIndicesStats()); + + ClusterStatsResponse statsResponseWithSpecificIndicesMetrics = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .requestMetrics(Set.of(Metric.INDICES)) + .indexMetrics(Set.of(IndexMetric.MAPPINGS, IndexMetric.ANALYSIS)) + .computeAllMetrics(false) + .get(); + assertNotNull(statsResponseWithSpecificIndicesMetrics); + assertNull(statsResponseWithSpecificIndicesMetrics.getNodesStats()); + assertNotNull(statsResponseWithSpecificIndicesMetrics.getIndicesStats()); + validateIndicesStatsOutput(Set.of(IndexMetric.MAPPINGS, IndexMetric.ANALYSIS), statsResponseWithSpecificIndicesMetrics); + assertEquals(response.getIndicesStats().getIndexCount(), statsResponseWithSpecificIndicesMetrics.getIndicesStats().getIndexCount()); + assertEquals(response.getIndicesStats().getMappings(), 
statsResponseWithSpecificIndicesMetrics.getIndicesStats().getMappings()); + assertEquals(response.getIndicesStats().getAnalysis(), statsResponseWithSpecificIndicesMetrics.getIndicesStats().getAnalysis()); + } + + public void testClusterStatsWithIndexMetricWithDocsFilter() throws IOException { + internalCluster().startNode(); + createIndex("test1"); + + client().prepareIndex("test1").setId(Integer.toString(1)).setSource("field1", "value1").execute().actionGet(); + client().prepareIndex("test1").setId(Integer.toString(2)).setSource("field2", "value2").execute().actionGet(); + refreshAndWaitForReplication(); + + ClusterStatsResponse statsResponseWithAllIndicesMetrics = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .requestMetrics(Set.of(Metric.INDICES)) + .indexMetrics(Set.of(IndexMetric.DOCS)) + .computeAllMetrics(false) + .get(); + assertNotNull(statsResponseWithAllIndicesMetrics); + assertNull(statsResponseWithAllIndicesMetrics.getNodesStats()); + assertNotNull(statsResponseWithAllIndicesMetrics.getIndicesStats()); + validateIndicesStatsOutput(Set.of(IndexMetric.DOCS), statsResponseWithAllIndicesMetrics); + assertEquals(2, statsResponseWithAllIndicesMetrics.getIndicesStats().getDocs().getCount()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getDocs().getDeleted()); + assertTrue(statsResponseWithAllIndicesMetrics.getIndicesStats().getDocs().getAverageSizeInBytes() > 0); + } + + public void testClusterStatsWithSelectiveMetricsFilterAndNoIndex() { + internalCluster().startNode(); + ensureGreen(); + ClusterStatsResponse statsResponseWithAllIndicesMetrics = client().admin() + .cluster() + .prepareClusterStats() + .useAggregatedNodeLevelResponses(randomBoolean()) + .requestMetrics(Set.of(Metric.OS, Metric.FS, Metric.INDICES)) + .indexMetrics(Set.of(IndexMetric.FIELDDATA, IndexMetric.SHARDS, IndexMetric.SEGMENTS, IndexMetric.DOCS, IndexMetric.STORE)) + .computeAllMetrics(false) + .get(); + assertNotNull(statsResponseWithAllIndicesMetrics); + assertNotNull(statsResponseWithAllIndicesMetrics.getNodesStats()); + assertNotNull(statsResponseWithAllIndicesMetrics.getIndicesStats()); + validateNodeStatsOutput(Set.of(Metric.FS, Metric.OS), statsResponseWithAllIndicesMetrics); + validateIndicesStatsOutput( + Set.of(IndexMetric.FIELDDATA, IndexMetric.SHARDS, IndexMetric.SEGMENTS, IndexMetric.DOCS, IndexMetric.STORE), + statsResponseWithAllIndicesMetrics + ); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getShards().getIndices()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getShards().getTotal()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getShards().getPrimaries()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getDocs().getCount()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getDocs().getDeleted()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getDocs().getTotalSizeInBytes()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getStore().getSizeInBytes()); + assertEquals(new ByteSizeValue(0), statsResponseWithAllIndicesMetrics.getIndicesStats().getStore().getReservedSize()); + assertEquals(new ByteSizeValue(0), statsResponseWithAllIndicesMetrics.getIndicesStats().getFieldData().getMemorySize()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getFieldData().getEvictions()); + 
assertNull(statsResponseWithAllIndicesMetrics.getIndicesStats().getFieldData().getFields()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getSegments().getCount()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getSegments().getIndexWriterMemoryInBytes()); + assertEquals(0, statsResponseWithAllIndicesMetrics.getIndicesStats().getSegments().getVersionMapMemoryInBytes()); + } + + private void validateNodeStatsOutput(Set expectedMetrics, ClusterStatsResponse clusterStatsResponse) { + // Ingest, network types, discovery types and packaging types stats are not included here as they don't have a get method exposed. + Set NodeMetrics = Set.of(Metric.OS, Metric.JVM, Metric.FS, Metric.PROCESS, Metric.PLUGINS); + for (Metric metric : NodeMetrics) { + Object object = null; + switch (metric) { + case OS: + object = clusterStatsResponse.getNodesStats().getOs(); + break; + case JVM: + object = clusterStatsResponse.getNodesStats().getJvm(); + break; + case FS: + object = clusterStatsResponse.getNodesStats().getFs(); + break; + case PROCESS: + object = clusterStatsResponse.getNodesStats().getProcess(); + break; + case PLUGINS: + object = clusterStatsResponse.getNodesStats().getPlugins(); + break; + } + if (expectedMetrics.contains(metric)) { + assertNotNull(object); + } else { + assertNull(object); + } + } + } + + private void validateIndicesStatsOutput( + Set expectedMetrics, + ClusterStatsResponse clusterStatsResponse + ) { + for (IndexMetric indexMetric : IndexMetric.values()) { + Object object = null; + switch (indexMetric) { + case SHARDS: + object = clusterStatsResponse.getIndicesStats().getShards(); + break; + case DOCS: + object = clusterStatsResponse.getIndicesStats().getDocs(); + break; + case STORE: + object = clusterStatsResponse.getIndicesStats().getStore(); + break; + case FIELDDATA: + object = clusterStatsResponse.getIndicesStats().getFieldData(); + break; + case QUERY_CACHE: + object = clusterStatsResponse.getIndicesStats().getQueryCache(); + break; + case COMPLETION: + object = clusterStatsResponse.getIndicesStats().getCompletion(); + break; + case SEGMENTS: + object = clusterStatsResponse.getIndicesStats().getSegments(); + break; + case ANALYSIS: + object = clusterStatsResponse.getIndicesStats().getAnalysis(); + break; + case MAPPINGS: + object = clusterStatsResponse.getIndicesStats().getMappings(); + break; + } + if (expectedMetrics.contains(indexMetric)) { + assertNotNull(object); + } else { + assertNull(object); + } + } + } + private Map getExpectedCounts( int dataRoleCount, int masterRoleCount, diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java new file mode 100644 index 0000000000000..352332b962c92 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -0,0 +1,226 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.snapshots.SnapshotRestoreException; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SearchReplicaRestoreIT extends AbstractSnapshotIntegTestCase { + + private static final String INDEX_NAME = "test-idx-1"; + private static final String RESTORED_INDEX_NAME = INDEX_NAME + "-restored"; + private static final String REPOSITORY_NAME = "test-repo"; + private static final String SNAPSHOT_NAME = "test-snapshot"; + private static final String FS_REPOSITORY_TYPE = "fs"; + private static final int DOC_COUNT = 10; + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); + } + + public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnDocRepWithSearchReplica() throws Exception { + bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT); + createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> restoreSnapshot( + REPOSITORY_NAME, + SNAPSHOT_NAME, + INDEX_NAME, + RESTORED_INDEX_NAME, + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build() + ) + ); + assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.DOCUMENT, ReplicationType.DOCUMENT))); + } + + public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnSegRepWithSearchReplica() throws Exception { + bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT); + createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); + + restoreSnapshot( + REPOSITORY_NAME, + SNAPSHOT_NAME, + INDEX_NAME, + RESTORED_INDEX_NAME, + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build() + ); + ensureYellowAndNoInitializingShards(RESTORED_INDEX_NAME); + internalCluster().startDataOnlyNode(); + ensureGreen(RESTORED_INDEX_NAME); + assertEquals(1, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); + + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnDocRepWithSearchReplica() throws Exception { + bootstrapIndexWithOutSearchReplicas(ReplicationType.SEGMENT); + createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); + + SnapshotRestoreException exception = 
expectThrows( + SnapshotRestoreException.class, + () -> restoreSnapshot( + REPOSITORY_NAME, + SNAPSHOT_NAME, + INDEX_NAME, + RESTORED_INDEX_NAME, + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build() + ) + ); + assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.SEGMENT, ReplicationType.DOCUMENT))); + } + + public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnSegRepWithSearchReplica() throws Exception { + bootstrapIndexWithOutSearchReplicas(ReplicationType.SEGMENT); + createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); + + restoreSnapshot( + REPOSITORY_NAME, + SNAPSHOT_NAME, + INDEX_NAME, + RESTORED_INDEX_NAME, + Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1).build() + ); + ensureYellowAndNoInitializingShards(RESTORED_INDEX_NAME); + internalCluster().startDataOnlyNode(); + ensureGreen(RESTORED_INDEX_NAME); + assertEquals(1, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); + + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRep() throws Exception { + bootstrapIndexWithSearchReplicas(); + createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> restoreSnapshot( + REPOSITORY_NAME, + SNAPSHOT_NAME, + INDEX_NAME, + RESTORED_INDEX_NAME, + Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build() + ) + ); + assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.SEGMENT, ReplicationType.DOCUMENT))); + } + + public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRepWithNoSearchReplica() throws Exception { + bootstrapIndexWithSearchReplicas(); + createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); + + restoreSnapshot( + REPOSITORY_NAME, + SNAPSHOT_NAME, + INDEX_NAME, + RESTORED_INDEX_NAME, + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0) + .build() + ); + ensureGreen(RESTORED_INDEX_NAME); + assertEquals(0, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); + + SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); + assertHitCount(resp, DOC_COUNT); + } + + private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType) throws InterruptedException { + startCluster(2); + + Settings settings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, replicationType) + .build(); + + createIndex(INDEX_NAME, settings); + indexRandomDocs(INDEX_NAME, DOC_COUNT); + refresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + } + + private void bootstrapIndexWithSearchReplicas() throws InterruptedException { + startCluster(3); + + Settings settings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + 
.put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + + createIndex(INDEX_NAME, settings); + ensureGreen(INDEX_NAME); + for (int i = 0; i < DOC_COUNT; i++) { + client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); + } + flushAndRefresh(INDEX_NAME); + } + + private void startCluster(int numOfNodes) { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(numOfNodes); + } + + private void createRepoAndSnapshot(String repositoryName, String repositoryType, String snapshotName, String indexName) { + createRepository(repositoryName, repositoryType, randomRepoPath().toAbsolutePath()); + createSnapshot(repositoryName, snapshotName, List.of(indexName)); + assertAcked(client().admin().indices().prepareDelete(INDEX_NAME)); + assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + } + + private String getSnapshotExceptionMessage(ReplicationType snapshotReplicationType, ReplicationType restoreReplicationType) { + return "snapshot was created with [index.replication.type] as [" + + snapshotReplicationType + + "]. " + + "To restore with [index.replication.type] as [" + + restoreReplicationType + + "], " + + "[index.number_of_search_only_replicas] must be set to [0]"; + } + + private int getNumberOfSearchReplicas(String index) { + Metadata metadata = client().admin().cluster().prepareState().get().getState().metadata(); + return Integer.valueOf(metadata.index(index).getSettings().get(SETTING_NUMBER_OF_SEARCH_REPLICAS)); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java index 6bd91df1de66f..fa836e2cc5784 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -126,6 +126,8 @@ public void testFailoverWithSearchReplica_WithoutWriterReplicas() throws IOExcep .put(indexSettings()) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numWriterReplicas) .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numSearchReplicas) + .put("index.refresh_interval", "40ms") // set lower interval so replica attempts replication cycles after primary is + // removed. 
.build() ); ensureYellow(TEST_INDEX); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java index d532abaa2b0ad..ecb97e79b348e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java @@ -927,7 +927,7 @@ public void testContinuousIndexing() throws Exception { int numDocs = randomIntBetween(200, 300); totalDocs += numDocs; try (BackgroundIndexer indexer = new BackgroundIndexer(index, MapperService.SINGLE_MAPPING_NAME, client(), numDocs)) { - int numberOfSnapshots = 5; + int numberOfSnapshots = 2; for (int i = 0; i < numberOfSnapshots; i++) { logger.info("--> waiting for {} docs to be indexed ...", numDocs); long finalTotalDocs1 = totalDocs; @@ -976,4 +976,112 @@ public void testContinuousIndexing() throws Exception { }); } } + + public void testHashedPrefixTranslogMetadataCombination() throws Exception { + Settings settings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(RemoteStoreEnums.PathType.values())) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()) + .build(); + + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNode(settings); + String index = "test-index"; + String snapshotRepo = "test-restore-snapshot-repo"; + String baseSnapshotName = "snapshot_"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + + createIndex(index, indexSettings); + ensureGreen(index); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(index) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(randomIntBetween(1, 5))); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.timeValueSeconds(randomIntBetween(1, 5))); + + long totalDocs = 0; + Map snapshots = new HashMap<>(); + int numDocs = randomIntBetween(200, 300); + totalDocs += numDocs; + try (BackgroundIndexer indexer = new BackgroundIndexer(index, MapperService.SINGLE_MAPPING_NAME, client(), numDocs)) { + int numberOfSnapshots = 2; + for (int i = 0; i < numberOfSnapshots; i++) { + logger.info("--> waiting for {} docs to be indexed ...", numDocs); + long finalTotalDocs1 = totalDocs; + assertBusy(() -> assertEquals(finalTotalDocs1, indexer.totalIndexedDocs()), 120, TimeUnit.SECONDS); + logger.info("--> {} total docs indexed", totalDocs); + String snapshotName = baseSnapshotName + i; + createSnapshot(snapshotRepo, snapshotName, new ArrayList<>()); + snapshots.put(snapshotName, totalDocs); + if (i < numberOfSnapshots - 1) { + numDocs = randomIntBetween(200, 300); + indexer.continueIndexing(numDocs); + totalDocs += numDocs; + } + } + } + + logger.info("Snapshots Status: " + snapshots); + + for (String snapshot : snapshots.keySet()) { + logger.info("Restoring snapshot: {}", 
snapshot); + + if (randomBoolean()) { + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(index)).get()); + } else { + assertAcked(client().admin().indices().prepareClose(index)); + } + + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), randomFrom(RemoteStoreEnums.PathType.values())) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), randomBoolean()) + ) + .get() + .isAcknowledged() + ); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshot) + .setWaitForCompletion(true) + .setIndices() + .get(); + + assertEquals(RestStatus.OK, restoreSnapshotResponse1.status()); + + // Verify restored index's stats + ensureGreen(TimeValue.timeValueSeconds(60), index); + long finalTotalDocs = totalDocs; + assertBusy(() -> { + Long hits = client().prepareSearch(index) + .setQuery(matchAllQuery()) + .setSize((int) finalTotalDocs) + .storedFields() + .execute() + .actionGet() + .getHits() + .getTotalHits().value; + + assertEquals(snapshots.get(snapshot), hits); + }); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java index 78497cac41d46..ab5b22c69b517 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java @@ -529,14 +529,13 @@ public void testDeleteWhileV2CreateOngoing() throws Exception { awaitNumberOfSnapshotsInProgress(1); ActionFuture a = startDeleteSnapshot(repoName, "snapshot-v1"); + expectThrows(ConcurrentSnapshotExecutionException.class, a::actionGet); unblockNode(repoName, clusterManagerName); CreateSnapshotResponse csr = snapshotFuture.actionGet(); assertTrue(csr.getSnapshotInfo().getPinnedTimestamp() != 0); - assertTrue(a.actionGet().isAcknowledged()); List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); - assertEquals(1, snapInfo.size()); - assertThat(snapInfo, contains(csr.getSnapshotInfo())); + assertEquals(2, snapInfo.size()); } @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/16205") diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 8b6869aa1d81a..123277a3780a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -619,9 +619,9 @@ public void testSnapshotStatusApiFailureForTooManyShardsAcrossSnapshots() throws ); assertEquals(exception.status(), RestStatus.TOO_MANY_REQUESTS); assertTrue( - exception.getMessage().endsWith(" is more than the maximum allowed value of shard count [2] for snapshot status request") + exception.getMessage().contains(" is more than the maximum allowed value of shard count [2] for snapshot status request") ); - }, 1, TimeUnit.MINUTES); + }); // across multiple snapshots assertBusy(() -> { @@ -636,13 +636,13 @@ public void testSnapshotStatusApiFailureForTooManyShardsAcrossSnapshots() throws ); assertEquals(exception.status(), RestStatus.TOO_MANY_REQUESTS); 
assertTrue( - exception.getMessage().endsWith(" is more than the maximum allowed value of shard count [2] for snapshot status request") + exception.getMessage().contains(" is more than the maximum allowed value of shard count [2] for snapshot status request") ); - }, 1, TimeUnit.MINUTES); + }); logger.info("Reset MAX_SHARDS_ALLOWED_IN_STATUS_API to default value"); updateSettingsRequest.persistentSettings(Settings.builder().putNull(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey())); - assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); } public void testSnapshotStatusForIndexFilter() throws Exception { @@ -666,6 +666,7 @@ public void testSnapshotStatusForIndexFilter() throws Exception { String snapshot = "test-snap-1"; createSnapshot(repositoryName, snapshot, List.of(index1, index2, index3)); + // for a completed snapshot assertBusy(() -> { SnapshotStatus snapshotsStatus = client().admin() .cluster() @@ -682,6 +683,96 @@ public void testSnapshotStatusForIndexFilter() throws Exception { }, 1, TimeUnit.MINUTES); } + public void testSnapshotStatusForIndexFilterForInProgressSnapshot() throws Exception { + String repositoryName = "test-repo"; + createRepository(repositoryName, "mock", Settings.builder().put("location", randomRepoPath()).put("block_on_data", true)); + + logger.info("Create indices"); + String index1 = "test-idx-1"; + String index2 = "test-idx-2"; + String index3 = "test-idx-3"; + createIndex(index1, index2, index3); + ensureGreen(); + + logger.info("Indexing some data"); + for (int i = 0; i < 10; i++) { + index(index1, "_doc", Integer.toString(i), "foo", "bar" + i); + index(index2, "_doc", Integer.toString(i), "foo", "baz" + i); + index(index3, "_doc", Integer.toString(i), "foo", "baz" + i); + } + refresh(); + String inProgressSnapshot = "test-in-progress-snapshot"; + + logger.info("Create snapshot"); + ActionFuture createSnapshotResponseActionFuture = startFullSnapshot(repositoryName, inProgressSnapshot); + + logger.info("Block data node"); + waitForBlockOnAnyDataNode(repositoryName, TimeValue.timeValueMinutes(1)); + awaitNumberOfSnapshotsInProgress(1); + + // test normal functioning of index filter for in progress snapshot + assertBusy(() -> { + SnapshotStatus snapshotsStatus = client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot) + .setIndices(index1, index2) + .get() + .getSnapshots() + .get(0); + Map snapshotIndexStatusMap = snapshotsStatus.getIndices(); + // Although the snapshot contains 3 indices, the response of status api call only contains results for 2 + assertEquals(snapshotIndexStatusMap.size(), 2); + assertEquals(snapshotIndexStatusMap.keySet(), Set.of(index1, index2)); + }); + + // when a non-existent index is requested in the index-filter + assertBusy(() -> { + // failure due to index not found in snapshot + final String nonExistentIndex1 = "non-existent-index-1"; + final String nonExistentIndex2 = "non-existent-index-2"; + Exception ex = expectThrows( + Exception.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot) + .setIndices(index1, index2, nonExistentIndex1, nonExistentIndex2) + .execute() + .actionGet() + ); + String cause = String.format( + Locale.ROOT, + "indices [%s] missing in snapshot [%s] of repository [%s]", + String.join(", ", List.of(nonExistentIndex2, nonExistentIndex1)), + 
inProgressSnapshot, + repositoryName + ); + assertEquals(cause, ex.getCause().getMessage()); + + // no error for ignore_unavailable = true and status response contains only the found indices + SnapshotStatus snapshotsStatus = client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot) + .setIndices(index1, index2, nonExistentIndex1, nonExistentIndex2) + .setIgnoreUnavailable(true) + .get() + .getSnapshots() + .get(0); + + Map snapshotIndexStatusMap = snapshotsStatus.getIndices(); + assertEquals(snapshotIndexStatusMap.size(), 2); + assertEquals(snapshotIndexStatusMap.keySet(), Set.of(index1, index2)); + }); + + logger.info("Unblock data node"); + unblockAllDataNodes(repositoryName); + + logger.info("Wait for snapshot to finish"); + waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60)); + } + public void testSnapshotStatusFailuresWithIndexFilter() throws Exception { String repositoryName = "test-repo"; String index1 = "test-idx-1"; @@ -705,6 +796,39 @@ public void testSnapshotStatusFailuresWithIndexFilter() throws Exception { createSnapshot(repositoryName, snapshot1, List.of(index1, index2, index3)); createSnapshot(repositoryName, snapshot2, List.of(index1)); + assertBusy(() -> { + // failure due to passing index filter for _all value of repository param + Exception ex = expectThrows( + Exception.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus("_all") + .setSnapshots(snapshot1) + .setIndices(index1, index2, index3) + .execute() + .actionGet() + ); + String cause = + "index list filter is supported only when a single 'repository' is passed, but found 'repository' param = [_all]"; + assertTrue(ex.getMessage().contains(cause)); + }); + + assertBusy(() -> { + // failure due to passing index filter for _all value of snapshot param --> gets translated as a blank array + Exception ex = expectThrows( + Exception.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots() + .setIndices(index1, index2, index3) + .execute() + .actionGet() + ); + String cause = "index list filter is supported only when a single 'snapshot' is passed, but found 'snapshot' param = [_all]"; + assertTrue(ex.getMessage().contains(cause)); + }); + assertBusy(() -> { // failure due to passing index filter for multiple snapshots ActionRequestValidationException ex = expectThrows( @@ -717,9 +841,10 @@ public void testSnapshotStatusFailuresWithIndexFilter() throws Exception { .execute() .actionGet() ); - String cause = "index list filter is supported only for a single snapshot"; + String cause = + "index list filter is supported only when a single 'snapshot' is passed, but found 'snapshot' param = [[test-snap-1, test-snap-2]]"; assertTrue(ex.getMessage().contains(cause)); - }, 1, TimeUnit.MINUTES); + }); assertBusy(() -> { // failure due to index not found in snapshot @@ -743,7 +868,18 @@ public void testSnapshotStatusFailuresWithIndexFilter() throws Exception { ); assertEquals(cause, ex.getCause().getMessage()); - }, 1, TimeUnit.MINUTES); + // no error for ignore_unavailable = true and status response contains only the found indices + SnapshotStatus snapshotsStatus = client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(snapshot2) + .setIndices(index1, index2, index3) + .setIgnoreUnavailable(true) + .get() + .getSnapshots() + .get(0); + assertEquals(1, snapshotsStatus.getIndices().size()); + }); assertBusy(() -> { // failure due to too many shards 
requested @@ -763,12 +899,148 @@ public void testSnapshotStatusFailuresWithIndexFilter() throws Exception { .actionGet() ); assertEquals(ex.status(), RestStatus.TOO_MANY_REQUESTS); - assertTrue(ex.getMessage().endsWith(" is more than the maximum allowed value of shard count [2] for snapshot status request")); + assertTrue(ex.getMessage().contains(" is more than the maximum allowed value of shard count [2] for snapshot status request")); logger.info("Reset MAX_SHARDS_ALLOWED_IN_STATUS_API to default value"); updateSettingsRequest.persistentSettings(Settings.builder().putNull(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey())); - assertAcked(internalCluster().client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); - }, 2, TimeUnit.MINUTES); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + }); + } + + public void testSnapshotStatusShardLimitOfResponseForInProgressSnapshot() throws Exception { + logger.info("Create repository"); + String repositoryName = "test-repo"; + createRepository( + repositoryName, + "mock", + Settings.builder() + .put("location", randomRepoPath()) + .put("compress", false) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put("wait_after_unblock", 200) + ); + + logger.info("Create indices"); + String index1 = "test-idx-1"; + String index2 = "test-idx-2"; + String index3 = "test-idx-3"; + assertAcked(prepareCreate(index1, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + assertAcked(prepareCreate(index2, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + assertAcked(prepareCreate(index3, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + ensureGreen(); + + logger.info("Index some data"); + indexRandomDocs(index1, 10); + indexRandomDocs(index2, 10); + indexRandomDocs(index3, 10); + + logger.info("Create completed snapshot"); + String completedSnapshot = "test-completed-snapshot"; + String blockedNode = blockNodeWithIndex(repositoryName, index1); + client().admin().cluster().prepareCreateSnapshot(repositoryName, completedSnapshot).setWaitForCompletion(false).get(); + waitForBlock(blockedNode, repositoryName, TimeValue.timeValueSeconds(60)); + unblockNode(repositoryName, blockedNode); + waitForCompletion(repositoryName, completedSnapshot, TimeValue.timeValueSeconds(60)); + + logger.info("Index some more data"); + indexRandomDocs(index1, 10); + indexRandomDocs(index2, 10); + indexRandomDocs(index3, 10); + refresh(); + + logger.info("Create in-progress snapshot"); + String inProgressSnapshot = "test-in-progress-snapshot"; + blockedNode = blockNodeWithIndex(repositoryName, index1); + client().admin().cluster().prepareCreateSnapshot(repositoryName, inProgressSnapshot).setWaitForCompletion(false).get(); + waitForBlock(blockedNode, repositoryName, TimeValue.timeValueSeconds(60)); + List snapshotStatuses = client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot, completedSnapshot) + .get() + .getSnapshots(); + + assertEquals(2, snapshotStatuses.size()); + assertEquals(SnapshotsInProgress.State.STARTED, snapshotStatuses.get(0).getState()); + assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatuses.get(1).getState()); + + logger.info("Set MAX_SHARDS_ALLOWED_IN_STATUS_API to a low value"); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + 
updateSettingsRequest.persistentSettings(Settings.builder().put(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey(), 1)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // shard limit exceeded due to inProgress snapshot alone @ without index-filter + assertBusy(() -> { + CircuitBreakingException exception = expectThrows( + CircuitBreakingException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot) + .execute() + .actionGet() + ); + assertEquals(exception.status(), RestStatus.TOO_MANY_REQUESTS); + assertTrue( + exception.getMessage().contains(" is more than the maximum allowed value of shard count [1] for snapshot status request") + ); + }); + + // shard limit exceeded due to inProgress snapshot alone @ with index-filter + assertBusy(() -> { + CircuitBreakingException exception = expectThrows( + CircuitBreakingException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot) + .setIndices(index1, index2) + .execute() + .actionGet() + ); + assertEquals(exception.status(), RestStatus.TOO_MANY_REQUESTS); + assertTrue( + exception.getMessage().contains(" is more than the maximum allowed value of shard count [1] for snapshot status request") + ); + }); + + logger.info("Set MAX_SHARDS_ALLOWED_IN_STATUS_API to a slightly higher value"); + updateSettingsRequest.persistentSettings(Settings.builder().put(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey(), 5)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // shard limit exceeded due to passing for inProgress but failing for current + completed + + assertBusy(() -> { + SnapshotStatus inProgressSnapshotStatus = client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot) + .get() + .getSnapshots() + .get(0); + assertEquals(3, inProgressSnapshotStatus.getShards().size()); + + CircuitBreakingException exception = expectThrows( + CircuitBreakingException.class, + () -> client().admin() + .cluster() + .prepareSnapshotStatus(repositoryName) + .setSnapshots(inProgressSnapshot, completedSnapshot) + .execute() + .actionGet() + ); + assertEquals(exception.status(), RestStatus.TOO_MANY_REQUESTS); + assertTrue( + exception.getMessage().contains(" is more than the maximum allowed value of shard count [5] for snapshot status request") + ); + }); + + unblockNode(repositoryName, blockedNode); + waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60)); + + logger.info("Reset MAX_SHARDS_ALLOWED_IN_STATUS_API to default value"); + updateSettingsRequest.persistentSettings(Settings.builder().putNull(MAX_SHARDS_ALLOWED_IN_STATUS_API.getKey())); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); } private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotStatus, String indexName) { diff --git a/server/src/main/java/org/apache/lucene/index/DocValuesProducerUtil.java b/server/src/main/java/org/apache/lucene/index/DocValuesProducerUtil.java new file mode 100644 index 0000000000000..3aebec56b82d9 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/index/DocValuesProducerUtil.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.apache.lucene.index; + +import org.apache.lucene.codecs.DocValuesProducer; + +import java.util.Collections; +import java.util.Set; + +/** + * Utility class for DocValuesProducers + * @opensearch.internal + */ +public class DocValuesProducerUtil { + /** + * Returns the segment doc values producers for the given doc values producer. + * If the given doc values producer is not a segment doc values producer, an empty set is returned. + * @param docValuesProducer the doc values producer + * @return the segment doc values producers + */ + public static Set getSegmentDocValuesProducers(DocValuesProducer docValuesProducer) { + if (docValuesProducer instanceof SegmentDocValuesProducer) { + return (((SegmentDocValuesProducer) docValuesProducer).dvProducers); + } + return Collections.emptySet(); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsRequest.java index 0319fa103138f..9511a45423e63 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsRequest.java @@ -37,7 +37,7 @@ public CatShardsRequest() {} public CatShardsRequest(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { indices = in.readStringArray(); cancelAfterTimeInterval = in.readOptionalTimeValue(); if (in.readBoolean()) { @@ -50,7 +50,7 @@ public CatShardsRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { if (indices == null) { out.writeVInt(0); } else { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsResponse.java index c2499ab190ded..dee0b0f2d45c0 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/CatShardsResponse.java @@ -38,7 +38,7 @@ public CatShardsResponse() {} public CatShardsResponse(StreamInput in) throws IOException { super(in); indicesStatsResponse = new IndicesStatsResponse(in); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { nodes = DiscoveryNodes.readFrom(in, null); responseShards = in.readList(ShardRouting::new); if (in.readBoolean()) { @@ -50,7 +50,7 @@ public CatShardsResponse(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { indicesStatsResponse.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { nodes.writeToWithAttribute(out); out.writeList(responseShards); out.writeBoolean(pageToken != null); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java index 3dc8c38152a16..7b36b7a10f4f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportCatShardsAction.java @@ -108,6 
+108,12 @@ public void onResponse(ClusterStateResponse clusterStateResponse) { : paginationStrategy.getRequestedEntities() ); catShardsResponse.setPageToken(Objects.isNull(paginationStrategy) ? null : paginationStrategy.getResponseToken()); + // For paginated queries, if strategy outputs no shards to be returned, avoid fetching IndicesStats. + if (shouldSkipIndicesStatsRequest(paginationStrategy)) { + catShardsResponse.setIndicesStatsResponse(IndicesStatsResponse.getEmptyResponse()); + cancellableListener.onResponse(catShardsResponse); + return; + } IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); indicesStatsRequest.setShouldCancelOnTimeout(true); indicesStatsRequest.all(); @@ -159,4 +165,8 @@ private void validateRequestLimit( } } } + + private boolean shouldSkipIndicesStatsRequest(ShardPaginationStrategy paginationStrategy) { + return Objects.nonNull(paginationStrategy) && paginationStrategy.getRequestedEntities().isEmpty(); + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index f3110cc8f20a5..84bc87a5cb1ba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -112,6 +112,8 @@ private static StorageType fromString(String string) { private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); private String renamePattern; private String renameReplacement; + private String renameAliasPattern; + private String renameAliasReplacement; private boolean waitForCompletion; private boolean includeGlobalState = false; private boolean partial = false; @@ -164,6 +166,12 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_17_0)) { sourceRemoteTranslogRepository = in.readOptionalString(); } + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { + renameAliasPattern = in.readOptionalString(); + } + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { + renameAliasReplacement = in.readOptionalString(); + } } @Override @@ -191,6 +199,12 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_17_0)) { out.writeOptionalString(sourceRemoteTranslogRepository); } + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { + out.writeOptionalString(renameAliasPattern); + } + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { + out.writeOptionalString(renameAliasReplacement); + } } @Override @@ -361,6 +375,51 @@ public String renameReplacement() { return renameReplacement; } + /** + * Sets rename pattern that should be applied to restored indices' alias. + *
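+ * (Illustrative example only, values are hypothetical: a pattern of {@code alias_(.+)} with a replacement of {@code restored_alias_$1} would restore an alias named {@code alias_1} under the new name {@code restored_alias_1}.)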

+ * Aliases that match the rename pattern will be renamed according to {@link #renameAliasReplacement(String)}. The + * rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}. + * If two or more aliases are renamed into the same name, they will be merged. + * + * @param renameAliasPattern rename alias pattern + * @return this request + */ + public RestoreSnapshotRequest renameAliasPattern(String renameAliasPattern) { + this.renameAliasPattern = renameAliasPattern; + return this; + } + + /** + * Returns rename alias pattern + * + * @return rename alias pattern + */ + public String renameAliasPattern() { + return renameAliasPattern; + } + + /** + * Sets rename alias replacement + *

+ * See {@link #renameAliasPattern(String)} for more information. + * + * @param renameAliasReplacement rename replacement + */ + public RestoreSnapshotRequest renameAliasReplacement(String renameAliasReplacement) { + this.renameAliasReplacement = renameAliasReplacement; + return this; + } + + /** + * Returns rename alias replacement + * + * @return rename alias replacement + */ + public String renameAliasReplacement() { + return renameAliasReplacement; + } + /** * If this parameter is set to true the operation will wait for completion of restore process before returning. * @@ -625,6 +684,18 @@ public RestoreSnapshotRequest source(Map source) { } else { throw new IllegalArgumentException("malformed rename_replacement"); } + } else if (name.equals("rename_alias_pattern")) { + if (entry.getValue() instanceof String) { + renameAliasPattern((String) entry.getValue()); + } else { + throw new IllegalArgumentException("malformed rename_alias_pattern"); + } + } else if (name.equals("rename_alias_replacement")) { + if (entry.getValue() instanceof String) { + renameAliasReplacement((String) entry.getValue()); + } else { + throw new IllegalArgumentException("malformed rename_alias_replacement"); + } } else if (name.equals("index_settings")) { if (!(entry.getValue() instanceof Map)) { throw new IllegalArgumentException("malformed index_settings section"); @@ -685,6 +756,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (renameReplacement != null) { builder.field("rename_replacement", renameReplacement); } + if (renameAliasPattern != null) { + builder.field("rename_alias_pattern", renameAliasPattern); + } + if (renameAliasReplacement != null) { + builder.field("rename_alias_replacement", renameAliasReplacement); + } builder.field("include_global_state", includeGlobalState); builder.field("partial", partial); builder.field("include_aliases", includeAliases); @@ -733,6 +810,8 @@ public boolean equals(Object o) { && Objects.equals(indicesOptions, that.indicesOptions) && Objects.equals(renamePattern, that.renamePattern) && Objects.equals(renameReplacement, that.renameReplacement) + && Objects.equals(renameAliasPattern, that.renameAliasPattern) + && Objects.equals(renameAliasReplacement, that.renameAliasReplacement) && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) @@ -751,6 +830,8 @@ public int hashCode() { indicesOptions, renamePattern, renameReplacement, + renameAliasPattern, + renameAliasReplacement, waitForCompletion, includeGlobalState, partial, diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 53c9557a621b7..038d62ad7f4cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -144,6 +144,34 @@ public RestoreSnapshotRequestBuilder setRenameReplacement(String renameReplaceme return this; } + /** + * Sets rename pattern that should be applied to restored indices' aliases. + *

+ * Aliases that match the rename pattern will be renamed according to {@link #setRenameAliasReplacement(String)}. The + * rename pattern is applied according to the {@link java.util.regex.Matcher#appendReplacement(StringBuffer, String)}. + * The request will fail if two or more aliases are renamed into the same name. + * + * @param renameAliasPattern rename alias pattern + * @return this builder + */ + public RestoreSnapshotRequestBuilder setRenameAliasPattern(String renameAliasPattern) { + request.renameAliasPattern(renameAliasPattern); + return this; + } + + /** + * Sets rename alias replacement + *
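+ * A hypothetical fluent usage of the pair: {@code setRenameAliasPattern("alias_(.+)").setRenameAliasReplacement("restored_alias_$1")} would restore the alias {@code alias_1} as {@code restored_alias_1}.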

+ * See {@link #setRenameAliasPattern(String)} for more information. + * + * @param renameAliasReplacement rename alias replacement + * @return this builder + */ + public RestoreSnapshotRequestBuilder setRenameAliasReplacement(String renameAliasReplacement) { + request.renameAliasReplacement(renameAliasReplacement); + return this; + } + /** * If this parameter is set to true the operation will wait for completion of restore process before returning. * diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index 3d7fb5b6beb56..a270dcfa53474 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -41,6 +41,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Arrays; import static org.opensearch.action.ValidateActions.addValidationError; @@ -124,8 +125,20 @@ public ActionRequestValidationException validate() { if (snapshots == null) { validationException = addValidationError("snapshots is null", validationException); } - if (indices.length != 0 && snapshots.length != 1) { - validationException = addValidationError("index list filter is supported only for a single snapshot", validationException); + if (indices.length != 0) { + if (repository.equals("_all")) { + String error = + "index list filter is supported only when a single 'repository' is passed, but found 'repository' param = [_all]"; + validationException = addValidationError(error, validationException); + } + if (snapshots.length != 1) { + // snapshot param was '_all' (length = 0) or a list of snapshots (length > 1) + String snapshotParamValue = snapshots.length == 0 ? 
"_all" : Arrays.toString(snapshots); + String error = "index list filter is supported only when a single 'snapshot' is passed, but found 'snapshot' param = [" + + snapshotParamValue + + "]"; + validationException = addValidationError(error, validationException); + } } return validationException; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 8228cb6301c8c..2c8b06bb5e8fe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -99,8 +99,14 @@ public class TransportSnapshotsStatusAction extends TransportClusterManagerNodeA private final TransportNodesSnapshotsStatus transportNodesSnapshotsStatus; + private Set requestedIndexNames; + private long maximumAllowedShardCount; + private int totalShardsRequiredInResponse; + + private boolean requestUsesIndexFilter; + @Inject public TransportSnapshotsStatusAction( TransportService transportService, @@ -145,25 +151,21 @@ protected void clusterManagerOperation( final ClusterState state, final ActionListener listener ) throws Exception { + setupForRequest(request); + final SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); List currentSnapshots = SnapshotsService.currentSnapshots( snapshotsInProgress, request.repository(), Arrays.asList(request.snapshots()) ); + if (currentSnapshots.isEmpty()) { buildResponse(snapshotsInProgress, request, currentSnapshots, null, listener); return; } - Set nodesIds = new HashSet<>(); - for (SnapshotsInProgress.Entry entry : currentSnapshots) { - for (final SnapshotsInProgress.ShardSnapshotStatus status : entry.shards().values()) { - if (status.nodeId() != null) { - nodesIds.add(status.nodeId()); - } - } - } + Set nodesIds = getNodeIdsOfCurrentSnapshots(request, currentSnapshots); if (!nodesIds.isEmpty()) { // There are still some snapshots running - check their progress @@ -192,6 +194,97 @@ protected void clusterManagerOperation( } + private void setupForRequest(SnapshotsStatusRequest request) { + requestedIndexNames = new HashSet<>(Arrays.asList(request.indices())); + requestUsesIndexFilter = requestedIndexNames.isEmpty() == false; + totalShardsRequiredInResponse = 0; + maximumAllowedShardCount = clusterService.getClusterSettings().get(MAX_SHARDS_ALLOWED_IN_STATUS_API); + } + + /* + * To get the node IDs of the relevant (according to the index filter) shards which are part of current snapshots + * It also deals with any missing indices (for index-filter case) and calculates the number of shards contributed by all + * the current snapshots to the total count (irrespective of index-filter) + * If this count exceeds the limit, CircuitBreakingException is thrown + * */ + private Set getNodeIdsOfCurrentSnapshots(final SnapshotsStatusRequest request, List currentSnapshots) + throws CircuitBreakingException { + Set nodesIdsOfCurrentSnapshotShards = new HashSet<>(); + int totalShardsAcrossCurrentSnapshots = 0; + + for (SnapshotsInProgress.Entry currentSnapshotEntry : currentSnapshots) { + if (currentSnapshotEntry.remoteStoreIndexShallowCopyV2()) { + // skip current shallow v2 snapshots + continue; + } + if (requestUsesIndexFilter) { + // index-filter is allowed only for a single snapshot, which 
has to be this one + // first check if any requested indices are missing from this current snapshot + + final Set indicesInCurrentSnapshot = currentSnapshotEntry.indices() + .stream() + .map(IndexId::getName) + .collect(Collectors.toSet()); + + final Set indicesNotFound = requestedIndexNames.stream() + .filter(index -> indicesInCurrentSnapshot.contains(index) == false) + .collect(Collectors.toSet()); + + if (indicesNotFound.isEmpty() == false) { + handleIndexNotFound( + requestedIndexNames, + indicesNotFound, + request, + currentSnapshotEntry.snapshot().getSnapshotId().getName(), + false + ); + } + // the actual no. of shards contributed by this current snapshot will now be calculated + } else { + // all shards of this current snapshot are required in response + totalShardsAcrossCurrentSnapshots += currentSnapshotEntry.shards().size(); + } + + for (final Map.Entry shardStatusEntry : currentSnapshotEntry.shards() + .entrySet()) { + SnapshotsInProgress.ShardSnapshotStatus shardStatus = shardStatusEntry.getValue(); + boolean indexPresentInFilter = requestedIndexNames.contains(shardStatusEntry.getKey().getIndexName()); + + if (requestUsesIndexFilter && indexPresentInFilter) { + // count only those shards whose index belongs to the index-filter + totalShardsAcrossCurrentSnapshots++; + + // for non-index filter case, we already counted all the shards of this current snapshot (non-shallow v2) + } + + if (shardStatus.nodeId() != null) { + if (requestUsesIndexFilter) { + if (indexPresentInFilter) { + // include node only if the index containing this shard belongs to the index filter + nodesIdsOfCurrentSnapshotShards.add(shardStatus.nodeId()); + } + } else { + nodesIdsOfCurrentSnapshotShards.add(shardStatus.nodeId()); + } + } + } + } + + totalShardsRequiredInResponse += totalShardsAcrossCurrentSnapshots; + if (totalShardsRequiredInResponse > maximumAllowedShardCount) { + // index-filter is allowed only for a single snapshot. 
If index-filter is being used and limit got exceeded, + // this snapshot is current and its relevant indices contribute more shards than the limit + + // if index-filter is not being used and limit got exceed, there could be more shards required in response coming from completed + // snapshots + // but since the limit is already exceeded, we can fail request here + boolean couldInvolveMoreShards = requestUsesIndexFilter == false; + handleMaximumAllowedShardCountExceeded(request.repository(), totalShardsRequiredInResponse, couldInvolveMoreShards); + } + + return nodesIdsOfCurrentSnapshotShards; + } + private void buildResponse( SnapshotsInProgress snapshotsInProgress, SnapshotsStatusRequest request, @@ -215,6 +308,10 @@ private void buildResponse( List shardStatusBuilder = new ArrayList<>(); Map indexIdLookup = null; for (final Map.Entry shardEntry : entry.shards().entrySet()) { + if (requestUsesIndexFilter && requestedIndexNames.contains(shardEntry.getKey().getIndexName()) == false) { + // skip shard if its index does not belong to the index-filter + continue; + } SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.getValue(); if (status.nodeId() != null) { // We should have information about this shard from the shard: @@ -320,7 +417,6 @@ private void loadRepositoryData( String repositoryName, ActionListener listener ) { - maximumAllowedShardCount = clusterService.getClusterSettings().get(MAX_SHARDS_ALLOWED_IN_STATUS_API); final StepListener repositoryDataListener = new StepListener<>(); repositoriesService.getRepositoryData(repositoryName, repositoryDataListener); repositoryDataListener.whenComplete(repositoryData -> { @@ -343,8 +439,7 @@ private void loadRepositoryData( snapshotInfo ); boolean isShallowV2Snapshot = snapshotInfo.getPinnedTimestamp() > 0; - long initialSnapshotTotalSize = 0; - if (isShallowV2Snapshot && request.indices().length == 0) { + if (isShallowV2Snapshot && requestUsesIndexFilter == false) { // TODO: add primary store size in bytes at the snapshot level } @@ -430,7 +525,10 @@ private Map snapshotsInfo( .stream() .filter(s -> requestedSnapshotNames.contains(s.getName())) .collect(Collectors.toMap(SnapshotId::getName, Function.identity())); + + // for no index filter-case and excludes shards from shallow v2 snapshots int totalShardsAcrossSnapshots = 0; + for (final String snapshotName : request.snapshots()) { if (currentSnapshotNames.contains(snapshotName)) { // we've already found this snapshot in the current snapshot entries, so skip over @@ -453,23 +551,15 @@ private Map snapshotsInfo( } SnapshotInfo snapshotInfo = snapshot(snapshotsInProgress, repositoryName, snapshotId); boolean isV2Snapshot = snapshotInfo.getPinnedTimestamp() > 0; - if (isV2Snapshot == false && request.indices().length == 0) { + if (isV2Snapshot == false && requestUsesIndexFilter == false) { totalShardsAcrossSnapshots += snapshotInfo.totalShards(); } snapshotsInfoMap.put(snapshotId, snapshotInfo); } - if (totalShardsAcrossSnapshots > maximumAllowedShardCount && request.indices().length == 0) { - String message = "[" - + repositoryName - + ":" - + String.join(", ", request.snapshots()) - + "]" - + " Total shard count [" - + totalShardsAcrossSnapshots - + "] is more than the maximum allowed value of shard count [" - + maximumAllowedShardCount - + "] for snapshot status request"; - throw new CircuitBreakingException(message, CircuitBreaker.Durability.PERMANENT); + totalShardsRequiredInResponse += totalShardsAcrossSnapshots; + if (totalShardsRequiredInResponse > maximumAllowedShardCount && 
requestUsesIndexFilter == false) { + // includes shard contributions from all snapshots (current and completed) + handleMaximumAllowedShardCountExceeded(repositoryName, totalShardsRequiredInResponse, false); } return unmodifiableMap(snapshotsInfoMap); } @@ -492,52 +582,46 @@ private Map snapshotShards( final RepositoryData repositoryData, final SnapshotInfo snapshotInfo ) throws IOException { - final Set requestedIndexNames = Sets.newHashSet(request.indices()); String snapshotName = snapshotInfo.snapshotId().getName(); - Set indices = Sets.newHashSet(snapshotInfo.indices()); - if (requestedIndexNames.isEmpty() == false) { - Set finalIndices = indices; - List indicesNotFound = requestedIndexNames.stream() - .filter(i -> finalIndices.contains(i) == false) - .collect(Collectors.toList()); + Set indicesToProcess; + if (requestUsesIndexFilter) { + Set snapshotIndices = Sets.newHashSet(snapshotInfo.indices()); + Set indicesNotFound = requestedIndexNames.stream() + .filter(index -> snapshotIndices.contains(index) == false) + .collect(Collectors.toSet()); if (indicesNotFound.isEmpty() == false) { - handleIndexNotFound(String.join(", ", indicesNotFound), request, snapshotName, repositoryName); + boolean moreMissingIndicesPossible = indicesNotFound.size() == requestedIndexNames.size(); + handleIndexNotFound(requestedIndexNames, indicesNotFound, request, snapshotName, moreMissingIndicesPossible); } - indices = requestedIndexNames; + indicesToProcess = requestedIndexNames; + } else { + // all indices of this snapshot + indicesToProcess = Sets.newHashSet(snapshotInfo.indices()); } final Repository repository = repositoriesService.repository(repositoryName); boolean isV2Snapshot = snapshotInfo.getPinnedTimestamp() > 0; + + // for index filter-case and excludes shards from shallow v2 snapshots int totalShardsAcrossIndices = 0; final Map indexMetadataMap = new HashMap<>(); - - for (String index : indices) { + for (String index : indicesToProcess) { IndexId indexId = repositoryData.resolveIndexId(index); IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData(repositoryData, snapshotInfo.snapshotId(), indexId); if (indexMetadata != null) { - if (requestedIndexNames.isEmpty() == false && isV2Snapshot == false) { + if (requestUsesIndexFilter && isV2Snapshot == false) { totalShardsAcrossIndices += indexMetadata.getNumberOfShards(); } indexMetadataMap.put(indexId, indexMetadata); - } else if (requestedIndexNames.isEmpty() == false) { - handleIndexNotFound(index, request, snapshotName, repositoryName); + } else if (requestUsesIndexFilter) { + handleIndexNotFound(indicesToProcess, Collections.singleton(index), request, snapshotName, true); } } - if (totalShardsAcrossIndices > maximumAllowedShardCount && requestedIndexNames.isEmpty() == false && isV2Snapshot == false) { - String message = "[" - + repositoryName - + ":" - + String.join(", ", request.snapshots()) - + "]" - + " Total shard count [" - + totalShardsAcrossIndices - + "] across the requested indices [" - + requestedIndexNames.stream().collect(Collectors.joining(", ")) - + "] is more than the maximum allowed value of shard count [" - + maximumAllowedShardCount - + "] for snapshot status request"; - throw new CircuitBreakingException(message, CircuitBreaker.Durability.PERMANENT); + totalShardsRequiredInResponse += totalShardsAcrossIndices; + if (totalShardsRequiredInResponse > maximumAllowedShardCount && requestUsesIndexFilter && isV2Snapshot == false) { + // index-filter is allowed only for a single snapshot, which has to be this one + 
handleMaximumAllowedShardCountExceeded(request.repository(), totalShardsRequiredInResponse, false); } final Map shardStatus = new HashMap<>(); @@ -563,7 +647,6 @@ private Map snapshotShards( // could not be taken due to partial being set to false. shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped"); } else { - // TODO: to be refactored later if (isV2Snapshot) { shardSnapshotStatus = IndexShardSnapshotStatus.newDone(0, 0, 0, 0, 0, 0, null); } else { @@ -578,21 +661,55 @@ private Map snapshotShards( return unmodifiableMap(shardStatus); } - private void handleIndexNotFound(String index, SnapshotsStatusRequest request, String snapshotName, String repositoryName) { + private void handleIndexNotFound( + Set indicesToProcess, + Set indicesNotFound, + SnapshotsStatusRequest request, + String snapshotName, + boolean moreMissingIndicesPossible + ) throws IndexNotFoundException { + String indices = String.join(", ", indicesNotFound); + if (moreMissingIndicesPossible) { + indices = indices.concat(" and possibly more indices"); + } if (request.ignoreUnavailable()) { - // ignoring unavailable index + // ignoring unavailable indices logger.debug( "snapshot status request ignoring indices [{}], not found in snapshot[{}] in repository [{}]", - index, + indices, snapshotName, - repositoryName + request.repository() ); + + // remove unavailable indices from the set to be processed + indicesToProcess.removeAll(indicesNotFound); } else { - String cause = "indices [" + index + "] missing in snapshot [" + snapshotName + "] of repository [" + repositoryName + "]"; - throw new IndexNotFoundException(index, new IllegalArgumentException(cause)); + String cause = "indices [" + + indices + + "] missing in snapshot [" + + snapshotName + + "] of repository [" + + request.repository() + + "]"; + throw new IndexNotFoundException(indices, new IllegalArgumentException(cause)); } } + private void handleMaximumAllowedShardCountExceeded(String repositoryName, int totalContributingShards, boolean couldInvolveMoreShards) + throws CircuitBreakingException { + String shardCount = "[" + totalContributingShards + (couldInvolveMoreShards ? "+" : "") + "]"; + String message = "[" + + repositoryName + + "] Total shard count " + + shardCount + + " is more than the maximum allowed value of shard count [" + + maximumAllowedShardCount + + "] for snapshot status request. 
Try narrowing down the request by using a snapshot list or " + + "an index list for a singular snapshot."; + + throw new CircuitBreakingException(message, CircuitBreaker.Durability.PERMANENT); + } + private static SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java index 03a73f45ffe81..9ebe36531c208 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.stats; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.IndexMetric; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.xcontent.ToXContentFragment; @@ -47,6 +48,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; /** * Cluster Stats per index @@ -68,14 +70,23 @@ public class ClusterStatsIndices implements ToXContentFragment { private MappingStats mappings; public ClusterStatsIndices(List nodeResponses, MappingStats mappingStats, AnalysisStats analysisStats) { - Map countsPerIndex = new HashMap<>(); + this(Set.of(IndexMetric.values()), nodeResponses, mappingStats, analysisStats); + + } - this.docs = new DocsStats(); - this.store = new StoreStats(); - this.fieldData = new FieldDataStats(); - this.queryCache = new QueryCacheStats(); - this.completion = new CompletionStats(); - this.segments = new SegmentsStats(); + public ClusterStatsIndices( + Set indicesMetrics, + List nodeResponses, + MappingStats mappingStats, + AnalysisStats analysisStats + ) { + Map countsPerIndex = new HashMap<>(); + this.docs = indicesMetrics.contains(IndexMetric.DOCS) ? new DocsStats() : null; + this.store = indicesMetrics.contains(IndexMetric.STORE) ? new StoreStats() : null; + this.fieldData = indicesMetrics.contains(IndexMetric.FIELDDATA) ? new FieldDataStats() : null; + this.queryCache = indicesMetrics.contains(IndexMetric.QUERY_CACHE) ? new QueryCacheStats() : null; + this.completion = indicesMetrics.contains(IndexMetric.COMPLETION) ? new CompletionStats() : null; + this.segments = indicesMetrics.contains(IndexMetric.SEGMENTS) ? 
new SegmentsStats() : null; for (ClusterStatsNodeResponse r : nodeResponses) { // Aggregated response from the node @@ -92,12 +103,24 @@ public ClusterStatsIndices(List nodeResponses, Mapping } } - docs.add(r.getAggregatedNodeLevelStats().commonStats.docs); - store.add(r.getAggregatedNodeLevelStats().commonStats.store); - fieldData.add(r.getAggregatedNodeLevelStats().commonStats.fieldData); - queryCache.add(r.getAggregatedNodeLevelStats().commonStats.queryCache); - completion.add(r.getAggregatedNodeLevelStats().commonStats.completion); - segments.add(r.getAggregatedNodeLevelStats().commonStats.segments); + if (indicesMetrics.contains(IndexMetric.DOCS)) { + docs.add(r.getAggregatedNodeLevelStats().commonStats.docs); + } + if (indicesMetrics.contains(IndexMetric.STORE)) { + store.add(r.getAggregatedNodeLevelStats().commonStats.store); + } + if (indicesMetrics.contains(IndexMetric.FIELDDATA)) { + fieldData.add(r.getAggregatedNodeLevelStats().commonStats.fieldData); + } + if (indicesMetrics.contains(IndexMetric.QUERY_CACHE)) { + queryCache.add(r.getAggregatedNodeLevelStats().commonStats.queryCache); + } + if (indicesMetrics.contains(IndexMetric.COMPLETION)) { + completion.add(r.getAggregatedNodeLevelStats().commonStats.completion); + } + if (indicesMetrics.contains(IndexMetric.SEGMENTS)) { + segments.add(r.getAggregatedNodeLevelStats().commonStats.segments); + } } else { // Default response from the node for (org.opensearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) { @@ -113,21 +136,35 @@ public ClusterStatsIndices(List nodeResponses, Mapping if (shardStats.getShardRouting().primary()) { indexShardStats.primaries++; - docs.add(shardCommonStats.docs); + if (indicesMetrics.contains(IndexMetric.DOCS)) { + docs.add(shardCommonStats.docs); + } + } + if (indicesMetrics.contains(IndexMetric.STORE)) { + store.add(shardCommonStats.store); + } + if (indicesMetrics.contains(IndexMetric.FIELDDATA)) { + fieldData.add(shardCommonStats.fieldData); + } + if (indicesMetrics.contains(IndexMetric.QUERY_CACHE)) { + queryCache.add(shardCommonStats.queryCache); + } + if (indicesMetrics.contains(IndexMetric.COMPLETION)) { + completion.add(shardCommonStats.completion); + } + if (indicesMetrics.contains(IndexMetric.SEGMENTS)) { + segments.add(shardCommonStats.segments); } - store.add(shardCommonStats.store); - fieldData.add(shardCommonStats.fieldData); - queryCache.add(shardCommonStats.queryCache); - completion.add(shardCommonStats.completion); - segments.add(shardCommonStats.segments); } } } - shards = new ShardStats(); indexCount = countsPerIndex.size(); - for (final ShardStats indexCountsCursor : countsPerIndex.values()) { - shards.addIndexShardCount(indexCountsCursor); + if (indicesMetrics.contains(IndexMetric.SHARDS)) { + shards = new ShardStats(); + for (final ShardStats indexCountsCursor : countsPerIndex.values()) { + shards.addIndexShardCount(indexCountsCursor); + } } this.mappings = mappingStats; @@ -186,13 +223,27 @@ static final class Fields { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.COUNT, indexCount); - shards.toXContent(builder, params); - docs.toXContent(builder, params); - store.toXContent(builder, params); - fieldData.toXContent(builder, params); - queryCache.toXContent(builder, params); - completion.toXContent(builder, params); - segments.toXContent(builder, params); + if (shards != null) { + shards.toXContent(builder, params); + } + if (docs != null) { + docs.toXContent(builder, params); + 
} + if (store != null) { + store.toXContent(builder, params); + } + if (fieldData != null) { + fieldData.toXContent(builder, params); + } + if (queryCache != null) { + queryCache.toXContent(builder, params); + } + if (completion != null) { + completion.toXContent(builder, params); + } + if (segments != null) { + segments.toXContent(builder, params); + } if (mappings != null) { mappings.toXContent(builder, params); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index b44e9cfc5c74a..bf8218a66fc17 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.Metric; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.common.annotation.PublicApi; @@ -89,10 +90,29 @@ public class ClusterStatsNodes implements ToXContentFragment { private final PackagingTypes packagingTypes; private final IngestStats ingestStats; + public static final Set NODE_STATS_METRICS = Set.of( + // Stats computed from node info and node stat + Metric.OS, + Metric.JVM, + // Stats computed from node stat + Metric.FS, + Metric.PROCESS, + Metric.INGEST, + // Stats computed from node info + Metric.PLUGINS, + Metric.NETWORK_TYPES, + Metric.DISCOVERY_TYPES, + Metric.PACKAGING_TYPES + ); + ClusterStatsNodes(List nodeResponses) { + this(Set.of(Metric.values()), nodeResponses); + } + + ClusterStatsNodes(Set requestedMetrics, List nodeResponses) { this.versions = new HashSet<>(); - this.fs = new FsInfo.Path(); - this.plugins = new HashSet<>(); + this.fs = requestedMetrics.contains(ClusterStatsRequest.Metric.FS) ? new FsInfo.Path() : null; + this.plugins = requestedMetrics.contains(ClusterStatsRequest.Metric.PLUGINS) ? 
new HashSet<>() : null; Set seenAddresses = new HashSet<>(nodeResponses.size()); List nodeInfos = new ArrayList<>(nodeResponses.size()); @@ -101,7 +121,9 @@ public class ClusterStatsNodes implements ToXContentFragment { nodeInfos.add(nodeResponse.nodeInfo()); nodeStats.add(nodeResponse.nodeStats()); this.versions.add(nodeResponse.nodeInfo().getVersion()); - this.plugins.addAll(nodeResponse.nodeInfo().getInfo(PluginsAndModules.class).getPluginInfos()); + if (requestedMetrics.contains(ClusterStatsRequest.Metric.PLUGINS)) { + this.plugins.addAll(nodeResponse.nodeInfo().getInfo(PluginsAndModules.class).getPluginInfos()); + } // now do the stats that should be deduped by hardware (implemented by ip deduping) TransportAddress publishAddress = nodeResponse.nodeInfo().getInfo(TransportInfo.class).address().publishAddress(); @@ -109,18 +131,19 @@ public class ClusterStatsNodes implements ToXContentFragment { if (!seenAddresses.add(inetAddress)) { continue; } - if (nodeResponse.nodeStats().getFs() != null) { + if (requestedMetrics.contains(ClusterStatsRequest.Metric.FS) && nodeResponse.nodeStats().getFs() != null) { this.fs.add(nodeResponse.nodeStats().getFs().getTotal()); } } + this.counts = new Counts(nodeInfos); - this.os = new OsStats(nodeInfos, nodeStats); - this.process = new ProcessStats(nodeStats); - this.jvm = new JvmStats(nodeInfos, nodeStats); - this.networkTypes = new NetworkTypes(nodeInfos); - this.discoveryTypes = new DiscoveryTypes(nodeInfos); - this.packagingTypes = new PackagingTypes(nodeInfos); - this.ingestStats = new IngestStats(nodeStats); + this.networkTypes = requestedMetrics.contains(ClusterStatsRequest.Metric.NETWORK_TYPES) ? new NetworkTypes(nodeInfos) : null; + this.discoveryTypes = requestedMetrics.contains(ClusterStatsRequest.Metric.DISCOVERY_TYPES) ? new DiscoveryTypes(nodeInfos) : null; + this.packagingTypes = requestedMetrics.contains(ClusterStatsRequest.Metric.PACKAGING_TYPES) ? new PackagingTypes(nodeInfos) : null; + this.ingestStats = requestedMetrics.contains(ClusterStatsRequest.Metric.INGEST) ? new IngestStats(nodeStats) : null; + this.process = requestedMetrics.contains(ClusterStatsRequest.Metric.PROCESS) ? new ProcessStats(nodeStats) : null; + this.os = requestedMetrics.contains(ClusterStatsRequest.Metric.OS) ? new OsStats(nodeInfos, nodeStats) : null; + this.jvm = requestedMetrics.contains(ClusterStatsRequest.Metric.JVM) ? 
new JvmStats(nodeInfos, nodeStats) : null; } public Counts getCounts() { @@ -179,36 +202,54 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); - builder.startObject(Fields.OS); - os.toXContent(builder, params); - builder.endObject(); + if (os != null) { + builder.startObject(Fields.OS); + os.toXContent(builder, params); + builder.endObject(); + } - builder.startObject(Fields.PROCESS); - process.toXContent(builder, params); - builder.endObject(); + if (process != null) { + builder.startObject(Fields.PROCESS); + process.toXContent(builder, params); + builder.endObject(); + } - builder.startObject(Fields.JVM); - jvm.toXContent(builder, params); - builder.endObject(); + if (jvm != null) { + builder.startObject(Fields.JVM); + jvm.toXContent(builder, params); + builder.endObject(); + } - builder.field(Fields.FS); - fs.toXContent(builder, params); + if (fs != null) { + builder.field(Fields.FS); + fs.toXContent(builder, params); + } - builder.startArray(Fields.PLUGINS); - for (PluginInfo pluginInfo : plugins) { - pluginInfo.toXContent(builder, params); + if (plugins != null) { + builder.startArray(Fields.PLUGINS); + for (PluginInfo pluginInfo : plugins) { + pluginInfo.toXContent(builder, params); + } + builder.endArray(); } - builder.endArray(); - builder.startObject(Fields.NETWORK_TYPES); - networkTypes.toXContent(builder, params); - builder.endObject(); + if (networkTypes != null) { + builder.startObject(Fields.NETWORK_TYPES); + networkTypes.toXContent(builder, params); + builder.endObject(); + } - discoveryTypes.toXContent(builder, params); + if (discoveryTypes != null) { + discoveryTypes.toXContent(builder, params); + } - packagingTypes.toXContent(builder, params); + if (packagingTypes != null) { + packagingTypes.toXContent(builder, params); + } - ingestStats.toXContent(builder, params); + if (ingestStats != null) { + ingestStats.toXContent(builder, params); + } return builder; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java index b82a9d256a134..19b3033c9745b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -39,6 +39,8 @@ import org.opensearch.core.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.HashSet; +import java.util.Set; /** * A request to get cluster level stats. 
@@ -48,11 +50,30 @@ @PublicApi(since = "1.0.0") public class ClusterStatsRequest extends BaseNodesRequest { + private final Set requestedMetrics = new HashSet<>(); + private final Set indexMetricsRequested = new HashSet<>(); + private Boolean computeAllMetrics = true; + public ClusterStatsRequest(StreamInput in) throws IOException { super(in); if (in.getVersion().onOrAfter(Version.V_2_16_0)) { useAggregatedNodeLevelResponses = in.readOptionalBoolean(); } + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { + computeAllMetrics = in.readOptionalBoolean(); + final long longMetricsFlags = in.readLong(); + for (Metric metric : Metric.values()) { + if ((longMetricsFlags & (1 << metric.getIndex())) != 0) { + requestedMetrics.add(metric); + } + } + final long longIndexMetricFlags = in.readLong(); + for (IndexMetric indexMetric : IndexMetric.values()) { + if ((longIndexMetricFlags & (1 << indexMetric.getIndex())) != 0) { + indexMetricsRequested.add(indexMetric); + } + } + } } private Boolean useAggregatedNodeLevelResponses = false; @@ -73,12 +94,133 @@ public void useAggregatedNodeLevelResponses(boolean useAggregatedNodeLevelRespon this.useAggregatedNodeLevelResponses = useAggregatedNodeLevelResponses; } + public boolean computeAllMetrics() { + return computeAllMetrics; + } + + public void computeAllMetrics(boolean computeAllMetrics) { + this.computeAllMetrics = computeAllMetrics; + } + + /** + * Add Metric + */ + public ClusterStatsRequest addMetric(Metric metric) { + requestedMetrics.add(metric); + return this; + } + + /** + * Get the names of requested metrics + */ + public Set requestedMetrics() { + return new HashSet<>(requestedMetrics); + } + + /** + * Add IndexMetric + */ + public ClusterStatsRequest addIndexMetric(IndexMetric indexMetric) { + indexMetricsRequested.add(indexMetric); + return this; + } + + public Set indicesMetrics() { + return new HashSet<>(indexMetricsRequested); + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); if (out.getVersion().onOrAfter(Version.V_2_16_0)) { out.writeOptionalBoolean(useAggregatedNodeLevelResponses); } + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { + out.writeOptionalBoolean(computeAllMetrics); + long longMetricFlags = 0; + for (Metric metric : requestedMetrics) { + longMetricFlags |= (1 << metric.getIndex()); + } + out.writeLong(longMetricFlags); + long longIndexMetricFlags = 0; + for (IndexMetric indexMetric : indexMetricsRequested) { + longIndexMetricFlags |= (1 << indexMetric.getIndex()); + } + out.writeLong(longIndexMetricFlags); + } } + /** + * An enumeration of the "core" sections of metrics that may be requested + * from the cluster stats endpoint. + */ + @PublicApi(since = "2.18.0") + public enum Metric { + OS("os", 0), + JVM("jvm", 1), + FS("fs", 2), + PROCESS("process", 3), + INGEST("ingest", 4), + PLUGINS("plugins", 5), + NETWORK_TYPES("network_types", 6), + DISCOVERY_TYPES("discovery_types", 7), + PACKAGING_TYPES("packaging_types", 8), + INDICES("indices", 9); + + private String metricName; + + private int index; + + Metric(String name, int index) { + this.metricName = name; + this.index = index; + } + + public String metricName() { + return this.metricName; + } + + public int getIndex() { + return index; + } + + } + + /** + * An enumeration of the "core" sections of indices metrics that may be requested + * from the cluster stats endpoint. + * + * When no value is provided for param index_metric, default filter is set to _all. 
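As a worked illustration of the wire format added above (a sketch, not part of this change set): each requested Metric sets one bit in a single long, keyed by its getIndex() value, written immediately after the computeAllMetrics flag.

    // Hypothetical encoding: a request asking for OS, JVM and INDICES.
    long flags = 0L;
    flags |= (1L << Metric.OS.getIndex());      // bit 0
    flags |= (1L << Metric.JVM.getIndex());     // bit 1
    flags |= (1L << Metric.INDICES.getIndex()); // bit 9
    // flags == 0b10_0000_0011 == 515; the reading side rebuilds the same set of
    // metrics by testing each Metric.getIndex() bit against this long.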
+ */ + @PublicApi(since = "2.18.0") + public enum IndexMetric { + // Metrics computed from ShardStats + SHARDS("shards", 0), + DOCS("docs", 1), + STORE("store", 2), + FIELDDATA("fielddata", 3), + QUERY_CACHE("query_cache", 4), + COMPLETION("completion", 5), + SEGMENTS("segments", 6), + // Metrics computed from ClusterState + ANALYSIS("analysis", 7), + MAPPINGS("mappings", 8); + + private String metricName; + + private int index; + + IndexMetric(String name, int index) { + this.metricName = name; + this.index = index; + } + + public String metricName() { + return this.metricName; + } + + public int getIndex() { + return this.index; + } + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java index 4d0932bd3927d..34fd9ea06235e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsRequestBuilder.java @@ -36,6 +36,11 @@ import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; +import java.util.Set; + +import static org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.IndexMetric; +import static org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.Metric; + /** * Transport request builder for obtaining cluster stats * @@ -55,4 +60,19 @@ public final ClusterStatsRequestBuilder useAggregatedNodeLevelResponses(boolean request.useAggregatedNodeLevelResponses(useAggregatedNodeLevelResponses); return this; } + + public final ClusterStatsRequestBuilder computeAllMetrics(boolean applyMetricFiltering) { + request.computeAllMetrics(applyMetricFiltering); + return this; + } + + public final ClusterStatsRequestBuilder requestMetrics(Set requestMetrics) { + requestMetrics.forEach(request::addMetric); + return this; + } + + public final ClusterStatsRequestBuilder indexMetrics(Set indexMetrics) { + indexMetrics.forEach(request::addIndexMetric); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java index cc002b689a2a5..870996bb61b23 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -33,6 +33,8 @@ package org.opensearch.action.admin.cluster.stats; import org.opensearch.action.FailedNodeException; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.IndexMetric; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.Metric; import org.opensearch.action.support.nodes.BaseNodesResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; @@ -47,6 +49,7 @@ import java.io.IOException; import java.util.List; import java.util.Locale; +import java.util.Set; /** * Transport response for obtaining cluster stats @@ -88,12 +91,31 @@ public ClusterStatsResponse( List nodes, List failures, ClusterState state + ) { + this(timestamp, clusterUUID, clusterName, nodes, failures, state, Set.of(Metric.values()), Set.of(IndexMetric.values())); + } + + public ClusterStatsResponse( + long timestamp, + String clusterUUID, + ClusterName clusterName, + List nodes, + List failures, + ClusterState state, + Set requestedMetrics, 
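A minimal sketch of how the builder methods above are intended to be combined, assuming a `client` handle and an arbitrary choice of metrics (neither is taken from this change set):

    // Fetch only OS/JVM node stats plus shard and doc counts for indices,
    // skipping every other section of the cluster stats response.
    ClusterStatsResponse response = client.admin()
        .cluster()
        .prepareClusterStats()
        .computeAllMetrics(false)
        .requestMetrics(Set.of(Metric.OS, Metric.JVM, Metric.INDICES))
        .indexMetrics(Set.of(IndexMetric.SHARDS, IndexMetric.DOCS))
        .get();

Metric.INDICES is included here because, per the response construction further down in this diff, the index-level sections are only computed when that metric is requested.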
+ Set indicesMetrics ) { super(clusterName, nodes, failures); this.clusterUUID = clusterUUID; this.timestamp = timestamp; - nodesStats = new ClusterStatsNodes(nodes); - indicesStats = new ClusterStatsIndices(nodes, MappingStats.of(state), AnalysisStats.of(state)); + nodesStats = requestedMetrics.stream().anyMatch(ClusterStatsNodes.NODE_STATS_METRICS::contains) + ? new ClusterStatsNodes(requestedMetrics, nodes) + : null; + MappingStats mappingStats = indicesMetrics.contains(IndexMetric.MAPPINGS) ? MappingStats.of(state) : null; + AnalysisStats analysisStats = indicesMetrics.contains(IndexMetric.ANALYSIS) ? AnalysisStats.of(state) : null; + indicesStats = requestedMetrics.contains(Metric.INDICES) + ? new ClusterStatsIndices(indicesMetrics, nodes, mappingStats, analysisStats) + : null; ClusterHealthStatus status = null; for (ClusterStatsNodeResponse response : nodes) { // only the cluster-manager node populates the status @@ -131,8 +153,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(timestamp); out.writeOptionalWriteable(status); out.writeOptionalString(clusterUUID); - out.writeOptionalWriteable(indicesStats.getMappings()); - out.writeOptionalWriteable(indicesStats.getAnalysis()); + if (indicesStats != null) { + out.writeOptionalWriteable(indicesStats.getMappings()); + out.writeOptionalWriteable(indicesStats.getAnalysis()); + } else { + out.writeOptionalWriteable(null); + out.writeOptionalWriteable(null); + } } @Override @@ -153,12 +180,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (status != null) { builder.field("status", status.name().toLowerCase(Locale.ROOT)); } - builder.startObject("indices"); - indicesStats.toXContent(builder, params); - builder.endObject(); - builder.startObject("nodes"); - nodesStats.toXContent(builder, params); - builder.endObject(); + if (indicesStats != null) { + builder.startObject("indices"); + indicesStats.toXContent(builder, params); + builder.endObject(); + } + if (nodesStats != null) { + builder.startObject("nodes"); + nodesStats.toXContent(builder, params); + builder.endObject(); + } return builder; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index c4b3524cf6da5..c6581b99eb559 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -37,6 +37,7 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.Metric; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.ShardStats; @@ -63,7 +64,10 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; /** * Transport action for obtaining cluster state @@ -76,13 +80,19 @@ public class TransportClusterStatsAction extends TransportNodesAction< TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> { - private static final CommonStatsFlags SHARD_STATS_FLAGS = new 
CommonStatsFlags( + private static final Map SHARDS_STATS_FLAG_MAP_TO_INDEX_METRIC = Map.of( CommonStatsFlags.Flag.Docs, + ClusterStatsRequest.IndexMetric.DOCS, CommonStatsFlags.Flag.Store, + ClusterStatsRequest.IndexMetric.STORE, CommonStatsFlags.Flag.FieldData, + ClusterStatsRequest.IndexMetric.FIELDDATA, CommonStatsFlags.Flag.QueryCache, + ClusterStatsRequest.IndexMetric.QUERY_CACHE, CommonStatsFlags.Flag.Completion, - CommonStatsFlags.Flag.Segments + ClusterStatsRequest.IndexMetric.COMPLETION, + CommonStatsFlags.Flag.Segments, + ClusterStatsRequest.IndexMetric.SEGMENTS ); private final NodeService nodeService; @@ -124,14 +134,27 @@ protected ClusterStatsResponse newResponse( + " the cluster state that are too slow for a transport thread" ); ClusterState state = clusterService.state(); - return new ClusterStatsResponse( - System.currentTimeMillis(), - state.metadata().clusterUUID(), - clusterService.getClusterName(), - responses, - failures, - state - ); + if (request.computeAllMetrics()) { + return new ClusterStatsResponse( + System.currentTimeMillis(), + state.metadata().clusterUUID(), + clusterService.getClusterName(), + responses, + failures, + state + ); + } else { + return new ClusterStatsResponse( + System.currentTimeMillis(), + state.metadata().clusterUUID(), + clusterService.getClusterName(), + responses, + failures, + state, + request.requestedMetrics(), + request.indicesMetrics() + ); + } } @Override @@ -149,17 +172,17 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false, false, false); NodeStats nodeStats = nodeService.stats( CommonStatsFlags.NONE, - true, - true, - true, + isMetricRequired(Metric.OS, nodeRequest.request), + isMetricRequired(Metric.PROCESS, nodeRequest.request), + isMetricRequired(Metric.JVM, nodeRequest.request), false, - true, + isMetricRequired(Metric.FS, nodeRequest.request), false, false, false, false, false, - true, + isMetricRequired(Metric.INGEST, nodeRequest.request), false, false, false, @@ -178,33 +201,36 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false ); List shardsStats = new ArrayList<>(); - for (IndexService indexService : indicesService) { - for (IndexShard indexShard : indexService) { - if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { - // only report on fully started shards - CommitStats commitStats; - SeqNoStats seqNoStats; - RetentionLeaseStats retentionLeaseStats; - try { - commitStats = indexShard.commitStats(); - seqNoStats = indexShard.seqNoStats(); - retentionLeaseStats = indexShard.getRetentionLeaseStats(); - } catch (final AlreadyClosedException e) { - // shard is closed - no stats is fine - commitStats = null; - seqNoStats = null; - retentionLeaseStats = null; + if (isMetricRequired(Metric.INDICES, nodeRequest.request)) { + CommonStatsFlags commonStatsFlags = getCommonStatsFlags(nodeRequest); + for (IndexService indexService : indicesService) { + for (IndexShard indexShard : indexService) { + if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) { + // only report on fully started shards + CommitStats commitStats; + SeqNoStats seqNoStats; + RetentionLeaseStats retentionLeaseStats; + try { + commitStats = indexShard.commitStats(); + seqNoStats = indexShard.seqNoStats(); + retentionLeaseStats = indexShard.getRetentionLeaseStats(); + } catch (final AlreadyClosedException e) { + // shard is closed - no stats is 
fine + commitStats = null; + seqNoStats = null; + retentionLeaseStats = null; + } + shardsStats.add( + new ShardStats( + indexShard.routingEntry(), + indexShard.shardPath(), + new CommonStats(indicesService.getIndicesQueryCache(), indexShard, commonStatsFlags), + commitStats, + seqNoStats, + retentionLeaseStats + ) + ); } - shardsStats.add( - new ShardStats( - indexShard.routingEntry(), - indexShard.shardPath(), - new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), - commitStats, - seqNoStats, - retentionLeaseStats - ) - ); } } } @@ -224,6 +250,31 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq ); } + /** + * A metric is required when: all cluster stats are required (OR) if the metric is requested + * @param metric + * @param clusterStatsRequest + * @return + */ + private boolean isMetricRequired(Metric metric, ClusterStatsRequest clusterStatsRequest) { + return clusterStatsRequest.computeAllMetrics() || clusterStatsRequest.requestedMetrics().contains(metric); + } + + private static CommonStatsFlags getCommonStatsFlags(ClusterStatsNodeRequest nodeRequest) { + Set requestedCommonStatsFlags = new HashSet<>(); + if (nodeRequest.request.computeAllMetrics()) { + requestedCommonStatsFlags.addAll(SHARDS_STATS_FLAG_MAP_TO_INDEX_METRIC.keySet()); + } else { + for (Map.Entry entry : SHARDS_STATS_FLAG_MAP_TO_INDEX_METRIC + .entrySet()) { + if (nodeRequest.request.indicesMetrics().contains(entry.getValue())) { + requestedCommonStatsFlags.add(entry.getKey()); + } + } + } + return new CommonStatsFlags(requestedCommonStatsFlags.toArray(new CommonStatsFlags.Flag[0])); + } + /** * Inner Cluster Stats Node Request * diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java index ae989573b39ea..a5409e076730d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -45,6 +45,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -230,4 +231,8 @@ static final class Fields { public String toString() { return Strings.toString(MediaTypeRegistry.JSON, this, true, false); } + + public static IndicesStatsResponse getEmptyResponse() { + return new IndicesStatsResponse(new ShardStats[0], 0, 0, 0, Collections.emptyList()); + } } diff --git a/server/src/main/java/org/opensearch/action/pagination/PageParams.java b/server/src/main/java/org/opensearch/action/pagination/PageParams.java index 03de1aa465a15..6459893a8505f 100644 --- a/server/src/main/java/org/opensearch/action/pagination/PageParams.java +++ b/server/src/main/java/org/opensearch/action/pagination/PageParams.java @@ -20,7 +20,7 @@ * * Class specific to paginated queries, which will contain common query params required by a paginated API. 
*/ -@PublicApi(since = "3.0.0") +@PublicApi(since = "2.18.0") public class PageParams implements Writeable { public static final String PARAM_SORT = "sort"; diff --git a/server/src/main/java/org/opensearch/action/pagination/PaginationStrategy.java b/server/src/main/java/org/opensearch/action/pagination/PaginationStrategy.java index edb2489b1e4f0..8cc1ebdd2f194 100644 --- a/server/src/main/java/org/opensearch/action/pagination/PaginationStrategy.java +++ b/server/src/main/java/org/opensearch/action/pagination/PaginationStrategy.java @@ -44,8 +44,11 @@ public interface PaginationStrategy { List getRequestedEntities(); /** - * - * Utility method to get list of indices filtered as per {@param filterPredicate} and the sorted according to {@param comparator}. + * Utility method to get list of indices filtered and sorted as per the provided parameters. + * @param clusterState state consisting of all the indices to be filtered and sorted. + * @param filterPredicate predicate to be used for filtering out the required indices. + * @param comparator comparator to be used for sorting the already filtered list of indices. + * @return list of filtered and sorted IndexMetadata. */ static List getSortedIndexMetadata( final ClusterState clusterState, @@ -56,8 +59,10 @@ static List getSortedIndexMetadata( } /** - * - * Utility method to get list of indices sorted as per {@param comparator}. + * Utility method to get list of sorted indices. + * @param clusterState state consisting of indices to be sorted. + * @param comparator comparator to be used for sorting the list of indices. + * @return list of sorted IndexMetadata. */ static List getSortedIndexMetadata(final ClusterState clusterState, Comparator comparator) { return clusterState.metadata().indices().values().stream().sorted(comparator).collect(Collectors.toList()); diff --git a/server/src/main/java/org/opensearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/opensearch/action/resync/ResyncReplicationRequest.java index 6a4f2f0607144..a7d6c0abb8705 100644 --- a/server/src/main/java/org/opensearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/opensearch/action/resync/ResyncReplicationRequest.java @@ -103,7 +103,7 @@ public boolean equals(final Object o) { @Override public int hashCode() { - return Objects.hash(trimAboveSeqNo, maxSeenAutoIdTimestampOnPrimary, operations); + return Objects.hash(trimAboveSeqNo, maxSeenAutoIdTimestampOnPrimary, Arrays.hashCode(operations)); } @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 684a6b0c3eae5..6fee2037501e7 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -141,7 +141,8 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery "cluster.publish.timeout", TimeValue.timeValueMillis(30000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + Setting.Property.NodeScope, + Setting.Property.Dynamic ); private final Settings settings; @@ -164,7 +165,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final Random random; private final ElectionSchedulerFactory electionSchedulerFactory; private final SeedHostsResolver configuredHostsResolver; - private final TimeValue publishTimeout; + private TimeValue publishTimeout; private final TimeValue 
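A brief illustration of why the hashCode change above matters (hypothetical values, assuming java.util.Objects and java.util.Arrays): Objects.hash uses each argument's own hashCode, and Java arrays hash by identity, so two requests with element-wise equal operation arrays could previously produce different hash codes even though equals() compares array contents.

    long[] ops1 = { 1L, 2L };
    long[] ops2 = { 1L, 2L };
    // Identity-based: practically always false, despite element-wise equality.
    boolean before = Objects.hash(ops1) == Objects.hash(ops2);
    // Content-based: true, consistent with an equals() that compares contents.
    boolean after = Arrays.hashCode(ops1) == Arrays.hashCode(ops2);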
publishInfoTimeout; private final PublicationTransportHandler publicationHandler; private final LeaderChecker leaderChecker; @@ -247,6 +248,7 @@ public Coordinator( this.lastJoin = Optional.empty(); this.joinAccumulator = new InitialJoinAccumulator(); this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout); this.publishInfoTimeout = PUBLISH_INFO_TIMEOUT_SETTING.get(settings); this.random = random; this.electionSchedulerFactory = new ElectionSchedulerFactory(settings, random, transportService.getThreadPool()); @@ -301,6 +303,7 @@ public Coordinator( ); this.lagDetector = new LagDetector( settings, + clusterSettings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), transportService::getLocalNode @@ -319,6 +322,10 @@ public Coordinator( this.clusterSettings = clusterSettings; } + private void setPublishTimeout(TimeValue publishTimeout) { + this.publishTimeout = publishTimeout; + } + private ClusterFormationState getClusterFormationState() { return new ClusterFormationState( settings, @@ -1669,7 +1676,6 @@ public void onNodeAck(DiscoveryNode node, Exception e) { this.localNodeAckEvent = localNodeAckEvent; this.ackListener = ackListener; this.publishListener = publishListener; - this.timeoutHandler = singleNodeDiscovery ? null : transportService.getThreadPool().schedule(new Runnable() { @Override public void run() { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java b/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java index 828db5864d28b..1cc88c71c609b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ElectionSchedulerFactory.java @@ -214,7 +214,7 @@ protected void doRun() { if (isClosed.get()) { logger.debug("{} not starting election", this); } else { - logger.debug("{} starting election", this); + logger.debug("{} starting election with duration {}", this, duration); scheduleNextElection(duration, scheduledRunnable); scheduledRunnable.run(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index 2ec0dabd91786..ca414ef7c4fc8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -92,7 +92,8 @@ public class FollowersChecker { "cluster.fault_detection.follower_check.interval", TimeValue.timeValueMillis(1000), TimeValue.timeValueMillis(100), - Setting.Property.NodeScope + Setting.Property.NodeScope, + Setting.Property.Dynamic ); // the timeout for each check sent to each node @@ -100,7 +101,7 @@ public class FollowersChecker { "cluster.fault_detection.follower_check.timeout", TimeValue.timeValueMillis(10000), TimeValue.timeValueMillis(1), - TimeValue.timeValueMillis(60000), + TimeValue.timeValueMillis(150000), Setting.Property.NodeScope, Setting.Property.Dynamic ); @@ -115,7 +116,7 @@ public class FollowersChecker { private final Settings settings; - private final TimeValue followerCheckInterval; + private TimeValue followerCheckInterval; private TimeValue followerCheckTimeout; private final int followerCheckRetryCount; private final BiConsumer onNodeFailure; @@ -148,6 +149,7 @@ public FollowersChecker( followerCheckInterval = 
FOLLOWER_CHECK_INTERVAL_SETTING.get(settings); followerCheckTimeout = FOLLOWER_CHECK_TIMEOUT_SETTING.get(settings); followerCheckRetryCount = FOLLOWER_CHECK_RETRY_COUNT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_INTERVAL_SETTING, this::setFollowerCheckInterval); clusterSettings.addSettingsUpdateConsumer(FOLLOWER_CHECK_TIMEOUT_SETTING, this::setFollowerCheckTimeout); updateFastResponseState(0, Mode.CANDIDATE); transportService.registerRequestHandler( @@ -167,6 +169,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti this.clusterManagerMetrics = clusterManagerMetrics; } + private void setFollowerCheckInterval(TimeValue followerCheckInterval) { + this.followerCheckInterval = followerCheckInterval; + } + private void setFollowerCheckTimeout(TimeValue followerCheckTimeout) { this.followerCheckTimeout = followerCheckTimeout; } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java index eeb0800663d0a..969c121dc87cf 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LagDetector.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -68,10 +69,11 @@ public class LagDetector { "cluster.follower_lag.timeout", TimeValue.timeValueMillis(90000), TimeValue.timeValueMillis(1), - Setting.Property.NodeScope + Setting.Property.NodeScope, + Setting.Property.Dynamic ); - private final TimeValue clusterStateApplicationTimeout; + private TimeValue clusterStateApplicationTimeout; private final Consumer onLagDetected; private final Supplier localNodeSupplier; private final ThreadPool threadPool; @@ -79,12 +81,14 @@ public class LagDetector { public LagDetector( final Settings settings, + final ClusterSettings clusterSettings, final ThreadPool threadPool, final Consumer onLagDetected, final Supplier localNodeSupplier ) { this.threadPool = threadPool; this.clusterStateApplicationTimeout = CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING, this::setFollowerLagTimeout); this.onLagDetected = onLagDetected; this.localNodeSupplier = localNodeSupplier; } @@ -136,6 +140,10 @@ public String toString() { } } + private void setFollowerLagTimeout(TimeValue followerCheckLagTimeout) { + this.clusterStateApplicationTimeout = followerCheckLagTimeout; + } + @Override public String toString() { return "LagDetector{" diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index d30efde52bffb..c4cb484cda693 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -367,7 +367,6 @@ public PublicationContext newPublicationContext( } private boolean validateRemotePublicationConfiguredOnAllNodes(DiscoveryNodes discoveryNodes) { - assert 
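Because the publish, follower-check and lag-detection settings above are now registered with Setting.Property.Dynamic, they can be tuned on a running cluster. A sketch of such an update, with arbitrary example values and an assumed `client` handle:

    // Raise publication and fault-detection timeouts at runtime on a large cluster.
    ClusterUpdateSettingsRequest update = new ClusterUpdateSettingsRequest();
    update.persistentSettings(
        Settings.builder()
            .put("cluster.publish.timeout", "60s")
            .put("cluster.fault_detection.follower_check.interval", "2s")
            .put("cluster.follower_lag.timeout", "120s")
    );
    client.admin().cluster().updateSettings(update).actionGet();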
ClusterMetadataManifest.getCodecForVersion(discoveryNodes.getMinNodeVersion()) >= ClusterMetadataManifest.CODEC_V0; for (DiscoveryNode node : discoveryNodes.getNodes().values()) { // if a node is non-remote then created local publication context if (node.isRemoteStatePublicationEnabled() == false) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 713f8c9fc332c..c8ea5442a0dd0 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -300,7 +300,8 @@ public Iterator> settings() { } }, - Property.IndexScope + Property.IndexScope, + Property.NotCopyableOnResize ); /** diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index abda5dad25e4e..11df35527eea7 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -626,14 +626,9 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( final boolean isHiddenAfterTemplates = IndexMetadata.INDEX_HIDDEN_SETTING.get(aggregatedIndexSettings); final boolean isSystem = validateDotIndex(request.index(), isHiddenAfterTemplates); - // remove the setting it's temporary and is only relevant once we create the index - final Settings.Builder settingsBuilder = Settings.builder().put(aggregatedIndexSettings); - settingsBuilder.remove(IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey()); - final Settings indexSettings = settingsBuilder.build(); - final IndexMetadata.Builder tmpImdBuilder = IndexMetadata.builder(request.index()); tmpImdBuilder.setRoutingNumShards(routingNumShards); - tmpImdBuilder.settings(indexSettings); + tmpImdBuilder.settings(aggregatedIndexSettings); tmpImdBuilder.system(isSystem); addRemoteStoreCustomMetadata(tmpImdBuilder, true); diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index 9cc3bb21e2d12..b4592659bb70f 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -573,6 +573,17 @@ private Builder initializeAsRestore( ); } } + for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned( + shardId, + false, + true, + PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled + unassignedInfo + ) + ); + } shards.put(shardNumber, indexShardRoutingBuilder.build()); } return this; diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index f5da6df2689bd..1e2f5612ca002 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -753,12 +753,8 @@ public void setLastAcceptedState(ClusterState clusterState) { } try { final RemoteClusterStateManifestInfo manifestDetails; - // Decide the codec version - int codecVersion = ClusterMetadataManifest.getCodecForVersion(clusterState.nodes().getMinNodeVersion()); - assert codecVersion >= 0 : codecVersion; - 
logger.info("codec version is {}", codecVersion); - if (shouldWriteFullClusterState(clusterState, codecVersion)) { + if (shouldWriteFullClusterState(clusterState)) { final Optional latestManifest = remoteClusterStateService.getLatestClusterMetadataManifest( clusterState.getClusterName().value(), clusterState.metadata().clusterUUID() @@ -775,7 +771,7 @@ public void setLastAcceptedState(ClusterState clusterState) { clusterState.metadata().clusterUUID() ); } - manifestDetails = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID, codecVersion); + manifestDetails = remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID); } else { assert verifyManifestAndClusterState(lastAcceptedManifest, lastAcceptedState) == true : "Previous manifest and previous ClusterState are not in sync"; @@ -820,13 +816,11 @@ private boolean verifyManifestAndClusterState(ClusterMetadataManifest manifest, return true; } - private boolean shouldWriteFullClusterState(ClusterState clusterState, int codecVersion) { - assert lastAcceptedManifest == null || lastAcceptedManifest.getCodecVersion() <= codecVersion; + private boolean shouldWriteFullClusterState(ClusterState clusterState) { if (lastAcceptedState == null || lastAcceptedManifest == null || (remoteClusterStateService.isRemotePublicationEnabled() == false && lastAcceptedState.term() != clusterState.term()) - || lastAcceptedManifest.getOpensearchVersion() != Version.CURRENT - || lastAcceptedManifest.getCodecVersion() != codecVersion) { + || lastAcceptedManifest.getOpensearchVersion() != Version.CURRENT) { return true; } return false; diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index d7c0a66ba3424..9c38ea1df8a41 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -72,7 +72,7 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { public static final String ALLOCATOR_NAME = "shards_batch_gateway_allocator"; private static final Logger logger = LogManager.getLogger(ShardsBatchGatewayAllocator.class); - private final long maxBatchSize; + private long maxBatchSize; private static final short DEFAULT_SHARD_BATCH_SIZE = 2000; public static final String PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING_KEY = @@ -93,7 +93,8 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { DEFAULT_SHARD_BATCH_SIZE, 1, 10000, - Setting.Property.NodeScope + Setting.Property.NodeScope, + Setting.Property.Dynamic ); /** @@ -172,6 +173,7 @@ public ShardsBatchGatewayAllocator( this.batchStartedAction = batchStartedAction; this.batchStoreAction = batchStoreAction; this.maxBatchSize = GATEWAY_ALLOCATOR_BATCH_SIZE.get(settings); + clusterSettings.addSettingsUpdateConsumer(GATEWAY_ALLOCATOR_BATCH_SIZE, this::setMaxBatchSize); this.primaryShardsBatchGatewayAllocatorTimeout = PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setPrimaryBatchAllocatorTimeout); this.replicaShardsBatchGatewayAllocatorTimeout = REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); @@ -402,6 +404,7 @@ else if (shardRouting.primary() == primary) { Iterator iterator = newShardsToBatch.values().iterator(); assert maxBatchSize > 0 : "Shards batch size must be greater than 0"; + logger.debug("Using async 
fetch batch size {}", maxBatchSize); long batchSize = maxBatchSize; Map perBatchShards = new HashMap<>(); while (iterator.hasNext()) { @@ -906,6 +909,10 @@ public int getNumberOfStoreShardBatches() { return batchIdToStoreShardBatch.size(); } + private void setMaxBatchSize(long maxBatchSize) { + this.maxBatchSize = maxBatchSize; + } + protected void setPrimaryBatchAllocatorTimeout(TimeValue primaryShardsBatchGatewayAllocatorTimeout) { this.primaryShardsBatchGatewayAllocatorTimeout = primaryShardsBatchGatewayAllocatorTimeout; } diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 0cd2025b98783..dc41189afc3cb 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -301,8 +301,7 @@ public RemoteClusterStateService( * @return A manifest object which contains the details of uploaded entity metadata. */ @Nullable - public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterState, String previousClusterUUID, int codecVersion) - throws IOException { + public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterState, String previousClusterUUID) throws IOException { final long startTimeNanos = relativeTimeNanosSupplier.getAsLong(); if (clusterState.nodes().isLocalNodeElectedClusterManager() == false) { logger.error("Local node is not elected cluster manager. Exiting"); @@ -342,8 +341,7 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) ? new ClusterStateChecksum(clusterState, threadpool) : null, - false, - codecVersion + false ); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); @@ -551,8 +549,7 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) ? new ClusterStateChecksum(clusterState, threadpool) : null, - false, - previousManifest.getCodecVersion() + false ); final long durationMillis = TimeValue.nsecToMSec(relativeTimeNanosSupplier.getAsLong() - startTimeNanos); @@ -1024,8 +1021,7 @@ public RemoteClusterStateManifestInfo markLastStateAsCommitted( !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) ? 
new ClusterStateChecksum(clusterState, threadpool) : null, - true, - previousManifest.getCodecVersion() + true ); if (!previousManifest.isClusterUUIDCommitted() && committedManifestDetails.getClusterMetadataManifest().isClusterUUIDCommitted()) { remoteClusterStateCleanupManager.deleteStaleClusterUUIDs(clusterState, committedManifestDetails.getClusterMetadataManifest()); diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java index b243269fe323e..20e14ff805ca8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteManifestManager.java @@ -100,8 +100,7 @@ RemoteClusterStateManifestInfo uploadManifest( String previousClusterUUID, ClusterStateDiffManifest clusterDiffManifest, ClusterStateChecksum clusterStateChecksum, - boolean committed, - int codecVersion + boolean committed ) { synchronized (this) { ClusterMetadataManifest.Builder manifestBuilder = ClusterMetadataManifest.builder(); @@ -112,7 +111,7 @@ RemoteClusterStateManifestInfo uploadManifest( .opensearchVersion(Version.CURRENT) .nodeId(nodeId) .committed(committed) - .codecVersion(codecVersion) + .codecVersion(ClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION) .indices(uploadedMetadataResult.uploadedIndexMetadata) .previousClusterUUID(previousClusterUUID) .clusterUUIDCommitted(clusterState.metadata().clusterUUIDCommitted()) diff --git a/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java b/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java index d844e5cbb8897..8ca3157d9f775 100644 --- a/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java +++ b/server/src/main/java/org/opensearch/index/cache/query/QueryCacheStats.java @@ -77,6 +77,9 @@ public QueryCacheStats(long ramBytesUsed, long hitCount, long missCount, long ca } public void add(QueryCacheStats stats) { + if (stats == null) { + return; + } ramBytesUsed += stats.ramBytesUsed; hitCount += stats.hitCount; missCount += stats.missCount; diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java index bb8a07d856d87..637d3250fda3f 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java @@ -185,7 +185,13 @@ public Composite912DocValuesReader(DocValuesProducer producer, SegmentReadState // populates the dummy list of field infos to fetch doc id set iterators for respective fields. 
// the dummy field info is used to fetch the doc id set iterators for respective fields based on field name FieldInfos fieldInfos = new FieldInfos(getFieldInfoList(fields)); - this.readState = new SegmentReadState(readState.directory, readState.segmentInfo, fieldInfos, readState.context); + this.readState = new SegmentReadState( + readState.directory, + readState.segmentInfo, + fieldInfos, + readState.context, + readState.segmentSuffix + ); // initialize star-tree doc values producer diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java index 2225870afae8e..dd35091dece2f 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesWriter.java @@ -12,6 +12,7 @@ import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.DocValuesProducerUtil; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; @@ -35,7 +36,6 @@ import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.StarTreeMapper; import java.io.IOException; import java.util.ArrayList; @@ -221,12 +221,8 @@ private void createCompositeIndicesIfPossible(DocValuesProducer valuesProducer, } // we have all the required fields to build composite fields if (compositeFieldSet.isEmpty()) { - for (CompositeMappedFieldType mappedType : compositeMappedFieldTypes) { - if (mappedType instanceof StarTreeMapper.StarTreeFieldType) { - try (StarTreesBuilder starTreesBuilder = new StarTreesBuilder(state, mapperService, fieldNumberAcrossCompositeFields)) { - starTreesBuilder.build(metaOut, dataOut, fieldProducerMap, compositeDocValuesConsumer); - } - } + try (StarTreesBuilder starTreesBuilder = new StarTreesBuilder(state, mapperService, fieldNumberAcrossCompositeFields)) { + starTreesBuilder.build(metaOut, dataOut, fieldProducerMap, compositeDocValuesConsumer); } } } @@ -285,9 +281,20 @@ private void mergeStarTreeFields(MergeState mergeState) throws IOException { if (mergeState.docValuesProducers[i] instanceof CompositeIndexReader) { reader = (CompositeIndexReader) mergeState.docValuesProducers[i]; } else { - continue; + Set docValuesProducers = DocValuesProducerUtil.getSegmentDocValuesProducers( + mergeState.docValuesProducers[i] + ); + for (DocValuesProducer docValuesProducer : docValuesProducers) { + if (docValuesProducer instanceof CompositeIndexReader) { + reader = (CompositeIndexReader) docValuesProducer; + List compositeFieldInfo = reader.getCompositeIndexFields(); + if (compositeFieldInfo.isEmpty() == false) { + break; + } + } + } } - + if (reader == null) continue; List compositeFieldInfo = reader.getCompositeIndexFields(); for (CompositeIndexFieldInfo fieldInfo : compositeFieldInfo) { if (fieldInfo.getType().equals(CompositeMappedFieldType.CompositeFieldType.STAR_TREE)) { @@ -295,17 +302,6 @@ private void mergeStarTreeFields(MergeState mergeState) throws IOException { if (compositeIndexValues instanceof StarTreeValues) { StarTreeValues starTreeValues = (StarTreeValues) 
compositeIndexValues; List fieldsList = starTreeSubsPerField.getOrDefault(fieldInfo.getField(), new ArrayList<>()); - if (starTreeField == null) { - starTreeField = starTreeValues.getStarTreeField(); - } - // assert star tree configuration is same across segments - else { - if (starTreeField.equals(starTreeValues.getStarTreeField()) == false) { - throw new IllegalArgumentException( - "star tree field configuration must match the configuration of the field being merged" - ); - } - } fieldsList.add(starTreeValues); starTreeSubsPerField.put(fieldInfo.getField(), fieldsList); } @@ -340,7 +336,8 @@ private static SegmentWriteState getSegmentWriteState(SegmentWriteState segmentW segmentInfo, segmentWriteState.fieldInfos, segmentWriteState.segUpdates, - segmentWriteState.context + segmentWriteState.context, + segmentWriteState.segmentSuffix ); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java index ee6d5b4680c73..8feb9ccd27dbd 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java @@ -8,6 +8,7 @@ package org.opensearch.index.compositeindex.datacube; +import org.apache.lucene.index.DocValuesType; import org.opensearch.common.Rounding; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.time.DateUtils; @@ -169,4 +170,8 @@ public int compare(DateTimeUnitRounding unit1, DateTimeUnitRounding unit2) { public static List getSortedDateTimeUnits(List dateTimeUnits) { return dateTimeUnits.stream().sorted(new DateTimeUnitComparator()).collect(Collectors.toList()); } + + public DocValuesType getDocValuesType() { + return DocValuesType.SORTED_NUMERIC; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java index cfa8d3a2a8164..3d71b38881693 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java @@ -8,6 +8,7 @@ package org.opensearch.index.compositeindex.datacube; +import org.apache.lucene.index.DocValuesType; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.xcontent.ToXContent; @@ -42,4 +43,6 @@ public interface Dimension extends ToXContent { * Returns the list of dimension fields that represent the dimension */ List getSubDimensionNames(); + + DocValuesType getDocValuesType(); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java index acc14f5f05c68..f1d1b15337f4a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/NumericDimension.java @@ -8,6 +8,7 @@ package org.opensearch.index.compositeindex.datacube; +import org.apache.lucene.index.DocValuesType; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.CompositeDataCubeFieldType; @@ -71,4 +72,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(field); } + + @Override + public DocValuesType 
getDocValuesType() { + return DocValuesType.SORTED_NUMERIC; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java index be3667f10b6da..0e2ec086abc0a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java @@ -8,6 +8,7 @@ package org.opensearch.index.compositeindex.datacube; +import org.apache.lucene.index.DocValuesType; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.mapper.CompositeDataCubeFieldType; @@ -69,4 +70,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(field); } + + @Override + public DocValuesType getDocValuesType() { + return DocValuesType.SORTED_NUMERIC; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index c3ea04d52e892..3054e8e66b601 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -735,7 +735,6 @@ private SequentialDocValuesIterator getIteratorForNumericField( * @throws IOException throws an exception if we are unable to add the doc */ private void appendToStarTree(StarTreeDocument starTreeDocument) throws IOException { - appendStarTreeDocument(starTreeDocument); numStarTreeDocs++; } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java new file mode 100644 index 0000000000000..e538be5d5bece --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java @@ -0,0 +1,248 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.utils; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; +import org.opensearch.search.aggregations.support.ValuesSource; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeFilter; +import org.opensearch.search.startree.StarTreeQueryContext; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +/** + * Helper class for building star-tree query + * + * @opensearch.internal + * @opensearch.experimental + */ +public class StarTreeQueryHelper { + + /** + * Checks if the search context can be supported by star-tree + */ + public static boolean isStarTreeSupported(SearchContext context) { + return context.aggregations() != null && context.mapperService().isCompositeIndexPresent() && context.parsedPostFilter() == null; + } + + /** + * Gets StarTreeQueryContext from the search context and source builder. + * Returns null if the query and aggregation cannot be supported. + */ + public static StarTreeQueryContext getStarTreeQueryContext(SearchContext context, SearchSourceBuilder source) throws IOException { + // Current implementation assumes only single star-tree is supported + CompositeDataCubeFieldType compositeMappedFieldType = (CompositeDataCubeFieldType) context.mapperService() + .getCompositeFieldTypes() + .iterator() + .next(); + CompositeIndexFieldInfo starTree = new CompositeIndexFieldInfo( + compositeMappedFieldType.name(), + compositeMappedFieldType.getCompositeIndexType() + ); + + for (AggregatorFactory aggregatorFactory : context.aggregations().factories().getFactories()) { + MetricStat metricStat = validateStarTreeMetricSupport(compositeMappedFieldType, aggregatorFactory); + if (metricStat == null) { + return null; + } + } + + // need to cache star tree values only for multiple aggregations + boolean cacheStarTreeValues = context.aggregations().factories().getFactories().length > 1; + int cacheSize = cacheStarTreeValues ? 
context.indexShard().segments(false).size() : -1; + + return StarTreeQueryHelper.tryCreateStarTreeQueryContext(starTree, compositeMappedFieldType, source.query(), cacheSize); + } + + /** + * Uses query builder and composite index info to form star-tree query context + */ + private static StarTreeQueryContext tryCreateStarTreeQueryContext( + CompositeIndexFieldInfo compositeIndexFieldInfo, + CompositeDataCubeFieldType compositeFieldType, + QueryBuilder queryBuilder, + int cacheStarTreeValuesSize + ) { + Map queryMap; + if (queryBuilder == null || queryBuilder instanceof MatchAllQueryBuilder) { + queryMap = null; + } else if (queryBuilder instanceof TermQueryBuilder) { + // TODO: Add support for keyword fields + if (compositeFieldType.getDimensions().stream().anyMatch(d -> d.getDocValuesType() != DocValuesType.SORTED_NUMERIC)) { + // return null for non-numeric fields + return null; + } + + List supportedDimensions = compositeFieldType.getDimensions() + .stream() + .map(Dimension::getField) + .collect(Collectors.toList()); + queryMap = getStarTreePredicates(queryBuilder, supportedDimensions); + if (queryMap == null) { + return null; + } + } else { + return null; + } + return new StarTreeQueryContext(compositeIndexFieldInfo, queryMap, cacheStarTreeValuesSize); + } + + /** + * Parse query body to star-tree predicates + * @param queryBuilder to match star-tree supported query shape + * @return predicates to match + */ + private static Map getStarTreePredicates(QueryBuilder queryBuilder, List supportedDimensions) { + TermQueryBuilder tq = (TermQueryBuilder) queryBuilder; + String field = tq.fieldName(); + if (!supportedDimensions.contains(field)) { + return null; + } + long inputQueryVal = Long.parseLong(tq.value().toString()); + + // Create a map with the field and the value + Map predicateMap = new HashMap<>(); + predicateMap.put(field, inputQueryVal); + return predicateMap; + } + + private static MetricStat validateStarTreeMetricSupport( + CompositeDataCubeFieldType compositeIndexFieldInfo, + AggregatorFactory aggregatorFactory + ) { + if (aggregatorFactory instanceof MetricAggregatorFactory && aggregatorFactory.getSubFactories().getFactories().length == 0) { + String field; + Map> supportedMetrics = compositeIndexFieldInfo.getMetrics() + .stream() + .collect(Collectors.toMap(Metric::getField, Metric::getMetrics)); + + MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat(); + field = ((MetricAggregatorFactory) aggregatorFactory).getField(); + + if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { + return metricStat; + } + } + return null; + } + + public static CompositeIndexFieldInfo getSupportedStarTree(SearchContext context) { + StarTreeQueryContext starTreeQueryContext = context.getStarTreeQueryContext(); + return (starTreeQueryContext != null) ? 
starTreeQueryContext.getStarTree() : null; + } + + public static StarTreeValues getStarTreeValues(LeafReaderContext context, CompositeIndexFieldInfo starTree) throws IOException { + SegmentReader reader = Lucene.segmentReader(context.reader()); + if (!(reader.getDocValuesReader() instanceof CompositeIndexReader)) { + return null; + } + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + return (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(starTree); + } + + /** + * Get the star-tree leaf collector + * This collector computes the aggregation prematurely and invokes an early termination collector + */ + public static LeafBucketCollector getStarTreeLeafCollector( + SearchContext context, + ValuesSource.Numeric valuesSource, + LeafReaderContext ctx, + LeafBucketCollector sub, + CompositeIndexFieldInfo starTree, + String metric, + Consumer valueConsumer, + Runnable finalConsumer + ) throws IOException { + StarTreeValues starTreeValues = getStarTreeValues(ctx, starTree); + assert starTreeValues != null; + String fieldName = ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(); + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues(starTree.getField(), fieldName, metric); + + assert starTreeValues != null; + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator( + metricName + ); + // Obtain a FixedBitSet of matched star tree document IDs + FixedBitSet filteredValues = getStarTreeFilteredValues(context, ctx, starTreeValues); + assert filteredValues != null; + + int numBits = filteredValues.length(); // Get the number of the filtered values (matching docs) + if (numBits > 0) { + // Iterate over the filtered values + for (int bit = filteredValues.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? 
filteredValues.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + // Advance to the entryId in the valuesIterator + if (valuesIterator.advanceExact(bit) == false) { + continue; // Skip if no more entries + } + + // Iterate over the values for the current entryId + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + long value = valuesIterator.nextValue(); + valueConsumer.accept(value); // Apply the consumer operation (e.g., max, sum) + } + } + } + + // Call the final consumer after processing all entries + finalConsumer.run(); + + // Return a LeafBucketCollector that terminates collection + return new LeafBucketCollectorBase(sub, valuesSource.doubleValues(ctx)) { + @Override + public void collect(int doc, long bucket) { + throw new CollectionTerminatedException(); + } + }; + } + + /** + * Get the filtered values for the star-tree query + * Cache the results in case of multiple aggregations (if cache is initialized) + * @return FixedBitSet of matched document IDs + */ + public static FixedBitSet getStarTreeFilteredValues(SearchContext context, LeafReaderContext ctx, StarTreeValues starTreeValues) + throws IOException { + FixedBitSet result = context.getStarTreeQueryContext().getStarTreeValues(ctx); + if (result == null) { + result = StarTreeFilter.getStarTreeResult(starTreeValues, context.getStarTreeQueryContext().getQueryMap()); + context.getStarTreeQueryContext().setStarTreeValues(ctx, result); + } + return result; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java index 27afdf1479b4e..4b4bfa6a915eb 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java @@ -29,4 +29,12 @@ public SortedNumericStarTreeValuesIterator(DocIdSetIterator docIdSetIterator) { public long nextValue() throws IOException { return ((SortedNumericDocValues) docIdSetIterator).nextValue(); } + + public int entryValueCount() throws IOException { + return ((SortedNumericDocValues) docIdSetIterator).docValueCount(); + } + + public boolean advanceExact(int target) throws IOException { + return ((SortedNumericDocValues) docIdSetIterator).advanceExact(target); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java b/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java index 85b435e969bfa..1bd81c840a3d4 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java +++ b/server/src/main/java/org/opensearch/index/fielddata/FieldDataStats.java @@ -80,6 +80,9 @@ public FieldDataStats(long memorySize, long evictions, @Nullable FieldMemoryStat } public void add(FieldDataStats stats) { + if (stats == null) { + return; + } this.memorySize += stats.memorySize; this.evictions += stats.evictions; if (stats.fields != null) { diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 27a78dc3ce2f6..d51fe0643575e 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ 
b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -39,7 +39,6 @@ import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; -import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; import org.opensearch.threadpool.ThreadPool; @@ -862,9 +861,11 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); + Set pinnedTimestamps = new HashSet<>(pinnedTimestampsState.v2()); + pinnedTimestamps.add(pinnedTimestampsState.v1()); Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( sortedMetadataFileList, - pinnedTimestampsState.v2(), + pinnedTimestamps, metadataFilePinnedTimestampMap, MetadataFilenameUtils::getTimestamp, MetadataFilenameUtils::getNodeIdByPrimaryTermAndGen @@ -897,11 +898,6 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException return; } - // If pinned timestamps are enabled, make sure to not delete last metadata file. - if (RemoteStoreSettings.isPinnedTimestampsEnabled()) { - metadataFilesEligibleToDelete.remove(sortedMetadataFileList.get(0)); - } - List metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream() .filter(metadataFile -> allLockFiles.contains(metadataFile) == false) .collect(Collectors.toList()); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 54cbf8ac9a9f8..99153324b8372 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -189,7 +189,7 @@ public void onResponse(List blobMetadata) { List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles, indexDeleted); // If index is not deleted, make sure to keep latest metadata file - if (indexDeleted == false || RemoteStoreSettings.isPinnedTimestampsEnabled()) { + if (indexDeleted == false) { metadataFilesToBeDeleted.remove(metadataFiles.get(0)); } @@ -345,9 +345,11 @@ protected static List getMetadataFilesToBeDeleted( ); // Get md files matching pinned timestamps + Set pinnedTimestamps = new HashSet<>(pinnedTimestampsState.v2()); + pinnedTimestamps.add(pinnedTimestampsState.v1()); Set implicitLockedFiles = RemoteStoreUtils.getPinnedTimestampLockedFiles( metadataFilesToBeDeleted, - pinnedTimestampsState.v2(), + pinnedTimestamps, metadataFilePinnedTimestampMap, file -> RemoteStoreUtils.invertLong(file.split(METADATA_SEPARATOR)[3]), TranslogTransferMetadata::getNodeIdByPrimaryTermAndGen diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java index a17779810239a..af37594f88fee 100644 --- a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -47,6 +47,8 @@ public PrimaryShardReplicationSource( RecoverySettings recoverySettings, DiscoveryNode 
sourceNode ) { + assert targetNode != null : "Target node must be set"; + assert sourceNode != null : "Source node must be set"; this.targetAllocationId = targetAllocationId; this.transportService = transportService; this.sourceNode = sourceNode; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 81eb38757aebe..2b6723512abf2 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -53,6 +53,10 @@ public SegmentReplicationSource get(IndexShard shard) { private DiscoveryNode getPrimaryNode(ShardId shardId) { ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard(); - return clusterService.state().nodes().get(primaryShard.currentNodeId()); + DiscoveryNode node = clusterService.state().nodes().get(primaryShard.currentNodeId()); + if (node == null) { + throw new IllegalStateException("Cannot replicate, primary shard for " + shardId + " is not allocated on any node"); + } + return node; } } diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java index d52b37f9a7bd6..b1b6259e4ca18 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -287,6 +287,6 @@ public boolean equals(Object o) { public String toString() { StringBuilder sb = new StringBuilder(); sb.append('{').append(this.repositoriesMetadata).append('}'); - return super.toString(); + return sb.toString(); } } diff --git a/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java b/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java new file mode 100644 index 0000000000000..e3771f224a7db --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/DefaultSecureTransportParameters.java @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.settings.Settings; + +/** + * Default implementation of {@link SecureTransportSettingsProvider.SecureTransportParameters}. 
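The new parameters(Settings) hook returns these values dynamically instead of forcing callers to re-read static settings. A possible consumer looks roughly like the sketch below (illustrative only; the surrounding transport code and variable names are assumptions, only the provider API itself comes from this patch):

// Hypothetical caller: ask the provider for dynamic parameters and fall back to "disabled".
boolean dualModeEnabled = secureTransportSettingsProvider.parameters(settings)
    .map(SecureTransportSettingsProvider.SecureTransportParameters::dualModeEnabled)
    .orElse(false);
if (dualModeEnabled) {
    // accept both TLS and plaintext connections while the cluster migrates (assumed behaviour)
}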
+ */ +class DefaultSecureTransportParameters implements SecureTransportSettingsProvider.SecureTransportParameters { + private final Settings settings; + + DefaultSecureTransportParameters(Settings settings) { + this.settings = settings; + } + + @Override + public boolean dualModeEnabled() { + return NetworkModule.TRANSPORT_SSL_DUAL_MODE_ENABLED.get(settings); + } +} diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java index 5b7402a01f82d..5f9e1a952b6e8 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java @@ -36,6 +36,24 @@ default Collection> getTransportAdapterProvi return Collections.emptyList(); } + /** + * Returns parameters that can be dynamically provided by a plugin providing a {@link SecureTransportSettingsProvider} + * implementation + * @param settings settings + * @return an instance of {@link SecureTransportParameters} + */ + default Optional parameters(Settings settings) { + return Optional.of(new DefaultSecureTransportParameters(settings)); + } + + /** + * Dynamic parameters that can be provided by the {@link SecureTransportSettingsProvider} + */ + @ExperimentalApi + interface SecureTransportParameters { + boolean dualModeEnabled(); + } + /** * If supported, builds the {@link TransportExceptionHandler} instance for {@link Transport} instance * @param settings settings diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 7da52147661dc..9aec81536dbd0 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -81,6 +81,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -729,7 +730,7 @@ public static void validateRepositoryMetadataSettings( + " in the name as this delimiter is used to create pinning entity" ); } - if (repositoryWithShallowV2Exists(repositories)) { + if (repositoryWithShallowV2Exists(repositories, repositoryName)) { throw new RepositoryException( repositoryName, "setting " @@ -763,8 +764,13 @@ public static void validateRepositoryMetadataSettings( } } - private static boolean repositoryWithShallowV2Exists(Map repositories) { - return repositories.values().stream().anyMatch(repo -> SHALLOW_SNAPSHOT_V2.get(repo.getMetadata().settings())); + private static boolean repositoryWithShallowV2Exists(Map repositories, String repositoryName) { + return repositories.values() + .stream() + .anyMatch( + repository -> SHALLOW_SNAPSHOT_V2.get(repository.getMetadata().settings()) + && !Objects.equals(repository.getMetadata().name(), repositoryName) + ); } private static boolean pinnedTimestampExistsWithDifferentRepository( diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java index ee33bd18db05d..47f3e048c516a 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsAction.java @@ -33,13 +33,24 @@ 
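The hunks below add URI path filtering to the cluster stats API: besides /_cluster/stats and /_cluster/stats/nodes/{nodeId}, callers can now request only selected sections, for example GET /_cluster/stats/indices/shards,docs/nodes/_all. A rough equivalent of the request that fromRequest() builds for such a call (illustrative only; the exact IndexMetric constants such as SHARDS and DOCS are assumed):

// Sketch of the request produced for GET /_cluster/stats/indices/shards,docs/nodes/_all
ClusterStatsRequest request = new ClusterStatsRequest().nodesIds("_all");
request.useAggregatedNodeLevelResponses(true);
request.computeAllMetrics(false);                               // only compute what was asked for
request.addMetric(ClusterStatsRequest.Metric.INDICES);          // from the {metric} path segment
request.addIndexMetric(ClusterStatsRequest.IndexMetric.SHARDS); // from the {index_metric} segment
request.addIndexMetric(ClusterStatsRequest.IndexMetric.DOCS);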
package org.opensearch.rest.action.admin.cluster; import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.IndexMetric; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.Metric; import org.opensearch.client.node.NodeClient; +import org.opensearch.core.common.Strings; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestActions.NodesResponseRestListener; import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.function.Consumer; import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; @@ -54,7 +65,34 @@ public class RestClusterStatsAction extends BaseRestHandler { @Override public List routes() { - return unmodifiableList(asList(new Route(GET, "/_cluster/stats"), new Route(GET, "/_cluster/stats/nodes/{nodeId}"))); + return unmodifiableList( + asList( + new Route(GET, "/_cluster/stats"), + new Route(GET, "/_cluster/stats/nodes/{nodeId}"), + new Route(GET, "/_cluster/stats/{metric}/nodes/{nodeId}"), + new Route(GET, "/_cluster/stats/{metric}/{index_metric}/nodes/{nodeId}") + ) + ); + } + + static final Map> INDEX_METRIC_TO_REQUEST_CONSUMER_MAP; + + static final Map> METRIC_REQUEST_CONSUMER_MAP; + + static { + Map> metricRequestConsumerMap = new HashMap<>(); + for (Metric metric : Metric.values()) { + metricRequestConsumerMap.put(metric.metricName(), request -> request.addMetric(metric)); + } + METRIC_REQUEST_CONSUMER_MAP = Collections.unmodifiableMap(metricRequestConsumerMap); + } + + static { + Map> metricMap = new HashMap<>(); + for (IndexMetric indexMetric : IndexMetric.values()) { + metricMap.put(indexMetric.metricName(), request -> request.addIndexMetric(indexMetric)); + } + INDEX_METRIC_TO_REQUEST_CONSUMER_MAP = Collections.unmodifiableMap(metricMap); } @Override @@ -64,10 +102,101 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null)); + ClusterStatsRequest clusterStatsRequest = fromRequest(request); + return channel -> client.admin().cluster().clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel)); + } + + public static ClusterStatsRequest fromRequest(final RestRequest request) { + Set metrics = Strings.tokenizeByCommaToSet(request.param("metric", "_all")); + // Value for param index_metric defaults to _all when indices metric or all metrics are requested. + String indicesMetricsDefaultValue = metrics.contains(Metric.INDICES.metricName()) || metrics.contains("_all") ? 
"_all" : null; + Set indexMetrics = Strings.tokenizeByCommaToSet(request.param("index_metric", indicesMetricsDefaultValue)); + String[] nodeIds = request.paramAsStringArray("nodeId", null); + + ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(nodeIds); clusterStatsRequest.timeout(request.param("timeout")); clusterStatsRequest.useAggregatedNodeLevelResponses(true); - return channel -> client.admin().cluster().clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel)); + clusterStatsRequest.computeAllMetrics(false); + + paramValidations(metrics, indexMetrics, request); + final Set metricsRequested = metrics.contains("_all") + ? new HashSet<>(METRIC_REQUEST_CONSUMER_MAP.keySet()) + : new HashSet<>(metrics); + Set invalidMetrics = validateAndSetRequestedMetrics(metricsRequested, METRIC_REQUEST_CONSUMER_MAP, clusterStatsRequest); + if (!invalidMetrics.isEmpty()) { + throw new IllegalArgumentException( + unrecognizedStrings(request, invalidMetrics, METRIC_REQUEST_CONSUMER_MAP.keySet(), "metric") + ); + } + if (metricsRequested.contains(Metric.INDICES.metricName())) { + final Set indexMetricsRequested = indexMetrics.contains("_all") + ? INDEX_METRIC_TO_REQUEST_CONSUMER_MAP.keySet() + : new HashSet<>(indexMetrics); + Set invalidIndexMetrics = validateAndSetRequestedMetrics( + indexMetricsRequested, + INDEX_METRIC_TO_REQUEST_CONSUMER_MAP, + clusterStatsRequest + ); + if (!invalidIndexMetrics.isEmpty()) { + throw new IllegalArgumentException( + unrecognizedStrings(request, invalidIndexMetrics, INDEX_METRIC_TO_REQUEST_CONSUMER_MAP.keySet(), "index metric") + ); + } + } + + return clusterStatsRequest; + } + + private static void paramValidations(Set metrics, Set indexMetrics, RestRequest request) { + if (metrics.size() > 1 && metrics.contains("_all")) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "request [%s] contains _all and individual metrics [%s]", + request.path(), + request.param("metric") + ) + ); + } + + if (indexMetrics.size() > 1 && indexMetrics.contains("_all")) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "request [%s] contains _all and individual index metrics [%s]", + request.path(), + request.param("index_metric") + ) + ); + } + + if (!metrics.contains(Metric.INDICES.metricName()) && !metrics.contains("_all") && !indexMetrics.isEmpty()) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "request [%s] contains index metrics [%s] but indices stats not requested", + request.path(), + request.param("index_metric") + ) + ); + } + } + + private static Set validateAndSetRequestedMetrics( + Set metrics, + Map> metricConsumerMap, + ClusterStatsRequest clusterStatsRequest + ) { + final Set invalidMetrics = new TreeSet<>(); + for (String metric : metrics) { + Consumer clusterStatsRequestConsumer = metricConsumerMap.get(metric); + if (clusterStatsRequestConsumer != null) { + clusterStatsRequestConsumer.accept(clusterStatsRequest); + } else { + invalidMetrics.add(metric); + } + } + return invalidMetrics; } @Override diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index 502be16f2fa8e..50380c506358f 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -82,6 +82,9 @@ public 
RestChannelConsumer prepareRequest(final RestRequest request, final NodeC snapshots = Strings.EMPTY_ARRAY; } String[] indices = request.paramAsStringArray("index", Strings.EMPTY_ARRAY); + if (indices.length == 1 && "_all".equalsIgnoreCase(indices[0])) { + indices = Strings.EMPTY_ARRAY; + } SnapshotsStatusRequest snapshotsStatusRequest = snapshotsStatusRequest(repository).snapshots(snapshots).indices(indices); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index b566ba9bbb8e4..ea73c474e90b8 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -212,13 +212,19 @@ public void onResponse(ClusterStateResponse clusterStateResponse) { groupedListener.onResponse(getSettingsResponse); groupedListener.onResponse(clusterStateResponse); - sendIndicesStatsRequest( - indicesToBeQueried, - subRequestIndicesOptions, - includeUnloadedSegments, - client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); + // For paginated queries, if strategy outputs no indices to be returned, + // avoid fetching indices stats. + if (shouldSkipIndicesStatsRequest(paginationStrategy)) { + groupedListener.onResponse(IndicesStatsResponse.getEmptyResponse()); + } else { + sendIndicesStatsRequest( + indicesToBeQueried, + subRequestIndicesOptions, + includeUnloadedSegments, + client, + ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + ); + } sendClusterHealthRequest( indicesToBeQueried, @@ -1093,4 +1099,8 @@ public Tuple next() { }; } + private boolean shouldSkipIndicesStatsRequest(IndexPaginationStrategy paginationStrategy) { + return Objects.nonNull(paginationStrategy) && paginationStrategy.getRequestedEntities().isEmpty(); + } + } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index c2f8b17fcf166..e892a2f1a7620 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -77,6 +77,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.DerivedFieldResolver; import org.opensearch.index.mapper.DerivedFieldResolverFactory; @@ -137,6 +138,7 @@ import org.opensearch.search.sort.SortAndFormats; import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortOrder; +import org.opensearch.search.startree.StarTreeQueryContext; import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.completion.CompletionSuggestion; import org.opensearch.tasks.TaskResourceTrackingService; @@ -164,6 +166,7 @@ import static org.opensearch.common.unit.TimeValue.timeValueHours; import static org.opensearch.common.unit.TimeValue.timeValueMillis; import static org.opensearch.common.unit.TimeValue.timeValueMinutes; +import static org.opensearch.search.internal.SearchContext.TRACK_TOTAL_HITS_DISABLED; /** * The main search service @@ -1358,6 +1361,7 @@ private void 
parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.evaluateRequestShouldUseConcurrentSearch(); return; } + SearchShardTarget shardTarget = context.shardTarget(); QueryShardContext queryShardContext = context.getQueryShardContext(); context.from(source.from()); @@ -1371,7 +1375,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc InnerHitContextBuilder.extractInnerHits(source.postFilter(), innerHitBuilders); context.parsedPostFilter(queryShardContext.toQuery(source.postFilter())); } - if (innerHitBuilders.size() > 0) { + if (!innerHitBuilders.isEmpty()) { for (Map.Entry entry : innerHitBuilders.entrySet()) { try { entry.getValue().build(context, context.innerHits()); @@ -1383,9 +1387,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.sorts() != null) { try { Optional optionalSort = SortBuilder.buildSort(source.sorts(), context.getQueryShardContext()); - if (optionalSort.isPresent()) { - context.sort(optionalSort.get()); - } + optionalSort.ifPresent(context::sort); } catch (IOException e) { throw new SearchException(shardTarget, "failed to create sort elements", e); } @@ -1540,6 +1542,20 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.profile()) { context.setProfilers(new Profilers(context.searcher(), context.shouldUseConcurrentSearch())); } + + if (this.indicesService.getCompositeIndexSettings() != null + && this.indicesService.getCompositeIndexSettings().isStarTreeIndexCreationEnabled() + && StarTreeQueryHelper.isStarTreeSupported(context)) { + try { + StarTreeQueryContext starTreeQueryContext = StarTreeQueryHelper.getStarTreeQueryContext(context, source); + if (starTreeQueryContext != null) { + context.starTreeQueryContext(starTreeQueryContext); + logger.debug("can use star tree"); + } else { + logger.debug("cannot use star tree"); + } + } catch (IOException ignored) {} + } } /** @@ -1699,7 +1715,7 @@ public static boolean canMatchSearchAfter( && minMax != null && primarySortField != null && primarySortField.missing() == null - && Objects.equals(trackTotalHitsUpto, SearchContext.TRACK_TOTAL_HITS_DISABLED)) { + && Objects.equals(trackTotalHitsUpto, TRACK_TOTAL_HITS_DISABLED)) { final Object searchAfterPrimary = searchAfter.fields[0]; if (primarySortField.order() == SortOrder.DESC) { if (minMax.compareMin(searchAfterPrimary) > 0) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java index eeb0c606694b0..720a24da1d9d4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactories.java @@ -661,4 +661,8 @@ public PipelineTree buildPipelineTree() { return new PipelineTree(subTrees, aggregators); } } + + public AggregatorFactory[] getFactories() { + return factories; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java index 6cc3a78fb1e36..86fbb46a9ad3c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorFactory.java @@ -127,4 +127,8 @@ protected boolean supportsConcurrentSegmentSearch() { public boolean evaluateChildFactories() { return 
factories.allFactoriesSupportConcurrentSearch(); } + + public AggregatorFactories getSubFactories() { + return factories; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java index e58466b56df2a..2970c5ca851e7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java @@ -32,11 +32,21 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.FixedBitSet; +import org.apache.lucene.util.NumericUtils; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; import org.opensearch.common.util.LongArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -50,6 +60,9 @@ import java.io.IOException; import java.util.Map; +import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getStarTreeFilteredValues; +import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; + /** * Aggregate all docs into an average * @@ -93,6 +106,14 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + if (supportedStarTree != null) { + return getStarTreeLeafCollector(ctx, sub, supportedStarTree); + } + return getDefaultLeafCollector(ctx, sub); + } + + private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); final CompensatedSum kahanSummation = new CompensatedSum(0, 0); @@ -126,6 +147,59 @@ public void collect(int doc, long bucket) throws IOException { }; } + public LeafBucketCollector getStarTreeLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) + throws IOException { + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + assert starTreeValues != null; + + String fieldName = ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(); + String sumMetricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + fieldName, + MetricStat.SUM.getTypeName() + ); + String countMetricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + 
starTree.getField(), + fieldName, + MetricStat.VALUE_COUNT.getTypeName() + ); + + final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), 0); + SortedNumericStarTreeValuesIterator sumValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(sumMetricName); + SortedNumericStarTreeValuesIterator countValueIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(countMetricName); + FixedBitSet matchedDocIds = getStarTreeFilteredValues(context, ctx, starTreeValues); + assert matchedDocIds != null; + + int numBits = matchedDocIds.length(); // Get the length of the FixedBitSet + if (numBits > 0) { + // Iterate over the FixedBitSet + for (int bit = matchedDocIds.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = bit + 1 < numBits + ? matchedDocIds.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + // Advance to the bit (entryId) in the valuesIterator + if ((sumValuesIterator.advanceExact(bit) && countValueIterator.advanceExact(bit)) == false) { + continue; // Skip if no more entries + } + + // Iterate over the values for the current entryId + for (int i = 0; i < sumValuesIterator.entryValueCount(); i++) { + kahanSummation.add(NumericUtils.sortableLongToDouble(sumValuesIterator.nextValue())); + counts.increment(0, countValueIterator.nextValue()); // Apply the consumer operation (e.g., max, sum) + } + } + } + + sums.set(0, kahanSummation.value()); + return new LeafBucketCollectorBase(sub, valuesSource.doubleValues(ctx)) { + @Override + public void collect(int doc, long bucket) { + throw new CollectionTerminatedException(); + } + }; + } + @Override public double metric(long owningBucketOrd) { if (valuesSource == null || owningBucketOrd >= sums.size()) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java index 0a09fae1eaebe..57389f19b4577 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.support.CoreValuesSourceType; -import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.internal.SearchContext; @@ -52,7 +52,7 @@ * * @opensearch.internal */ -class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { +class AvgAggregatorFactory extends MetricAggregatorFactory { AvgAggregatorFactory( String name, @@ -65,6 +65,11 @@ class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); } + @Override + public MetricStat getMetricStat() { + return MetricStat.AVG; + } + static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( 
AvgAggregationBuilder.REGISTRY_KEY, diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java index 8108b8a726856..257109bca54bb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java @@ -37,9 +37,13 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.NumericUtils; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; @@ -55,8 +59,11 @@ import java.io.IOException; import java.util.Arrays; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; + /** * Aggregate all docs into a max value * @@ -120,6 +127,16 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc throw new CollectionTerminatedException(); } } + + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + if (supportedStarTree != null) { + return getStarTreeCollector(ctx, sub, supportedStarTree); + } + return getDefaultLeafCollector(ctx, sub); + } + + private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MAX.select(allValues); @@ -143,6 +160,23 @@ public void collect(int doc, long bucket) throws IOException { }; } + public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) + throws IOException { + AtomicReference max = new AtomicReference<>(maxes.get(0)); + return StarTreeQueryHelper.getStarTreeLeafCollector( + context, + valuesSource, + ctx, + sub, + starTree, + MetricStat.MAX.getTypeName(), + value -> { + max.set(Math.max(max.get(), (NumericUtils.sortableLongToDouble(value)))); + }, + () -> maxes.set(0, max.get()) + ); + } + @Override public double metric(long owningBucketOrd) { if (valuesSource == null || owningBucketOrd >= maxes.size()) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java index 4fe936c8b7797..c0ee471c87f29 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.query.QueryShardContext; import 
org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.support.CoreValuesSourceType; -import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.internal.SearchContext; @@ -52,7 +52,7 @@ * * @opensearch.internal */ -class MaxAggregatorFactory extends ValuesSourceAggregatorFactory { +class MaxAggregatorFactory extends MetricAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( @@ -74,6 +74,11 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); } + @Override + public MetricStat getMetricStat() { + return MetricStat.MAX; + } + @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { return new MaxAggregator(name, config, searchContext, parent, metadata); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MetricAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MetricAggregatorFactory.java new file mode 100644 index 0000000000000..0ac630cf051d3 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MetricAggregatorFactory.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
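MetricAggregatorFactory (introduced below) gives every simple metric factory a common supertype that exposes the MetricStat it needs, so the star-tree pre-check can stay generic instead of testing each concrete factory class. The consuming pattern is roughly as follows (an illustrative restatement of validateStarTreeMetricSupport above, not new behaviour; factory is any AggregatorFactory taken from the request):

// Sketch: decide whether a factory can be served from star-tree pre-aggregated metrics.
if (factory instanceof MetricAggregatorFactory && factory.getSubFactories().getFactories().length == 0) {
    MetricStat needed = ((MetricAggregatorFactory) factory).getMetricStat();
    String field = ((MetricAggregatorFactory) factory).getField();
    // supported only when the star-tree mapping declares `needed` for `field`
}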
+ */ + +package org.opensearch.search.aggregations.metrics; + +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.opensearch.search.aggregations.support.ValuesSourceConfig; + +import java.io.IOException; +import java.util.Map; + +/** + * Extending ValuesSourceAggregatorFactory for aggregation factories supported by star-tree implementation + */ +public abstract class MetricAggregatorFactory extends ValuesSourceAggregatorFactory { + public MetricAggregatorFactory( + String name, + ValuesSourceConfig config, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metadata + ) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); + } + + public abstract MetricStat getMetricStat(); +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java index 946057e42ac88..a9f20bdeb5fd5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java @@ -37,9 +37,13 @@ import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.NumericUtils; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; @@ -54,8 +58,11 @@ import java.io.IOException; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; +import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; + /** * Aggregate all docs into a min value * @@ -119,6 +126,15 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc throw new CollectionTerminatedException(); } } + + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + if (supportedStarTree != null) { + return getStarTreeCollector(ctx, sub, supportedStarTree); + } + return getDefaultLeafCollector(ctx, sub); + } + + private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MIN.select(allValues); @@ -138,10 +154,26 @@ public void collect(int doc, long bucket) throws IOException { mins.set(bucket, min); } } - }; } + public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) + throws IOException { + AtomicReference min = 
new AtomicReference<>(mins.get(0)); + return StarTreeQueryHelper.getStarTreeLeafCollector( + context, + valuesSource, + ctx, + sub, + starTree, + MetricStat.MIN.getTypeName(), + value -> { + min.set(Math.min(min.get(), (NumericUtils.sortableLongToDouble(value)))); + }, + () -> mins.set(0, min.get()) + ); + } + @Override public double metric(long owningBucketOrd) { if (valuesSource == null || owningBucketOrd >= mins.size()) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java index 58fbe5edefd12..44c0d9d7d11eb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.support.CoreValuesSourceType; -import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.internal.SearchContext; @@ -52,7 +52,7 @@ * * @opensearch.internal */ -class MinAggregatorFactory extends ValuesSourceAggregatorFactory { +class MinAggregatorFactory extends MetricAggregatorFactory { static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( @@ -74,6 +74,11 @@ static void registerAggregators(ValuesSourceRegistry.Builder builder) { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); } + @Override + public MetricStat getMetricStat() { + return MetricStat.MIN; + } + @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { return new MinAggregator(name, config, searchContext, parent, metadata); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java index 4b8e882cd69bc..3d237a94c5699 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java @@ -33,9 +33,13 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.util.NumericUtils; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.DoubleArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -49,6 +53,8 @@ import java.io.IOException; import java.util.Map; +import static 
org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; + /** * Aggregate all docs into a single sum value * @@ -89,6 +95,15 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } + + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + if (supportedStarTree != null) { + return getStarTreeCollector(ctx, sub, supportedStarTree); + } + return getDefaultLeafCollector(ctx, sub); + } + + private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); final CompensatedSum kahanSummation = new CompensatedSum(0, 0); @@ -118,6 +133,21 @@ public void collect(int doc, long bucket) throws IOException { }; } + public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) + throws IOException { + final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), 0); + return StarTreeQueryHelper.getStarTreeLeafCollector( + context, + valuesSource, + ctx, + sub, + starTree, + MetricStat.SUM.getTypeName(), + value -> kahanSummation.add(NumericUtils.sortableLongToDouble(value)), + () -> sums.set(0, kahanSummation.value()) + ); + } + @Override public double metric(long owningBucketOrd) { if (valuesSource == null || owningBucketOrd >= sums.size()) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java index ef9b93920ba18..e2e25a8c25a87 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.support.CoreValuesSourceType; -import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.internal.SearchContext; @@ -52,7 +52,7 @@ * * @opensearch.internal */ -class SumAggregatorFactory extends ValuesSourceAggregatorFactory { +class SumAggregatorFactory extends MetricAggregatorFactory { SumAggregatorFactory( String name, @@ -65,6 +65,11 @@ class SumAggregatorFactory extends ValuesSourceAggregatorFactory { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); } + @Override + public MetricStat getMetricStat() { + return MetricStat.SUM; + } + static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register( SumAggregationBuilder.REGISTRY_KEY, diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java 
b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index 6f9be06231819..a156ec49983fa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -37,6 +37,9 @@ import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.LongArray; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.aggregations.Aggregator; @@ -50,6 +53,8 @@ import java.io.IOException; import java.util.Map; +import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; + /** * A field data based aggregator that counts the number of values a specific field has within the aggregation context. *

@@ -88,6 +93,12 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc final BigArrays bigArrays = context.bigArrays(); if (valuesSource instanceof ValuesSource.Numeric) { + + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + if (supportedStarTree != null) { + return getStarTreeCollector(ctx, sub, supportedStarTree); + } + final SortedNumericDocValues values = ((ValuesSource.Numeric) valuesSource).longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @@ -124,10 +135,23 @@ public void collect(int doc, long bucket) throws IOException { counts.increment(bucket, values.docValueCount()); } } - }; } + public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) + throws IOException { + return StarTreeQueryHelper.getStarTreeLeafCollector( + context, + (ValuesSource.Numeric) valuesSource, + ctx, + sub, + starTree, + MetricStat.VALUE_COUNT.getTypeName(), + value -> counts.increment(0, value), + () -> {} + ); + } + @Override public double metric(long owningBucketOrd) { return (valuesSource == null || owningBucketOrd >= counts.size()) ? 0 : counts.get(owningBucketOrd); diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java index 4a04dd2e0a932..0c82279484461 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregatorFactory.java @@ -32,13 +32,13 @@ package org.opensearch.search.aggregations.metrics; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.query.QueryShardContext; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.CardinalityUpperBound; import org.opensearch.search.aggregations.support.CoreValuesSourceType; -import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.internal.SearchContext; @@ -51,7 +51,7 @@ * * @opensearch.internal */ -class ValueCountAggregatorFactory extends ValuesSourceAggregatorFactory { +class ValueCountAggregatorFactory extends MetricAggregatorFactory { public static void registerAggregators(ValuesSourceRegistry.Builder builder) { builder.register(ValueCountAggregationBuilder.REGISTRY_KEY, CoreValuesSourceType.ALL_CORE, ValueCountAggregator::new, true); @@ -68,6 +68,11 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { super(name, config, queryShardContext, parent, subFactoriesBuilder, metadata); } + @Override + public MetricStat getMetricStat() { + return MetricStat.VALUE_COUNT; + } + @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { return new ValueCountAggregator(name, config, searchContext, parent, metadata); diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java index 1f4dd429e094e..5732d545cb2d2 100644 --- 
a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSource.java @@ -625,6 +625,10 @@ public SortedNumericDocValues longValues(LeafReaderContext context) { public SortedNumericDoubleValues doubleValues(LeafReaderContext context) { return indexFieldData.load(context).getDoubleValues(); } + + public String getIndexFieldName() { + return indexFieldData.getFieldName(); + } } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index 69a4a5d8b6703..d862b2c2784de 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -102,4 +102,8 @@ protected abstract Aggregator doCreateInternal( public String getStatsSubtype() { return config.valueSourceType().typeName(); } + + public String getField() { + return config.fieldContext().field(); + } } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 5357206e8c117..b7ea06d2989e5 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -76,6 +76,7 @@ import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.sort.SortAndFormats; +import org.opensearch.search.startree.StarTreeQueryContext; import org.opensearch.search.suggest.SuggestionSearchContext; import java.util.Collection; @@ -124,8 +125,8 @@ public List toInternalAggregations(Collection co private final List releasables = new CopyOnWriteArrayList<>(); private final AtomicBoolean closed = new AtomicBoolean(false); private InnerHitsContext innerHitsContext; - private volatile boolean searchTimedOut; + private StarTreeQueryContext starTreeQueryContext; protected SearchContext() {} @@ -531,4 +532,12 @@ public boolean keywordIndexOrDocValuesEnabled() { return false; } + public SearchContext starTreeQueryContext(StarTreeQueryContext starTreeQueryContext) { + this.starTreeQueryContext = starTreeQueryContext; + return this; + } + + public StarTreeQueryContext getStarTreeQueryContext() { + return this.starTreeQueryContext; + } } diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java b/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java new file mode 100644 index 0000000000000..f7fa210691678 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.startree; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.DocIdSetBuilder; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; + +/** + * Filter operator for star tree data structure. + * + * @opensearch.experimental + * @opensearch.internal + */ +public class StarTreeFilter { + private static final Logger logger = LogManager.getLogger(StarTreeFilter.class); + + /** + * First go over the star tree and try to match as many dimensions as possible + * For the remaining columns, use star-tree doc values to match them + */ + public static FixedBitSet getStarTreeResult(StarTreeValues starTreeValues, Map predicateEvaluators) throws IOException { + Map queryMap = predicateEvaluators != null ? predicateEvaluators : Collections.emptyMap(); + StarTreeResult starTreeResult = traverseStarTree(starTreeValues, queryMap); + + // Initialize FixedBitSet with size maxMatchedDoc + 1 + FixedBitSet bitSet = new FixedBitSet(starTreeResult.maxMatchedDoc + 1); + SortedNumericStarTreeValuesIterator starTreeValuesIterator = new SortedNumericStarTreeValuesIterator( + starTreeResult.matchedDocIds.build().iterator() + ); + + // No matches, return an empty FixedBitSet + if (starTreeResult.maxMatchedDoc == -1) { + return bitSet; + } + + // Set bits in FixedBitSet for initially matched documents + while (starTreeValuesIterator.nextEntry() != NO_MORE_DOCS) { + bitSet.set(starTreeValuesIterator.entryId()); + } + + // Temporary FixedBitSet reused for filtering + FixedBitSet tempBitSet = new FixedBitSet(starTreeResult.maxMatchedDoc + 1); + + // Process remaining predicate columns to further filter the results + for (String remainingPredicateColumn : starTreeResult.remainingPredicateColumns) { + logger.debug("remainingPredicateColumn : {}, maxMatchedDoc : {} ", remainingPredicateColumn, starTreeResult.maxMatchedDoc); + + SortedNumericStarTreeValuesIterator ndv = (SortedNumericStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator( + remainingPredicateColumn + ); + + long queryValue = queryMap.get(remainingPredicateColumn); // Get the query value directly + + // Clear the temporary bit set before reuse + tempBitSet.clear(0, starTreeResult.maxMatchedDoc + 1); + + if (bitSet.length() > 0) { + // Iterate over the current set of matched document IDs + for (int entryId = bitSet.nextSetBit(0); entryId != DocIdSetIterator.NO_MORE_DOCS; entryId = (entryId + 1 < bitSet.length()) + ? 
bitSet.nextSetBit(entryId + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + if (ndv.advance(entryId) != StarTreeValuesIterator.NO_MORE_ENTRIES) { + final int valuesCount = ndv.entryValueCount(); + for (int i = 0; i < valuesCount; i++) { + long value = ndv.nextValue(); + // Compare the value with the query value + if (value == queryValue) { + tempBitSet.set(entryId); // Set bit for the matching entryId + break; // No need to check other values for this entryId + } + } + } + } + } + + // Perform intersection of the current matches with the temp results for this predicate + bitSet.and(tempBitSet); + } + + return bitSet; // Return the final FixedBitSet with all matches + } + + /** + * Helper method to traverse the star tree, get matching documents and keep track of all the + * predicate dimensions that are not matched. + */ + private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Map queryMap) throws IOException { + DocIdSetBuilder docsWithField = new DocIdSetBuilder(starTreeValues.getStarTreeDocumentCount()); + DocIdSetBuilder.BulkAdder adder; + Set globalRemainingPredicateColumns = null; + StarTreeNode starTree = starTreeValues.getRoot(); + List dimensionNames = starTreeValues.getStarTreeField() + .getDimensionsOrder() + .stream() + .map(Dimension::getField) + .collect(Collectors.toList()); + boolean foundLeafNode = starTree.isLeaf(); + assert foundLeafNode == false; // root node is never leaf + Queue queue = new ArrayDeque<>(); + queue.add(starTree); + int currentDimensionId = -1; + Set remainingPredicateColumns = new HashSet<>(queryMap.keySet()); + int matchedDocsCountInStarTree = 0; + int maxDocNum = -1; + StarTreeNode starTreeNode; + List docIds = new ArrayList<>(); + + while ((starTreeNode = queue.poll()) != null) { + int dimensionId = starTreeNode.getDimensionId(); + if (dimensionId > currentDimensionId) { + String dimension = dimensionNames.get(dimensionId); + remainingPredicateColumns.remove(dimension); + if (foundLeafNode && globalRemainingPredicateColumns == null) { + globalRemainingPredicateColumns = new HashSet<>(remainingPredicateColumns); + } + currentDimensionId = dimensionId; + } + + if (remainingPredicateColumns.isEmpty()) { + int docId = starTreeNode.getAggregatedDocId(); + docIds.add(docId); + matchedDocsCountInStarTree++; + maxDocNum = Math.max(docId, maxDocNum); + continue; + } + + if (starTreeNode.isLeaf()) { + for (long i = starTreeNode.getStartDocId(); i < starTreeNode.getEndDocId(); i++) { + docIds.add((int) i); + matchedDocsCountInStarTree++; + maxDocNum = Math.max((int) i, maxDocNum); + } + continue; + } + + String childDimension = dimensionNames.get(dimensionId + 1); + StarTreeNode starNode = null; + if (globalRemainingPredicateColumns == null || !globalRemainingPredicateColumns.contains(childDimension)) { + starNode = starTreeNode.getChildStarNode(); + } + + if (remainingPredicateColumns.contains(childDimension)) { + long queryValue = queryMap.get(childDimension); // Get the query value directly from the map + StarTreeNode matchingChild = starTreeNode.getChildForDimensionValue(queryValue); + if (matchingChild != null) { + queue.add(matchingChild); + foundLeafNode |= matchingChild.isLeaf(); + } + } else { + if (starNode != null) { + queue.add(starNode); + foundLeafNode |= starNode.isLeaf(); + } else { + Iterator childrenIterator = starTreeNode.getChildrenIterator(); + while (childrenIterator.hasNext()) { + StarTreeNode childNode = childrenIterator.next(); + if (childNode.getStarTreeNodeType() != StarTreeNodeType.STAR.getValue()) { + 
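+ // Enqueue only concrete dimension-value children here; the star child pre-aggregates this dimension and would duplicate the documents those children already cover.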
queue.add(childNode); + foundLeafNode |= childNode.isLeaf(); + } + } + } + } + } + + adder = docsWithField.grow(docIds.size()); + for (int id : docIds) { + adder.add(id); + } + return new StarTreeResult( + docsWithField, + globalRemainingPredicateColumns != null ? globalRemainingPredicateColumns : Collections.emptySet(), + matchedDocsCountInStarTree, + maxDocNum + ); + } + + /** + * Helper class to wrap the result from traversing the star tree. + * */ + private static class StarTreeResult { + public final DocIdSetBuilder matchedDocIds; + public final Set remainingPredicateColumns; + public final int numOfMatchedDocs; + public final int maxMatchedDoc; + + public StarTreeResult( + DocIdSetBuilder matchedDocIds, + Set remainingPredicateColumns, + int numOfMatchedDocs, + int maxMatchedDoc + ) { + this.matchedDocIds = matchedDocIds; + this.remainingPredicateColumns = remainingPredicateColumns; + this.numOfMatchedDocs = numOfMatchedDocs; + this.maxMatchedDoc = maxMatchedDoc; + } + } +} diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java new file mode 100644 index 0000000000000..cda3a25b30e53 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.startree; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; + +import java.util.Map; + +/** + * Query class for querying star tree data structure. 
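+ * Holds the resolved star-tree field, the dimension predicates to evaluate, and a per-segment cache of filter results so the filtered values are not read from the same leaf more than once.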
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public class StarTreeQueryContext { + + /** + * Star tree field info + * This is used to get the star tree data structure + */ + private final CompositeIndexFieldInfo starTree; + + /** + * Map of field name to a value to be queried for that field + * This is used to filter the data based on the query + */ + private final Map queryMap; + + /** + * Cache for leaf results + * This is used to cache the results for each leaf reader context + * to avoid reading the filtered values from the leaf reader context multiple times + */ + private final FixedBitSet[] starTreeValues; + + public StarTreeQueryContext(CompositeIndexFieldInfo starTree, Map queryMap, int numSegmentsCache) { + this.starTree = starTree; + this.queryMap = queryMap; + if (numSegmentsCache > -1) { + starTreeValues = new FixedBitSet[numSegmentsCache]; + } else { + starTreeValues = null; + } + } + + public CompositeIndexFieldInfo getStarTree() { + return starTree; + } + + public Map getQueryMap() { + return queryMap; + } + + public FixedBitSet[] getStarTreeValues() { + return starTreeValues; + } + + public FixedBitSet getStarTreeValues(LeafReaderContext ctx) { + if (starTreeValues != null) { + return starTreeValues[ctx.ord]; + } + return null; + } + + public void setStarTreeValues(LeafReaderContext ctx, FixedBitSet values) { + if (starTreeValues != null) { + starTreeValues[ctx.ord] = values; + } + } +} diff --git a/server/src/main/java/org/opensearch/search/startree/package-info.java b/server/src/main/java/org/opensearch/search/startree/package-info.java new file mode 100644 index 0000000000000..601a588e54e69 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Star Tree query classes */ +package org.opensearch.search.startree; diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 79a70d835f773..4b5bd951f80a0 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -92,6 +92,7 @@ import org.opensearch.index.store.remote.filecache.FileCacheStats; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.repositories.IndexId; @@ -111,6 +112,7 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; +import java.util.regex.Pattern; import java.util.stream.Collectors; import static java.util.Collections.unmodifiableSet; @@ -119,10 +121,12 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_HISTORY_UUID; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; @@ -400,6 +404,13 @@ public ClusterState execute(ClusterState currentState) { overrideSettingsInternal, ignoreSettingsInternal ); + + validateReplicationTypeRestoreSettings( + snapshot, + metadata.index(index).getSettings().get(SETTING_REPLICATION_TYPE), + snapshotIndexMetadata + ); + if (isRemoteSnapshot) { snapshotIndexMetadata = addSnapshotToIndexSettings(snapshotIndexMetadata, snapshot, snapshotIndexId); } @@ -486,9 +497,7 @@ public ClusterState execute(ClusterState currentState) { // Remove all aliases - they shouldn't be restored indexMdBuilder.removeAllAliases(); } else { - for (final String alias : snapshotIndexMetadata.getAliases().keySet()) { - aliases.add(alias); - } + applyAliasesWithRename(snapshotIndexMetadata, indexMdBuilder, aliases); } IndexMetadata updatedIndexMetadata = indexMdBuilder.build(); if (partial) { @@ -533,9 +542,7 @@ public ClusterState execute(ClusterState currentState) { indexMdBuilder.putAlias(alias); } } else { - for (final String alias : snapshotIndexMetadata.getAliases().keySet()) { - aliases.add(alias); - } + applyAliasesWithRename(snapshotIndexMetadata, indexMdBuilder, aliases); } final Settings.Builder indexSettingsBuilder = Settings.builder() .put(snapshotIndexMetadata.getSettings()) @@ -665,6 +672,27 @@ private void checkAliasNameConflicts(Map renamedIndices, Set aliases + ) { + if 
(request.renameAliasPattern() == null || request.renameAliasReplacement() == null) { + aliases.addAll(snapshotIndexMetadata.getAliases().keySet()); + } else { + Pattern renameAliasPattern = Pattern.compile(request.renameAliasPattern()); + for (final Map.Entry alias : snapshotIndexMetadata.getAliases().entrySet()) { + String currentAliasName = alias.getKey(); + indexMdBuilder.removeAlias(currentAliasName); + String newAliasName = renameAliasPattern.matcher(currentAliasName) + .replaceAll(request.renameAliasReplacement()); + AliasMetadata newAlias = AliasMetadata.newAliasMetadata(alias.getValue(), newAliasName); + indexMdBuilder.putAlias(newAlias); + aliases.add(newAliasName); + } + } + } + private String[] getIgnoreSettingsInternal() { // for non-remote store enabled domain, we will remove all the remote store // related index settings present in the snapshot. @@ -1285,6 +1313,32 @@ private static void validateSnapshotRestorable(final String repository, final Sn } } + // Visible for testing + static void validateReplicationTypeRestoreSettings(Snapshot snapshot, String snapshotReplicationType, IndexMetadata updatedMetadata) { + int restoreNumberOfSearchReplicas = updatedMetadata.getSettings().getAsInt(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0); + + if (restoreNumberOfSearchReplicas > 0 + && ReplicationType.DOCUMENT.toString().equals(updatedMetadata.getSettings().get(SETTING_REPLICATION_TYPE))) { + throw new SnapshotRestoreException( + snapshot, + "snapshot was created with [" + + SETTING_REPLICATION_TYPE + + "]" + + " as [" + + snapshotReplicationType + + "]." + + " To restore with [" + + SETTING_REPLICATION_TYPE + + "]" + + " as [" + + ReplicationType.DOCUMENT + + "], [" + + SETTING_NUMBER_OF_SEARCH_REPLICAS + + "] must be set to [0]" + ); + } + } + public static boolean failed(SnapshotInfo snapshot, String index) { for (SnapshotShardFailure failure : snapshot.shardFailures()) { if (index.equals(failure.index())) { diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 6e66f8c958666..0972f5dad0fa2 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -617,12 +617,10 @@ public void onResponse(RepositoryData repositoryData) { ); return; } - listener.onResponse(snapshotInfo); - logger.info("created snapshot-v2 [{}] in repository [{}]", repositoryName, snapshotName); - // For snapshot-v2, we don't allow concurrent snapshots . But meanwhile non-v2 snapshot operations - // can get queued . This is triggering them. - runNextQueuedOperation(repositoryData, repositoryName, true); cleanOrphanTimestamp(repositoryName, repositoryData); + logger.info("created snapshot-v2 [{}] in repository [{}]", repositoryName, snapshotName); + leaveRepoLoop(repositoryName); + listener.onResponse(snapshotInfo); } @Override @@ -657,17 +655,11 @@ private void cleanOrphanTimestamp(String repoName, RepositoryData repositoryData if (orphanPinnedEntities.isEmpty()) { return; } - logger.info("Found {} orphan timestamps. Cleaning it up now", orphanPinnedEntities.size()); - if (tryEnterRepoLoop(repoName)) { - deleteOrphanTimestamps(pinnedEntities, orphanPinnedEntities); - leaveRepoLoop(repoName); - } else { - logger.info("Concurrent snapshot create/delete is happening. 
Skipping clean up of orphan timestamps"); - } + deleteOrphanTimestamps(pinnedEntities, orphanPinnedEntities); } - private boolean isOrphanPinnedEntity(String repoName, Collection snapshotUUIDs, String pinnedEntity) { + static boolean isOrphanPinnedEntity(String repoName, Collection snapshotUUIDs, String pinnedEntity) { Tuple tokens = getRepoSnapshotUUIDTuple(pinnedEntity); return Objects.equals(tokens.v1(), repoName) && snapshotUUIDs.contains(tokens.v2()) == false; } @@ -754,7 +746,8 @@ public static String getPinningEntity(String repositoryName, String snapshotUUID public static Tuple getRepoSnapshotUUIDTuple(String pinningEntity) { String[] tokens = pinningEntity.split(SNAPSHOT_PINNED_TIMESTAMP_DELIMITER); - return new Tuple<>(tokens[0], tokens[1]); + String snapUUID = String.join(SNAPSHOT_PINNED_TIMESTAMP_DELIMITER, Arrays.copyOfRange(tokens, 1, tokens.length)); + return new Tuple<>(tokens[0], snapUUID); } private void cloneSnapshotPinnedTimestamp( @@ -1026,10 +1019,8 @@ public void onResponse(RepositoryData repositoryData) { return; } logger.info("snapshot-v2 clone [{}] completed successfully", snapshot); + leaveRepoLoop(repositoryName); listener.onResponse(null); - // For snapshot-v2, we don't allow concurrent snapshots . But meanwhile non-v2 snapshot operations - // can get queued . This is triggering them. - runNextQueuedOperation(repositoryData, repositoryName, true); } @Override @@ -2569,6 +2560,19 @@ public void deleteSnapshots(final DeleteSnapshotRequest request, final ActionLis public ClusterState execute(ClusterState currentState) throws Exception { final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final List snapshotEntries = findInProgressSnapshots(snapshots, snapshotNames, repoName); + boolean isSnapshotV2 = SHALLOW_SNAPSHOT_V2.get(repository.getMetadata().settings()); + boolean remoteStoreIndexShallowCopy = remoteStoreShallowCopyEnabled(repository); + List entriesForThisRepo = snapshots.entries() + .stream() + .filter(entry -> Objects.equals(entry.repository(), repoName)) + .collect(Collectors.toList()); + if (isSnapshotV2 && remoteStoreIndexShallowCopy && entriesForThisRepo.isEmpty() == false) { + throw new ConcurrentSnapshotExecutionException( + repoName, + String.join(",", snapshotNames), + "cannot delete snapshots in v2 repo while a snapshot is in progress" + ); + } final List snapshotIds = matchingSnapshotIds( snapshotEntries.stream().map(e -> e.snapshot().getSnapshotId()).collect(Collectors.toList()), repositoryData, diff --git a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java index d1ee04bd5cb25..3d11bf77ae32a 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java @@ -85,7 +85,7 @@ public class TaskResultsService { public static final String TASK_RESULT_MAPPING_VERSION_META_FIELD = "version"; - public static final int TASK_RESULT_MAPPING_VERSION = 4; // must match version in task-index-mapping.json + public static final int TASK_RESULT_MAPPING_VERSION = 5; // must match version in task-index-mapping.json /** * The backoff policy to use when saving a task result fails. 
The total wait diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java b/server/src/main/java/org/opensearch/wlm/QueryGroupService.java index cb0be5597766a..14002a2b38134 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupService.java +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupService.java @@ -266,11 +266,12 @@ public void rejectIfNeeded(String queryGroupId) { return; } - // rejections will not happen for SOFT mode QueryGroups + // rejections will not happen for SOFT mode QueryGroups unless node is in duress Optional optionalQueryGroup = activeQueryGroups.stream().filter(x -> x.get_id().equals(queryGroupId)).findFirst(); - if (optionalQueryGroup.isPresent() && optionalQueryGroup.get().getResiliencyMode() == MutableQueryGroupFragment.ResiliencyMode.SOFT) - return; + if (optionalQueryGroup.isPresent() + && (optionalQueryGroup.get().getResiliencyMode() == MutableQueryGroupFragment.ResiliencyMode.SOFT + && !nodeDuressTrackers.isNodeInDuress())) return; optionalQueryGroup.ifPresent(queryGroup -> { boolean reject = false; diff --git a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json index 58b6b2d3bc873..21d418c472898 100644 --- a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json +++ b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json @@ -1,7 +1,7 @@ { "_doc" : { "_meta": { - "version": 4 + "version": 5 }, "dynamic" : "strict", "properties" : { @@ -34,6 +34,9 @@ "start_time_in_millis": { "type": "long" }, + "cancellation_time_millis": { + "type": "long" + }, "type": { "type": "keyword" }, @@ -47,6 +50,10 @@ "headers": { "type" : "object", "enabled" : false + }, + "resource_stats": { + "type" : "object", + "enabled" : false } } }, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/shards/CatShardsRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/shards/CatShardsRequestTests.java index f4215f54c1e21..e161342c3c609 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/shards/CatShardsRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/shards/CatShardsRequestTests.java @@ -95,7 +95,7 @@ public void testSerializationWithOlderVersionsParametersNotSerialized() throws E catShardsRequest.setCancelAfterTimeInterval(TimeValue.timeValueMillis(randomIntBetween(1, 5))); catShardsRequest.setIndices(new String[2]); - Version version = VersionUtils.getPreviousVersion(Version.CURRENT); + Version version = VersionUtils.getPreviousVersion(Version.V_2_18_0); try (BytesStreamOutput out = new BytesStreamOutput()) { out.setVersion(version); catShardsRequest.writeTo(out); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index c3de3413edd13..04cc45f3477c6 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -71,6 +71,12 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) { if (randomBoolean()) { instance.renameReplacement(randomUnicodeOfLengthBetween(1, 100)); } + if (randomBoolean()) { + instance.renameAliasPattern(randomUnicodeOfLengthBetween(1, 100)); + } + if 
(randomBoolean()) { + instance.renameAliasReplacement(randomUnicodeOfLengthBetween(1, 100)); + } instance.partial(randomBoolean()); instance.includeAliases(randomBoolean()); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java new file mode 100644 index 0000000000000..ad7706292d93c --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsResponseTests.java @@ -0,0 +1,281 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.stats; + +import org.opensearch.Build; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest.IndexMetric; +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.CommonStatsFlags; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.index.Index; +import org.opensearch.index.cache.query.QueryCacheStats; +import org.opensearch.index.engine.SegmentsStats; +import org.opensearch.index.fielddata.FieldDataStats; +import org.opensearch.index.flush.FlushStats; +import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.shard.IndexingStats; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.index.store.StoreStats; +import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.monitor.jvm.JvmStats; +import org.opensearch.monitor.os.OsInfo; +import org.opensearch.monitor.process.ProcessStats; +import org.opensearch.search.suggest.completion.CompletionStats; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.TransportInfo; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +public class ClusterStatsResponseTests extends OpenSearchTestCase { + + public void testSerializationWithIndicesMappingAndAnalysisStats() throws Exception { + List defaultClusterStatsNodeResponses = new ArrayList<>(); + + int numberOfNodes = randomIntBetween(1, 4); + Index testIndex = new Index("test-index", "_na_"); + + for (int i = 0; i < numberOfNodes; i++) { + DiscoveryNode node = new DiscoveryNode("node-" + i, buildNewFakeTransportAddress(), Version.CURRENT); + CommonStats commonStats = createRandomCommonStats(); + ShardStats[] shardStats = createShardStats(node, testIndex, commonStats); + 
ClusterStatsNodeResponse customClusterStatsResponse = createClusterStatsNodeResponse(node, shardStats); + defaultClusterStatsNodeResponses.add(customClusterStatsResponse); + } + ClusterStatsResponse clusterStatsResponse = new ClusterStatsResponse( + 1l, + "UUID", + new ClusterName("cluster_name"), + defaultClusterStatsNodeResponses, + List.of(), + ClusterState.EMPTY_STATE, + Set.of(ClusterStatsRequest.Metric.INDICES), + Set.of(IndexMetric.MAPPINGS, IndexMetric.ANALYSIS) + ); + BytesStreamOutput output = new BytesStreamOutput(); + clusterStatsResponse.writeTo(output); + + StreamInput streamInput = output.bytes().streamInput(); + ClusterStatsResponse deserializedClusterStatsResponse = new ClusterStatsResponse(streamInput); + assertEquals(clusterStatsResponse.timestamp, deserializedClusterStatsResponse.timestamp); + assertEquals(clusterStatsResponse.status, deserializedClusterStatsResponse.status); + assertEquals(clusterStatsResponse.clusterUUID, deserializedClusterStatsResponse.clusterUUID); + assertNotNull(clusterStatsResponse.indicesStats); + assertEquals(clusterStatsResponse.indicesStats.getMappings(), deserializedClusterStatsResponse.indicesStats.getMappings()); + assertEquals(clusterStatsResponse.indicesStats.getAnalysis(), deserializedClusterStatsResponse.indicesStats.getAnalysis()); + } + + public void testSerializationWithoutIndicesMappingAndAnalysisStats() throws Exception { + List defaultClusterStatsNodeResponses = new ArrayList<>(); + + int numberOfNodes = randomIntBetween(1, 4); + Index testIndex = new Index("test-index", "_na_"); + + for (int i = 0; i < numberOfNodes; i++) { + DiscoveryNode node = new DiscoveryNode("node-" + i, buildNewFakeTransportAddress(), Version.CURRENT); + CommonStats commonStats = createRandomCommonStats(); + ShardStats[] shardStats = createShardStats(node, testIndex, commonStats); + ClusterStatsNodeResponse customClusterStatsResponse = createClusterStatsNodeResponse(node, shardStats); + defaultClusterStatsNodeResponses.add(customClusterStatsResponse); + } + ClusterStatsResponse clusterStatsResponse = new ClusterStatsResponse( + 1l, + "UUID", + new ClusterName("cluster_name"), + defaultClusterStatsNodeResponses, + List.of(), + ClusterState.EMPTY_STATE, + Set.of(ClusterStatsRequest.Metric.INDICES, ClusterStatsRequest.Metric.PROCESS, ClusterStatsRequest.Metric.JVM), + Set.of( + IndexMetric.DOCS, + IndexMetric.STORE, + IndexMetric.SEGMENTS, + IndexMetric.QUERY_CACHE, + IndexMetric.FIELDDATA, + IndexMetric.COMPLETION + ) + ); + BytesStreamOutput output = new BytesStreamOutput(); + clusterStatsResponse.writeTo(output); + + StreamInput streamInput = output.bytes().streamInput(); + ClusterStatsResponse deserializedClusterStatsResponse = new ClusterStatsResponse(streamInput); + assertEquals(clusterStatsResponse.timestamp, deserializedClusterStatsResponse.timestamp); + assertEquals(clusterStatsResponse.status, deserializedClusterStatsResponse.status); + assertEquals(clusterStatsResponse.clusterUUID, deserializedClusterStatsResponse.clusterUUID); + assertNotNull(deserializedClusterStatsResponse.nodesStats); + assertNotNull(deserializedClusterStatsResponse.nodesStats.getProcess()); + assertNotNull(deserializedClusterStatsResponse.nodesStats.getJvm()); + assertNotNull(deserializedClusterStatsResponse.indicesStats); + assertNotNull(deserializedClusterStatsResponse.indicesStats.getDocs()); + assertNotNull(deserializedClusterStatsResponse.indicesStats.getStore()); + assertNotNull(deserializedClusterStatsResponse.indicesStats.getSegments()); + 
assertNotNull(deserializedClusterStatsResponse.indicesStats.getQueryCache()); + assertNotNull(deserializedClusterStatsResponse.indicesStats.getFieldData()); + assertNotNull(deserializedClusterStatsResponse.indicesStats.getCompletion()); + assertNull(deserializedClusterStatsResponse.indicesStats.getMappings()); + assertNull(deserializedClusterStatsResponse.indicesStats.getAnalysis()); + } + + private ClusterStatsNodeResponse createClusterStatsNodeResponse(DiscoveryNode node, ShardStats[] shardStats) throws IOException { + JvmStats.GarbageCollector[] garbageCollectorsArray = new JvmStats.GarbageCollector[1]; + garbageCollectorsArray[0] = new JvmStats.GarbageCollector( + randomAlphaOfLengthBetween(3, 10), + randomNonNegativeLong(), + randomNonNegativeLong() + ); + JvmStats.GarbageCollectors garbageCollectors = new JvmStats.GarbageCollectors(garbageCollectorsArray); + NodeInfo nodeInfo = new NodeInfo( + Version.CURRENT, + Build.CURRENT, + node, + Settings.EMPTY, + new OsInfo(randomLong(), randomInt(), randomInt(), "name", "pretty_name", "arch", "version"), + null, + JvmInfo.jvmInfo(), + null, + new TransportInfo( + new BoundTransportAddress(new TransportAddress[] { buildNewFakeTransportAddress() }, buildNewFakeTransportAddress()), + null + ), + null, + new PluginsAndModules(Collections.emptyList(), Collections.emptyList()), + null, + null, + null, + null + ); + + NodeStats nodeStats = new NodeStats( + node, + randomNonNegativeLong(), + null, + null, + new ProcessStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + new ProcessStats.Cpu(randomShort(), randomNonNegativeLong()), + new ProcessStats.Mem(randomNonNegativeLong()) + ), + new JvmStats( + randomNonNegativeLong(), + randomNonNegativeLong(), + new JvmStats.Mem( + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Collections.emptyList() + ), + new JvmStats.Threads(randomIntBetween(1, 1000), randomIntBetween(1, 1000)), + garbageCollectors, + Collections.emptyList(), + new JvmStats.Classes(randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()) + ), + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + return new ClusterStatsNodeResponse(node, null, nodeInfo, nodeStats, shardStats); + + } + + private CommonStats createRandomCommonStats() { + CommonStats commonStats = new CommonStats(CommonStatsFlags.NONE); + commonStats.docs = new DocsStats(randomLongBetween(0, 10000), randomLongBetween(0, 100), randomLongBetween(0, 1000)); + commonStats.store = new StoreStats(randomLongBetween(0, 100), randomLongBetween(0, 1000)); + commonStats.indexing = new IndexingStats(); + commonStats.completion = new CompletionStats(); + commonStats.flush = new FlushStats(randomLongBetween(0, 100), randomLongBetween(0, 100), randomLongBetween(0, 100)); + commonStats.fieldData = new FieldDataStats(randomLongBetween(0, 100), randomLongBetween(0, 100), null); + commonStats.queryCache = new QueryCacheStats( + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100), + randomLongBetween(0, 100) + ); + commonStats.segments = new SegmentsStats(); + + return commonStats; + } + + private ShardStats[] createShardStats(DiscoveryNode localNode, Index index, CommonStats commonStats) { + List shardStatsList = new ArrayList<>(); + for (int i = 0; i 
< 2; i++) { + ShardRoutingState shardRoutingState = ShardRoutingState.fromValue((byte) randomIntBetween(2, 3)); + ShardRouting shardRouting = TestShardRouting.newShardRouting( + index.getName(), + i, + localNode.getId(), + randomBoolean(), + shardRoutingState + ); + + Path path = createTempDir().resolve("indices") + .resolve(shardRouting.shardId().getIndex().getUUID()) + .resolve(String.valueOf(shardRouting.shardId().id())); + + ShardStats shardStats = new ShardStats( + shardRouting, + new ShardPath(false, path, path, shardRouting.shardId()), + commonStats, + null, + null, + null + ); + shardStatsList.add(shardStats); + } + + return shardStatsList.toArray(new ShardStats[0]); + } + +} diff --git a/server/src/test/java/org/opensearch/action/resync/ResyncReplicationRequestTests.java b/server/src/test/java/org/opensearch/action/resync/ResyncReplicationRequestTests.java index 654dbb203b38a..9faaafc22c844 100644 --- a/server/src/test/java/org/opensearch/action/resync/ResyncReplicationRequestTests.java +++ b/server/src/test/java/org/opensearch/action/resync/ResyncReplicationRequestTests.java @@ -40,14 +40,14 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import static org.hamcrest.Matchers.equalTo; public class ResyncReplicationRequestTests extends OpenSearchTestCase { public void testSerialization() throws IOException { - final byte[] bytes = "{}".getBytes(Charset.forName("UTF-8")); + final byte[] bytes = "{}".getBytes(StandardCharsets.UTF_8); final Translog.Index index = new Translog.Index("id", 0, randomNonNegativeLong(), randomNonNegativeLong(), bytes, null, -1); final ShardId shardId = new ShardId(new Index("index", "uuid"), 0); final ResyncReplicationRequest before = new ResyncReplicationRequest(shardId, 42L, 100, new Translog.Operation[] { index }); @@ -61,4 +61,18 @@ public void testSerialization() throws IOException { assertThat(after, equalTo(before)); } + public void testContractBetweenEqualsAndHashCode() { + final byte[] bytes = "{}".getBytes(StandardCharsets.UTF_8); + final Translog.Index index = new Translog.Index("id", 0, 123L, -123L, bytes, null, -1); + final ShardId shardId = new ShardId(new Index("index", "uuid"), 0); + // Both created requests have arrays `operations` with the same content, and we want to verify that + // equals() and hashCode() are following the contract: + // If objects are equal, they have the same hash code + final ResyncReplicationRequest request1 = new ResyncReplicationRequest(shardId, 42L, 100, new Translog.Operation[] { index }); + final ResyncReplicationRequest request2 = new ResyncReplicationRequest(shardId, 42L, 100, new Translog.Operation[] { index }); + + assertEquals(request1, request2); + assertEquals(request1.hashCode(), request2.hashCode()); + } + } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java index 56bd2d94dce84..8e8e71ad33e75 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationCheckerSettingsTests.java @@ -14,7 +14,10 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchSingleNodeTestCase; +import static org.opensearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; +import static 
org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING; import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING; +import static org.opensearch.cluster.coordination.LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING; import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING; import static org.opensearch.common.unit.TimeValue.timeValueSeconds; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -42,10 +45,10 @@ public void testFollowerCheckTimeoutValueUpdate() { public void testFollowerCheckTimeoutMaxValue() { Setting setting1 = FOLLOWER_CHECK_TIMEOUT_SETTING; - Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "61s").build(); + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "151s").build(); assertThrows( - "failed to parse value [61s] for setting [" + setting1.getKey() + "], must be <= [60000ms]", + "failed to parse value [151s] for setting [" + setting1.getKey() + "], must be <= [150000ms]", IllegalArgumentException.class, () -> { client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); @@ -66,6 +69,38 @@ public void testFollowerCheckTimeoutMinValue() { ); } + public void testFollowerCheckIntervalValueUpdate() { + Setting setting1 = FOLLOWER_CHECK_INTERVAL_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "10s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(timeSettings1) + .execute() + .actionGet(); + assertAcked(response); + assertEquals(timeValueSeconds(10), setting1.get(response.getPersistentSettings())); + } finally { + // cleanup + timeSettings1 = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + } + + public void testFollowerCheckIntervalMinValue() { + Setting setting1 = FOLLOWER_CHECK_INTERVAL_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "10ms").build(); + + assertThrows( + "failed to parse value [10ms] for setting [" + setting1.getKey() + "], must be >= [100ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + public void testLeaderCheckTimeoutValueUpdate() { Setting setting1 = LEADER_CHECK_TIMEOUT_SETTING; Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "60s").build(); @@ -110,4 +145,70 @@ public void testLeaderCheckTimeoutMinValue() { } ); } + + public void testClusterPublishTimeoutValueUpdate() { + Setting setting1 = PUBLISH_TIMEOUT_SETTING; + Settings timeSettings1 = Settings.builder().put(setting1.getKey(), "60s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(timeSettings1) + .execute() + .actionGet(); + assertAcked(response); + assertEquals(timeValueSeconds(60), setting1.get(response.getPersistentSettings())); + } finally { + // cleanup + timeSettings1 = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + } + + public void testClusterPublishTimeoutMinValue() { + Setting setting1 = PUBLISH_TIMEOUT_SETTING; + Settings 
timeSettings1 = Settings.builder().put(setting1.getKey(), "0s").build(); + + assertThrows( + "failed to parse value [0s] for setting [" + setting1.getKey() + "], must be >= [1ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(timeSettings1).execute().actionGet(); + } + ); + } + + public void testLagDetectorTimeoutUpdate() { + Setting setting1 = CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING; + Settings lagDetectorTimeout = Settings.builder().put(setting1.getKey(), "30s").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(lagDetectorTimeout) + .execute() + .actionGet(); + + assertAcked(response); + assertEquals(timeValueSeconds(30), setting1.get(response.getPersistentSettings())); + } finally { + // cleanup + lagDetectorTimeout = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(lagDetectorTimeout).execute().actionGet(); + } + } + + public void testLagDetectorTimeoutMinValue() { + Setting setting1 = CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING; + Settings lagDetectorTimeout = Settings.builder().put(setting1.getKey(), "0s").build(); + + assertThrows( + "failed to parse value [0s] for setting [" + setting1.getKey() + "], must be >= [1ms]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(lagDetectorTimeout).execute().actionGet(); + } + ); + } + } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index b5d16e7be849f..f707198efb073 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -67,7 +67,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; -import static org.opensearch.gateway.remote.ClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_PUBLICATION_SETTING_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -962,7 +961,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep .previousClusterUUID(randomAlphaOfLength(10)) .clusterUUIDCommitted(true) .build(); - when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION)).thenReturn( + when(remoteClusterStateService.writeFullMetadata(clusterState, previousClusterUUID)).thenReturn( new RemoteClusterStateManifestInfo(manifest, "path/to/manifest") ); @@ -975,8 +974,7 @@ public void testHandlePrePublishAndCommitWhenRemoteStateEnabled() throws IOExcep final CoordinationState coordinationState = createCoordinationState(persistedStateRegistry, node1, remoteStateSettings()); coordinationState.handlePrePublish(clusterState); - Mockito.verify(remoteClusterStateService, Mockito.times(1)) - .writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + Mockito.verify(remoteClusterStateService, Mockito.times(1)).writeFullMetadata(clusterState, 
previousClusterUUID); assertThat(persistedStateRegistry.getPersistedState(PersistedStateType.REMOTE).getLastAcceptedState(), equalTo(clusterState)); when(remoteClusterStateService.markLastStateAsCommitted(any(), any(), eq(false))).thenReturn( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LagDetectorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LagDetectorTests.java index adffa27e9bc1a..315e5d6224227 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LagDetectorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LagDetectorTests.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.coordination; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; @@ -70,8 +71,9 @@ public void setupFixture() { } else { followerLagTimeout = CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.get(Settings.EMPTY); } - - lagDetector = new LagDetector(settingsBuilder.build(), deterministicTaskQueue.getThreadPool(), failedNodes::add, () -> localNode); + Settings settings = settingsBuilder.build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + lagDetector = new LagDetector(settings, clusterSettings, deterministicTaskQueue.getThreadPool(), failedNodes::add, () -> localNode); localNode = CoordinationStateTests.createNode("local"); node1 = CoordinationStateTests.createNode("node1"); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 1fdd038053eb6..0bb9ec28a1efc 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -136,6 +136,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_BLOCK; @@ -1821,6 +1822,42 @@ private void validateRemoteCustomData(Map customData, String exp assertEquals(expectedValue, customData.get(expectedKey)); } + public void testNumberOfRoutingShardsShowsInIndexSettings() { + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + indicesServices, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + null, + null, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + false, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()), + DefaultRemoteStoreSettings.INSTANCE, + repositoriesServiceSupplier + ); + final int routingNumberOfShards = 4; + Settings indexSettings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2) + 
.put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), routingNumberOfShards) + .build(); + CreateIndexClusterStateUpdateRequest request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + IndexMetadata indexMetadata = checkerService.buildAndValidateTemporaryIndexMetadata( + indexSettings, + request, + routingNumberOfShards + ); + assertEquals(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(indexMetadata.getSettings()).intValue(), routingNumberOfShards); + })); + } + public void testGetIndexNumberOfRoutingShardsWithNullSourceIndex() { Settings indexSettings = Settings.builder() .put("index.version.created", Version.CURRENT) diff --git a/server/src/test/java/org/opensearch/cluster/routing/SearchOnlyReplicaRestoreTests.java b/server/src/test/java/org/opensearch/cluster/routing/SearchOnlyReplicaRestoreTests.java new file mode 100644 index 0000000000000..d0effe9484533 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/routing/SearchOnlyReplicaRestoreTests.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.IndexId; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashSet; + +public class SearchOnlyReplicaRestoreTests extends OpenSearchTestCase { + + public void testSearchOnlyReplicasRestored() { + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .numberOfSearchReplicas(1) + ) + .build(); + + IndexMetadata indexMetadata = metadata.index("test"); + RecoverySource.SnapshotRecoverySource snapshotRecoverySource = new RecoverySource.SnapshotRecoverySource( + UUIDs.randomBase64UUID(), + new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), + Version.CURRENT, + new IndexId("test", UUIDs.randomBase64UUID(random())) + ); + + RoutingTable routingTable = RoutingTable.builder().addAsNewRestore(indexMetadata, snapshotRecoverySource, new HashSet<>()).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + IndexShardRoutingTable indexShardRoutingTable = clusterState.routingTable().index("test").shard(0); + + assertEquals(1, clusterState.routingTable().index("test").shards().size()); + assertEquals(3, indexShardRoutingTable.getShards().size()); + assertEquals(1, indexShardRoutingTable.searchOnlyReplicas().size()); + } +} diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index efdb3076f419c..955ea82e219e8 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ 
b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -759,7 +759,7 @@ public void testRemotePersistedState() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().clusterTerm(1L).stateVersion(5L).build(); final String previousClusterUUID = "prev-cluster-uuid"; - Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any(), eq(MANIFEST_CURRENT_CODEC_VERSION))) + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())) .thenReturn(new RemoteClusterStateManifestInfo(manifest, "path/to/manifest")); Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())) @@ -777,7 +777,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(clusterState); - Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(clusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -789,8 +789,7 @@ public void testRemotePersistedState() throws IOException { ); remotePersistedState.setLastAcceptedState(secondClusterState); - Mockito.verify(remoteClusterStateService, times(1)) - .writeFullMetadata(secondClusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + Mockito.verify(remoteClusterStateService, times(1)).writeFullMetadata(secondClusterState, previousClusterUUID); assertThat(remotePersistedState.getLastAcceptedState(), equalTo(secondClusterState)); assertThat(remotePersistedState.getCurrentTerm(), equalTo(clusterTerm)); @@ -820,9 +819,9 @@ public void testRemotePersistedStateWithDifferentNodeConfiguration() throws IOEx .clusterTerm(1L) .stateVersion(5L) .codecVersion(CODEC_V1) - .opensearchVersion(Version.CURRENT) + .opensearchVersion(Version.V_2_15_0) .build(); - Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any(), eq(CODEC_V1))) + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())) .thenReturn(new RemoteClusterStateManifestInfo(manifest, "path/to/manifest2")); CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); @@ -833,7 +832,7 @@ public void testRemotePersistedStateWithDifferentNodeConfiguration() throws IOEx ); remotePersistedState.setLastAcceptedState(clusterState1); - Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState1, previousClusterUUID, CODEC_V1); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState1, previousClusterUUID); ClusterState clusterState2 = createClusterState( randomNonNegativeLong(), @@ -846,10 +845,10 @@ public void testRemotePersistedStateWithDifferentNodeConfiguration() throws IOEx .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) .opensearchVersion(Version.CURRENT) .build(); - Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any(), eq(MANIFEST_CURRENT_CODEC_VERSION))) + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())) .thenReturn(new RemoteClusterStateManifestInfo(manifest2, "path/to/manifest")); 
remotePersistedState.setLastAcceptedState(clusterState2); - Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState2, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterState2, previousClusterUUID); ClusterState clusterState3 = createClusterState( randomNonNegativeLong(), @@ -889,8 +888,7 @@ public void testRemotePersistentState_FollowerNode() throws IOException { remotePersistedState.setLastAcceptedState(clusterState); remotePersistedState.setLastAcceptedManifest(manifest); - Mockito.verify(remoteClusterStateService, never()) - .writeFullMetadata(clusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + Mockito.verify(remoteClusterStateService, never()).writeFullMetadata(clusterState, previousClusterUUID); assertEquals(clusterState, remotePersistedState.getLastAcceptedState()); assertEquals(clusterTerm, remotePersistedState.getCurrentTerm()); @@ -906,8 +904,7 @@ public void testRemotePersistentState_FollowerNode() throws IOException { ); remotePersistedState.setLastAcceptedState(secondClusterState); - Mockito.verify(remoteClusterStateService, never()) - .writeFullMetadata(secondClusterState, previousClusterUUID, MANIFEST_CURRENT_CODEC_VERSION); + Mockito.verify(remoteClusterStateService, never()).writeFullMetadata(secondClusterState, previousClusterUUID); assertEquals(secondClusterState, remotePersistedState.getLastAcceptedState()); assertEquals(clusterTerm, remotePersistedState.getCurrentTerm()); @@ -940,7 +937,7 @@ public void testRemotePersistedStateNotCommitted() throws IOException { .build(); Mockito.when(remoteClusterStateService.getLatestClusterMetadataManifest(Mockito.any(), Mockito.any())) .thenReturn(Optional.of(manifest)); - Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any(), eq(MANIFEST_CURRENT_CODEC_VERSION))) + Mockito.when(remoteClusterStateService.writeFullMetadata(Mockito.any(), Mockito.any())) .thenReturn(new RemoteClusterStateManifestInfo(manifest, "path/to/manifest")); Mockito.when(remoteClusterStateService.writeIncrementalMetadata(Mockito.any(), Mockito.any(), Mockito.any())) @@ -966,17 +963,14 @@ public void testRemotePersistedStateNotCommitted() throws IOException { remotePersistedState.setLastAcceptedState(clusterState); ArgumentCaptor previousClusterUUIDCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor clusterStateCaptor = ArgumentCaptor.forClass(ClusterState.class); - Mockito.verify(remoteClusterStateService) - .writeFullMetadata(clusterStateCaptor.capture(), previousClusterUUIDCaptor.capture(), eq(MANIFEST_CURRENT_CODEC_VERSION)); + Mockito.verify(remoteClusterStateService).writeFullMetadata(clusterStateCaptor.capture(), previousClusterUUIDCaptor.capture()); assertEquals(previousClusterUUID, previousClusterUUIDCaptor.getValue()); } public void testRemotePersistedStateExceptionOnFullStateUpload() throws IOException { final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final String previousClusterUUID = "prev-cluster-uuid"; - Mockito.doThrow(IOException.class) - .when(remoteClusterStateService) - .writeFullMetadata(Mockito.any(), Mockito.any(), eq(MANIFEST_CURRENT_CODEC_VERSION)); + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); CoordinationState.PersistedState remotePersistedState = new RemotePersistedState(remoteClusterStateService, previousClusterUUID); @@ -994,9 +988,7 @@ public void 
testRemotePersistedStateFailureStats() throws IOException { RemoteUploadStats remoteStateStats = new RemoteUploadStats(); final RemoteClusterStateService remoteClusterStateService = Mockito.mock(RemoteClusterStateService.class); final String previousClusterUUID = "prev-cluster-uuid"; - Mockito.doThrow(IOException.class) - .when(remoteClusterStateService) - .writeFullMetadata(Mockito.any(), Mockito.any(), eq(MANIFEST_CURRENT_CODEC_VERSION)); + Mockito.doThrow(IOException.class).when(remoteClusterStateService).writeFullMetadata(Mockito.any(), Mockito.any()); when(remoteClusterStateService.getUploadStats()).thenReturn(remoteStateStats); doAnswer((i) -> { remoteStateStats.stateFailed(); diff --git a/server/src/test/java/org/opensearch/gateway/ShardsBatchGatewayAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ShardsBatchGatewayAllocatorTests.java new file mode 100644 index 0000000000000..59fb6e2b940ba --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/ShardsBatchGatewayAllocatorTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.gateway; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import static org.opensearch.gateway.ShardsBatchGatewayAllocator.GATEWAY_ALLOCATOR_BATCH_SIZE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class ShardsBatchGatewayAllocatorTests extends OpenSearchSingleNodeTestCase { + public void testBatchSizeValueUpdate() { + Setting<Long> setting1 = GATEWAY_ALLOCATOR_BATCH_SIZE; + Settings batchSizeSetting = Settings.builder().put(setting1.getKey(), "3000").build(); + try { + ClusterUpdateSettingsResponse response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(batchSizeSetting) + .execute() + .actionGet(); + + assertAcked(response); + assertThat(setting1.get(response.getPersistentSettings()), equalTo(3000L)); + } finally { + // cleanup + batchSizeSetting = Settings.builder().putNull(setting1.getKey()).build(); + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(batchSizeSetting).execute().actionGet(); + } + } + + public void testBatchSizeMaxValue() { + Setting<Long> setting1 = GATEWAY_ALLOCATOR_BATCH_SIZE; + Settings batchSizeSetting = Settings.builder().put(setting1.getKey(), "11000").build(); + + assertThrows( + "failed to parse value [11000] for setting [" + setting1.getKey() + "], must be <= [10000]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(batchSizeSetting).execute().actionGet(); + } + ); + } + + public void testBatchSizeMinValue() { + Setting<Long> setting1 = GATEWAY_ALLOCATOR_BATCH_SIZE; + Settings batchSizeSetting = Settings.builder().put(setting1.getKey(), "0").build(); + + assertThrows( + "failed to parse value [0] for setting [" + setting1.getKey() + "], must be >= [1]", + IllegalArgumentException.class, + () -> { + client().admin().cluster().prepareUpdateSettings().setPersistentSettings(batchSizeSetting).execute().actionGet(); + } + ); + } +} diff --git 
a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index dffbb9d82545a..448b9cc9d78ac 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -280,8 +280,7 @@ public void testFailWriteFullMetadataNonClusterManagerNode() throws IOException final ClusterState clusterState = generateClusterStateWithOneIndex().build(); final RemoteClusterStateManifestInfo manifestDetails = remoteClusterStateService.writeFullMetadata( clusterState, - randomAlphaOfLength(10), - MANIFEST_CURRENT_CODEC_VERSION + randomAlphaOfLength(10) ); Assert.assertThat(manifestDetails, nullValue()); } @@ -327,11 +326,8 @@ public void testWriteFullMetadataSuccess() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -401,11 +397,8 @@ public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException .build(); mockBlobStoreObjects(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -453,11 +446,8 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { }).when(container).asyncBlobUpload(writeContextArgumentCaptor.capture(), actionListenerArgumentCaptor.capture()); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); List indices = List.of(uploadedIndexMetadata); @@ -534,7 +524,7 @@ public void run() { remoteClusterStateService.start(); assertThrows( RemoteStateTransferException.class, - () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10), MANIFEST_CURRENT_CODEC_VERSION) + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); } @@ -578,7 +568,7 @@ public void testTimeoutWhileWritingManifestFile() throws IOException { ).thenReturn(new 
RemoteClusterStateUtils.UploadedMetadataResults()); RemoteStateTransferException ex = expectThrows( RemoteStateTransferException.class, - () -> spiedService.writeFullMetadata(clusterState, randomAlphaOfLength(10), MANIFEST_CURRENT_CODEC_VERSION) + () -> spiedService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); assertTrue(ex.getMessage().contains("Timed out waiting for transfer")); } @@ -600,7 +590,7 @@ public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOEx remoteClusterStateService.start(); assertThrows( RemoteStateTransferException.class, - () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10), MANIFEST_CURRENT_CODEC_VERSION) + () -> remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) ); assertEquals(0, remoteClusterStateService.getUploadStats().getSuccessCount()); } @@ -1850,7 +1840,7 @@ private void verifyCodecMigrationManifest(int previousCodec) throws IOException // global metadata is updated assertThat(manifestAfterUpdate.hasMetadataAttributesFiles(), is(true)); // During incremental update, codec version will not change. - assertThat(manifestAfterUpdate.getCodecVersion(), is(previousCodec)); + assertThat(manifestAfterUpdate.getCodecVersion(), is(MANIFEST_CURRENT_CODEC_VERSION)); } public void testWriteIncrementalGlobalMetadataFromCodecV0Success() throws IOException { @@ -1885,7 +1875,7 @@ private void verifyWriteIncrementalGlobalMetadataFromOlderCodecSuccess(ClusterMe ).getClusterMetadataManifest(); final ClusterMetadataManifest expectedManifest = ClusterMetadataManifest.builder() - .codecVersion(previousManifest.getCodecVersion()) + .codecVersion(MANIFEST_CURRENT_CODEC_VERSION) .indices(Collections.emptyList()) .clusterTerm(1L) .stateVersion(1L) @@ -2074,11 +2064,8 @@ public void testCustomMetadataDeletedUpdatedAndAdded() throws IOException { // Initial cluster state with index. final ClusterState initialClusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); - final ClusterMetadataManifest initialManifest = remoteClusterStateService.writeFullMetadata( - initialClusterState, - "_na_", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest initialManifest = remoteClusterStateService.writeFullMetadata(initialClusterState, "_na_") + .getClusterMetadataManifest(); ClusterState clusterState1 = ClusterState.builder(initialClusterState) .metadata( @@ -2156,11 +2143,8 @@ public void testIndexMetadataDeletedUpdatedAndAdded() throws IOException { // Initial cluster state with index. final ClusterState initialClusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); - final ClusterMetadataManifest initialManifest = remoteClusterStateService.writeFullMetadata( - initialClusterState, - "_na_", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest initialManifest = remoteClusterStateService.writeFullMetadata(initialClusterState, "_na_") + .getClusterMetadataManifest(); String initialIndex = "test-index"; Index index1 = new Index("test-index-1", "index-uuid-1"); Index index2 = new Index("test-index-2", "index-uuid-2"); @@ -2238,11 +2222,8 @@ private void verifyMetadataAttributeOnlyUpdated( // Initial cluster state with index. 
final ClusterState initialClusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); - final ClusterMetadataManifest initialManifest = remoteClusterStateService.writeFullMetadata( - initialClusterState, - "_na_", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest initialManifest = remoteClusterStateService.writeFullMetadata(initialClusterState, "_na_") + .getClusterMetadataManifest(); ClusterState newClusterState = clusterStateUpdater.apply(initialClusterState); @@ -2255,11 +2236,8 @@ private void verifyMetadataAttributeOnlyUpdated( initialManifest ).getClusterMetadataManifest(); } else { - manifestAfterMetadataUpdate = remoteClusterStateService.writeFullMetadata( - newClusterState, - initialClusterState.stateUUID(), - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + manifestAfterMetadataUpdate = remoteClusterStateService.writeFullMetadata(newClusterState, initialClusterState.stateUUID()) + .getClusterMetadataManifest(); } assertions.accept(initialManifest, manifestAfterMetadataUpdate); @@ -2742,11 +2720,8 @@ public void testRemoteStateUploadStats() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); mockBlobStoreObjects(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); assertTrue(remoteClusterStateService.getUploadStats() != null); assertEquals(1, remoteClusterStateService.getUploadStats().getSuccessCount()); @@ -2801,11 +2776,8 @@ public void testWriteFullMetadataSuccessWithRoutingTable() throws IOException { final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); final UploadedIndexMetadata uploadedIndiceRoutingMetadata = new UploadedIndexMetadata( "test-index", @@ -2854,11 +2826,8 @@ public void testWriteFullMetadataInParallelSuccessWithRoutingTable() throws IOEx when((blobStoreRepository.basePath())).thenReturn(BlobPath.cleanPath().add("base-path")); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); final UploadedIndexMetadata uploadedIndiceRoutingMetadata = new UploadedIndexMetadata( @@ -3108,11 +3077,8 @@ 
public void testWriteFullMetadataSuccessWithChecksumValidationEnabled() throws I final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); final UploadedIndexMetadata uploadedIndiceRoutingMetadata = new UploadedIndexMetadata( "test-index", @@ -3152,11 +3118,8 @@ public void testWriteFullMetadataSuccessWithChecksumValidationModeNone() throws final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()).build(); remoteClusterStateService.start(); - final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata( - clusterState, - "prev-cluster-uuid", - MANIFEST_CURRENT_CODEC_VERSION - ).getClusterMetadataManifest(); + final ClusterMetadataManifest manifest = remoteClusterStateService.writeFullMetadata(clusterState, "prev-cluster-uuid") + .getClusterMetadataManifest(); final UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "index-uuid", "metadata-filename"); final UploadedIndexMetadata uploadedIndiceRoutingMetadata = new UploadedIndexMetadata( "test-index", diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java index 4fe0199f89f41..f081cadc1362c 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java @@ -15,11 +15,14 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene912.Lucene912Codec; import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.BaseDocValuesFormatTestCase; import org.apache.lucene.tests.index.RandomIndexWriter; @@ -58,9 +61,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX; +import static org.opensearch.index.compositeindex.CompositeIndexConstants.STAR_TREE_DOCS_COUNT; import static org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils.assertStarTreeDocuments; /** @@ -103,7 +109,7 @@ protected Codec getCodec() { final Logger testLogger = LogManager.getLogger(StarTreeDocValuesFormatTests.class); try { - createMapperService(getExpandedMapping()); + mapperService = 
createMapperService(getExpandedMapping()); } catch (IOException e) { throw new RuntimeException(e); } @@ -207,7 +213,101 @@ public void testStarTreeDocValues() throws IOException { directory.close(); } - private XContentBuilder getExpandedMapping() throws IOException { + public void testStarTreeDocValuesWithDeletions() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + int iterations = 3; + Map<String, Integer> map = new HashMap<>(); + List<String> allIds = new ArrayList<>(); + for (int iter = 0; iter < iterations; iter++) { + // Add 10 documents + for (int i = 0; i < 10; i++) { + String id = String.valueOf(random().nextInt() + i); + allIds.add(id); + Document doc = new Document(); + doc.add(new StringField("_id", id, Field.Store.YES)); + int fieldValue = random().nextInt(5) + 1; + doc.add(new SortedNumericDocValuesField("field", fieldValue)); + + int sndvValue = random().nextInt(3); + + doc.add(new SortedNumericDocValuesField("sndv", sndvValue)); + int dvValue = random().nextInt(3); + + doc.add(new SortedNumericDocValuesField("dv", dvValue)); + map.put(sndvValue + "-" + dvValue, fieldValue + map.getOrDefault(sndvValue + "-" + dvValue, 0)); + iw.addDocument(doc); + } + iw.flush(); + } + iw.commit(); + // Delete random number of documents + int docsToDelete = random().nextInt(9); // Delete up to 9 documents + for (int i = 0; i < docsToDelete; i++) { + if (!allIds.isEmpty()) { + // List.remove(int) already drops the id from the tracking list, so no second remove is needed + String idToDelete = allIds.remove(random().nextInt(allIds.size())); + iw.deleteDocuments(new Term("_id", idToDelete)); + } + } + iw.flush(); + iw.commit(); + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = DirectoryReader.open(directory); + TestUtil.checkReader(ir); + assertEquals(1, ir.leaves().size()); + + // Assert star tree documents + for (LeafReaderContext context : ir.leaves()) { + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + List<CompositeIndexFieldInfo> compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + + for (CompositeIndexFieldInfo compositeIndexFieldInfo : compositeIndexFields) { + StarTreeValues starTreeValues = (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(compositeIndexFieldInfo); + StarTreeDocument[] actualStarTreeDocuments = StarTreeTestUtils.getSegmentsStarTreeDocuments( + List.of(starTreeValues), + List.of( + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.DOUBLE, + NumberFieldMapper.NumberType.LONG + ), + Integer.parseInt(starTreeValues.getAttributes().get(STAR_TREE_DOCS_COUNT)) + ); + for (StarTreeDocument starDoc : actualStarTreeDocuments) { + Long sndvVal = null; + if (starDoc.dimensions[0] != null) { + sndvVal = starDoc.dimensions[0]; + } + Long dvVal = null; + if (starDoc.dimensions[1] != null) { + dvVal = starDoc.dimensions[1]; + } + if (starDoc.metrics[0] != null) { + double metric = (double) starDoc.metrics[0]; + if (map.containsKey(sndvVal + "-" + dvVal)) { + assertEquals((long) map.get(sndvVal + "-" + dvVal), (long) metric); + } + } + } + } + } + ir.close(); + directory.close(); + } + + public static 
XContentBuilder getExpandedMapping() throws IOException { return topMapping(b -> { b.startObject("composite"); b.startObject("startree"); @@ -261,13 +361,13 @@ private XContentBuilder getExpandedMapping() throws IOException { }); } - private XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { + public static XContentBuilder topMapping(CheckedConsumer buildFields) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder().startObject().startObject("_doc"); buildFields.accept(builder); return builder.endObject().endObject(); } - private void createMapperService(XContentBuilder builder) throws IOException { + public static MapperService createMapperService(XContentBuilder builder) throws IOException { Settings settings = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) @@ -277,7 +377,7 @@ private void createMapperService(XContentBuilder builder) throws IOException { .build(); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).putMapping(builder.toString()).build(); IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); - mapperService = MapperTestUtils.newMapperServiceWithHelperAnalyzer( + MapperService mapperService = MapperTestUtils.newMapperServiceWithHelperAnalyzer( new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), createTempDir(), settings, @@ -285,5 +385,6 @@ private void createMapperService(XContentBuilder builder) throws IOException { "test" ); mapperService.merge(indexMetadata, MapperService.MergeReason.INDEX_TEMPLATE); + return mapperService; } } diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java index 838f97ade9e8e..78ae90936d78e 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java @@ -755,17 +755,21 @@ public void testGetGenerationsToBeDeletedWithGenerationInRemote() throws IOExcep assertTrue(generations.isEmpty()); } - public void testGetMetadataFilesToBeDeletedNoExclusion() { + public void testGetMetadataFilesToBeDeletedExclusionDueToRefreshTimestamp() { updatePinnedTimstampTask.run(); - List metadataFiles = List.of( - "metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__9223372036854775106__1", - "metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__9223372036854775803__1", - "metadata__9223372036438563903__9223372036854775701__9223370311919910403__31__9223372036854775701__1" - ); + List metadataFiles = new ArrayList<>(); + metadataFiles.add("metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__9223372036854775106__1"); + metadataFiles.add("metadata__9223372036438563903__9223372036854775701__9223370311919910403__31__9223372036854775701__1"); + metadataFiles.add("metadata__9223372036438563903__9223372036854775800__9223370311919910398__31__9223372036854775803__1"); + // Removing file that is pinned by latest refresh timestamp + List metadataFilesToBeDeleted = new ArrayList<>(metadataFiles); + metadataFilesToBeDeleted.remove( + "metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__9223372036854775106__1" + ); assertEquals( - metadataFiles, + metadataFilesToBeDeleted, 
RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted(metadataFiles, new HashMap<>(), Long.MAX_VALUE, false, logger) ); } @@ -774,13 +778,15 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() { updatePinnedTimstampTask.run(); long currentTimeInMillis = System.currentTimeMillis(); String md1Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 200000); - String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis + 30000); - String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis + 60000); + String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 400000); + String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis + 30000); + String md4Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis + 60000); List metadataFiles = List.of( - "metadata__9223372036438563903__9223372036854774799__" + md1Timestamp + "__31__9223372036854775106__1", - "metadata__9223372036438563903__9223372036854775800__" + md2Timestamp + "__31__9223372036854775803__1", - "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" + "metadata__9223372036438563903__9223372036854774500__" + md1Timestamp + "__31__9223372036854775106__1", + "metadata__9223372036438563903__9223372036854774799__" + md2Timestamp + "__31__9223372036854775106__1", + "metadata__9223372036438563903__9223372036854775800__" + md3Timestamp + "__31__9223372036854775803__1", + "metadata__9223372036438563903__9223372036854775701__" + md4Timestamp + "__31__9223372036854775701__1" ); List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( @@ -791,24 +797,26 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() { logger ); assertEquals(1, metadataFilesToBeDeleted.size()); - assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); + assertEquals(metadataFiles.get(1), metadataFilesToBeDeleted.get(0)); } public void testGetMetadataFilesToBeDeletedExclusionBasedOnPinningOnly() throws IOException { long currentTimeInMillis = System.currentTimeMillis(); - String md1Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 200000); - String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 300000); - String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 600000); + String md1Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 190000); + String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 200000); + String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 300000); + String md4Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 600000); - long pinnedTimestamp = RemoteStoreUtils.invertLong(md2Timestamp) + 10000; + long pinnedTimestamp = RemoteStoreUtils.invertLong(md3Timestamp) + 10000; when(blobContainer.listBlobs()).thenReturn(Map.of(randomInt(100) + "__" + pinnedTimestamp, new PlainBlobMetadata("xyz", 100))); updatePinnedTimstampTask.run(); List metadataFiles = List.of( - "metadata__9223372036438563903__9223372036854774799__" + md1Timestamp + "__31__9223372036854775106__1", - "metadata__9223372036438563903__9223372036854775600__" + md2Timestamp + "__31__9223372036854775803__1", - "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" + "metadata__9223372036438563903__9223372036854774500__" + md1Timestamp + "__31__9223372036854775701__1", + "metadata__9223372036438563903__9223372036854774799__" + md2Timestamp + 
"__31__9223372036854775106__1", + "metadata__9223372036438563903__9223372036854775600__" + md3Timestamp + "__31__9223372036854775803__1", + "metadata__9223372036438563903__9223372036854775701__" + md4Timestamp + "__31__9223372036854775701__1" ); List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( @@ -819,8 +827,8 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnPinningOnly() throws logger ); assertEquals(2, metadataFilesToBeDeleted.size()); - assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); - assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(1)); + assertEquals(metadataFiles.get(1), metadataFilesToBeDeleted.get(0)); + assertEquals(metadataFiles.get(3), metadataFilesToBeDeleted.get(1)); } public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeAndPinning() throws IOException { @@ -856,6 +864,7 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationOnly() thro String md1Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 200000); String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 300000); String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 600000); + String md4Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 800000); when(blobContainer.listBlobs()).thenReturn(Map.of()); @@ -866,8 +875,10 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationOnly() thro "metadata__9223372036438563903__9223372036854775800__" + md1Timestamp + "__31__9223372036854775106__1", // MaxGen 12 "metadata__9223372036438563903__9223372036854775795__" + md2Timestamp + "__31__9223372036854775803__1", + // MaxGen 9 + "metadata__9223372036438563903__9223372036854775798__" + md3Timestamp + "__31__9223372036854775701__1", // MaxGen 10 - "metadata__9223372036438563903__9223372036854775798__" + md3Timestamp + "__31__9223372036854775701__1" + "metadata__9223372036438563903__9223372036854775797__" + md4Timestamp + "__31__9223372036854775701__1" ); List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( @@ -878,8 +889,8 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationOnly() thro logger ); assertEquals(2, metadataFilesToBeDeleted.size()); - assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); - assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(1)); + assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(0)); + assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(1)); } public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationDeleteIndex() throws IOException { @@ -892,13 +903,15 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationDeleteIndex updatePinnedTimstampTask.run(); - List metadataFiles = List.of( - // MaxGen 7 - "metadata__9223372036438563903__9223372036854775800__" + md1Timestamp + "__31__9223372036854775106__1", - // MaxGen 12 - "metadata__9223372036438563903__9223372036854775795__" + md2Timestamp + "__31__9223372036854775803__1", - // MaxGen 17 - "metadata__9223372036438563903__9223372036854775790__" + md3Timestamp + "__31__9223372036854775701__1" + List metadataFiles = new ArrayList<>( + List.of( + // MaxGen 12 + "metadata__9223372036438563903__9223372036854775795__" + md2Timestamp + "__31__9223372036854775803__1", + // MaxGen 7 + "metadata__9223372036438563903__9223372036854775800__" + md1Timestamp + "__31__9223372036854775106__1", + // MaxGen 17 + 
"metadata__9223372036438563903__9223372036854775790__" + md3Timestamp + "__31__9223372036854775701__1" + ) ); List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( @@ -908,6 +921,10 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationDeleteIndex true, logger ); + + // Metadata file corresponding to latest pinned timestamp fetch is always considered pinned + metadataFiles.remove(metadataFiles.get(2)); + assertEquals(metadataFiles, metadataFilesToBeDeleted); } diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java index 7acee449a1b46..81ea16c80dd79 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicatorTests.java @@ -10,7 +10,13 @@ import org.apache.lucene.store.IOContext; import org.opensearch.OpenSearchCorruptionException; +import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; @@ -19,10 +25,12 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; import java.io.IOException; import java.io.UncheckedIOException; @@ -45,6 +53,48 @@ public class SegmentReplicatorTests extends IndexShardTestCase { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); + public void testReplicationWithUnassignedPrimary() throws Exception { + final IndexShard replica = newStartedShard(false, settings, new NRTReplicationEngineFactory()); + final IndexShard primary = newStartedShard(true, settings, new NRTReplicationEngineFactory()); + SegmentReplicator replicator = new SegmentReplicator(threadPool); + + ClusterService cs = mock(ClusterService.class); + IndexShardRoutingTable.Builder shardRoutingTable = new IndexShardRoutingTable.Builder(replica.shardId()); + shardRoutingTable.addShard(replica.routingEntry()); + shardRoutingTable.addShard(primary.routingEntry().moveToUnassigned(new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "test"))); + + when(cs.state()).thenReturn(buildClusterState(replica, shardRoutingTable)); + replicator.setSourceFactory(new SegmentReplicationSourceFactory(mock(TransportService.class), mock(RecoverySettings.class), cs)); + expectThrows(IllegalStateException.class, () -> replicator.startReplication(replica)); + closeShards(replica, primary); + } + + public void testReplicationWithUnknownPrimaryNode() throws Exception { + final IndexShard replica = newStartedShard(false, settings, new NRTReplicationEngineFactory()); + final IndexShard primary = 
newStartedShard(true, settings, new NRTReplicationEngineFactory()); + SegmentReplicator replicator = new SegmentReplicator(threadPool); + + ClusterService cs = mock(ClusterService.class); + IndexShardRoutingTable.Builder shardRoutingTable = new IndexShardRoutingTable.Builder(replica.shardId()); + shardRoutingTable.addShard(replica.routingEntry()); + shardRoutingTable.addShard(primary.routingEntry()); + + when(cs.state()).thenReturn(buildClusterState(replica, shardRoutingTable)); + replicator.setSourceFactory(new SegmentReplicationSourceFactory(mock(TransportService.class), mock(RecoverySettings.class), cs)); + expectThrows(IllegalStateException.class, () -> replicator.startReplication(replica)); + closeShards(replica, primary); + } + + private ClusterState buildClusterState(IndexShard replica, IndexShardRoutingTable.Builder indexShard) { + return ClusterState.builder(clusterService.state()) + .routingTable( + RoutingTable.builder() + .add(IndexRoutingTable.builder(replica.shardId().getIndex()).addIndexShard(indexShard.build()).build()) + .build() + ) + .build(); + } + public void testStartReplicationWithoutSourceFactory() { ThreadPool threadpool = mock(ThreadPool.class); ExecutorService mock = mock(ExecutorService.class); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index e280ab8c7a73c..f2b06b0926b81 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -431,6 +431,13 @@ public void testRepositoryCreationShallowV2() throws Exception { ); } + // Modify repo-1 settings. This should go through + updateRepository( + client, + "test-repo-1", + Settings.builder().put(snapshotRepoSettings1).put("max_snapshot_bytes_per_sec", "10k").build() + ); + // Disable shallow snapshot V2 setting on test-repo-1 updateRepository( client, diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsActionTests.java new file mode 100644 index 0000000000000..8b46f676636fd --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestClusterStatsActionTests.java @@ -0,0 +1,171 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.object.HasToString.hasToString; +import static org.mockito.Mockito.mock; + +public class RestClusterStatsActionTests extends OpenSearchTestCase { + + private RestClusterStatsAction action; + + @Override + public void setUp() throws Exception { + super.setUp(); + action = new RestClusterStatsAction(); + } + + public void testFromRequestBasePath() { + final HashMap params = new HashMap<>(); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + ClusterStatsRequest clusterStatsRequest = RestClusterStatsAction.fromRequest(request); + assertNotNull(clusterStatsRequest); + assertTrue(clusterStatsRequest.useAggregatedNodeLevelResponses()); + assertFalse(clusterStatsRequest.computeAllMetrics()); + assertNotNull(clusterStatsRequest.requestedMetrics()); + assertFalse(clusterStatsRequest.requestedMetrics().isEmpty()); + for (ClusterStatsRequest.Metric metric : ClusterStatsRequest.Metric.values()) { + assertTrue(clusterStatsRequest.requestedMetrics().contains(metric)); + } + assertNotNull(clusterStatsRequest.indicesMetrics()); + assertFalse(clusterStatsRequest.indicesMetrics().isEmpty()); + for (ClusterStatsRequest.IndexMetric indexMetric : ClusterStatsRequest.IndexMetric.values()) { + assertTrue(clusterStatsRequest.indicesMetrics().contains(indexMetric)); + } + } + + public void testFromRequestWithNodeStatsMetricsFilter() { + Set metricsRequested = Set.of(ClusterStatsRequest.Metric.OS, ClusterStatsRequest.Metric.FS); + String metricParam = metricsRequested.stream().map(ClusterStatsRequest.Metric::metricName).collect(Collectors.joining(",")); + final HashMap params = new HashMap<>(); + params.put("metric", metricParam); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + ClusterStatsRequest clusterStatsRequest = RestClusterStatsAction.fromRequest(request); + assertNotNull(clusterStatsRequest); + assertTrue(clusterStatsRequest.useAggregatedNodeLevelResponses()); + assertFalse(clusterStatsRequest.computeAllMetrics()); + assertFalse(clusterStatsRequest.requestedMetrics().isEmpty()); + assertEquals(2, clusterStatsRequest.requestedMetrics().size()); + assertEquals(metricsRequested, clusterStatsRequest.requestedMetrics()); + assertTrue(clusterStatsRequest.indicesMetrics().isEmpty()); + } + + public void testFromRequestWithIndicesStatsMetricsFilter() { + Set metricsRequested = Set.of( + ClusterStatsRequest.Metric.OS, + ClusterStatsRequest.Metric.FS, + ClusterStatsRequest.Metric.INDICES + ); + Set indicesMetricsRequested = Set.of( + ClusterStatsRequest.IndexMetric.SHARDS, + ClusterStatsRequest.IndexMetric.SEGMENTS + ); + String metricParam = metricsRequested.stream().map(ClusterStatsRequest.Metric::metricName).collect(Collectors.joining(",")); + String indicesMetricParam = indicesMetricsRequested.stream() + .map(ClusterStatsRequest.IndexMetric::metricName) + .collect(Collectors.joining(",")); + final HashMap params = new HashMap<>(); + params.put("metric", 
metricParam); + params.put("index_metric", indicesMetricParam); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + ClusterStatsRequest clusterStatsRequest = RestClusterStatsAction.fromRequest(request); + assertNotNull(clusterStatsRequest); + assertTrue(clusterStatsRequest.useAggregatedNodeLevelResponses()); + assertFalse(clusterStatsRequest.computeAllMetrics()); + assertFalse(clusterStatsRequest.requestedMetrics().isEmpty()); + assertEquals(3, clusterStatsRequest.requestedMetrics().size()); + assertEquals(metricsRequested, clusterStatsRequest.requestedMetrics()); + assertFalse(clusterStatsRequest.indicesMetrics().isEmpty()); + assertEquals(2, clusterStatsRequest.indicesMetrics().size()); + assertEquals(indicesMetricsRequested, clusterStatsRequest.indicesMetrics()); + } + + public void testUnrecognizedMetric() { + final HashMap params = new HashMap<>(); + final String metric = randomAlphaOfLength(64); + params.put("metric", metric); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request [/_cluster/stats] contains unrecognized metric: [" + metric + "]"))); + } + + public void testUnrecognizedIndexMetric() { + final HashMap params = new HashMap<>(); + params.put("metric", "_all,"); + final String indexMetric = randomAlphaOfLength(64); + params.put("index_metric", indexMetric); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request [/_cluster/stats] contains unrecognized index metric: [" + indexMetric + "]"))); + } + + public void testAllMetricsRequestWithOtherMetric() { + final HashMap params = new HashMap<>(); + final String metric = randomSubsetOf(1, RestClusterStatsAction.METRIC_REQUEST_CONSUMER_MAP.keySet()).get(0); + params.put("metric", "_all," + metric); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat(e, hasToString(containsString("request [/_cluster/stats] contains _all and individual metrics [_all," + metric + "]"))); + } + + public void testAllIndexMetricsRequestWithOtherIndicesMetric() { + final HashMap params = new HashMap<>(); + params.put("metric", "_all,"); + final String indexMetric = randomSubsetOf(1, RestClusterStatsAction.INDEX_METRIC_TO_REQUEST_CONSUMER_MAP.keySet()).get(0); + params.put("index_metric", "_all," + indexMetric); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat( + e, + hasToString(containsString("request [/_cluster/stats] contains _all and individual index metrics [_all," + indexMetric + "]")) + ); + } + + public void 
testIndexMetricsRequestWithoutMetricIndices() { + final HashMap params = new HashMap<>(); + params.put("metric", "os"); + final String indexMetric = randomSubsetOf(1, RestClusterStatsAction.INDEX_METRIC_TO_REQUEST_CONSUMER_MAP.keySet()).get(0); + params.put("index_metric", indexMetric); + final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_cluster/stats").withParams(params).build(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> action.prepareRequest(request, mock(NodeClient.class)) + ); + assertThat( + e, + hasToString( + containsString("request [/_cluster/stats] contains index metrics [" + indexMetric + "] but indices stats not requested") + ) + ); + } + +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java new file mode 100644 index 0000000000000..0c88154ca2b38 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.opensearch.action.OriginalIndices; +import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.Strings; +import org.opensearch.index.IndexService; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.CompositeIndexSettings; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; +import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.ReaderContext; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.internal.ShardSearchRequest; +import org.opensearch.search.startree.StarTreeQueryContext; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; + +public class SearchServiceStarTreeTests extends OpenSearchSingleNodeTestCase { + + public void testParseQueryToOriginalOrStarTreeQuery() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + setStarTreeIndexSetting("true"); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + CreateIndexRequestBuilder builder = client().admin() + 
.indices() + .prepareCreate("test") + .setSettings(settings) + .setMapping(StarTreeDocValuesFormatTests.getExpandedMapping()); + createIndex("test", builder); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); + IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + -1, + null, + null + ); + + // Case 1: No query or aggregations, should not use star tree + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 2: MatchAllQuery present but no aggregations, should not use star tree + sourceBuilder = new SearchSourceBuilder().query(new MatchAllQueryBuilder()); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 3: MatchAllQuery and aggregations present, should use star tree + sourceBuilder = new SearchSourceBuilder().size(0) + .query(new MatchAllQueryBuilder()) + .aggregation(AggregationBuilders.max("test").field("field")); + CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo( + "startree", + CompositeMappedFieldType.CompositeFieldType.STAR_TREE + ); + Map expectedQueryMap = null; + assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + + // Case 4: MatchAllQuery and aggregations present, but postFilter specified, should not use star tree + sourceBuilder = new SearchSourceBuilder().size(0) + .query(new MatchAllQueryBuilder()) + .aggregation(AggregationBuilders.max("test").field("field")) + .postFilter(new MatchAllQueryBuilder()); + assertStarTreeContext(request, sourceBuilder, null, -1); + + // Case 5: TermQuery and single aggregation, should use star tree, but not initialize query cache + sourceBuilder = new SearchSourceBuilder().size(0) + .query(new TermQueryBuilder("sndv", 1)) + .aggregation(AggregationBuilders.max("test").field("field")); + expectedQueryMap = Map.of("sndv", 1L); + assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + + // Case 6: TermQuery and multiple aggregations present, should use star tree & initialize cache + sourceBuilder = new SearchSourceBuilder().size(0) + .query(new TermQueryBuilder("sndv", 1)) + .aggregation(AggregationBuilders.max("test").field("field")) + .aggregation(AggregationBuilders.sum("test2").field("field")); + expectedQueryMap = Map.of("sndv", 1L); + assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, 0), 0); + + // Case 7: No query, metric aggregations present, should use star tree + sourceBuilder = new SearchSourceBuilder().size(0).aggregation(AggregationBuilders.max("test").field("field")); + assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, null, -1), -1); + + setStarTreeIndexSetting(null); + } + + private void setStarTreeIndexSetting(String value) throws IOException { + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), value).build()) + .execute(); + } + + private void assertStarTreeContext( + ShardSearchRequest request, + SearchSourceBuilder sourceBuilder, + 
StarTreeQueryContext expectedContext, + int expectedCacheUsage + ) throws IOException { + request.source(sourceBuilder); + SearchService searchService = getInstanceFromNode(SearchService.class); + try (ReaderContext reader = searchService.createOrGetReaderContext(request, false)) { + SearchContext context = searchService.createContext(reader, request, null, true); + StarTreeQueryContext actualContext = context.getStarTreeQueryContext(); + + if (expectedContext == null) { + assertThat(context.getStarTreeQueryContext(), nullValue()); + } else { + assertThat(actualContext, notNullValue()); + assertEquals(expectedContext.getStarTree().getType(), actualContext.getStarTree().getType()); + assertEquals(expectedContext.getStarTree().getField(), actualContext.getStarTree().getField()); + assertEquals(expectedContext.getQueryMap(), actualContext.getQueryMap()); + if (expectedCacheUsage > -1) { + assertEquals(expectedCacheUsage, actualContext.getStarTreeValues().length); + } else { + assertNull(actualContext.getStarTreeValues()); + } + } + searchService.doStop(); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java new file mode 100644 index 0000000000000..0327bd9990784 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -0,0 +1,317 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene912.Lucene912Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite912.Composite912Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregatorTestCase; +import 
org.opensearch.search.aggregations.InternalAggregation; +import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.opensearch.search.aggregations.metrics.InternalAvg; +import org.opensearch.search.aggregations.metrics.InternalMax; +import org.opensearch.search.aggregations.metrics.InternalMin; +import org.opensearch.search.aggregations.metrics.InternalSum; +import org.opensearch.search.aggregations.metrics.InternalValueCount; +import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.search.aggregations.metrics.MinAggregationBuilder; +import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; +import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; +import java.util.function.BiConsumer; +import java.util.function.Function; + +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class MetricAggregatorTests extends AggregatorTestCase { + + private static final String FIELD_NAME = "field"; + private static final NumberFieldMapper.NumberType DEFAULT_FIELD_TYPE = NumberFieldMapper.NumberType.LONG; + private static final MappedFieldType DEFAULT_MAPPED_FIELD = new NumberFieldMapper.NumberFieldType(FIELD_NAME, DEFAULT_FIELD_TYPE); + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec getCodec() { + final Logger testLogger = LogManager.getLogger(MetricAggregatorTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService(StarTreeDocValuesFormatTests.getExpandedMapping()); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite912Codec(Lucene912Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testStarTreeDocValues() throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec()); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + + Random random = RandomizedTest.getRandom(); + int totalDocs = 100; + final String SNDV = "sndv"; + final String DV = "dv"; + int val; + + List docs = new ArrayList<>(); + // Index 100 random documents + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + if (random.nextBoolean()) { + val = random.nextInt(10) - 5; // Random long between -5 and 4 + doc.add(new SortedNumericDocValuesField(SNDV, val)); + } + if (random.nextBoolean()) { + val = random.nextInt(20) - 10; // Random long between -10 and 9 + doc.add(new SortedNumericDocValuesField(DV, val)); + } + if (random.nextBoolean()) { + val = random.nextInt(50); // Random long between 0 and 49 + doc.add(new 
SortedNumericDocValuesField(FIELD_NAME, val)); + } + iw.addDocument(doc); + docs.add(doc); + } + + if (randomBoolean()) { + iw.forceMerge(1); + } + iw.close(); + + DirectoryReader ir = DirectoryReader.open(directory); + initValuesSourceRegistry(); + LeafReaderContext context = ir.leaves().get(0); + + SegmentReader reader = Lucene.segmentReader(context.reader()); + IndexSearcher indexSearcher = newSearcher(reader, false, false); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + + SumAggregationBuilder sumAggregationBuilder = sum("_name").field(FIELD_NAME); + MaxAggregationBuilder maxAggregationBuilder = max("_name").field(FIELD_NAME); + MinAggregationBuilder minAggregationBuilder = min("_name").field(FIELD_NAME); + ValueCountAggregationBuilder valueCountAggregationBuilder = count("_name").field(FIELD_NAME); + AvgAggregationBuilder avgAggregationBuilder = avg("_name").field(FIELD_NAME); + + List supportedDimensions = new LinkedList<>(); + supportedDimensions.add(new NumericDimension(SNDV)); + supportedDimensions.add(new NumericDimension(DV)); + + Query query = new MatchAllDocsQuery(); + // match-all query + QueryBuilder queryBuilder = null; // no predicates + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalSum::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + maxAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalMax::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + minAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalMin::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + valueCountAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalValueCount::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + avgAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalAvg::getValue) + ); + + // Numeric-terms query + for (int cases = 0; cases < 100; cases++) { + String queryField; + long queryValue; + if (randomBoolean()) { + queryField = SNDV; + queryValue = random.nextInt(10); + } else { + queryField = DV; + queryValue = random.nextInt(20) - 15; + } + + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalSum::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + maxAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalMax::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + minAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalMin::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + valueCountAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalValueCount::getValue) + ); + testCase( + indexSearcher, + query, + queryBuilder, + avgAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalAvg::getValue) + ); + } + + ir.close(); + directory.close(); + } + + BiConsumer verifyAggregation(Function valueExtractor) { + return 
(expectedAggregation, actualAggregation) -> assertEquals( + valueExtractor.apply(expectedAggregation).doubleValue(), + valueExtractor.apply(actualAggregation).doubleValue(), + 0.0f + ); + } + + private void testCase( + IndexSearcher searcher, + Query query, + QueryBuilder queryBuilder, + T aggBuilder, + CompositeIndexFieldInfo starTree, + List supportedDimensions, + BiConsumer verify + ) throws IOException { + V starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + searcher, + query, + queryBuilder, + aggBuilder, + starTree, + supportedDimensions, + DEFAULT_MAX_BUCKETS, + false, + DEFAULT_MAPPED_FIELD + ); + V expectedAggregation = searchAndReduceStarTree( + createIndexSettings(), + searcher, + query, + queryBuilder, + aggBuilder, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + DEFAULT_MAPPED_FIELD + ); + verify.accept(expectedAggregation, starTreeAggregation); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java new file mode 100644 index 0000000000000..f8eb71a40319a --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -0,0 +1,319 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.startree; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene912.Lucene912Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite912.Composite912Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.search.aggregations.AggregatorTestCase; +import org.opensearch.search.startree.StarTreeFilter; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; 
+ +import static org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests.topMapping; + +public class StarTreeFilterTests extends AggregatorTestCase { + + private static final String FIELD_NAME = "field"; + private static final String SNDV = "sndv"; + private static final String SDV = "sdv"; + private static final String DV = "dv"; + + @Before + public void setup() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + } + + @After + public void teardown() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } + + protected Codec getCodec(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) { + final Logger testLogger = LogManager.getLogger(StarTreeFilterTests.class); + MapperService mapperService; + try { + mapperService = StarTreeDocValuesFormatTests.createMapperService( + getExpandedMapping(maxLeafDoc, skipStarNodeCreationForSDVDimension) + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + return new Composite912Codec(Lucene912Codec.Mode.BEST_SPEED, mapperService, testLogger); + } + + public void testStarTreeFilterWithNoDocsInSVDField() throws IOException { + testStarTreeFilter(5, true); + } + + public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOException { + testStarTreeFilter(10, false); + } + + private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + Directory directory = newDirectory(); + IndexWriterConfig conf = newIndexWriterConfig(null); + conf.setCodec(getCodec(maxLeafDoc, skipStarNodeCreationForSDVDimension)); + conf.setMergePolicy(newLogMergePolicy()); + RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); + int totalDocs = 100; + + List docs = new ArrayList<>(); + for (int i = 0; i < totalDocs; i++) { + Document doc = new Document(); + doc.add(new SortedNumericDocValuesField(SNDV, i)); + doc.add(new SortedNumericDocValuesField(DV, 2 * i)); + doc.add(new SortedNumericDocValuesField(FIELD_NAME, 3 * i)); + if (skipStarNodeCreationForSDVDimension) { + // adding SDV field only star node creation is skipped for SDV dimension + doc.add(new SortedNumericDocValuesField(SDV, 4 * i)); + } + iw.addDocument(doc); + docs.add(doc); + } + iw.forceMerge(1); + iw.close(); + + DirectoryReader ir = DirectoryReader.open(directory); + initValuesSourceRegistry(); + LeafReaderContext context = ir.leaves().get(0); + SegmentReader reader = Lucene.segmentReader(context.reader()); + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + + long starTreeDocCount, docCount; + + // assert that all documents are included if no filters are given + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(), context); + docCount = getDocCount(docs, Map.of()); + assertEquals(totalDocs, starTreeDocCount); + assertEquals(docCount, starTreeDocCount); + + // single filter - matches docs + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L), context); + docCount = getDocCount(docs, Map.of(SNDV, 0L)); + assertEquals(1, docCount); + assertEquals(docCount, starTreeDocCount); + + // single filter on 3rd field in ordered dimension - matches docs + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(DV, 0L), context); + docCount = getDocCount(docs, Map.of(DV, 0L)); + assertEquals(1, docCount); + assertEquals(docCount, starTreeDocCount); + + // single filter - does not 
match docs + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 101L), context); + docCount = getDocCount(docs, Map.of(SNDV, 101L)); + assertEquals(0, docCount); + assertEquals(docCount, starTreeDocCount); + + // single filter on 3rd field in ordered dimension - does not match docs + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(DV, -101L), context); + docCount = getDocCount(docs, Map.of(DV, -101L)); + assertEquals(0, docCount); + assertEquals(docCount, starTreeDocCount); + + // multiple filters - matches docs + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L, DV, 0L), context); + docCount = getDocCount(docs, Map.of(SNDV, 0L, DV, 0L)); + assertEquals(1, docCount); + assertEquals(docCount, starTreeDocCount); + + // no document should match the filter + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L, DV, -11L), context); + docCount = getDocCount(docs, Map.of(SNDV, 0L, DV, -11L)); + assertEquals(0, docCount); + assertEquals(docCount, starTreeDocCount); + + // Only the first filter should match some documents, second filter matches none + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L, DV, -100L), context); + docCount = getDocCount(docs, Map.of(SNDV, 0L, DV, -100L)); + assertEquals(0, docCount); + assertEquals(docCount, starTreeDocCount); + + // non-dimension fields in filter - should throw IllegalArgumentException + expectThrows( + IllegalArgumentException.class, + () -> getDocCountFromStarTree(starTreeDocValuesReader, Map.of(FIELD_NAME, 0L), context) + ); + + if (skipStarNodeCreationForSDVDimension) { + // SDV values were indexed in the documents, so the SDV filter matches one document + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SDV, 4L), context); + docCount = getDocCount(docs, Map.of(SDV, 4L)); + assertEquals(1, docCount); + assertEquals(docCount, starTreeDocCount); + } else { + // SDV values were not added to the documents, so the SDV filter matches no documents + starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SDV, 4L), context); + docCount = getDocCount(docs, Map.of(SDV, 4L)); + assertEquals(0, docCount); + assertEquals(docCount, starTreeDocCount); + } + + ir.close(); + directory.close(); + } + + // Counts the documents having field SNDV & applied filters + private long getDocCount(List documents, Map filters) { + long count = 0; + for (Document doc : documents) { + // Check if SNDV field is present + IndexableField sndvField = doc.getField(SNDV); + if (sndvField == null) continue; // Skip if SNDV is not present + + // Apply filters if provided + if (!filters.isEmpty()) { + boolean matches = filters.entrySet().stream().allMatch(entry -> { + IndexableField field = doc.getField(entry.getKey()); + return field != null && field.numericValue().longValue() == entry.getValue(); + }); + if (!matches) continue; + } + + // Increment count if the document passes all conditions + count++; + } + return count; + } + + // Returns count of documents in the star tree having field SNDV & applied filters + private long getDocCountFromStarTree(CompositeIndexReader starTreeDocValuesReader, Map filters, LeafReaderContext context) + throws IOException { + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); + CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(context, starTree); + FixedBitSet filteredValues = StarTreeFilter.getStarTreeResult(starTreeValues, filters); + 
SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator( + StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + SNDV, + MetricStat.VALUE_COUNT.getTypeName() + ) + ); + + long docCount = 0; + int numBits = filteredValues.length(); + if (numBits > 0) { + for (int bit = filteredValues.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? filteredValues.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + + // Assert that we can advance to the document ID in the values iterator + boolean canAdvance = valuesIterator.advanceExact(bit); + assert canAdvance : "Cannot advance to document ID " + bit + " in values iterator."; + + // Iterate over values for the current document ID + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + long value = valuesIterator.nextValue(); + // Assert that the value is as expected using the provided consumer + docCount += value; + } + } + } + return docCount; + } + + public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipStarNodeCreationForSDVDimension) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", maxLeafDocs); + if (skipStarNodeCreationForSDVDimension) { + b.startArray("skip_star_node_creation_for_dimensions"); + b.value("sdv"); + b.endArray(); + } + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "sndv"); + b.endObject(); + b.startObject(); + b.field("name", "sdv"); + b.endObject(); + b.startObject(); + b.field("name", "dv"); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", "field"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("avg"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", "sndv"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("avg"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("sndv"); + b.field("type", "integer"); + b.endObject(); + b.startObject("sdv"); + b.field("type", "integer"); + b.endObject(); + b.startObject("dv"); + b.field("type", "integer"); + b.endObject(); + b.startObject("field"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + }); + } +} diff --git a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java index 9c022aade5dc6..4919cbc599892 100644 --- a/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java +++ b/server/src/test/java/org/opensearch/search/approximate/ApproximatePointRangeQueryTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; +import org.apache.lucene.search.TotalHits.Relation; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.opensearch.search.internal.SearchContext; @@ -175,6 +176,7 @@ public void testApproximateRangeWithSizeOverDefault() throws IOException { try { long lower = 0; long upper = 12000; + long 
maxHits = 12001; Query approximateQuery = new ApproximatePointRangeQuery( "point", pack(lower).bytes, @@ -188,7 +190,13 @@ protected String toString(int dimension, byte[] value) { }; IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(approximateQuery, 11000); - assertEquals(topDocs.totalHits, new TotalHits(11000, TotalHits.Relation.EQUAL_TO)); + + if (topDocs.totalHits.relation == Relation.EQUAL_TO) { + assertEquals(topDocs.totalHits.value, 11000); + } else { + assertTrue(11000 <= topDocs.totalHits.value); + assertTrue(maxHits >= topDocs.totalHits.value); + } } catch (IOException e) { throw new RuntimeException(e); } @@ -226,7 +234,7 @@ protected String toString(int dimension, byte[] value) { } }; Query query = LongPoint.newRangeQuery("point", lower, upper); - ; + IndexSearcher searcher = new IndexSearcher(reader); TopDocs topDocs = searcher.search(approximateQuery, 10); TopDocs topDocs1 = searcher.search(query, 10); @@ -235,7 +243,6 @@ protected String toString(int dimension, byte[] value) { assertNotEquals(topDocs.totalHits, topDocs1.totalHits); assertEquals(topDocs.totalHits, new TotalHits(10, TotalHits.Relation.EQUAL_TO)); assertEquals(topDocs1.totalHits, new TotalHits(101, TotalHits.Relation.EQUAL_TO)); - } catch (IOException e) { throw new RuntimeException(e); } @@ -278,7 +285,7 @@ protected String toString(int dimension, byte[] value) { } }; Query query = LongPoint.newRangeQuery("point", lower, upper); - ; + IndexSearcher searcher = new IndexSearcher(reader); Sort sort = new Sort(new SortField("point", SortField.Type.LONG)); TopDocs topDocs = searcher.search(approximateQuery, 10, sort); diff --git a/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java b/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java new file mode 100644 index 0000000000000..92da980d70f34 --- /dev/null +++ b/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java @@ -0,0 +1,297 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.snapshots; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.StepListener; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.close.CloseIndexRequest; +import org.opensearch.action.admin.indices.close.CloseIndexResponse; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest; +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse; +import org.opensearch.action.admin.indices.open.OpenIndexRequest; +import org.opensearch.action.admin.indices.open.OpenIndexResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.After; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +public class RestoreServiceIntegTests extends OpenSearchSingleNodeTestCase { + private final String indexName = "index_1"; + private final String renamedIndexName = "index_2"; + private final String aliasName = "alias_1"; + private final String renamedAliasName = "alias_2"; + private final String repoName = "repo_1"; + private final String snapShotName = "snap_1"; + private final int waitInSeconds = 60; + private boolean exists; + private boolean closed; + private boolean includeAlias; + private boolean renameAliases; + private boolean renameIndexes; + + public RestoreServiceIntegTests(TestCase testCase) { + this.exists = testCase.exists; + this.closed = testCase.closed; + this.includeAlias = testCase.includeAlias; + this.renameAliases = testCase.renameAliases; + this.renameIndexes = testCase.renameIndexes; + } + + public static class TestCase { + public boolean exists; + public boolean closed; + public boolean includeAlias; + public boolean renameAliases; + public boolean renameIndexes; + + public TestCase(boolean exists, boolean closed, boolean includeAlias, boolean renameAliases, boolean renameIndexes) { + this.exists = exists; + this.closed = closed; + this.includeAlias = includeAlias; + this.renameAliases = renameAliases; + this.renameIndexes = renameIndexes; + } + + public String toString() { + return String.join( + " and ", + new String[] { + exists ? 
"target index exists and is" + (closed ? "closed" : "open") : "doesn't exist", + includeAlias ? "including aliases" : "not including aliases", + renameIndexes ? "renaming indexes" : "not renaming indexes", + renameAliases ? "renaming aliases" : "not renaming aliases" } + ); + } + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { new TestCase(false, false, true, true, true) }, + new Object[] { new TestCase(false, false, false, true, true) }, + new Object[] { new TestCase(false, false, true, false, false) }, + new Object[] { new TestCase(false, false, false, false, false) }, + new Object[] { new TestCase(true, false, true, true, true) }, + new Object[] { new TestCase(true, false, false, true, true) }, + new Object[] { new TestCase(true, true, true, true, true) }, + new Object[] { new TestCase(true, true, false, true, true) }, + new Object[] { new TestCase(true, false, false, false, false) }, + new Object[] { new TestCase(true, false, true, false, false) }, + new Object[] { new TestCase(true, true, false, false, false) }, + new Object[] { new TestCase(true, true, true, false, false) } + ); + } + + @After + public void cleanup() throws InterruptedException { + final CountDownLatch allDeleted = new CountDownLatch(3); + for (String indexName : new String[] { indexName, renamedIndexName }) { + final StepListener existsIndexResponseStepListener = new StepListener<>(); + client().admin().indices().exists(new IndicesExistsRequest(indexName), existsIndexResponseStepListener); + continueOrDie(existsIndexResponseStepListener, resp -> { + if (resp.isExists()) { + final StepListener deleteIndexResponseStepListener = new StepListener<>(); + client().admin().indices().delete(new DeleteIndexRequest(indexName), deleteIndexResponseStepListener); + continueOrDie(deleteIndexResponseStepListener, ignored -> allDeleted.countDown()); + } else { + allDeleted.countDown(); + } + }); + } + + final StepListener snapStatusResponseStepListener = new StepListener<>(); + client().admin().cluster().getSnapshots(new GetSnapshotsRequest(repoName), snapStatusResponseStepListener); + continueOrDie(snapStatusResponseStepListener, resp -> { + if (resp.getSnapshots().stream().anyMatch(s -> s.snapshotId().getName().equals(snapShotName))) { + final StepListener deleteSnapResponseStepListener = new StepListener<>(); + client().admin() + .cluster() + .deleteSnapshot(new DeleteSnapshotRequest(repoName, snapShotName), deleteSnapResponseStepListener); + continueOrDie(deleteSnapResponseStepListener, ignored -> allDeleted.countDown()); + } else { + allDeleted.countDown(); + } + }); + + allDeleted.await(waitInSeconds, TimeUnit.SECONDS); + } + + public void testRestoreWithRename() throws Exception { + + assert this.exists || !this.closed; // index close state doesn't exist when the index doesn't exist - so only permit one value of + // closed to avoid pointless duplicate tests + final boolean expectSuccess = !this.exists || this.closed; + final int documents = randomIntBetween(1, 100); + + this.createIndex(indexName); + if (this.exists && this.renameIndexes) { + this.createIndex(renamedIndexName); + } + + final StepListener putRepositoryResponseStepListener = new StepListener<>(); + Settings.Builder settings = Settings.builder().put("location", randomAlphaOfLength(10)); + OpenSearchIntegTestCase.putRepository( + client().admin().cluster(), + repoName, + FsRepository.TYPE, + settings, + putRepositoryResponseStepListener + ); + + final StepListener createAliasResponseStepListener = new 
StepListener<>(); + client().admin() + .indices() + .aliases( + new IndicesAliasesRequest().addAliasAction(IndicesAliasesRequest.AliasActions.add().alias(aliasName).index(indexName)), + createAliasResponseStepListener + ); + + final CountDownLatch isDocumentFinished = new CountDownLatch(1); + continueOrDie(createAliasResponseStepListener, ignored -> { + final BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < documents; ++i) { + bulkRequest.add(new IndexRequest(indexName).source(Collections.singletonMap("foo", "bar" + i))); + } + final StepListener bulkResponseStepListener = new StepListener<>(); + client().bulk(bulkRequest, bulkResponseStepListener); + continueOrDie(bulkResponseStepListener, bulkResponse -> { + assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); + assertEquals(documents, bulkResponse.getItems().length); + isDocumentFinished.countDown(); + }); + }); + + isDocumentFinished.await(waitInSeconds, TimeUnit.SECONDS); + + if (this.closed) { + final CountDownLatch isClosed = new CountDownLatch(1); + final StepListener closeIndexResponseStepListener = new StepListener<>(); + final String indexToClose = this.renameIndexes ? renamedIndexName : indexName; + client().admin().indices().close(new CloseIndexRequest(indexToClose), closeIndexResponseStepListener); + + continueOrDie(closeIndexResponseStepListener, ignored -> { isClosed.countDown(); }); + isClosed.await(waitInSeconds, TimeUnit.SECONDS); + } + + final StepListener createSnapshotResponseStepListener = new StepListener<>(); + continueOrDie(putRepositoryResponseStepListener, ignored -> { + client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapShotName) + .setWaitForCompletion(true) + .setPartial(true) + .execute(createSnapshotResponseStepListener); + }); + + final CountDownLatch isRestorable = new CountDownLatch(1); + + if (!this.exists && !this.renameIndexes) { + final StepListener deleteIndexResponseStepListener = new StepListener<>(); + continueOrDie(createSnapshotResponseStepListener, ignored -> { + client().admin().indices().delete(new DeleteIndexRequest(indexName), deleteIndexResponseStepListener); + }); + continueOrDie(deleteIndexResponseStepListener, ignored -> isRestorable.countDown()); + } else { + continueOrDie(createSnapshotResponseStepListener, ignored -> isRestorable.countDown()); + } + + isRestorable.await(waitInSeconds, TimeUnit.SECONDS); + + final StepListener restoreSnapshotResponseStepListener = new StepListener<>(); + final CountDownLatch isRestored = new CountDownLatch(1); + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repoName, snapShotName).includeAliases(this.includeAlias) + .waitForCompletion(true); + if (this.renameAliases) { + restoreSnapshotRequest = restoreSnapshotRequest.renameAliasPattern("1").renameAliasReplacement("2"); + } + if (this.renameIndexes) { + restoreSnapshotRequest = restoreSnapshotRequest.renamePattern("1").renameReplacement("2"); + } + client().admin().cluster().restoreSnapshot(restoreSnapshotRequest, restoreSnapshotResponseStepListener); + + restoreSnapshotResponseStepListener.whenComplete(ignored -> { + isRestored.countDown(); + assertTrue("unexpected successful restore", expectSuccess); + }, e -> { + isRestored.countDown(); + if (expectSuccess) { + throw new RuntimeException(e); + } + }); + + isRestored.await(waitInSeconds, TimeUnit.SECONDS); + + if (expectSuccess) { + final String indexToSearch = 
this.renameIndexes ? renamedIndexName : indexName; + final String aliasToSearch = this.renameAliases ? renamedAliasName : aliasName; + + if (this.closed) { + final CountDownLatch isOpened = new CountDownLatch(1); + final StepListener openIndexResponseStepListener = new StepListener<>(); + client().admin().indices().open(new OpenIndexRequest(indexToSearch).waitForActiveShards(1), openIndexResponseStepListener); + continueOrDie(openIndexResponseStepListener, ignored -> { isOpened.countDown(); }); + + isOpened.await(waitInSeconds, TimeUnit.SECONDS); + } + + final CountDownLatch isSearchDone = new CountDownLatch(this.includeAlias ? 2 : 1); + final StepListener searchIndexResponseListener = new StepListener<>(); + final StepListener searchAliasResponseListener = new StepListener<>(); + client().search( + new SearchRequest(indexToSearch).source(new SearchSourceBuilder().size(0).trackTotalHits(true)), + searchIndexResponseListener + ); + continueOrDie(searchIndexResponseListener, ignored -> { isSearchDone.countDown(); }); + if (this.includeAlias) { + client().search( + new SearchRequest(aliasToSearch).source(new SearchSourceBuilder().size(0).trackTotalHits(true)), + searchAliasResponseListener + ); + continueOrDie(searchAliasResponseListener, ignored -> { isSearchDone.countDown(); }); + } + + isSearchDone.await(waitInSeconds, TimeUnit.SECONDS); + + assertEquals(documents, Objects.requireNonNull(searchIndexResponseListener.result().getHits().getTotalHits()).value); + if (this.includeAlias) { + assertEquals(documents, Objects.requireNonNull(searchAliasResponseListener.result().getHits().getTotalHits()).value); + } + } + } + + private static void continueOrDie(StepListener listener, CheckedConsumer onResponse) { + listener.whenComplete(onResponse, e -> { throw new AssertionError(e); }); + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/opensearch/snapshots/RestoreServiceTests.java index b56f1b47edc33..3629f324a62e4 100644 --- a/server/src/test/java/org/opensearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/RestoreServiceTests.java @@ -36,13 +36,17 @@ import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.settings.Settings; import org.opensearch.core.index.Index; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; import java.util.List; import static org.opensearch.cluster.DataStreamTestHelper.createTimestampField; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -122,4 +126,37 @@ public void testPrefixNotChanged() { assertEquals(renamedDataStreamName, renamedDataStream.getName()); assertEquals(Collections.singletonList(renamedIndex), renamedDataStream.getIndices()); } + + public void testValidateReplicationTypeRestoreSettings_WhenSnapshotIsDocument_RestoreToDocument() { + SnapshotId snapshotId = new SnapshotId("snapshotId", "123"); + Snapshot snapshot = new Snapshot("testRepo", snapshotId); + IndexMetadata indexMetadata = mock(IndexMetadata.class); + Settings settings = Settings.builder() + 
.put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT.toString()) + .build(); + when(indexMetadata.getSettings()).thenReturn(settings); + + assertThrows( + SnapshotRestoreException.class, + () -> RestoreService.validateReplicationTypeRestoreSettings(snapshot, ReplicationType.DOCUMENT.toString(), indexMetadata) + ); + + } + + public void testValidateReplicationTypeRestoreSettings_WhenSnapshotIsSegment_RestoreToDocument() { + SnapshotId snapshotId = new SnapshotId("snapshotId", "123"); + Snapshot snapshot = new Snapshot("testRepo", snapshotId); + IndexMetadata indexMetadata = mock(IndexMetadata.class); + Settings settings = Settings.builder() + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT.toString()) + .build(); + when(indexMetadata.getSettings()).thenReturn(settings); + + assertThrows( + SnapshotRestoreException.class, + () -> RestoreService.validateReplicationTypeRestoreSettings(snapshot, ReplicationType.SEGMENT.toString(), indexMetadata) + ); + } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java index a00c74f669eac..87ab95fef6a53 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotRequestsTests.java @@ -77,6 +77,8 @@ public void testRestoreSnapshotRequestParsing() throws IOException { builder.field("allow_no_indices", indicesOptions.allowNoIndices()); builder.field("rename_pattern", "rename-from"); builder.field("rename_replacement", "rename-to"); + builder.field("rename_alias_pattern", "alias-rename-from"); + builder.field("rename_alias_replacement", "alias-rename-to"); boolean partial = randomBoolean(); builder.field("partial", partial); builder.startObject("settings").field("set1", "val1").endObject(); @@ -103,6 +105,8 @@ public void testRestoreSnapshotRequestParsing() throws IOException { assertArrayEquals(request.indices(), new String[] { "foo", "bar", "baz" }); assertEquals("rename-from", request.renamePattern()); assertEquals("rename-to", request.renameReplacement()); + assertEquals("alias-rename-from", request.renameAliasPattern()); + assertEquals("alias-rename-to", request.renameAliasReplacement()); assertEquals(partial, request.partial()); assertArrayEquals(request.ignoreIndexSettings(), new String[] { "set2", "set3" }); boolean expectedIgnoreAvailable = includeIgnoreUnavailable diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index d17e661615b0d..d21282ff0441f 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -66,6 +66,9 @@ import org.opensearch.action.admin.cluster.state.ClusterStateRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.cluster.state.TransportClusterStateAction; +import org.opensearch.action.admin.indices.alias.IndicesAliasesAction; +import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.opensearch.action.admin.indices.alias.TransportIndicesAliasesAction; import org.opensearch.action.admin.indices.create.CreateIndexAction; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import 
org.opensearch.action.admin.indices.create.CreateIndexResponse; @@ -141,6 +144,7 @@ import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MetadataCreateIndexService; import org.opensearch.cluster.metadata.MetadataDeleteIndexService; +import org.opensearch.cluster.metadata.MetadataIndexAliasesService; import org.opensearch.cluster.metadata.MetadataIndexUpgradeService; import org.opensearch.cluster.metadata.MetadataMappingService; import org.opensearch.cluster.node.DiscoveryNode; @@ -958,6 +962,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { String repoName = "repo"; String snapshotName = "snapshot"; final String index = "test"; + final String alias = "test_alias"; final int shards = randomIntBetween(1, 10); TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( @@ -967,9 +972,8 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { final StepListener createSnapshotResponseStepListener = new StepListener<>(); final int documentsFirstSnapshot = randomIntBetween(0, 100); - continueOrDie( - createRepoAndIndex(repoName, index, shards), + createRepoAndIndexAndAlias(repoName, index, shards, alias), createIndexResponse -> indexNDocuments( documentsFirstSnapshot, index, @@ -1009,19 +1013,27 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { .cluster() .restoreSnapshot( new RestoreSnapshotRequest(repoName, secondSnapshotName).waitForCompletion(true) + .includeAliases(true) .renamePattern("(.+)") - .renameReplacement("restored_$1"), + .renameReplacement("restored_$1") + .renameAliasPattern("(.+)") + .renameAliasReplacement("restored_alias_$1"), restoreSnapshotResponseListener ) ); }); - final StepListener searchResponseListener = new StepListener<>(); + final StepListener searchIndexResponseListener = new StepListener<>(); + final StepListener searchAliasResponseListener = new StepListener<>(); continueOrDie(restoreSnapshotResponseListener, restoreSnapshotResponse -> { assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards()); client().search( new SearchRequest("restored_" + index).source(new SearchSourceBuilder().size(0).trackTotalHits(true)), - searchResponseListener + searchIndexResponseListener + ); + client().search( + new SearchRequest("restored_alias_" + alias).source(new SearchSourceBuilder().size(0).trackTotalHits(true)), + searchAliasResponseListener ); }); @@ -1029,7 +1041,11 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { assertEquals( documentsFirstSnapshot + documentsSecondSnapshot, - Objects.requireNonNull(searchResponseListener.result().getHits().getTotalHits()).value + Objects.requireNonNull(searchIndexResponseListener.result().getHits().getTotalHits()).value + ); + assertEquals( + documentsFirstSnapshot + documentsSecondSnapshot, + Objects.requireNonNull(searchAliasResponseListener.result().getHits().getTotalHits()).value ); assertThat(deleteSnapshotStepListener.result().isAcknowledged(), is(true)); assertThat(restoreSnapshotResponseListener.result().getRestoreInfo().failedShards(), is(0)); @@ -1520,6 +1536,22 @@ private StepListener createRepoAndIndex(String repoName, St return createIndexResponseStepListener; } + private StepListener createRepoAndIndexAndAlias(String repoName, String index, int shards, String alias) { + final StepListener createAliasListener = new StepListener<>(); + + continueOrDie( + createRepoAndIndex(repoName, index, shards), + acknowledgedResponse -> client().admin() + .indices() + .aliases( + new 
IndicesAliasesRequest().addAliasAction(IndicesAliasesRequest.AliasActions.add().index(index).alias(alias)), + createAliasListener + ) + ); + + return createAliasListener; + } + private void clearDisruptionsAndAwaitSync() { testClusterNodes.clearNetworkDisruptions(); stabilize(); @@ -2171,6 +2203,30 @@ public void onFailure(final Exception e) { indexNameExpressionResolver ) ); + final MetadataDeleteIndexService metadataDeleteIndexService = new MetadataDeleteIndexService( + settings, + clusterService, + allocationService + ); + final MetadataIndexAliasesService metadataIndexAliasesService = new MetadataIndexAliasesService( + clusterService, + indicesService, + new AliasValidator(), + metadataDeleteIndexService, + namedXContentRegistry + ); + actions.put( + IndicesAliasesAction.INSTANCE, + new TransportIndicesAliasesAction( + transportService, + clusterService, + threadPool, + metadataIndexAliasesService, + actionFilters, + indexNameExpressionResolver, + new RequestValidators<>(Collections.emptyList()) + ) + ); final MappingUpdatedAction mappingUpdatedAction = new MappingUpdatedAction(settings, clusterSettings, clusterService); mappingUpdatedAction.setClient(client); final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction( @@ -2337,7 +2393,7 @@ public void onFailure(final Exception e) { transportService, clusterService, threadPool, - new MetadataDeleteIndexService(settings, clusterService, allocationService), + metadataDeleteIndexService, actionFilters, indexNameExpressionResolver, new DestructiveOperations(settings, clusterSettings) diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java index e374636f60d22..2aa061366465e 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; @@ -68,6 +69,7 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -78,6 +80,7 @@ import org.mockito.ArgumentCaptor; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; +import static org.opensearch.snapshots.SnapshotsService.getRepoSnapshotUUIDTuple; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -804,6 +807,36 @@ public void testRunReadyCloneCompletionListenerFailure() throws Exception { assertEquals(expectedUpdate.hashCode(), capturedUpdate.hashCode()); } + public void testGetRepoSnapshotUUIDTuple() { + String repoName = "repoName"; + String pinningEntity = "repoName__OstrHGrERqaR__-597zHYQ"; + Tuple t = getRepoSnapshotUUIDTuple(pinningEntity); + assertEquals(repoName, t.v1()); + assertEquals("OstrHGrERqaR__-597zHYQ", t.v2()); + } + + public void testIsOrphanPinnedEntity() { + String repoName = "repoName"; + ArrayList snapshotUUIDs = new ArrayList<>( + Arrays.asList("OouZCQ30TqypFBZGgk1C7g", "RSP6GLJfSO6SsMmUjZNAaA", "OstrHGrERqaR__-597zHYQ, 
Zjlnf8IHRxqFBijj0m52gw") + ); + + ArrayList pinnedEntities = new ArrayList<>( + Arrays.asList( + "repoName__OouZCQ30TqypFBZGgk1C7g", + "repoName__RSP6GLJfSO6SsMmUjZNAaA", + "repoName__OstrHGrERqaR__-597zHYQ, Zjlnf8IHRxqFBijj0m52gw" + ) + ); + + for (String pinnedEntity : pinnedEntities) { + assertFalse(SnapshotsService.isOrphanPinnedEntity(repoName, snapshotUUIDs, pinnedEntity)); + } + + String orphanEntity = "repoName__orphan"; + assertTrue(SnapshotsService.isOrphanPinnedEntity(repoName, snapshotUUIDs, orphanEntity)); + } + /** * Helper method to create a SnapshotsService instance with a provided ClusterService. * This method mocks all necessary dependencies for the SnapshotsService. diff --git a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java index 45428865259c3..f22759ce968aa 100644 --- a/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java +++ b/server/src/test/java/org/opensearch/wlm/QueryGroupServiceTests.java @@ -48,6 +48,7 @@ import static org.mockito.Mockito.when; public class QueryGroupServiceTests extends OpenSearchTestCase { + public static final String QUERY_GROUP_ID = "queryGroupId1"; private QueryGroupService queryGroupService; private QueryGroupTaskCancellationService mockCancellationService; private ClusterService mockClusterService; @@ -68,6 +69,7 @@ public void setUp() throws Exception { mockNodeDuressTrackers = Mockito.mock(NodeDuressTrackers.class); mockCancellationService = Mockito.mock(TestQueryGroupCancellationService.class); mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(); + when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(false); queryGroupService = new QueryGroupService( mockCancellationService, @@ -203,26 +205,52 @@ public void testRejectIfNeeded_whenQueryGroupIdIsNullOrDefaultOne() { verify(spyMap, never()).get(any()); } + public void testRejectIfNeeded_whenSoftModeQueryGroupIsContendedAndNodeInDuress() { + Set activeQueryGroups = getActiveQueryGroups( + "testQueryGroup", + QUERY_GROUP_ID, + MutableQueryGroupFragment.ResiliencyMode.SOFT, + Map.of(ResourceType.CPU, 0.10) + ); + mockQueryGroupStateMap = new HashMap<>(); + mockQueryGroupStateMap.put("queryGroupId1", new QueryGroupState()); + QueryGroupState state = new QueryGroupState(); + QueryGroupState.ResourceTypeState cpuResourceState = new QueryGroupState.ResourceTypeState(ResourceType.CPU); + cpuResourceState.setLastRecordedUsage(0.10); + state.getResourceState().put(ResourceType.CPU, cpuResourceState); + QueryGroupState spyState = spy(state); + mockQueryGroupStateMap.put(QUERY_GROUP_ID, spyState); + + mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); + + queryGroupService = new QueryGroupService( + mockCancellationService, + mockClusterService, + mockThreadPool, + mockWorkloadManagementSettings, + mockNodeDuressTrackers, + mockQueryGroupsStateAccessor, + activeQueryGroups, + new HashSet<>() + ); + when(mockWorkloadManagementSettings.getWlmMode()).thenReturn(WlmMode.ENABLED); + when(mockNodeDuressTrackers.isNodeInDuress()).thenReturn(true); + assertThrows(OpenSearchRejectedExecutionException.class, () -> queryGroupService.rejectIfNeeded("queryGroupId1")); + } + public void testRejectIfNeeded_whenQueryGroupIsSoftMode() { - QueryGroup testQueryGroup = new QueryGroup( + Set activeQueryGroups = getActiveQueryGroups( "testQueryGroup", - "queryGroupId1", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.SOFT, 
Map.of(ResourceType.CPU, 0.10)), - 1L + QUERY_GROUP_ID, + MutableQueryGroupFragment.ResiliencyMode.SOFT, + Map.of(ResourceType.CPU, 0.10) ); - Set activeQueryGroups = new HashSet<>() { - { - add(testQueryGroup); - } - }; mockQueryGroupStateMap = new HashMap<>(); QueryGroupState spyState = spy(new QueryGroupState()); mockQueryGroupStateMap.put("queryGroupId1", spyState); mockQueryGroupsStateAccessor = new QueryGroupsStateAccessor(mockQueryGroupStateMap); - Map spyMap = spy(mockQueryGroupStateMap); - queryGroupService = new QueryGroupService( mockCancellationService, mockClusterService, @@ -239,11 +267,11 @@ public void testRejectIfNeeded_whenQueryGroupIsSoftMode() { } public void testRejectIfNeeded_whenQueryGroupIsEnforcedMode_andNotBreaching() { - QueryGroup testQueryGroup = new QueryGroup( + QueryGroup testQueryGroup = getQueryGroup( "testQueryGroup", "queryGroupId1", - new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.CPU, 0.10)), - 1L + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.10) ); QueryGroup spuQueryGroup = spy(testQueryGroup); Set activeQueryGroups = new HashSet<>() { @@ -464,6 +492,31 @@ public void testShouldSBPHandle() { } + private static Set getActiveQueryGroups( + String name, + String id, + MutableQueryGroupFragment.ResiliencyMode mode, + Map resourceLimits + ) { + QueryGroup testQueryGroup = getQueryGroup(name, id, mode, resourceLimits); + Set activeQueryGroups = new HashSet<>() { + { + add(testQueryGroup); + } + }; + return activeQueryGroups; + } + + private static QueryGroup getQueryGroup( + String name, + String id, + MutableQueryGroupFragment.ResiliencyMode mode, + Map resourceLimits + ) { + QueryGroup testQueryGroup = new QueryGroup(name, id, new MutableQueryGroupFragment(mode, resourceLimits), 1L); + return testQueryGroup; + } + // This is needed to test the behavior of QueryGroupService#doRun method static class TestQueryGroupCancellationService extends QueryGroupTaskCancellationService { public TestQueryGroupCancellationService( diff --git a/server/src/test/resources/org/opensearch/tasks/missing-fields-task-index-mapping.json b/server/src/test/resources/org/opensearch/tasks/missing-fields-task-index-mapping.json new file mode 100644 index 0000000000000..2e59bbc4803bf --- /dev/null +++ b/server/src/test/resources/org/opensearch/tasks/missing-fields-task-index-mapping.json @@ -0,0 +1,63 @@ +{ + "_doc" : { + "_meta": { + "version": 5 + }, + "dynamic" : "strict", + "properties" : { + "completed": { + "type": "boolean" + }, + "task" : { + "properties": { + "action": { + "type": "keyword" + }, + "cancellable": { + "type": "boolean" + }, + "cancelled": { + "type": "boolean" + }, + "id": { + "type": "long" + }, + "parent_task_id": { + "type": "keyword" + }, + "node": { + "type": "keyword" + }, + "running_time_in_nanos": { + "type": "long" + }, + "start_time_in_millis": { + "type": "long" + }, + "type": { + "type": "keyword" + }, + "status": { + "type" : "object", + "enabled" : false + }, + "description": { + "type": "text" + }, + "headers": { + "type" : "object", + "enabled" : false + } + } + }, + "response" : { + "type" : "object", + "enabled" : false + }, + "error" : { + "type" : "object", + "enabled" : false + } + } + } +} diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 65fb55afbc7bd..2bd0268ca136b 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -77,7 +77,7 @@ dependencies { api 
"ch.qos.logback:logback-core:1.5.10" api "ch.qos.logback:logback-classic:1.2.13" api "org.jboss.xnio:xnio-nio:3.8.16.Final" - api 'org.jline:jline:3.27.0' + api 'org.jline:jline:3.27.1' api 'org.apache.commons:commons-configuration2:2.11.0' api 'com.nimbusds:nimbus-jose-jwt:9.41.1' api ('org.apache.kerby:kerb-admin:2.1.0') { diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 4abd7fbea9cff..e1728c4476699 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -91,11 +91,16 @@ import org.opensearch.index.cache.bitset.BitsetFilterCache; import org.opensearch.index.cache.bitset.BitsetFilterCache.Listener; import org.opensearch.index.cache.query.DisabledQueryCache; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.fielddata.IndexFieldDataService; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; +import org.opensearch.index.mapper.CompositeMappedFieldType; import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.DateFieldMapper; @@ -117,6 +122,7 @@ import org.opensearch.index.mapper.RangeType; import org.opensearch.index.mapper.StarTreeMapper; import org.opensearch.index.mapper.TextFieldMapper; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.SearchOperationListener; @@ -135,12 +141,14 @@ import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.aggregations.support.ValuesSourceType; +import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.subphase.FetchDocValuesPhase; import org.opensearch.search.fetch.subphase.FetchSourcePhase; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.startree.StarTreeQueryContext; import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.OpenSearchTestCase; import org.junit.After; @@ -155,6 +163,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.function.Function; @@ -331,6 +340,35 @@ protected A createAggregator(AggregationBuilder aggregati return aggregator; } + protected CountingAggregator createCountingAggregator( + Query query, + QueryBuilder queryBuilder, + AggregationBuilder aggregationBuilder, + IndexSearcher indexSearcher, + IndexSettings indexSettings, + CompositeIndexFieldInfo starTree, + List supportedDimensions, + 
MultiBucketConsumer bucketConsumer, + MappedFieldType... fieldTypes + ) throws IOException { + SearchContext searchContext; + if (starTree != null) { + searchContext = createSearchContextWithStarTreeContext( + indexSearcher, + indexSettings, + query, + queryBuilder, + starTree, + supportedDimensions, + bucketConsumer, + fieldTypes + ); + } else { + searchContext = createSearchContext(indexSearcher, indexSettings, query, bucketConsumer, fieldTypes); + } + return new CountingAggregator(new AtomicInteger(), createAggregator(aggregationBuilder, searchContext)); + } + /** * Create a {@linkplain SearchContext} for testing an {@link Aggregator}. */ @@ -344,6 +382,49 @@ protected SearchContext createSearchContext( return createSearchContext(indexSearcher, indexSettings, query, bucketConsumer, new NoneCircuitBreakerService(), fieldTypes); } + protected SearchContext createSearchContextWithStarTreeContext( + IndexSearcher indexSearcher, + IndexSettings indexSettings, + Query query, + QueryBuilder queryBuilder, + CompositeIndexFieldInfo starTree, + List supportedDimensions, + MultiBucketConsumer bucketConsumer, + MappedFieldType... fieldTypes + ) throws IOException { + SearchContext searchContext = createSearchContext( + indexSearcher, + indexSettings, + query, + bucketConsumer, + new NoneCircuitBreakerService(), + fieldTypes + ); + + // Mock SearchContextAggregations + SearchContextAggregations searchContextAggregations = mock(SearchContextAggregations.class); + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + when(searchContext.aggregations()).thenReturn(searchContextAggregations); + when(searchContextAggregations.factories()).thenReturn(aggregatorFactories); + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + + CompositeDataCubeFieldType compositeMappedFieldType = mock(CompositeDataCubeFieldType.class); + when(compositeMappedFieldType.name()).thenReturn(starTree.getField()); + when(compositeMappedFieldType.getCompositeIndexType()).thenReturn(starTree.getType()); + Set compositeFieldTypes = Set.of(compositeMappedFieldType); + + when((compositeMappedFieldType).getDimensions()).thenReturn(supportedDimensions); + MapperService mapperService = mock(MapperService.class); + when(mapperService.getCompositeFieldTypes()).thenReturn(compositeFieldTypes); + when(searchContext.mapperService()).thenReturn(mapperService); + + SearchSourceBuilder sb = new SearchSourceBuilder().query(queryBuilder); + StarTreeQueryContext starTreeQueryContext = StarTreeQueryHelper.getStarTreeQueryContext(searchContext, sb); + + when(searchContext.getStarTreeQueryContext()).thenReturn(starTreeQueryContext); + return searchContext; + } + protected SearchContext createSearchContext( IndexSearcher indexSearcher, IndexSettings indexSettings, @@ -651,6 +732,67 @@ protected A searchAndReduc return internalAgg; } + protected A searchAndReduceStarTree( + IndexSettings indexSettings, + IndexSearcher searcher, + Query query, + QueryBuilder queryBuilder, + AggregationBuilder builder, + CompositeIndexFieldInfo compositeIndexFieldInfo, + List supportedDimensions, + int maxBucket, + boolean hasNested, + MappedFieldType... 
fieldTypes + ) throws IOException { + query = query.rewrite(searcher); + final IndexReaderContext ctx = searcher.getTopReaderContext(); + final PipelineTree pipelines = builder.buildPipelineTree(); + List aggs = new ArrayList<>(); + if (hasNested) { + query = Queries.filtered(query, Queries.newNonNestedFilter()); + } + + MultiBucketConsumer bucketConsumer = new MultiBucketConsumer( + maxBucket, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + CountingAggregator countingAggregator = createCountingAggregator( + query, + queryBuilder, + builder, + searcher, + indexSettings, + compositeIndexFieldInfo, + supportedDimensions, + bucketConsumer, + fieldTypes + ); + + countingAggregator.preCollection(); + searcher.search(query, countingAggregator); + countingAggregator.postCollection(); + aggs.add(countingAggregator.buildTopLevel()); + if (compositeIndexFieldInfo != null) { + assertEquals(0, countingAggregator.collectCounter.get()); + } + + MultiBucketConsumer reduceBucketConsumer = new MultiBucketConsumer( + maxBucket, + new NoneCircuitBreakerService().getBreaker(CircuitBreaker.REQUEST) + ); + InternalAggregation.ReduceContext context = InternalAggregation.ReduceContext.forFinalReduction( + countingAggregator.context().bigArrays(), + getMockScriptService(), + reduceBucketConsumer, + pipelines + ); + + @SuppressWarnings("unchecked") + A internalAgg = (A) aggs.get(0).reduce(aggs, context); + doAssertReducedMultiBucketConsumer(internalAgg, reduceBucketConsumer); + return internalAgg; + } + protected void doAssertReducedMultiBucketConsumer(Aggregation agg, MultiBucketConsumerService.MultiBucketConsumer bucketConsumer) { InternalAggregationTestCase.assertMultiBucketConsumer(agg, bucketConsumer); } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 0bfa70a771f65..9bf725766069c 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -33,6 +33,7 @@ import org.opensearch.Version; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; @@ -61,6 +62,7 @@ import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.compress.CompressorRegistry; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -506,6 +508,26 @@ protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, Li return snapshotInfo; } + protected void restoreSnapshot( + String repositoryName, + String snapshotName, + String indexName, + String restoredIndexName, + Settings indexSettings + ) { + logger.info("--> restoring snapshot [{}] of {} in [{}] to [{}]", snapshotName, indexName, repositoryName, restoredIndexName); + RestoreSnapshotRequestBuilder builder = client().admin() + .cluster() + .prepareRestoreSnapshot(repositoryName, snapshotName) + .setWaitForCompletion(false) + 
.setRenamePattern(indexName) + .setRenameReplacement(restoredIndexName); + if (indexSettings != null) { + builder.setIndexSettings(indexSettings); + } + assertEquals(builder.get().status(), RestStatus.ACCEPTED); + } + protected void createIndexWithRandomDocs(String indexName, int docCount) throws InterruptedException { createIndex(indexName); ensureGreen();
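For context, a minimal usage sketch (illustration only, not part of this patch) of the restoreSnapshot helper added to AbstractSnapshotIntegTestCase above. It assumes a test class extending AbstractSnapshotIntegTestCase with the framework's usual imports in scope; the repository, index, and snapshot names and the replica-count override are assumed values chosen for the example.

public void testRestoreToRenamedIndex() throws Exception {
    final String repoName = "test-repo";                 // assumed repository name
    final String indexName = "test-idx";                 // assumed source index
    final String restoredIndexName = "test-idx-restored"; // assumed rename target

    createRepository(repoName, "fs");
    createIndexWithRandomDocs(indexName, 100);
    createSnapshot(repoName, "snap-1", List.of(indexName));

    // Restore under a new name with an overridden replica count. The helper only
    // asserts that the restore request was ACCEPTED (waitForCompletion is false),
    // so wait for the restored index to become green before using it.
    restoreSnapshot(
        repoName,
        "snap-1",
        indexName,
        restoredIndexName,
        Settings.builder().put("index.number_of_replicas", 0).build()
    );
    ensureGreen(restoredIndexName);
}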