diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 6b86da2c91261..cfaadc5ed1e5e 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -23,4 +23,6 @@ BWC_VERSION: - "2.9.0" - "2.9.1" - "2.10.0" + - "2.10.1" - "2.11.0" + - "2.12.0" diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index cd7c1bb980eec..c47b9e0b69256 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -19,7 +19,7 @@ Resolves #[Issue number to be closed when this PR is merged] - [ ] New functionality has javadoc added - [ ] Commits are signed per the DCO using --signoff - [ ] Commit changes are listed out in CHANGELOG.md file (See: [Changelog](../blob/main/CONTRIBUTING.md#changelog)) -- [ ] GitHub issue/PR created in [OpenSearch documentation repo](https://github.com/opensearch-project/documentation-website) for the required public documentation changes (#[Issue/PR number]) +- [ ] Public documentation issue/PR [created](https://github.com/opensearch-project/documentation-website/issues/new/choose) By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/OpenSearch/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). 
diff --git a/.github/workflows/assemble.yml b/.github/workflows/assemble.yml new file mode 100644 index 0000000000000..6a66ac5fb5609 --- /dev/null +++ b/.github/workflows/assemble.yml @@ -0,0 +1,26 @@ +name: Gradle Assemble +on: [pull_request] + +jobs: + assemble: + if: github.repository == 'opensearch-project/OpenSearch' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v4 + - name: Set up JDK 11 + uses: actions/setup-java@v3 + with: + java-version: 11 + distribution: temurin + - name: Setup docker (missing on MacOS) + if: runner.os == 'macos' + run: | + brew install docker + colima start + sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock + - name: Run Gradle (assemble) + run: | + ./gradlew assemble --parallel --no-build-cache -PDISABLE_BUILD_CACHE diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml index f4622859916c7..b04f404b11c55 100644 --- a/.github/workflows/precommit.yml +++ b/.github/workflows/precommit.yml @@ -1,4 +1,4 @@ -name: Gradle Precommit and Assemble +name: Gradle Precommit on: [pull_request] jobs: @@ -19,12 +19,3 @@ jobs: - name: Run Gradle (precommit) run: | ./gradlew javadoc precommit --parallel - - name: Setup docker (missing on MacOS) - if: runner.os == 'macos' - run: | - brew install docker - colima start - sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - - name: Run Gradle (assemble) - run: | - ./gradlew assemble --parallel diff --git a/CHANGELOG.md b/CHANGELOG.md index 50922b85a0c0d..a9e5bb3982708 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,7 +33,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 - Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 -- Bump `org.apache.commons:commons-compress` from 1.22 to 1.23.0 
- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 - Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) @@ -54,6 +53,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) + ### Deprecated @@ -82,65 +84,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added -- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386)) -- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681)) -- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694)) -- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666)) -- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131)) -- Add capability to restrict async durability mode for remote indexes 
([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189)) -- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562)) -- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204)) -- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839)) ### Dependencies -- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575)) -- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968)) -- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950)) -- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972)) -- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971)) -- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022)) -- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098)) -- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125)) -- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752)) -- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 
([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126)) -- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299)) -- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.0 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208)) -- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([10219](https://github.com/opensearch-project/OpenSearch/pull/10219))` -- Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) -- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302)) -- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303)) -- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301)) ### Changed -- Add instrumentation in rest and network layer. 
([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415)) -- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916)) -- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) -- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042)) -- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122)) -- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246)) -- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200)) -- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356)) ### Deprecated ### Removed -- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447)) ### Fixed -- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725)) -- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045)) -- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082)) -- Fix remove ingest processor handing ignore_missing parameter not correctly ([10089](https://github.com/opensearch-project/OpenSearch/pull/10089)) -- Fix circular dependency in Settings initialization 
([10194](https://github.com/opensearch-project/OpenSearch/pull/10194)) -- Fix registration and initialization of multiple extensions ([10256](https://github.com/opensearch-project/OpenSearch/pull/10256)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.11...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index d7bdd09ea882e..6d3e0f018657e 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -103,7 +103,7 @@ dependencies { api localGroovy() api 'commons-codec:commons-codec:1.16.0' - api 'org.apache.commons:commons-compress:1.23.0' + api 'org.apache.commons:commons-compress:1.24.0' api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0' @@ -114,7 +114,7 @@ dependencies { api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" - api 'de.thetaphi:forbiddenapis:3.5.1' + api 'de.thetaphi:forbiddenapis:3.6' api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.4' diff --git a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java index c5b4de157c75c..662510fbbf61c 100644 --- a/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java +++ b/buildSrc/reaper/src/main/java/org/opensearch/gradle/reaper/Reaper.java @@ -45,17 +45,16 @@ /** * A standalone process that will reap external services after a build dies. - * *
* The main method will wait indefinitely on the parent process (Gradle) by * reading from stdin. When Gradle shuts down, whether normally or abruptly, the * pipe will be broken and read will return. - * + *
* The reaper will then iterate over the files in the configured directory, * and execute the given commands. If any commands fail, a failure message is * written to stderr. Otherwise, the input file will be deleted. If no inputs diff --git a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java index cddd03ccc2019..4d45640b75e3d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/BwcVersions.java @@ -52,15 +52,15 @@ /** * A container for opensearch supported version information used in BWC testing. - * + *
* Parse the Java source file containing the versions declarations and use the known rules to figure out which are all * the version the current one is wire and index compatible with. * On top of this, figure out which of these are unreleased and provide the branch they can be built from. - * + *
* Note that in this context, currentVersion is the unreleased version this build operates on. * At any point in time there will surely be four such unreleased versions being worked on, * thus currentVersion will be one of these. - * + *
* Considering: *
* Each branch has a current version, and expected compatible versions are parsed from the server code's Version` class. * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased * version number to server in all branches when a version is released. diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java index 5ae7ad1595e2f..5259700b3a63d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggingOutputStream.java @@ -38,7 +38,7 @@ /** * Writes data passed to this stream as log messages. - * + *
* The stream will be flushed whenever a newline is detected.
* Allows setting an optional prefix before each line of output.
*/
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java
index 159270d28e3d6..c6e49dc44d6bd 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalBwcGitPlugin.java
@@ -76,7 +76,7 @@ public InternalBwcGitPlugin(ProviderFactory providerFactory, ExecOperations exec
public void apply(Project project) {
this.project = project;
this.gitExtension = project.getExtensions().create("bwcGitConfig", BwcGitExtension.class);
- Provider
* See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue
*/
class S3RetryingInputStream extends InputStream {
diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
index b13672b4179f8..b1b3e19eac275 100644
--- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
+++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java
@@ -90,6 +90,7 @@
import java.security.SecureRandom;
import java.time.Duration;
import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
import static java.util.Collections.emptyMap;
@@ -100,7 +101,7 @@ class S3Service implements Closeable {
private static final String DEFAULT_S3_ENDPOINT = "s3.amazonaws.com";
- private volatile Map
* It is important to note that this implementation does have some downsides in that each invocation of the
* {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a
* view of the underlying values.
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java
index dfa72d6d59a0d..55920bab4efd3 100644
--- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java
+++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransport.java
@@ -52,6 +52,7 @@
import org.opensearch.nio.NioSelector;
import org.opensearch.nio.NioSocketChannel;
import org.opensearch.nio.ServerChannelContext;
+import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.threadpool.ThreadPool;
import org.opensearch.transport.TcpTransport;
import org.opensearch.transport.TransportSettings;
@@ -84,9 +85,10 @@ protected NioTransport(
PageCacheRecycler pageCacheRecycler,
NamedWriteableRegistry namedWriteableRegistry,
CircuitBreakerService circuitBreakerService,
- NioGroupFactory groupFactory
+ NioGroupFactory groupFactory,
+ Tracer tracer
) {
- super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService);
+ super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService, tracer);
this.pageAllocator = new PageAllocator(pageCacheRecycler);
this.groupFactory = groupFactory;
}
diff --git a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java
index ec266d76eff3d..d4be876867651 100644
--- a/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java
+++ b/plugins/transport-nio/src/main/java/org/opensearch/transport/nio/NioTransportPlugin.java
@@ -91,7 +91,8 @@ public Map
* The queries to test are specified in json format, which turns out to work because we tend break here rarely. If the
* json format of a query being tested here then feel free to change this.
*/
diff --git a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java
index c3c332aecfd4c..8ca90791f649e 100644
--- a/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java
+++ b/qa/no-bootstrap-tests/src/test/java/org/opensearch/bootstrap/SpawnerNoBootstrapTests.java
@@ -65,7 +65,7 @@
/**
* Create a simple "daemon controller", put it in the right place and check that it runs.
- *
+ *
* Extends LuceneTestCase rather than OpenSearchTestCase as OpenSearchTestCase installs a system call filter, and
* that prevents the Spawner class from doing its job. Also needs to run in a separate JVM to other
* tests that extend OpenSearchTestCase for the same reason.
diff --git a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java
index 02a613be320c2..4bb3877fc04a8 100644
--- a/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java
+++ b/qa/os/src/test/java/org/opensearch/packaging/test/PackagingTestCase.java
@@ -441,7 +441,7 @@ public static Path createTempDir(String prefix) throws IOException {
/**
* Run the given action with a temporary copy of the config directory.
- *
+ *
* Files under the path passed to the action may be modified as necessary for the
* test to execute, and running OpenSearch with {@link #startOpenSearch()} will
* use the temporary directory.
diff --git a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java
index 7904d1a046916..958de24848178 100644
--- a/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java
+++ b/qa/os/src/test/java/org/opensearch/packaging/util/FileMatcher.java
@@ -51,7 +51,7 @@
/**
* Asserts that a file at a path matches its status as Directory/File, and its owner. If on a posix system, also matches the permission
* set is what we expect.
- *
+ *
* This class saves information about its failed matches in instance variables and so instances should not be reused
*/
public class FileMatcher extends TypeSafeMatcher
* For packages this is root, and for archives it is the user doing the installation.
*/
public String getOwner() {
diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
index aef363058b394..f963f8d221bb5 100644
--- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
+++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java
@@ -98,11 +98,11 @@ private void waitForSearchableDocs(String index, int shardCount, int replicaCoun
// Verify segment store
assertBusy(() -> {
- /**
- * Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by
- * line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging
- * to primary while remaining *replicaCount* records belongs to replica copies
- * */
+ /*
+ Use default tabular output and sort response based on shard,segment,primaryOrReplica columns to allow line by
+ line parsing where records related to a segment (e.g. _0) are chunked together with first record belonging
+ to primary while remaining *replicaCount* records belongs to replica copies
+ */
Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica");
segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count");
Response segrepStatsResponse = client().performRequest(segrepStatsRequest);
@@ -259,7 +259,8 @@ public void testIndexing() throws Exception {
* This test verifies that during rolling upgrades the segment replication does not break when replica shards can
* be running on older codec versions.
*
- * @throws Exception exception
+ * @throws Exception if index creation fails
+ * @throws UnsupportedOperationException if cluster type is unknown
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7679")
public void testIndexingWithSegRep() throws Exception {
diff --git a/release-notes/opensearch.release-notes-2.11.0.md b/release-notes/opensearch.release-notes-2.11.0.md
new file mode 100644
index 0000000000000..d7e9182f2a656
--- /dev/null
+++ b/release-notes/opensearch.release-notes-2.11.0.md
@@ -0,0 +1,75 @@
+## 2023-10-12 Version 2.11.0 Release Notes
+
+## [2.11]
+
+### Added
+- Add coordinator level stats for search latency ([#8386](https://github.com/opensearch-project/OpenSearch/issues/8386))
+- Add metrics for thread_pool task wait time ([#9681](https://github.com/opensearch-project/OpenSearch/pull/9681))
+- Async blob read support for S3 plugin ([#9694](https://github.com/opensearch-project/OpenSearch/pull/9694))
+- [Telemetry-Otel] Added support for OtlpGrpcSpanExporter exporter ([#9666](https://github.com/opensearch-project/OpenSearch/pull/9666))
+- Async blob read support for encrypted containers ([#10131](https://github.com/opensearch-project/OpenSearch/pull/10131))
+- Add capability to restrict async durability mode for remote indexes ([#10189](https://github.com/opensearch-project/OpenSearch/pull/10189))
+- Add Doc Status Counter for Indexing Engine ([#4562](https://github.com/opensearch-project/OpenSearch/issues/4562))
+- Add unreferenced file cleanup count to merge stats ([#10204](https://github.com/opensearch-project/OpenSearch/pull/10204))
+- [Remote Store] Add support to restrict creation & deletion if system repository and mutation of immutable settings of system repository ([#9839](https://github.com/opensearch-project/OpenSearch/pull/9839))
+- Improve compressed request handling ([#10261](https://github.com/opensearch-project/OpenSearch/pull/10261))
+
+### Dependencies
+- Bump `peter-evans/create-or-update-comment` from 2 to 3 ([#9575](https://github.com/opensearch-project/OpenSearch/pull/9575))
+- Bump `actions/checkout` from 2 to 4 ([#9968](https://github.com/opensearch-project/OpenSearch/pull/9968))
+- Bump OpenTelemetry from 1.26.0 to 1.30.1 ([#9950](https://github.com/opensearch-project/OpenSearch/pull/9950))
+- Bump `org.apache.commons:commons-compress` from 1.23.0 to 1.24.0 ([#9973, #9972](https://github.com/opensearch-project/OpenSearch/pull/9973, https://github.com/opensearch-project/OpenSearch/pull/9972))
+- Bump `com.google.cloud:google-cloud-core-http` from 2.21.1 to 2.23.0 ([#9971](https://github.com/opensearch-project/OpenSearch/pull/9971))
+- Bump `mockito` from 5.4.0 to 5.5.0 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
+- Bump `bytebuddy` from 1.14.3 to 1.14.7 ([#10022](https://github.com/opensearch-project/OpenSearch/pull/10022))
+- Bump `com.zaxxer:SparseBitSet` from 1.2 to 1.3 ([#10098](https://github.com/opensearch-project/OpenSearch/pull/10098))
+- Bump `tibdex/github-app-token` from 1.5.0 to 2.1.0 ([#10125](https://github.com/opensearch-project/OpenSearch/pull/10125))
+- Bump `org.wiremock:wiremock-standalone` from 2.35.0 to 3.1.0 ([#9752](https://github.com/opensearch-project/OpenSearch/pull/9752))
+- Bump `com.google.http-client:google-http-client-jackson2` from 1.43.2 to 1.43.3 ([#10126](https://github.com/opensearch-project/OpenSearch/pull/10126))
+- Bump `org.xerial.snappy:snappy-java` from 1.1.10.3 to 1.1.10.5 ([#10206](https://github.com/opensearch-project/OpenSearch/pull/10206), [#10299](https://github.com/opensearch-project/OpenSearch/pull/10299))
+- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
+- Bump `codecov/codecov-action` from 2 to 3 ([#10209](https://github.com/opensearch-project/OpenSearch/pull/10209))
+- Bump `org.bouncycastle:bcpkix-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump `org.bouncycastle:bcprov-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump `org.bouncycastle:bcmail-jdk15to18` from 1.75 to 1.76 ([#10219](https://github.com/opensearch-project/OpenSearch/pull/10219))
+- Bump Lucene from 9.7.0 to 9.8.0 ([#10276](https://github.com/opensearch-project/OpenSearch/pull/10276))
+- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295))
+- Bump asm from 9.5 to 9.6 ([#10302](https://github.com/opensearch-project/OpenSearch/pull/10302))
+- Bump netty from 4.1.97.Final to 4.1.99.Final ([#10303](https://github.com/opensearch-project/OpenSearch/pull/10303))
+- Bump `peter-evans/create-pull-request` from 3 to 5 ([#10301](https://github.com/opensearch-project/OpenSearch/pull/10301))
+- Bump `org.apache.avro:avro` from 1.11.2 to 1.11.3 ([#10210](https://github.com/opensearch-project/OpenSearch/pull/10210))
+- Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297))
+- Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506))
+- Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508))
+- Bump `commons-io:commons-io` from 2.13.0 to 2.14.0 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294))
+- Bump `netty` from 4.1.99.Final to 4.1.100.Final ([#10564](https://github.com/opensearch-project/OpenSearch/pull/10564))
+
+### Changed
+- Add instrumentation in rest and network layer. ([#9415](https://github.com/opensearch-project/OpenSearch/pull/9415))
+- Allow parameterization of tests with OpenSearchIntegTestCase.SuiteScopeTestCase annotation ([#9916](https://github.com/opensearch-project/OpenSearch/pull/9916))
+- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
+- Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036))
+- Add instrumentation in transport service. ([#10042](https://github.com/opensearch-project/OpenSearch/pull/10042))
+- [Tracing Framework] Add support for SpanKind. ([#10122](https://github.com/opensearch-project/OpenSearch/pull/10122))
+- Pass parent filter to inner query in nested query ([#10246](https://github.com/opensearch-project/OpenSearch/pull/10246))
+- Disable concurrent segment search when terminate_after is used ([#10200](https://github.com/opensearch-project/OpenSearch/pull/10200))
+- Add instrumentation in Inbound Handler. ([#10143](https://github.com/opensearch-project/OpenSearch/pull/10143))
+- Enable remote segment upload backpressure by default ([#10356](https://github.com/opensearch-project/OpenSearch/pull/10356))
+- [Remote Store] Add support to reload repository metadata inplace ([#9569](https://github.com/opensearch-project/OpenSearch/pull/9569))
+- [Metrics Framework] Add Metrics framework. ([#10241](https://github.com/opensearch-project/OpenSearch/pull/10241))
+- Updating the separator for RemoteStoreLockManager since underscore is allowed in base64UUID url charset ([#10379](https://github.com/opensearch-project/OpenSearch/pull/10379))
+- Add the means to extract the contextual properties from HttpChannel, TcpChannel and TransportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562))
+
+### Removed
+- Remove spurious SGID bit on directories ([#9447](https://github.com/opensearch-project/OpenSearch/pull/9447))
+
+### Fixed
+- Fix ignore_missing parameter has no effect when using template snippet in rename ingest processor ([#9725](https://github.com/opensearch-project/OpenSearch/pull/9725))
+- Fix broken backward compatibility from 2.7 for IndexSorted field indices ([#10045](https://github.com/opensearch-project/OpenSearch/pull/10045))
+- Fix concurrent search NPE when track_total_hits, terminate_after and size=0 are used ([#10082](https://github.com/opensearch-project/OpenSearch/pull/10082))
+- Fix remove ingest processor not handling the ignore_missing parameter correctly ([#10089](https://github.com/opensearch-project/OpenSearch/pull/10089))
+- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101))
+- Fix circular dependency in Settings initialization ([#10194](https://github.com/opensearch-project/OpenSearch/pull/10194))
+- Fix registration and initialization of multiple extensions ([#10256](https://github.com/opensearch-project/OpenSearch/pull/10256))
+- Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([#10370](https://github.com/opensearch-project/OpenSearch/pull/10370))
+- Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496))
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java
index 6343bd127c458..4c9f49df71257 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/HotThreadsIT.java
@@ -60,8 +60,8 @@
public class HotThreadsIT extends OpenSearchIntegTestCase {
public void testHotThreadsDontFail() throws ExecutionException, InterruptedException {
- /**
- * This test just checks if nothing crashes or gets stuck etc.
+ /*
+ This test just checks if nothing crashes or gets stuck etc.
*/
createIndex("test");
final int iters = scaledRandomIntBetween(2, 20);
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java
index 0197ccf059737..44ba585016d8e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/node/tasks/AbstractTasksIT.java
@@ -112,7 +112,7 @@ protected int numberOfEvents(String actionMasks, Function
* The way the test framework bootstraps the test cluster makes it difficult to parameterize the feature flag.
* Once concurrent search is moved behind a cluster setting we can parameterize these tests behind the setting.
*/
@@ -72,7 +72,7 @@ protected Settings featureFlagSettings() {
/**
* Tests the number of threads that worked on a search task.
- *
+ *
* Currently, we try to control concurrency by creating an index with 7 segments and rely on
* the way concurrent search creates leaf slices from segments. Once more concurrency controls are introduced
* we should improve this test to use those methods.
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
index aff7c5d9876ac..36fe3748e9d10 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
@@ -46,7 +46,7 @@
/**
* This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only.
- *
+ *
* The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
*/
@ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
index 347011721c728..78fb01b07b6b1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
@@ -53,7 +53,7 @@
/**
* This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only.
- *
+ *
* The @NodeScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
*/
@ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST)
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java
index 737c0acc309fd..cd6cb0ca3b172 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkProcessorRetryIT.java
@@ -194,7 +194,7 @@ private static void indexDocs(BulkProcessor processor, int numDocs) {
/**
* Internal helper class to correlate backoff states with bulk responses. This is needed to check whether we maxed out the number
* of retries but still got rejected (which is perfectly fine and can also happen from time to time under heavy load).
- *
+ *
* This implementation relies on an implementation detail in Retry, namely that the bulk listener is notified on the same thread
* as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code.
*/
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
index c62c61d5919d6..aefabcb9bc14f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/ingest/AsyncIngestProcessorIT.java
@@ -69,7 +69,7 @@
/**
* The purpose of this test is to verify that when a processor executes an operation asynchronously that
* the expected result is the same as if the same operation happens synchronously.
- *
+ *
* In this test two test processor are defined that basically do the same operation, but a single processor
* executes asynchronously. The result of the operation should be the same and also the order in which the
* bulk responses are returned should be the same as how the corresponding index requests were defined.
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
index f0a3b5a5901ce..b1934f901ac65 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/search/TransportSearchIT.java
@@ -109,8 +109,8 @@ public List
* This test in general passes without primary shard balance as well due to nature of allocation algorithm which
* assigns all primary shards first followed by replica copies.
*/
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
index 8e68a8bde39d5..1d93eecd6b245 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java
@@ -197,9 +197,10 @@ protected IndexShard getIndexShard(String node, ShardId shardId, String indexNam
protected IndexShard getIndexShard(String node, String indexName) {
final Index index = resolveIndex(indexName);
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node);
- IndexService indexService = indicesService.indexServiceSafe(index);
+ IndexService indexService = indicesService.indexService(index);
+ assertNotNull(indexService);
final Optional
* The error happens when the bucket from the "unmapped" index is received first in the reduce phase, however the case can
* be recreated when aggregating about a single index with an unmapped date field and also getting "empty" buckets.
*/
@@ -1624,8 +1624,8 @@ public void testScriptCaching() throws Exception {
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
.get()
);
- String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1));
- String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1));
+ String date = DateFieldMapper.getDefaultDateTimeFormatter().format(date(1, 1));
+ String date2 = DateFieldMapper.getDefaultDateTimeFormatter().format(date(2, 1));
indexRandom(
true,
client().prepareIndex("cache_test_idx").setId("1").setSource("d", date),
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
index 04115f69172da..d44071e1ef9c5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DateHistogramOffsetIT.java
@@ -92,7 +92,7 @@ protected Settings featureFlagSettings() {
}
private ZonedDateTime date(String date) {
- return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date));
+ return DateFormatters.from(DateFieldMapper.getDefaultDateTimeFormatter().parse(date));
}
@Before
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
index 5e95073209c71..865dd670fbf68 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/DiversifiedSamplerIT.java
@@ -221,6 +221,10 @@ public void testNestedDiversity() throws Exception {
}
public void testNestedSamples() throws Exception {
+ assumeFalse(
+ "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10046",
+ internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+ );
// Test samples nested under samples
int MAX_DOCS_PER_AUTHOR = 1;
int MAX_DOCS_PER_GENRE = 2;
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java
index bb90c1294ecb8..dc3b690c7f78f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java
@@ -544,13 +544,13 @@ public void testNested() throws Exception {
/**
* https://github.com/elastic/elasticsearch/issues/33514
- *
+ *
* This bug manifests as the max_bucket agg ("peak") being added to the response twice, because
* the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps.
* The bug was caused by an UnmappedTerms being the chosen as the first reduction target. UnmappedTerms
* delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then
* execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values.
- *
+ *
* Applies to any pipeline agg, not just max.
*/
public void testFieldIsntWrittenOutTwice() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java
index 0cf89778c6e99..2aad0d2d38901 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MovAvgIT.java
@@ -1168,7 +1168,7 @@ public void testHoltWintersMinimization() {
* the default settings. Which means our mock histo will match the generated result (which it won't
* if the minimizer is actually working, since the coefficients will be different and thus generate different
* data)
- *
+ *
* We can simulate this by setting the window size == size of histo
*/
public void testMinimizeNotEnoughData() {
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
index 4cdf5ae8e674f..42d91ac945662 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fetch/subphase/highlight/HighlighterSearchIT.java
@@ -3343,6 +3343,10 @@ public void testFiltersFunctionScoreQueryHighlight() throws Exception {
}
public void testHighlightQueryRewriteDatesWithNow() throws Exception {
+ assumeFalse(
+ "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10434",
+ internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+ );
assertAcked(
client().admin()
.indices()
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java
index f5d1b8234558e..6b95405b3ebd4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/fieldcaps/FieldCapabilitiesIT.java
@@ -244,6 +244,10 @@ public void testWithIndexAlias() {
}
public void testWithIndexFilter() throws InterruptedException {
+ assumeFalse(
+ "Concurrent search case muted pending fix: https://github.com/opensearch-project/OpenSearch/issues/10433",
+ internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING)
+ );
assertAcked(prepareCreate("index-1").setMapping("timestamp", "type=date", "field1", "type=keyword"));
assertAcked(prepareCreate("index-2").setMapping("timestamp", "type=date", "field1", "type=long"));
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java
index 43b7179a335f8..4a178e7066846 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/DeletePitMultiNodeIT.java
@@ -93,8 +93,8 @@ public void testDeletePit() throws Exception {
assertTrue(deletePitInfo.isSuccessful());
}
validatePitStats("index", 0, 10);
- /**
- * Checking deleting the same PIT id again results in succeeded
+ /*
+ Checking deleting the same PIT id again results in succeeded
*/
deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest);
deletePITResponse = deleteExecute.get();
@@ -113,8 +113,8 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception {
pitIds.add(pitResponse.getId());
validatePitStats("index", 5, 0);
- /**
- * Delete Pit #1
+ /*
+ Delete Pit #1
*/
DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds);
ActionFuture
* See https://github.com/elastic/elasticsearch/issues/20876
*/
public void testSnapshotCanceledOnRemovedShard() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java
index c651689e21d3d..7f016caf22149 100644
--- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentSeqNoVersioningIT.java
@@ -93,7 +93,7 @@
* provided the primaryTerm and seqNo still matches. The reason we cannot assume it will not take place after receiving the failure
* is that a request can fork into two because of retries on disconnect, and now race against itself. The retry might complete (and do a
* dirty or stale read) before the forked off request gets to execute, and that one might still subsequently succeed.
- *
+ *
* Such writes are not necessarily fully replicated and can be lost. There is no
* guarantee that the previous value did not have the specified primaryTerm and seqNo
*
* TODO: We should maybe rewrite this with the flexible query parser which matches the same syntax with more freedom.
*/
public class XQueryParser extends QueryParser {
diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java
index e4c299ba572b1..9ca0491bc29f5 100644
--- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java
+++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java
@@ -50,7 +50,7 @@
* A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs}
* output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key.
* The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}.
- *
+ *
* TODO: If the sort is based on score we should propagate the mininum competitive score when
* This is useful to split passages created by {@link BreakIterator}s like `sentence` that
* can create big outliers on semi-structured text.
- *
+ *
*
* WARNING: This break iterator is designed to work with the {@link UnifiedHighlighter}.
- *
+ *
* TODO: We should be able to create passages incrementally, starting from the offset of the first match and expanding or not
* depending on the offsets of subsequent matches. This is currently impossible because {@link FieldHighlighter} uses
* only the first matching offset to derive the start and end of each passage.
diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java
index 4777b77cfbfed..9e9f6d1fd817d 100644
--- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java
+++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64.java
@@ -38,8 +38,8 @@
/**
* Forked from Lucene 8.x; removed in Lucene 9.0
*
- * @todo further investigate a better alternative
- *
+ * TODO: further investigate a better alternative
+ *
* Space optimized random access capable array of values with a fixed number of bits/value. Values
* are packed contiguously.
*
diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java
index 0324522e9a68d..53cf4ed8e2273 100644
--- a/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java
+++ b/server/src/main/java/org/apache/lucene/util/packed/XPacked64SingleBlock.java
@@ -25,8 +25,8 @@
/**
* Forked from Lucene 8.x; removed in Lucene 9.0
*
- * @todo further investigate a better alternative
- *
+ * TODO: further investigate a better alternative
+ *
* This class is similar to {@link Packed64} except that it trades space for speed by ensuring that
* a single block needs to be read/written in order to read/write a value.
*/
diff --git a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java
index f94a4531a7db9..4260d34ead7c9 100644
--- a/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java
+++ b/server/src/main/java/org/apache/lucene/util/packed/XPackedInts.java
@@ -35,9 +35,9 @@
/**
* Forked from Lucene 8.x; removed in Lucene 8.9
- *
+ *
* Todo: further investigate a better alternative
- *
+ *
* Simplistic compression for array of unsigned long values. Each value is {@code >= 0} and {@code
* <=} a specified maximum value. The values are stored as packed ints, with each value consuming a
* fixed number of bits.
diff --git a/server/src/main/java/org/opensearch/action/AliasesRequest.java b/server/src/main/java/org/opensearch/action/AliasesRequest.java
index 4c5d5628b1aac..3632ba2d7304f 100644
--- a/server/src/main/java/org/opensearch/action/AliasesRequest.java
+++ b/server/src/main/java/org/opensearch/action/AliasesRequest.java
@@ -54,7 +54,7 @@ public interface AliasesRequest extends IndicesRequest.Replaceable {
/**
* Replaces current aliases with the provided aliases.
- *
+ *
* Sometimes aliases expressions need to be resolved to concrete aliases prior to executing the transport action.
*/
void replaceAliases(String... aliases);
diff --git a/server/src/main/java/org/opensearch/action/DocWriteRequest.java b/server/src/main/java/org/opensearch/action/DocWriteRequest.java
index df6414470ab6b..31f61e76c74ff 100644
--- a/server/src/main/java/org/opensearch/action/DocWriteRequest.java
+++ b/server/src/main/java/org/opensearch/action/DocWriteRequest.java
@@ -149,7 +149,7 @@ public interface DocWriteRequest
* If the document last modification was assigned a different term a
* {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown.
*/
diff --git a/server/src/main/java/org/opensearch/action/DocWriteResponse.java b/server/src/main/java/org/opensearch/action/DocWriteResponse.java
index afdb1d3a0bdd9..e3052b3b80035 100644
--- a/server/src/main/java/org/opensearch/action/DocWriteResponse.java
+++ b/server/src/main/java/org/opensearch/action/DocWriteResponse.java
@@ -341,7 +341,7 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t
/**
* Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method.
- *
+ *
* This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
* {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
* if needed and then immediately returns.
diff --git a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java
index 0930bd2741810..5948dd3e2b7cb 100644
--- a/server/src/main/java/org/opensearch/action/TaskOperationFailure.java
+++ b/server/src/main/java/org/opensearch/action/TaskOperationFailure.java
@@ -50,7 +50,7 @@
/**
* Information about task operation failures
- *
+ *
* The class is final due to serialization limitations
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
index 625aa91e6ea7f..3dec781f0acf4 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java
@@ -95,7 +95,7 @@ public ClusterAllocationExplainRequest(StreamInput in) throws IOException {
* Create a new allocation explain request. If {@code primary} is false, the first unassigned replica
* will be picked for explanation. If no replicas are unassigned, the first assigned replica will
* be explained.
- *
+ *
* Package private for testing.
*/
ClusterAllocationExplainRequest(String index, int shard, boolean primary, @Nullable String currentNode) {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
index 8d82827f4ee50..e62c83490d810 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
@@ -67,7 +67,7 @@
/**
* ActionType to get a single task. If the task isn't running then it'll try to request the status from request index.
- *
+ *
* The general flow is:
*
* The steps taken by the repository cleanup operation are as follows:
*
* By default, the operation will return as soon as snapshot is initialized. It can be changed by setting this
* flag to true.
*
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java
index 256850da0d503..c688b13ed9fdd 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java
@@ -497,7 +497,7 @@ public Settings indexSettings() {
* this is the snapshot that this request restores. If the client can only identify a snapshot by its name then there is a risk that the
* desired snapshot may be deleted and replaced by a new snapshot with the same name which is inconsistent with the original one. This
* method lets us fail the restore if the precise snapshot we want is not available.
- *
+ *
* This is for internal use only and is not exposed in the REST layer.
*/
public RestoreSnapshotRequest snapshotUuid(String snapshotUuid) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
index 7cda041b7d9cf..814a65e2a5bf0 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java
@@ -81,7 +81,7 @@ public Builder addIndices(String... indices) {
/**
* Specifies what type of requested indices to ignore and wildcard indices expressions.
- *
+ *
* For example indices that don't exist.
*/
@SuppressWarnings("unchecked")
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
index 7cda0c1948d24..d048a3008f588 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java
@@ -243,7 +243,7 @@ public CreateIndexRequest settings(Map
* The mapping should be in the form of a JSON string, with an outer _doc key
*
* Note that the definition should *not* be nested under a type name.
*
* @param source The mapping source
@@ -296,7 +296,7 @@ private CreateIndexRequest mapping(BytesReference source, XContentType xContentT
/**
* Adds mapping that will be added when the index gets created.
- *
+ *
* Note that the definition should *not* be nested under a type name.
*
* @param source The mapping source
@@ -432,7 +432,7 @@ public CreateIndexRequest source(String source, XContentType xContentType) {
/**
* Sets the settings and mappings as a single source.
- *
+ *
* Note that the mapping definition should *not* be nested under a type name.
*/
public CreateIndexRequest source(String source, MediaType mediaType) {
@@ -458,7 +458,7 @@ public CreateIndexRequest source(byte[] source, XContentType xContentType) {
/**
* Sets the settings and mappings as a single source.
- *
+ *
* Note that the mapping definition should *not* be nested under a type name.
*/
public CreateIndexRequest source(byte[] source, MediaType mediaType) {
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java
index 0cff01af536dc..384ae2e028bba 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java
@@ -130,7 +130,7 @@ protected void writeNodesTo(StreamOutput out, List
* Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
* Any changes done to this class should go to that client class as well.
*
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
index d874b5bb6b1ac..94c88e30295a8 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetFieldMappingsResponse.java
@@ -61,7 +61,7 @@
/**
* Response object for {@link GetFieldMappingsRequest} API
- *
+ *
* Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
* Any changes done to this class should go to that client class as well.
*
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
index 4a2d2fbe8e950..32f751ceb1c5a 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
@@ -210,7 +210,7 @@ public String source() {
/**
* A specialized simplified mapping source method, takes the form of simple properties definition:
* ("field1", "type=string,store=true").
- *
+ *
* Also supports metadata mapping fields such as `_all` and `_parent` as property definition, these metadata
* mapping fields will automatically be put on the top level mapping object.
*/
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java
index c46940fbfecf9..b82b68f6f9489 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/replication/SegmentReplicationStatsRequest.java
@@ -89,7 +89,7 @@ public void activeOnly(boolean activeOnly) {
/**
* Contains list of shard id's if shards are passed, empty otherwise. Array is empty by default.
*
- * @return list of shard id's if shards are passed, empty otherwise
+ * @return array of shard id's if shards are passed, empty otherwise
*/
public String[] shards() {
return shards;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
index b25bc94a5c8e2..353cdbbbc840c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
@@ -54,7 +54,7 @@
/**
* Request class to swap index under an alias or increment data stream generation upon satisfying conditions
- *
+ *
* Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
* Any changes done to this class should also go to that client class.
*
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
index 55ee65d0a4973..a66fcc9e9bcf2 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
@@ -51,7 +51,7 @@
/**
* Response object for {@link RolloverRequest} API
- *
+ *
* Note: there is a new class with the same name for the Java HLRC that uses a typeless format.
* Any changes done to this class should also go to that client class.
*
diff --git a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java
index 0d6d122e31261..25a2c081f8441 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BackoffPolicy.java
@@ -40,9 +40,9 @@
/**
* Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal
* thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally.
- *
+ *
* Notes for implementing custom subclasses:
- *
+ *
* The underlying mathematical principle of
* use @{link {@link #Failure(String, String, Exception, long, long)}}
* to record operation sequence no with failure
*/
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java
index baf64b3e80af6..141ec24fc390f 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkProcessor.java
@@ -185,7 +185,7 @@ public Builder setGlobalPipeline(String globalPipeline) {
/**
* Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally
* in case they have failed due to resource constraints (i.e. a thread pool was full).
- *
+ *
* The default is to back off exponentially.
*
* @see org.opensearch.action.bulk.BackoffPolicy#exponentialBackoff()
diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java
index 65043da6c2684..f2f3077001a13 100644
--- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java
+++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java
@@ -67,7 +67,7 @@
/**
* A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
* and allows to executes it in a single batch.
- *
+ *
* Note that we only support refresh on the bulk request not per item.
* @see org.opensearch.client.Client#bulk(BulkRequest)
*
@@ -123,7 +123,7 @@ public BulkRequest add(DocWriteRequest>... requests) {
/**
* Add a request to the current BulkRequest.
- *
+ *
* Note for internal callers: This method does not respect all global parameters.
* Only the global index is applied to the request objects.
* Global parameters would be respected if the request was serialized for a REST call as it is
@@ -347,7 +347,7 @@ public final BulkRequest timeout(TimeValue timeout) {
/**
* Note for internal callers (NOT high level rest client),
* the global parameter setting is ignored when used with:
- *
+ *
* - {@link BulkRequest#add(IndexRequest)}
* - {@link BulkRequest#add(UpdateRequest)}
* - {@link BulkRequest#add(DocWriteRequest)}
@@ -364,7 +364,7 @@ public final BulkRequest pipeline(String globalPipeline) {
/**
* Note for internal callers (NOT high level rest client),
* the global parameter setting is ignored when used with:
- *
+ *
- {@link BulkRequest#add(IndexRequest)}
- {@link BulkRequest#add(UpdateRequest)}
- {@link BulkRequest#add(DocWriteRequest)}
@@ -404,7 +404,7 @@ public Boolean requireAlias() {
/**
* Note for internal callers (NOT high level rest client),
* the global parameter setting is ignored when used with:
- *
+ *
* - {@link BulkRequest#add(IndexRequest)}
* - {@link BulkRequest#add(UpdateRequest)}
* - {@link BulkRequest#add(DocWriteRequest)}
diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java
index 81ae3d78e8ced..9a9e861ad8055 100644
--- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java
+++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java
@@ -205,7 +205,7 @@ public long ifSeqNo() {
/**
* If set, only perform this delete request if the document was last modification was assigned this primary term.
- *
+ *
* If the document last modification was assigned a different term a
* {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown.
*/
diff --git a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java
index 039214459ac21..6cbabfec6d763 100644
--- a/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java
+++ b/server/src/main/java/org/opensearch/action/delete/TransportDeleteAction.java
@@ -40,7 +40,7 @@
/**
* Performs the delete operation.
- *
+ *
* Deprecated use TransportBulkAction with a single item instead
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java
index d0b78cfd49d4b..c7926e65cf792 100644
--- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java
@@ -76,14 +76,14 @@
/**
* Index request to index a typed JSON document into a specific index and make it searchable. Best
* created using {@link org.opensearch.client.Requests#indexRequest(String)}.
- *
+ *
* The index requires the {@link #index()}, {@link #id(String)} and
* {@link #source(byte[], MediaType)} to be set.
- *
+ *
* The source (content to index) can be set in its bytes form using ({@link #source(byte[], MediaType)}),
* its string form ({@link #source(String, MediaType)}) or using a {@link XContentBuilder}
* ({@link #source(XContentBuilder)}).
- *
+ *
* If the {@link #id(String)} is not set, it will be automatically generated.
*
* @see IndexResponse
@@ -388,7 +388,7 @@ public IndexRequest source(Map
* Note, its preferable to either set it using {@link #source(XContentBuilder)}
* or using the {@link #source(byte[], MediaType)}.
*/
@@ -591,7 +591,7 @@ public long ifSeqNo() {
/**
* If set, only perform this indexing request if the document was last modification was assigned this primary term.
- *
+ *
* If the document last modification was assigned a different term a
* {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown.
*/
diff --git a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java
index fe4f80bf0c065..ce32840f6751b 100644
--- a/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java
+++ b/server/src/main/java/org/opensearch/action/index/TransportIndexAction.java
@@ -40,7 +40,7 @@
/**
* Performs the index operation.
- *
+ *
* Allows for the following settings:
*
* TODO: move this into IngestService and make index/bulk actions call that
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java
index 2234934499609..ec3ee981b646f 100644
--- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java
@@ -218,7 +218,12 @@ private static List
* Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate
* the result. In oder to obtain stable results the shard index (index of the result in the result array) must be the same.
*
@@ -284,7 +284,7 @@ public List
* Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named
* completion suggestion ordered by suggestion name
*/
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
index 762022460ebdb..21cf0ed97b9da 100644
--- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java
@@ -600,7 +600,7 @@ public void setMaxConcurrentShardRequests(int maxConcurrentShardRequests) {
* the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for
* instance a shard can not match any documents based on its rewrite method ie. if date filters are mandatory to match but the shard
* bounds and the query are disjoint.
- *
+ *
* When unspecified, the pre-filter phase is executed if any of these conditions is met:
*
* When unspecified, the pre-filter phase is executed if any of these conditions is met:
*
* When unspecified, the pre-filter phase is executed if any of these conditions is met:
*
* The reason why this class exists is that the high level REST client uses its own classes
* to parse aggregations into, which are not serializable. This is the common part that can be
* shared between core and client.
diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
index 614d576324026..b15a4b66e8870 100644
--- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
+++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java
@@ -76,7 +76,7 @@ private void deletePits(ActionListener
* For Cross cluster PITs :
* - mixed cluster PITs ( PIT comprising local and remote ) will be fully deleted. Since there will atleast be
* one reader context with PIT ID present in local cluster, 'Get all PITs' will retrieve the PIT ID with which
diff --git a/server/src/main/java/org/opensearch/action/support/TransportAction.java b/server/src/main/java/org/opensearch/action/support/TransportAction.java
index daa11c2d7d80f..72aae210d61ae 100644
--- a/server/src/main/java/org/opensearch/action/support/TransportAction.java
+++ b/server/src/main/java/org/opensearch/action/support/TransportAction.java
@@ -81,7 +81,7 @@ private Releasable registerChildNode(TaskId parentTask) {
/**
* Use this method when the transport action call should result in creation of a new task associated with the call.
- *
+ *
* This is a typical behavior.
*/
public final Task execute(Request request, ActionListener
* See {@link DiscoveryNodes#resolveNodes} for a full description of the options.
- *
+ *
* TODO: get rid of this and resolve it to concrete nodes in the rest layer
**/
private String[] nodesIds;
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
index 60c490a50575a..9f69d41d83f5b 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java
@@ -479,7 +479,7 @@ public interface Primary<
/**
* Notifies the primary of a local checkpoint for the given allocation.
- *
+ *
* Note: The primary will use this information to advance the global checkpoint if possible.
*
* @param allocationId allocation ID of the shard corresponding to the supplied local checkpoint
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
index de5a92fdcc4b1..b68bd13cfed80 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java
@@ -100,7 +100,7 @@
/**
* Base class for requests that should be executed on a primary copy followed by replica copies.
* Subclasses can resolve the target shard and provide implementation for primary and replica operations.
- *
+ *
* The action samples cluster state on the receiving node to reroute to node with primary copy and on the
* primary node to validate request before primary operation followed by sampling state again for resolving
* nodes with replica copies to perform replication.
@@ -866,7 +866,7 @@ protected IndexShard getIndexShard(final ShardId shardId) {
* Responsible for routing and retrying failed operations on the primary.
* The actual primary operation is done in {@link ReplicationOperation} on the
* node with primary copy.
- *
+ *
* Resolves index and shard id for the request before routing it to target node
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
index 62cbfbde9780a..a0b5299805868 100644
--- a/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
+++ b/server/src/main/java/org/opensearch/action/support/replication/TransportWriteAction.java
@@ -266,7 +266,7 @@ protected abstract void dispatchedShardOperationOnReplica(
/**
* Result of taking the action on the primary.
- *
+ *
* NOTE: public for testing
*
* @opensearch.internal
@@ -496,7 +496,7 @@ void run() {
* A proxy for write operations that need to be performed on the
* replicas, where a failure to execute the operation should fail
* the replica shard and/or mark the replica as stale.
- *
+ *
* This extends {@code TransportReplicationAction.ReplicasProxy} to do the
* failing and stale-ing.
*
diff --git a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java
index c474096ff94e4..56b34aea8248d 100644
--- a/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/single/shard/SingleShardRequest.java
@@ -55,7 +55,7 @@ public abstract class SingleShardRequest
* By default tasks with any ids are returned.
*/
public TaskId getTaskId() {
diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java
index b354482871521..946c3c2446173 100644
--- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java
@@ -589,7 +589,7 @@ public long ifSeqNo() {
/**
* If set, only perform this update request if the document was last modification was assigned this primary term.
- *
+ *
* If the document last modification was assigned a different term a
* {@link org.opensearch.index.engine.VersionConflictEngineException} will be thrown.
*/
diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
index f9661e71d60e6..e43c42446de2c 100644
--- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
+++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java
@@ -73,7 +73,7 @@
/**
* We enforce bootstrap checks once a node has the transport protocol bound to a non-loopback interface or if the system property {@code
- * opensearch.enforce.bootstrap.checks} is set to {@true}. In this case we assume the node is running in production and
+ * opensearch.enforce.bootstrap.checks} is set to {@code true}. In this case we assume the node is running in production and
* all bootstrap checks must pass.
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java
index 8e556df4b2f9b..91da34fb7216d 100644
--- a/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java
+++ b/server/src/main/java/org/opensearch/bootstrap/JNAKernel32Library.java
@@ -141,7 +141,7 @@ public boolean callback(long dwCtrlType) {
/**
* Memory protection constraints
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/aa366786%28v=vs.85%29.aspx
*/
public static final int PAGE_NOACCESS = 0x0001;
@@ -151,7 +151,7 @@ public boolean callback(long dwCtrlType) {
/**
* Contains information about a range of pages in the virtual address space of a process.
* The VirtualQuery and VirtualQueryEx functions use this structure.
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/aa366775%28v=vs.85%29.aspx
*/
public static class MemoryBasicInformation extends Structure {
@@ -186,7 +186,7 @@ public SizeT() {
/**
* Locks the specified region of the process's virtual address space into physical
* memory, ensuring that subsequent access to the region will not incur a page fault.
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/aa366895%28v=vs.85%29.aspx
*
* @param address A pointer to the base address of the region of pages to be locked.
@@ -197,7 +197,7 @@ public SizeT() {
/**
* Retrieves information about a range of pages within the virtual address space of a specified process.
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/aa366907%28v=vs.85%29.aspx
*
* @param handle A handle to the process whose memory information is queried.
@@ -210,7 +210,7 @@ public SizeT() {
/**
* Sets the minimum and maximum working set sizes for the specified process.
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms686234%28v=vs.85%29.aspx
*
* @param handle A handle to the process whose working set sizes is to be set.
@@ -222,7 +222,7 @@ public SizeT() {
/**
* Retrieves a pseudo handle for the current process.
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms683179%28v=vs.85%29.aspx
*
* @return a pseudo handle to the current process.
@@ -231,7 +231,7 @@ public SizeT() {
/**
* Closes an open object handle.
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms724211%28v=vs.85%29.aspx
*
* @param handle A valid handle to an open object.
@@ -252,7 +252,7 @@ public SizeT() {
/**
* Creates or opens a new job object
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms682409%28v=vs.85%29.aspx
*
* @param jobAttributes security attributes
@@ -263,7 +263,7 @@ public SizeT() {
/**
* Associates a process with an existing job
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms681949%28v=vs.85%29.aspx
*
* @param job job handle
@@ -274,7 +274,7 @@ public SizeT() {
/**
* Basic limit information for a job object
- *
+ *
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684147%28v=vs.85%29.aspx
*/
public static class JOBOBJECT_BASIC_LIMIT_INFORMATION extends Structure implements Structure.ByReference {
@@ -316,7 +316,7 @@ protected List
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms684925%28v=vs.85%29.aspx
*
* @param job job handle
@@ -330,7 +330,7 @@ protected List
* https://msdn.microsoft.com/en-us/library/windows/desktop/ms686216%28v=vs.85%29.aspx
*
* @param job job handle
diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
index ab52ae5a43a2a..4d36efff0e192 100644
--- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
+++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java
@@ -188,9 +188,9 @@ void init(final boolean daemonize, final Path pidFile, final boolean quiet, Envi
/**
* Required method that's called by Apache Commons procrun when
* running as a service on Windows, when the service is stopped.
- *
+ *
* http://commons.apache.org/proper/commons-daemon/procrun.html
- *
+ *
* NOTE: If this method is renamed and/or moved, make sure to
* update opensearch-service.bat!
*/
diff --git a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java
index 482087be1c8eb..28e1e7c53cb9c 100644
--- a/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java
+++ b/server/src/main/java/org/opensearch/cluster/AckedClusterStateTaskListener.java
@@ -44,7 +44,7 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener
/**
* Called to determine which nodes the acknowledgement is expected from.
- *
+ *
* As this method will be called multiple times to determine the set of acking nodes,
* it is crucial for it to return consistent results: Given the same listener instance
* and the same node parameter, the method implementation should return the same result.
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java
index 50beeb1f03deb..bf8494cc36857 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java
@@ -70,7 +70,7 @@ default boolean runOnlyOnMaster() {
/**
* Callback invoked after new cluster state is published. Note that
* this method is not invoked if the cluster state was not updated.
- *
+ *
* Note that this method will be executed using system context.
*
* @param clusterChangedEvent the change event for this cluster state change, containing
@@ -80,7 +80,7 @@ default void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {}
/**
* Builds a concise description of a list of tasks (to be used in logging etc.).
- *
+ *
* Note that the tasks given are not necessarily the same as those that will be passed to {@link #execute(ClusterState, List)}.
* but are guaranteed to be a subset of them. This method can be called multiple times with different lists before execution.
* This allows groupd task description but the submitting source.
diff --git a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
index dd2232968114e..a38fc81bebc08 100644
--- a/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
+++ b/server/src/main/java/org/opensearch/cluster/DiffableUtils.java
@@ -182,7 +182,7 @@ public Map
* Implements serialization. How differences are applied is left to subclasses.
*
* @param
* Also provides operations to distinguish whether map values are diffable.
- *
+ *
* Should not be directly implemented, instead implement either
* {@link DiffableValueSerializer} or {@link NonDiffableValueSerializer}.
*
@@ -517,7 +517,7 @@ public Diff
* Note: this implementation is ignoring the key.
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
index 35490d2f37a49..e381b8f244bf3 100644
--- a/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
+++ b/server/src/main/java/org/opensearch/cluster/InternalClusterInfoService.java
@@ -82,7 +82,7 @@
* to 30 seconds). The InternalClusterInfoService only runs on the cluster-manager node.
* Listens for changes in the number of data nodes and immediately submits a
* ClusterInfoUpdateJob if a node has been added.
- *
+ *
* Every time the timer runs, gathers information about the disk usage and
* shard sizes across the cluster.
*
diff --git a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java
index 042a4743ca25d..3e0c78099e6b4 100644
--- a/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java
+++ b/server/src/main/java/org/opensearch/cluster/RestoreInProgress.java
@@ -189,7 +189,7 @@ public Snapshot snapshot() {
/**
* Returns list of shards that being restore and their status
*
- * @return list of shards
+ * @return map of shard id to shard restore status
*/
public Map
* The publishListener allows to wait for the publication to complete, which can be either successful completion, timing out or failing.
* The method is guaranteed to pass back a {@link FailedToCommitClusterStateException} to the publishListener if the change is not
* committed and should be rejected. Any other exception signals that something bad happened but the change is committed.
- *
+ *
* The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether
* they updated their own cluster state or not.
*/
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
index 15eaf9c8bcc1e..f701a2f52277d 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java
@@ -483,10 +483,10 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata)
* 2. The joining node has to be a non-remote store backed if it is joining a non-remote store backed cluster.
* Validates no remote store attributes are present in joining node as existing nodes in the cluster doesn't have
* remote store attributes.
- *
+ *
* A remote store backed node is the one which holds all the remote store attributes and a remote store backed
* cluster is the one which has only homogeneous remote store backed nodes with same node attributes
- *
+ *
* TODO: When we support moving from remote store cluster to non remote store and vice versa the this logic will
* needs to be modified.
*/
diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
index 1570a84ab871f..128bd42fd7947 100644
--- a/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
+++ b/server/src/main/java/org/opensearch/cluster/coordination/Reconfigurator.java
@@ -63,7 +63,7 @@ public class Reconfigurator {
* and still the cluster would be unavailable. However not reducing the voting configuration size can also hamper resilience: in a
* five-node cluster we could lose two nodes and by reducing the voting configuration to the remaining three nodes we could tolerate the
* loss of a further node before failing.
- *
+ *
* We offer two options: either we auto-shrink the voting configuration as long as it contains more than three nodes, or we don't and we
* require the user to control the voting configuration manually using the retirement API. The former, default, option, guarantees that
* as long as there have been at least three cluster-manager-eligible nodes in the cluster and no more than one of them is currently unavailable,
diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java
index d06e89d9ea170..cbc63565228f9 100644
--- a/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java
+++ b/server/src/main/java/org/opensearch/cluster/health/ClusterShardHealth.java
@@ -219,13 +219,13 @@ public void writeTo(final StreamOutput out) throws IOException {
/**
* Checks if an inactive primary shard should cause the cluster health to go RED.
- *
+ *
* An inactive primary shard in an index should cause the cluster health to be RED to make it visible that some of the existing data is
* unavailable. In case of index creation, snapshot restore or index shrinking, which are unexceptional events in the cluster lifecycle,
* cluster health should not turn RED for the time where primaries are still in the initializing state but go to YELLOW instead.
* However, in case of exceptional events, for example when the primary shard cannot be assigned to a node or initialization fails at
* some point, cluster health should still turn RED.
- *
+ *
* NB: this method should *not* be called on active shards nor on non-primary shards.
*/
public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java
index 0da948dc78c5d..85a203e5e059a 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexGraveyard.java
@@ -61,7 +61,7 @@
/**
* A collection of tombstones for explicitly marking indices as deleted in the cluster state.
- *
+ *
* The cluster state contains a list of index tombstones for indices that have been
* deleted in the cluster. Because cluster states are processed asynchronously by
* nodes and a node could be removed from the cluster for a period of time, the
@@ -250,7 +250,7 @@ public int getNumPurged() {
/**
* Purge tombstone entries. Returns the number of entries that were purged.
- *
+ *
* Tombstones are purged if the number of tombstones in the list
* is greater than the input parameter of maximum allowed tombstones.
* Tombstones are purged until the list is equal to the maximum allowed.
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index 52df72b342b3e..5d8b751b241e2 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -779,7 +779,7 @@ public long getAliasesVersion() {
/**
* The term of the current selected primary. This is a non-negative number incremented when
* a primary shard is assigned after a full cluster restart or a replica shard is promoted to a primary.
- *
+ *
* Note: since we increment the term every time a shard is assigned, the term for any operational shard (i.e., a shard
* that can be indexed into) is larger than 0. See {@link IndexMetadataUpdater#applyChanges}.
**/
@@ -1884,7 +1884,7 @@ public static Settings addHumanReadableSettings(Settings settings) {
/**
* Return the version the index was created from the provided index settings
- *
+ *
* This looks for the presence of the {@link Version} object with key {@link IndexMetadata#SETTING_VERSION_CREATED}
*/
public static Version indexCreated(final Settings indexSettings) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java
index 272bb132197af..c5efb55316b84 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexTemplateMetadata.java
@@ -369,7 +369,7 @@ public IndexTemplateMetadata build() {
/**
* Serializes the template to xContent, using the legacy format where the mappings are
* nested under the type name.
- *
+ *
* This method is used for serializing templates before storing them in the cluster metadata,
* and also in the REST layer when returning a deprecated typed response.
*/
@@ -386,7 +386,7 @@ public static void toXContentWithTypes(
/**
* Serializes the template to xContent, making sure not to nest mappings under the
* type name.
- *
+ *
* Note that this method should currently only be used for creating REST responses,
* and not when directly updating stored templates. Index templates are still stored
* in the old, typed format, and have yet to be migrated to be typeless.
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
index 146193b8d22c4..626903877b0c6 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
@@ -1666,7 +1666,7 @@ private SortedMap
* E.g., if data stream `foo` has backing indices [`.ds-foo-000001`, `.ds-foo-000002`] and the indices lookup contains indices
* `.ds-foo-000001`, `.ds-foo-000002` and `.ds-foo-000006` this will throw an IllegalStateException (as attempting to rollover the
* `foo` data stream from generation 5 to 6 will not be possible)
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index b2861e566dd4b..8d76a39712ee3 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -754,7 +754,7 @@ private ClusterState applyCreateIndexRequestWithExistingMetadata(
/**
* Parses the provided mappings json and the inheritable mappings from the templates (if any)
* into a map.
- *
+ *
* The template mappings are applied in the order they are encountered in the list (clients
* should make sure the lower index, closer to the head of the list, templates have the highest
* {@link IndexTemplateMetadata#order()}). This merging makes no distinction between field
@@ -792,7 +792,7 @@ static Map
* The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer
* to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()})
*
@@ -1009,7 +1009,7 @@ static int getIndexNumberOfRoutingShards(Settings indexSettings, @Nullable Index
/**
* Validate and resolve the aliases explicitly set for the index, together with the ones inherited from the specified
* templates.
- *
+ *
* The template mappings are applied in the order they are encountered in the list (clients should make sure the lower index, closer
* to the head of the list, templates have the highest {@link IndexTemplateMetadata#order()})
*
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
index 91c996448ea8f..e30e878f1b31a 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java
@@ -166,7 +166,7 @@ public MetadataIndexStateService(
/**
* Closes one or more indices.
- *
+ *
* Closing indices is a 3 steps process: it first adds a write block to every indices to close, then waits for the operations on shards
* to be terminated and finally closes the indices by moving their state to CLOSE.
*/
@@ -302,7 +302,7 @@ public TimeValue timeout() {
/**
* Step 1 - Start closing indices by adding a write block
- *
+ *
* This step builds the list of indices to close (the ones explicitly requested that are not in CLOSE state) and adds a unique cluster
* block (or reuses an existing one) to every index to close in the cluster state. After the cluster state is published, the shards
* should start to reject writing operations and we can proceed with step 2.
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
index 71b86ec853ce4..1093ac09777e7 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
@@ -747,7 +747,7 @@ public static Map
* Based on the provided checkPriority and priority parameters this aims to report the overlapping
* index templates regardless of the priority (ie. checkPriority == false) or otherwise overlapping
* templates with the same priority as the given priority parameter (this is useful when trying to
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java
index a5ef337c3b62a..e3689d046193c 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/RepositoriesMetadata.java
@@ -70,6 +70,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable
* If there are no non-client nodes, Version.CURRENT will be returned.
*
* @return the oldest version in the cluster
@@ -367,7 +367,7 @@ public Version getSmallestNonClientNodeVersion() {
/**
* Returns the version of the node with the youngest version in the cluster that is not a client node.
- *
+ *
* If there are no non-client nodes, Version.CURRENT will be returned.
*
* @return the youngest version in the cluster
@@ -417,16 +417,16 @@ public DiscoveryNode resolveNode(String node) {
/**
* Resolves a set of nodes according to the given sequence of node specifications. Implements the logic in various APIs that allow the
* user to run the action on a subset of the nodes in the cluster. See [Node specification] in the reference manual for full details.
- *
+ *
* Works by tracking the current set of nodes and applying each node specification in sequence. The set starts out empty and each node
* specification may either add or remove nodes. For instance:
- *
+ *
* - _local, _cluster_manager (_master) and _all respectively add to the subset the local node, the currently-elected cluster_manager, and all the nodes
* - node IDs, names, hostnames and IP addresses all add to the subset any nodes which match
* - a wildcard-based pattern of the form "attr*:value*" adds to the subset all nodes with a matching attribute with a matching value
* - role:true adds to the subset all nodes with a matching role
* - role:false removes from the subset all nodes with a matching role.
- *
+ *
* An empty sequence of node specifications returns all nodes, since the corresponding actions run on all nodes by default.
*/
public String[] resolveNodes(String... nodes) {
@@ -813,7 +813,7 @@ public Builder localNodeId(String localNodeId) {
* Checks that a node can be safely added to this node collection.
*
* @return null if all is OK or an error message explaining why a node can not be added.
- *
+ *
* Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an
* exception
*/
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
index 9cc09c6e4c31c..2dd57431d0375 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java
@@ -463,7 +463,7 @@ private static Map
* This adjustment takes the "winning" node's statistics and adds the average of those statistics with each non-winning node. Let's say
* the winning node had a queue size of 10 and a non-winning node had a queue of 18. The average queue size is (10 + 18) / 2 = 14 so the
* non-winning node will have statistics added for a queue size of 14. This is repeated for the response time and service times as well.
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java
index 5cef46689ffc7..b01e074ce40c2 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java
@@ -49,7 +49,7 @@
/**
* Represents the recovery source of a shard. Available recovery types are:
- *
+ *
* - {@link EmptyStoreRecoverySource} recovery from an empty store
* - {@link ExistingStoreRecoverySource} recovery from an existing store
* - {@link PeerRecoverySource} recovery from a primary on another node
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java
index 4f7b935f15f93..5a4352653cc89 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java
@@ -70,7 +70,7 @@
* {@link RoutingNodes} represents a copy the routing information contained in the {@link ClusterState cluster state}.
* It can be either initialized as mutable or immutable (see {@link #RoutingNodes(ClusterState, boolean)}), allowing
* or disallowing changes to its elements.
- *
+ *
* The main methods used to update routing entries are:
*
* Since replicas could possibly be on nodes with an older version of OpenSearch than
* the primary is, this will return replicas on the highest version of OpenSearch when document
* replication is enabled.
@@ -395,7 +395,7 @@ public ShardRouting activeReplicaWithHighestVersion(ShardId shardId) {
/**
* Returns one active replica shard for the given shard id or
* Since replicas could possibly be on nodes with a higher version of OpenSearch than
* the primary is, this will return replicas on the oldest version of OpenSearch when segment
* replication is enabled to allow for replica to read segments from primary.
@@ -544,9 +544,9 @@ public Tuple
* Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source.
- *
+ *
* If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their
* recovery source changes
*
@@ -605,9 +605,9 @@ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, Ro
/**
* Applies the relevant logic to handle a cancelled or failed shard.
- *
+ *
* Moves the shard to unassigned or completely removes the shard (if relocation target).
- *
+ *
* - If shard is a primary, this also fails initializing replicas.
* - If shard is an active primary, this also promotes an active replica to primary (if such a replica exists).
* - If shard is a relocating primary, this also removes the primary relocation target shard.
diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java
index cfdeed5c227b6..7f5109416494e 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/ShardMovementStrategy.java
@@ -14,7 +14,7 @@
/**
* ShardMovementStrategy defines the order in which shard movement occurs.
- *
+ *
* ShardMovementStrategy values or rather their string representation to be used with
* {@link BalancedShardsAllocator#SHARD_MOVEMENT_STRATEGY_SETTING} via cluster settings.
*
diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
index de36547b10707..5e748df5eed2d 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java
@@ -153,7 +153,7 @@ public enum Reason {
/**
* Captures the status of an unsuccessful allocation attempt for the shard,
* causing it to remain in the unassigned state.
- *
+ *
* Note, ordering of the enum is important, make sure to add new values
* at the end and handle version serialization properly.
*
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
index 19601483d5607..6fc0e535ef4dc 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AwarenessReplicaBalance.java
@@ -26,7 +26,7 @@
* This {@link AwarenessReplicaBalance} gives total unique values of awareness attributes
* It takes in effect only iff cluster.routing.allocation.awareness.attributes and
* cluster.routing.allocation.awareness.force.zone.values both are specified.
- *
+ *
* This is used in enforcing total copy of shard is a maximum of unique values of awareness attributes
* Helps in balancing shards across all awareness attributes and ensuring high availability of data.
*/
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
index f209e993518c1..ae2d4a0926194 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java
@@ -36,14 +36,14 @@ public class ConstraintTypes {
/**
* Constraint to control number of shards of an index allocated on a single
* node.
- *
+ *
* In current weight function implementation, when a node has significantly
* fewer shards than other nodes (e.g. during single new node addition or node
* replacement), its weight is much less than other nodes. All shard allocations
* at this time tend to land on the new node with skewed weight. This breaks
* index level balance in the cluster, by creating all shards of the same index
* on one node, often resulting in a hotspot on that node.
- *
+ *
* This constraint is breached when balancer attempts to allocate more than
* average shards per index per node.
*/
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
index ae8d92dae6811..7fc78b05880f3 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/IndexMetadataUpdater.java
@@ -59,9 +59,9 @@
/**
* Observer that tracks changes made to RoutingNodes in order to update the primary terms and in-sync allocation ids in
* {@link IndexMetadata} once the allocation round has completed.
- *
+ *
* Primary terms are updated on primary initialization or when an active primary fails.
- *
+ *
* Allocation ids are added for shards that become active and removed for shards that stop being active.
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
index 90eff50fd9b5d..41ace0e7661fe 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java
@@ -336,7 +336,7 @@ public boolean getPreferPrimaryBalance() {
*
* package-private for testing
*/
static class WeightFunction {
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
index 3365b58d92a63..75448520a499c 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java
@@ -530,7 +530,7 @@ private void checkAndAddInEligibleTargetNode(RoutingNode targetNode) {
/**
* Move started shards that can not be allocated to a node anymore
- *
+ *
* For each shard to be moved this function executes a move operation
* to the minimal eligible node with respect to the
* weight function. If a shard is moved the shard will be set to
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java
index 63d8c656f5049..29e9acca4e6c2 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/ShardsAllocator.java
@@ -63,12 +63,12 @@ public interface ShardsAllocator {
* Returns the decision for where a shard should reside in the cluster. If the shard is unassigned,
* then the {@link AllocateUnassignedDecision} will be non-null. If the shard is not in the unassigned
* state, then the {@link MoveDecision} will be non-null.
- *
+ *
* This method is primarily used by the cluster allocation explain API to provide detailed explanations
* for the allocation of a single shard. Implementations of the {@link #allocate(RoutingAllocation)} method
* may use the results of this method implementation to decide on allocating shards in the routing table
* to the cluster.
- *
+ *
* If an implementation of this interface does not support explaining decisions for a single shard through
* the cluster explain API, then this method should throw a {@code UnsupportedOperationException}.
*/
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java
index 7fffb0299af85..def0411853643 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/command/AllocationCommand.java
@@ -43,7 +43,7 @@
/**
* A command to move shards in some way.
- *
+ *
* Commands are registered in {@link NetworkModule}.
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java
index 24c3fd7f34e4a..85f193c8c5580 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AllocationDecider.java
@@ -109,7 +109,7 @@ public Decision canRebalance(RoutingAllocation allocation) {
* Returns a {@link Decision} whether the given primary shard can be
* forcibly allocated on the given node. This method should only be called
* for unassigned primary shards where the node has a shard copy on disk.
- *
+ *
* Note: all implementations that override this behavior should take into account
* the results of {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)}
* before making a decision on force allocation, because force allocation should only
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index 1bd47f111591d..2c7df6b81e676 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -73,23 +73,23 @@
/**
* The {@link DiskThresholdDecider} checks that the node a shard is potentially
* being allocated to has enough disk space.
- *
+ *
* It has three configurable settings, all of which can be changed dynamically:
- *
+ *
*
*
* Both watermark settings are expressed in terms of used disk percentage, or
* exact byte values for free space (like "500mb")
- *
+ *
*
* If subtractShardsMovingAway is true then the size of shards moving away is subtracted from the total size of all shards
*/
public static long sizeOfRelocatingShards(
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
index 1680f2d8cad1d..c2eccdbc6ed26 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/SameShardAllocationDecider.java
@@ -44,7 +44,7 @@
/**
* An allocation decider that prevents multiple instances of the same shard to
* be allocated on the same {@code node}.
- *
+ *
* The {@link #CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING} setting allows to perform a check to prevent
* allocation of multiple instances of the same shard on a single {@code host},
* based on host name and host address. Defaults to `false`, meaning that no
diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
index 3a9fdf0ea10cf..26a04de31ce39 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java
@@ -323,7 +323,7 @@ private Decision allocateShardCopies(
* - the initializing shard routing if we want to assign the initializing shard to this node instead
* - the started shard routing in case if we want to check if we can relocate to this node.
* - the relocating shard routing if we want to relocate to this node now instead.
- *
+ *
* This method returns the corresponding initializing shard that would be allocated to this node.
*/
private ShardRouting initializingShard(ShardRouting shardRouting, String currentNodeId) {
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
index 007508162ba14..8e94e7cab23d3 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java
@@ -294,7 +294,7 @@ public void addLocalNodeMasterListener(LocalNodeMasterListener listener) {
/**
* Adds a cluster state listener that is expected to be removed during a short period of time.
* If provided, the listener will be notified once a specific time has elapsed.
- *
+ *
* NOTE: the listener is not removed on timeout. This is the responsibility of the caller.
*/
public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
index 8da6b1b941f83..afc4e36ec352e 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java
@@ -29,7 +29,7 @@
/**
* This class does throttling on task submission to cluster manager node, it uses throttling key defined in various executors
* as key for throttling. Throttling will be performed over task executor's class level, different task types have different executors class.
- *
+ *
* Set specific setting to for setting the threshold of throttling of particular task type.
* e.g : Set "cluster_manager.throttling.thresholds.put_mapping" to set throttling limit of "put mapping" tasks,
* Set it to default value(-1) to disable the throttling for this task type.
@@ -117,9 +117,9 @@ public static TimeValue getMaxDelayForRetry() {
* * Register task to cluster service with task key,
* * override getClusterManagerThrottlingKey method with above task key in task executor.
* * Verify that throttled tasks would be retried from data nodes
- *
+ *
* Added retry mechanism in TransportClusterManagerNodeAction, so it would be retried for customer generated tasks.
- *
+ *
* If tasks are not getting retried then we can register with false flag, so user won't be able to configure threshold limits for it.
*/
protected ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) {
@@ -236,7 +236,7 @@ public void onBeginSubmit(List extends TaskBatcher.BatchedTask> tasks) {
* It may start throwing throttling exception to older nodes in cluster.
* Older version nodes will not be equipped to handle the throttling exception and
* this may result in unexpected behavior where internal tasks would start failing without any retries.
- *
+ *
* For every task submission request, it will validate if nodes version is greater or equal to 2.5.0 and set the startThrottling flag.
* Once the startThrottling flag is set, it will not perform check for next set of tasks.
*/
diff --git a/server/src/main/java/org/opensearch/common/Randomness.java b/server/src/main/java/org/opensearch/common/Randomness.java
index 2c60e848b9db9..221bc95c41f31 100644
--- a/server/src/main/java/org/opensearch/common/Randomness.java
+++ b/server/src/main/java/org/opensearch/common/Randomness.java
@@ -127,7 +127,7 @@ public static Random get() {
/**
* Provides a secure source of randomness.
- *
+ *
* This acts exactly similar to {@link #get()}, but returning a new {@link SecureRandom}.
*/
public static SecureRandom createSecure() {
diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java
index 3cdb1ce30b68d..2e25a532b5abf 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java
@@ -93,10 +93,10 @@ public interface BlobContainer {
/**
* Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}.
- *
+ *
* Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively
* request more data than they need right now and to re-use this stream for future needs if possible.
- *
+ *
* Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may
* depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by
* bounding the length of the data requested.
@@ -131,7 +131,7 @@ default long readBlobPreferredLength() {
/**
* Reads blob content from the input stream and writes it to the container in a new blob with the given name,
* using an atomic write operation if the implementation supports it.
- *
+ *
* This method assumes the container does not already contain a blob of the same blobName. If a blob by the
* same name already exists, the operation will fail and an {@link IOException} will be thrown.
*
diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java
index ab40b1e2a082e..2ee3e9557b354 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobStore.java
@@ -31,6 +31,8 @@
package org.opensearch.common.blobstore;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
+
import java.io.Closeable;
import java.util.Collections;
import java.util.Map;
@@ -53,4 +55,9 @@ public interface BlobStore extends Closeable {
default Map
* Note that the methods in this implementation of {@link org.opensearch.common.blobstore.BlobContainer} may
* additionally throw a {@link java.lang.SecurityException} if the configured {@link java.lang.SecurityManager}
* does not permit read and/or write access to the underlying files.
@@ -258,7 +258,7 @@ public static String tempBlobName(final String blobName) {
/**
* Returns true if the blob is a leftover temporary blob.
- *
+ *
* The temporary blobs might be left after failed atomic write operation.
*/
public static boolean isTempBlobName(final String blobName) {
diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java
index 2914fd0c440fa..c77f2384ace0d 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListener.java
@@ -10,7 +10,10 @@
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.apache.lucene.util.IOUtils;
import org.opensearch.action.support.GroupedActionListener;
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.UUIDs;
import org.opensearch.common.annotation.InternalApi;
import org.opensearch.common.blobstore.stream.read.ReadContext;
import org.opensearch.core.action.ActionListener;
@@ -20,6 +23,8 @@
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.StandardCopyOption;
+import java.util.Collection;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executor;
@@ -33,9 +38,11 @@
@InternalApi
public class ReadContextListener implements ActionListener
* Also does not check with the parent breaker to see if the parent limit
* has been exceeded.
*
diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java
index c7c28e13496e5..9e91866cde2df 100644
--- a/server/src/main/java/org/opensearch/common/cache/Cache.java
+++ b/server/src/main/java/org/opensearch/common/cache/Cache.java
@@ -403,7 +403,7 @@ private V get(K key, long now, Consumer
* Use of different {@link CacheLoader} implementations on the same key concurrently may result in only the first
* loader function being called and the second will be returned the result provided by the first including any exceptions
* thrown during the execution of the first.
diff --git a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java
index 5ce77cdc75fe5..de4304f0e1fba 100644
--- a/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java
+++ b/server/src/main/java/org/opensearch/common/collect/CopyOnWriteHashMap.java
@@ -49,15 +49,15 @@
/**
* An immutable map whose writes result in a new copy of the map to be created.
- *
+ *
* This is essentially a hash array mapped trie: inner nodes use a bitmap in
* order to map hashes to slots by counting ones. In case of a collision (two
* values having the same 32-bits hash), a leaf node is created which stores
* and searches for values sequentially.
- *
+ *
* Reads and writes both perform in logarithmic time. Null keys and values are
* not supported.
- *
+ *
* This structure might need to perform several object creations per write so
* it is better suited for work-loads that are not too write-intensive.
*
@@ -250,7 +250,7 @@ public static
* As a consequence, the number of slots in an inner node is equal to the
* number of one bits in the bitmap.
*
diff --git a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java
index 0334d367ffdbc..1622457ba27cc 100644
--- a/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java
+++ b/server/src/main/java/org/opensearch/common/geo/GeoShapeType.java
@@ -221,11 +221,11 @@ void validateLinearRing(CoordinateNode coordinates, boolean coerce) {
@Override
CoordinateNode validate(CoordinateNode coordinates, boolean coerce) {
- /**
- * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring)
- * A LinearRing is closed LineString with 4 or more positions. The first and last positions
- * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly
- * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition.
+ /*
+ Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring)
+ A LinearRing is closed LineString with 4 or more positions. The first and last positions
+ are equivalent (they represent equivalent points). Though a LinearRing is not explicitly
+ represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition.
*/
if (coordinates.children == null || coordinates.children.isEmpty()) {
throw new OpenSearchParseException(
diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java
index 393c238cb3b2f..8c566c4191e4f 100644
--- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java
+++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java
@@ -665,7 +665,7 @@ public static GeoPoint parseFromString(String val) {
/**
* Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m".
- *
+ *
* The precision is expressed as a number between 1 and 12 and indicates the length of geohash
* used to represent geo points.
*
@@ -696,7 +696,7 @@ public static int parsePrecision(XContentParser parser) throws IOException, Open
/**
* Checks that the precision is within range supported by opensearch - between 1 and 12
- *
+ *
* Returns the precision value if it is in the range and throws an IllegalArgumentException if it
* is outside the range.
*/
diff --git a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java
index 56146fc8197be..93c7f4b93679a 100644
--- a/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java
+++ b/server/src/main/java/org/opensearch/common/geo/GeometryFormat.java
@@ -63,7 +63,7 @@ public interface GeometryFormat
* For example, the GeoJson format returns the geometry as a map, while WKT returns a string.
*/
Object toXContentAsObject(ParsedFormat geometry);
diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java
index b436787220eb0..9e118ab2de3a5 100644
--- a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java
+++ b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java
@@ -177,11 +177,11 @@ public PolygonBuilder close() {
}
private static void validateLinearRing(LineStringBuilder lineString) {
- /**
- * Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring)
- * A LinearRing is closed LineString with 4 or more positions. The first and last positions
- * are equivalent (they represent equivalent points). Though a LinearRing is not explicitly
- * represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition.
+ /*
+ Per GeoJSON spec (http://geojson.org/geojson-spec.html#linestring)
+ A LinearRing is a closed LineString with 4 or more positions. The first and last positions
+ are equivalent (they represent equivalent points). Though a LinearRing is not explicitly
+ represented as a GeoJSON geometry type, it is referred to in the Polygon geometry type definition.
*/
List
* complies with geojson specification: https://tools.ietf.org/html/rfc7946
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java
index d99d1daf46a2a..b199da0f3691a 100644
--- a/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java
+++ b/server/src/main/java/org/opensearch/common/geo/parsers/GeoWKTParser.java
@@ -58,7 +58,7 @@
/**
* Parses shape geometry represented in WKT format
- *
+ *
* complies with OGC® document: 12-063r5 and ISO/IEC 13249-3:2016 standard
* located at http://docs.opengeospatial.org/is/12-063r5/12-063r5.html
*
diff --git a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java
index 8ba0bd7ee1be4..e481ffd460798 100644
--- a/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java
+++ b/server/src/main/java/org/opensearch/common/hash/MurmurHash3.java
@@ -93,7 +93,7 @@ protected static long fmix(long k) {
/**
* Compute the hash of the MurmurHash3_x64_128 hashing function.
- *
+ *
* Note, this hashing function might be used to persist hashes, so if the way hashes are computed
* changes for some reason, it needs to be addressed (like in BloomFilter and MurmurHashField).
*/
diff --git a/server/src/main/java/org/opensearch/common/inject/Initializer.java b/server/src/main/java/org/opensearch/common/inject/Initializer.java
index e806eba6df707..b88b01c03c018 100644
--- a/server/src/main/java/org/opensearch/common/inject/Initializer.java
+++ b/server/src/main/java/org/opensearch/common/inject/Initializer.java
@@ -68,9 +68,8 @@ class Initializer {
/**
* Registers an instance for member injection when that step is performed.
*
- * @param instance an instance that optionally has members to be injected (each annotated with
- * @param source the source location that this injection was requested
- * @Inject).
+ * @param instance an instance that optionally has members to be injected (each annotated with {@code @Inject}).
+ * @param source the source location that this injection was requested
*/
public
* The format of the datetime is configurable, and unix timestamps can also be used. Datemath
* is appended to a datetime with the following syntax:
*
* Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from
* LoggingEvent into a multiline string
*
diff --git a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java
index 0ffd633e5a967..17b75ab22f3ed 100644
--- a/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java
+++ b/server/src/main/java/org/opensearch/common/lucene/ShardCoreKeyMap.java
@@ -55,7 +55,7 @@
* mappings as segments that were not known before are added and prevents the
* structure from growing indefinitely by registering close listeners on these
* segments so that at any time it only tracks live segments.
- *
+ *
* NOTE: This is heavy. Avoid using this class unless absolutely required.
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java
index 8870e26c373e9..0734659d8ee72 100644
--- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java
+++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java
@@ -174,7 +174,8 @@ public NetworkModule(
pageCacheRecycler,
circuitBreakerService,
namedWriteableRegistry,
- networkService
+ networkService,
+ tracer
);
for (Map.Entry
* Use the java based Rounding class where applicable
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java
index 8b7a2a82e5cb1..117ed66fcb451 100644
--- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java
@@ -254,7 +254,7 @@ public synchronized
* Also automatically adds empty consumers for all settings in order to activate logging
*/
public synchronized void addSettingsUpdateConsumer(Consumer
* Also automatically adds empty consumers for all settings in order to activate logging
*/
public synchronized void addSettingsUpdateConsumer(
diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
index 90abc0a0765c1..387b0c9753574 100644
--- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java
@@ -39,7 +39,8 @@ protected FeatureFlagSettings(
FeatureFlags.EXTENSIONS_SETTING,
FeatureFlags.IDENTITY_SETTING,
FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING,
- FeatureFlags.TELEMETRY_SETTING
+ FeatureFlags.TELEMETRY_SETTING,
+ FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING
)
)
);
diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java
index f25dd872fc703..1ad3b7ab8875a 100644
--- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java
+++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java
@@ -88,7 +88,7 @@
/**
* A disk based container for sensitive settings in OpenSearch.
- *
+ *
* Loading a keystore has 2 phases. First, call {@link #load(Path)}. Then call
* {@link #decrypt(char[])} with the keystore password, or an empty char array if
* {@link #hasPassword()} is {@code false}. Loading and decrypting should happen
@@ -147,7 +147,7 @@ private static class Entry {
/**
* The number of bits for the cipher key.
- *
+ *
* Note: The Oracle JDK 8 ships with a limited JCE policy that restricts key length for AES to 128 bits.
* This can be increased to 256 bits once minimum java 9 is the minimum java version.
* See http://www.oracle.com/technetwork/java/javase/terms/readme/jdk9-readme-3852447.html#jce
@@ -234,7 +234,7 @@ public static KeyStoreWrapper load(Path configDir) throws IOException {
/**
* Loads information about the OpenSearch keystore from the provided config directory.
- *
+ *
* {@link #decrypt(char[])} must be called before reading or writing any entries.
* Returns {@code null} if no keystore exists.
*/
@@ -358,7 +358,7 @@ private Cipher createCipher(int opmode, char[] password, byte[] salt, byte[] iv)
/**
* Decrypts the underlying keystore data.
- *
+ *
* This may only be called once.
*/
public void decrypt(char[] password) throws GeneralSecurityException, IOException {
diff --git a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java
index f2ccc01a4c7e6..1855270b016b3 100644
--- a/server/src/main/java/org/opensearch/common/settings/SecureSetting.java
+++ b/server/src/main/java/org/opensearch/common/settings/SecureSetting.java
@@ -45,7 +45,7 @@
/**
* A secure setting.
- *
+ *
* This class allows access to settings from the OpenSearch keystore.
*
* @opensearch.internal
@@ -152,7 +152,7 @@ public void diff(Settings.Builder builder, Settings source, Settings defaultSett
/**
* A setting which contains a sensitive string.
- *
+ *
* This may be any sensitive string, e.g. a username, a password, an auth token, etc.
*/
public static Setting
* This may be any sensitive file, e.g. a set of credentials normally in plaintext.
*/
public static Setting
* If a setting doesn't start with the prefix, the builder appends the prefix to such setting.
*/
public Builder normalizePrefix(String prefix) {
diff --git a/server/src/main/java/org/opensearch/common/time/DateFormatter.java b/server/src/main/java/org/opensearch/common/time/DateFormatter.java
index d57fd441b9bf4..c98bd853dfced 100644
--- a/server/src/main/java/org/opensearch/common/time/DateFormatter.java
+++ b/server/src/main/java/org/opensearch/common/time/DateFormatter.java
@@ -126,6 +126,14 @@ default String formatJoda(DateTime dateTime) {
*/
String pattern();
+ /**
+ * A name based format for this formatter. Can be one of the registered formatters like
* This action assumes that we can reliably fall back to some defaults if not all parts of a
* zoned date time are set
- *
+ *
* - If a zoned date time is passed, it is returned
* - If no timezone is found, ZoneOffset.UTC is used
* - If we find a time and a date, converting to a ZonedDateTime is straight forward,
diff --git a/server/src/main/java/org/opensearch/common/time/DateMathParser.java b/server/src/main/java/org/opensearch/common/time/DateMathParser.java
index f6573eaa90286..7088d6cb7a498 100644
--- a/server/src/main/java/org/opensearch/common/time/DateMathParser.java
+++ b/server/src/main/java/org/opensearch/common/time/DateMathParser.java
@@ -64,12 +64,12 @@ default Instant parse(String text, LongSupplier now, boolean roundUpProperty, Da
/**
* Parse text, that potentially contains date math into the milliseconds since the epoch
- *
+ *
* Examples are
- *
+ *
*
* Supported rounding units are
* y year
* M month
diff --git a/server/src/main/java/org/opensearch/common/time/DateUtils.java b/server/src/main/java/org/opensearch/common/time/DateUtils.java
index 021b8a3be8b23..7ab395a1117e7 100644
--- a/server/src/main/java/org/opensearch/common/time/DateUtils.java
+++ b/server/src/main/java/org/opensearch/common/time/DateUtils.java
@@ -342,7 +342,7 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) {
/**
* Rounds the given utc milliseconds since the epoch down to the next unit millis
- *
+ *
* Note: This does not check for correctness of the result, as this only works with units smaller or equal than a day
* In order to ensure the performance of this methods, there are no guards or checks in it
*
diff --git a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java
index f3459a5857b9e..7fc39e063efb5 100644
--- a/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java
+++ b/server/src/main/java/org/opensearch/common/time/DateUtilsRounding.java
@@ -32,12 +32,12 @@
* This class has been copied from different locations within the joda time package, as
* these methods are fast when used for rounding, as they do not require conversion to java
* time objects
- *
+ *
* This code has been copied from jodatime 2.10.1
* The source can be found at https://github.com/JodaOrg/joda-time/tree/v2.10.1
- *
+ *
* The following methods have been copied (along with required helper variables)
- *
+ *
* - org.joda.time.chrono.GregorianChronology.calculateFirstDayOfYearMillis(int year)
* - org.joda.time.chrono.BasicChronology.getYear(int year)
* - org.joda.time.chrono.BasicGJChronology.getMonthOfYear(long utcMillis, int year)
diff --git a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java
index 07013a3dc75f2..f711b14aeb928 100644
--- a/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java
+++ b/server/src/main/java/org/opensearch/common/time/JavaDateFormatter.java
@@ -32,6 +32,7 @@
package org.opensearch.common.time;
+import org.opensearch.common.util.FeatureFlags;
import org.opensearch.core.common.Strings;
import java.text.ParsePosition;
@@ -51,6 +52,7 @@
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
@@ -67,9 +69,12 @@ class JavaDateFormatter implements DateFormatter {
}
private final String format;
+ private final String printFormat;
private final DateTimeFormatter printer;
private final List
* The approach with collection of parsers was taken because java-time requires ordering on optional (composite)
* patterns. Joda does not suffer from this.
* https://bugs.openjdk.java.net/browse/JDK-8188771
@@ -233,13 +286,23 @@ public TemporalAccessor parse(String input) {
*/
private TemporalAccessor doParse(String input) {
if (parsers.size() > 1) {
+ Object object = null;
+ if (canCacheLastParsedFormatter && lastParsedformatter != null) {
+ ParsePosition pos = new ParsePosition(0);
+ object = lastParsedformatter.toFormat().parseObject(input, pos);
+ if (parsingSucceeded(object, input, pos)) {
+ return (TemporalAccessor) object;
+ }
+ }
for (DateTimeFormatter formatter : parsers) {
ParsePosition pos = new ParsePosition(0);
- Object object = formatter.toFormat().parseObject(input, pos);
+ object = formatter.toFormat().parseObject(input, pos);
if (parsingSucceeded(object, input, pos)) {
+ lastParsedformatter = formatter;
return (TemporalAccessor) object;
}
}
+
throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0);
}
return this.parsers.get(0).parse(input);
@@ -255,12 +318,14 @@ public DateFormatter withZone(ZoneId zoneId) {
if (zoneId.equals(zone())) {
return this;
}
- List
* The format of the datetime is configurable, and unix timestamps can also be used. Datemath
* is appended to a datetime with the following syntax:
*
* To do a search, create a subclass and implement custom {@link #compare(int)} and {@link #distance(int)} methods.
- *
+ *
* {@link BinarySearcher} knows nothing about the value being searched for or the underlying data structure.
* These things should be determined by the subclass in its overridden methods.
- *
+ *
* Refer to {@link BigArrays.DoubleBinarySearcher} for an example.
- *
+ *
* NOTE: this class is not thread safe
*
* @opensearch.internal
@@ -74,7 +74,7 @@ private int getClosestIndex(int index1, int index2) {
/**
* Uses a binary search to determine the index of the element within the index range {from, ... , to} that is
* closest to the search value.
- *
+ *
* Unlike most binary search implementations, the value being searched for is not an argument to search method.
* Rather, this value should be stored by the subclass along with the underlying array.
*
diff --git a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java
index 8bc3ca3affb12..67dd4b848f4c0 100644
--- a/server/src/main/java/org/opensearch/common/util/CancellableThreads.java
+++ b/server/src/main/java/org/opensearch/common/util/CancellableThreads.java
@@ -45,7 +45,7 @@
* A utility class for multi threaded operation that needs to be cancellable via interrupts. Every cancellable operation should be
* executed via {@link #execute(Interruptible)}, which will capture the executing thread and make sure it is interrupted in the case
* of cancellation.
- *
+ *
* Cancellation policy: This class does not support external interruption via
* CuckooFilters are similar to Bloom Filters in usage; values are inserted, and the Cuckoo
* can be asked if it has seen a particular value before. Because the structure is approximate,
* it can return false positives (says it has seen an item when it has not). False negatives
* are not possible though; if the structure says it _has not_ seen an item, that can be
* trusted.
- *
+ *
* The filter can "saturate" at which point the map has hit its configured load factor (or near enough
* that a large number of evictions are not able to find a free slot) and will refuse to accept
* any new insertions.
- *
+ *
* NOTE: this version does not support deletions, and as such does not save duplicate
* fingerprints (e.g. when inserting, if the fingerprint is already present in the
* candidate buckets, it is not inserted). By not saving duplicates, the CuckooFilter
* loses the ability to delete values. But by not allowing deletions, we can save space
* (do not need to waste slots on duplicate fingerprints), and we do not need to worry
* about inserts "overflowing" a bucket because the same item has been repeated repeatedly
- *
+ *
* NOTE: this CuckooFilter exposes a number of Expert APIs which assume the caller has
* intimate knowledge about how the algorithm works. It is recommended to use
* {@link SetBackedScalingCuckooFilter} instead.
- *
+ *
* Based on the paper:
- *
+ *
* Fan, Bin, et al. "Cuckoo filter: Practically better than bloom."
* Proceedings of the 10th ACM International on Conference on emerging Networking Experiments and Technologies. ACM, 2014.
- *
+ *
* https://www.cs.cmu.edu/~dga/papers/cuckoo-conext2014.pdf
*
* @opensearch.internal
@@ -200,7 +200,7 @@ public int getCount() {
/**
* Returns the number of buckets that has been chosen based
* on the initial configuration
- *
+ *
* Expert-level API
*/
int getNumBuckets() {
@@ -209,7 +209,7 @@ int getNumBuckets() {
/**
* Returns the number of bits used per entry
- *
+ *
* Expert-level API
*/
int getBitsPerEntry() {
@@ -220,7 +220,7 @@ int getBitsPerEntry() {
* Returns the cached fingerprint mask. This is simply a mask for the
* first bitsPerEntry bits, used by {@link CuckooFilter#fingerprint(int, int, int)}
* to generate the fingerprint of a hash
- *
+ *
* Expert-level API
*/
int getFingerprintMask() {
@@ -230,7 +230,7 @@ int getFingerprintMask() {
/**
* Returns an iterator that returns the long[] representation of each bucket. The value
* inside each long will be a fingerprint (or 0L, representing empty).
- *
+ *
* Expert-level API
*/
Iterator
* Expert-level API, use {@link CuckooFilter#mightContain(long)} to check if
* a value is in the filter.
*/
@@ -307,7 +307,7 @@ boolean add(long hash) {
/**
* Attempts to merge the fingerprint into the specified bucket or it's alternate bucket.
* Returns true if the insertion was successful, false if the filter is saturated.
- *
+ *
* Expert-level API, use {@link CuckooFilter#add(long)} to insert
* values into the filter
*/
@@ -351,7 +351,7 @@ boolean mergeFingerprint(int bucket, int fingerprint) {
* Low-level insert method. Attempts to write the fingerprint into an empty entry
at this bucket's position. Returns true if that was successful, false if all entries
* were occupied.
- *
+ *
* If the fingerprint already exists in one of the entries, it will not duplicate the
* fingerprint like the original paper. This means the filter _cannot_ support deletes,
* but is not sensitive to "overflowing" buckets with repeated inserts
@@ -376,10 +376,10 @@ private boolean tryInsert(int bucket, int fingerprint) {
/**
* Converts a hash into a bucket index (primary or alternate).
- *
+ *
* If the hash is negative, this flips the bits. The hash is then modulo numBuckets
* to get the final index.
- *
+ *
* Expert-level API
*/
static int hashToIndex(int hash, int numBuckets) {
@@ -388,16 +388,16 @@ static int hashToIndex(int hash, int numBuckets) {
/**
* Calculates the alternate bucket for a given bucket:fingerprint tuple
- *
+ *
* The alternate bucket is the fingerprint multiplied by a mixing constant,
* then xor'd against the bucket. This new value is modulo'd against
* the buckets via {@link CuckooFilter#hashToIndex(int, int)} to get the final
* index.
- *
+ *
* Note that the xor makes this operation reversible as long as we have the
* fingerprint and current bucket (regardless of if that bucket was the primary
* or alternate).
- *
+ *
* Expert-level API
*/
static int alternateIndex(int bucket, int fingerprint, int numBuckets) {
@@ -424,10 +424,10 @@ private int getOffset(int bucket, int position) {
/**
* Calculates the fingerprint for a given hash.
- *
+ *
* The fingerprint is simply the first `bitsPerEntry` number of bits that are non-zero.
* If the entire hash is zero, `(int) 1` is used
- *
+ *
* Expert-level API
*/
static int fingerprint(int hash, int bitsPerEntry, int fingerprintMask) {
@@ -501,7 +501,7 @@ private double getLoadFactor(int b) {
* Calculates the optimal number of buckets for this filter. The xor used in the bucketing
* algorithm requires this to be a power of two, so the optimal number of buckets will
* be rounded to the next largest power of two where applicable.
- *
+ *
* TODO: there are schemes to avoid powers of two, might want to investigate those
*/
private int getNumBuckets(long capacity, double loadFactor, int b) {
diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
index b89d2d0549823..4e9b417e3433b 100644
--- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
+++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java
@@ -55,6 +55,11 @@ public class FeatureFlags {
*/
public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled";
+ /**
+ * Gates the optimization of datetime formatters caching along with change in default datetime formatter.
+ */
+ public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled";
+
/**
* Should store the settings from opensearch.yml.
*/
@@ -83,6 +88,17 @@ public static boolean isEnabled(String featureFlagName) {
return settings != null && settings.getAsBoolean(featureFlagName, false);
}
+ public static boolean isEnabled(Setting
* package-private for testing
*/
Set
* CuckooFilter's can "saturate" and refuse to accept any new values. When this happens,
* the datastructure scales by adding a new filter. This new filter's bytes will be tracked
* in the registered breaker when configured.
diff --git a/server/src/main/java/org/opensearch/common/util/TokenBucket.java b/server/src/main/java/org/opensearch/common/util/TokenBucket.java
index d2e7e836bf07f..a9ebb86eed8a2 100644
--- a/server/src/main/java/org/opensearch/common/util/TokenBucket.java
+++ b/server/src/main/java/org/opensearch/common/util/TokenBucket.java
@@ -20,7 +20,7 @@
public class TokenBucket {
/**
* Defines a monotonically increasing counter.
- *
+ *
* Usage examples:
* 1. clock = System::nanoTime can be used to perform rate-limiting per unit time
* 2. clock = AtomicLong::get can be used to perform rate-limiting per unit number of operations
diff --git a/server/src/main/java/org/opensearch/common/util/URIPattern.java b/server/src/main/java/org/opensearch/common/util/URIPattern.java
index a3c385e5ea660..49e4b53e20740 100644
--- a/server/src/main/java/org/opensearch/common/util/URIPattern.java
+++ b/server/src/main/java/org/opensearch/common/util/URIPattern.java
@@ -39,9 +39,9 @@
/**
* URI Pattern matcher
- *
+ *
* The pattern is a URI in which authority, path, query and fragment can be replaced with a simple pattern.
- *
+ *
* For example: foobar://*.local/some_path/*?*#* will match all URIs with scheme foobar in the local domain
* with any port, with path that starts some_path and with any query and fragment.
*
diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java
index b1f4714a90e8e..4357254176358 100644
--- a/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java
+++ b/server/src/main/java/org/opensearch/common/util/concurrent/ListenableFuture.java
@@ -73,7 +73,7 @@ public void addListener(ActionListener
* It will apply the provided ThreadContext (if not null) when executing the listening.
*/
public void addListener(ActionListener
* This uses ParseField's logger because that is the logger that
* we have been using for many releases for deprecated fields.
* Changing that will require some research to make super duper
diff --git a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java
index 798a58551457f..17bb0a1de267b 100644
--- a/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java
+++ b/server/src/main/java/org/opensearch/common/xcontent/XContentHelper.java
@@ -494,7 +494,7 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon
/**
* Returns the contents of an object as an unparsed BytesReference
- *
+ *
* This is useful for things like mappings where we're copying bytes around but don't
* actually need to parse their contents, and so avoids building large maps of maps
* unnecessarily
diff --git a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java
index adfa871cbfcbe..a87edbb949d39 100644
--- a/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java
+++ b/server/src/main/java/org/opensearch/common/xcontent/support/XContentMapValues.java
@@ -117,12 +117,11 @@ private static void extractRawValues(List values, ListorderedGroups
is full.
* This is safe for collapsing since the group sort
is the same as the query sort.
*/
diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java
index 6fde39b16a59a..4edcdea42b53b 100644
--- a/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java
+++ b/server/src/main/java/org/apache/lucene/search/uhighlight/BoundedBreakIteratorScanner.java
@@ -43,13 +43,13 @@
* If the {@link BreakIterator} cannot find a passage smaller than the maximum length,
* a secondary break iterator is used to re-split the passage at the first boundary after
* maximum length.
- *
+ *
*
*
* .mapping("{\"_doc\":{\"properties\": ... }}")
@@ -269,7 +269,7 @@ public CreateIndexRequest mapping(String source, XContentType xContentType) {
/**
* Adds mapping that will be added when the index gets created.
- *
+ *
BackoffPolicy
are progressions which can be either finite or infinite although
* the latter should not be used for retrying. A progression can be mapped to a java.util.Iterator
with the following
* semantics:
@@ -241,7 +241,7 @@ private static class ExponentialEqualJitterBackoffIterator implements Iterator
*
sortedDocs
using fetchResultsArr
,
* merges suggestions, aggregations and profile results
- *
+ *
*
*
*
*
* null
if
* no active replica is found.
- *
+ * null
if
* no active replica is found.
- *
+ * weight(node, index) = weightindex(node, index) + weightnode(node, index)
- *
+ * cluster.routing.allocation.disk.watermark.low
is the low disk
* watermark. New shards will not allocated to a node with usage higher than this,
* although this watermark may be passed by allocating a shard. It defaults to
* 0.85 (85.0%).
- *
+ * cluster.routing.allocation.disk.watermark.high
is the high disk
* watermark. If a node has usage higher than this, shards are not allowed to
* remain on the node. In addition, if allocating a shard to a node causes the
* node to pass this watermark, it will not be allowed. It defaults to
* 0.90 (90.0%).
- *
+ * cluster.routing.allocation.disk.threshold_enabled
is used to
* enable or disable this decider. It defaults to true (enabled).
*
@@ -119,7 +119,7 @@ public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings)
/**
* Returns the size of all shards that are currently being relocated to
* the node, but may not be finished transferring yet.
- *
+ * ||[+-/](\d+)?[yMwdhHms]
.
diff --git a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java
index e259d5d9e3e33..ed324e4e62d8f 100644
--- a/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java
+++ b/server/src/main/java/org/opensearch/common/logging/JsonThrowablePatternConverter.java
@@ -47,7 +47,7 @@
* Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array
* "exception": [ "stacktrace", "lines", "as", "array", "elements" ]
- *
+ * epoch_millis
or
+ * a configured format like HH:mm:ss
+ *
+ * @return The name of this formatter
+ */
+ String printPattern();
+
/**
* Returns the configured locale of the date formatter
*
@@ -147,7 +155,7 @@ default String formatJoda(DateTime dateTime) {
*/
DateMathParser toDateMathParser();
- static DateFormatter forPattern(String input) {
+ static DateFormatter forPattern(String input, String printPattern, Boolean canCacheFormatter) {
if (Strings.hasLength(input) == false) {
throw new IllegalArgumentException("No date pattern provided");
@@ -158,7 +166,28 @@ static DateFormatter forPattern(String input) {
List2014-11-18||-2y
subtracts two years from the input date
* now/m
rounds the current time to minute granularity
- *
+ * DateTimeFormatter.ClassicFormat.parseObject
* which does not throw exceptions when parsing failed.
- *
+ * ||[+-/](\d+)?[yMwdhHms]
.
diff --git a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java
index ca63c170c0ccd..e4315f8699206 100644
--- a/server/src/main/java/org/opensearch/common/util/BinarySearcher.java
+++ b/server/src/main/java/org/opensearch/common/util/BinarySearcher.java
@@ -34,14 +34,14 @@
/**
* Performs binary search on an arbitrary data structure.
- *
+ * Thread#interrupt()
. Always use #cancel() instead.
*
* @opensearch.internal
diff --git a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java
index 0c792b37ccfa9..28b55f70855d6 100644
--- a/server/src/main/java/org/opensearch/common/util/CuckooFilter.java
+++ b/server/src/main/java/org/opensearch/common/util/CuckooFilter.java
@@ -46,33 +46,33 @@
/**
* An approximate set membership datastructure
- *
+ *