diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 78d8796c624d7..f3a9aa2787a80 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -30,4 +30,5 @@ BWC_VERSION: - "2.12.0" - "2.12.1" - "2.13.0" + - "2.13.1" - "2.14.0" diff --git a/.github/workflows/changelog_verifier.yml b/.github/workflows/changelog_verifier.yml index 9456fbf8b4ca0..04e2ed5006269 100644 --- a/.github/workflows/changelog_verifier.yml +++ b/.github/workflows/changelog_verifier.yml @@ -1,7 +1,7 @@ name: "Changelog Verifier" on: pull_request: - types: [opened, edited, review_requested, synchronize, reopened, ready_for_review, labeled, unlabeled] + types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled] jobs: # Enforces the update of a changelog file on every pull request @@ -13,7 +13,19 @@ jobs: with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ github.event.pull_request.head.sha }} - - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog-3x + with: + skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG-3.0.md' + continue-on-error: true + - uses: dangoslen/changelog-enforcer@v3 + id: verify-changelog with: skipLabels: "autocut, skip-changelog" + changeLogPath: 'CHANGELOG.md' + continue-on-error: true + - run: | + if [[ ${{ steps.verify-changelog-3x.outcome }} == 'failure' && ${{ steps.verify-changelog.outcome }} == 'failure' ]]; then + exit 1 + fi diff --git a/.github/workflows/pull-request-checks.yml b/.github/workflows/pull-request-checks.yml index 7efcf529588ed..a62ea9cfa179b 100644 --- a/.github/workflows/pull-request-checks.yml +++ b/.github/workflows/pull-request-checks.yml @@ -18,6 +18,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: peternied/check-pull-request-description-checklist@v1.1 + if: github.actor != 'dependabot[bot]' with: checklist-items: | New functionality includes testing. diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md new file mode 100644 index 0000000000000..0715c6de49ca4 --- /dev/null +++ b/CHANGELOG-3.0.md @@ -0,0 +1,104 @@ +# CHANGELOG +All notable changes to this project are documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. + +## [Unreleased 3.0] +### Added +- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) +- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) +- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) +- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) +- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) +- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) +- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) +- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) +- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) +- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) +- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) +- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) +- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) +- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) + +### Dependencies +- Bump `log4j-core` from 2.18.0 to 2.19.0 +- Bump `forbiddenapis` from 3.3 to 3.4 +- Bump `avro` from 1.11.1 to 1.11.2 +- Bump `woodstox-core` from 6.3.0 to 6.3.1 +- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) +- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) +- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) +- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) +- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) +- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) +- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) +- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 +- Bump `gson` from 2.10 to 2.10.1 +- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 +- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 +- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 +- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 +- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) +- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344)) +- Bump `com.google.http-client:google-http-client` from 1.42.0 to 1.43.2 ([#7928](https://github.com/opensearch-project/OpenSearch/pull/7928)) +- Add OpenTelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) +- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump `org.bouncycastle:bcpkix-jdk15on` to 
`org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) +- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) +- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) +- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) +- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) +- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) +- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) +- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) + +### Changed +- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) +- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) +- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) +- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) +- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) +- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) +- Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028)) +- Deprecate CamelCase `PathHierarchy` tokenizer name in favor of lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) +- Switched to more reliable OpenSearch Lucene snapshot location ([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) +- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) + +### Deprecated + +### Removed +- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) +- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants 
([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) +- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) +- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) +- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) +- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) +- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) +- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) +- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) +- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) + +### Fixed +- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) +- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) +- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) +- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) +- Fix typo in API annotation check message ([#11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) +- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) +- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) +- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) +- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) + +### Security + +[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e7652bf00462..94d47182ede45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,104 +3,6 @@ All notable changes to this project are documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). See the [CONTRIBUTING guide](./CONTRIBUTING.md#Changelog) for instructions on how to add changelog entries. 
-## [Unreleased 3.0] -### Added -- Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) -- Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) -- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) -- Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) -- Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) -- Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) -- GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) -- Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) -- [S3 Repository] Add setting to control connection count for sync client ([#12028](https://github.com/opensearch-project/OpenSearch/pull/12028)) -- Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) -- Add Remote Store Migration Experimental flag and allow mixed mode clusters under same ([#11986](https://github.com/opensearch-project/OpenSearch/pull/11986)) -- Remote reindex: Add support for configurable retry mechanism ([#12561](https://github.com/opensearch-project/OpenSearch/pull/12561)) -- [Admission Control] Integrate IO Usage Tracker to the Resource Usage Collector Service and Emit IO Usage Stats ([#11880](https://github.com/opensearch-project/OpenSearch/pull/11880)) -- Tracing for deep search path ([#12103](https://github.com/opensearch-project/OpenSearch/pull/12103)) -- Add explicit dependency to validatePom and generatePom tasks ([#12807](https://github.com/opensearch-project/OpenSearch/pull/12807)) -- Replace configureEach with all for publication iteration ([#12876](https://github.com/opensearch-project/OpenSearch/pull/12876)) - -### Dependencies -- Bump `log4j-core` from 2.18.0 to 2.19.0 -- Bump `forbiddenapis` from 3.3 to 3.4 -- Bump `avro` from 1.11.1 to 1.11.2 -- Bump `woodstox-core` from 6.3.0 to 6.3.1 -- Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) -- Bump `reactive-streams` from 1.0.3 to 1.0.4 ([#4488](https://github.com/opensearch-project/OpenSearch/pull/4488)) -- Bump `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) -- Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) -- Update Apache Lucene to 9.5.0-snapshot-d5cef1c ([#5570](https://github.com/opensearch-project/OpenSearch/pull/5570)) -- Bump `maven-model` from 3.6.2 to 3.8.6 ([#5599](https://github.com/opensearch-project/OpenSearch/pull/5599)) -- Bump `maxmind-db` from 2.1.0 to 3.0.0 ([#5601](https://github.com/opensearch-project/OpenSearch/pull/5601)) -- Bump `wiremock-jre8-standalone` from 2.33.2 to 2.35.0 -- Bump `gson` from 2.10 to 2.10.1 -- Bump `com.google.code.gson:gson` from 2.10 to 2.10.1 -- Bump `com.maxmind.geoip2:geoip2` from 4.0.0 to 4.0.1 -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.11 to 0.16.12 
-- Bump `org.apache.commons:commons-configuration2` from 2.8.0 to 2.9.0 -- Bump `com.netflix.nebula:nebula-publishing-plugin` from 19.2.0 to 20.3.0 -- Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291)) -- OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344) -- Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928))) -- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543)) -- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) -- Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) -- Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) -- Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) -- Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) -- Bump `org.bouncycastle:bcprov-jdk15to18` to `org.bouncycastle:bcprov-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcmail-jdk15to18` to `org.bouncycastle:bcmail-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump `org.bouncycastle:bcpkix-jdk15to18` to `org.bouncycastle:bcpkix-jdk18on` version 1.77 ([#12317](https://github.com/opensearch-project/OpenSearch/pull/12317)) -- Bump Jackson version from 2.16.1 to 2.16.2 ([#12611](https://github.com/opensearch-project/OpenSearch/pull/12611)) -- Bump `aws-sdk-java` from 2.20.55 to 2.20.86 ([#12251](https://github.com/opensearch-project/OpenSearch/pull/12251)) - -### Changed -- [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) -- Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) -- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) -- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) -- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) -- Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) -- Add task completion count in search backpressure stats API 
([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) -- Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) -- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) -- Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) - -### Deprecated - -### Removed -- Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) -- Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) -- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://https://github.com/opensearch-project/OpenSearch/pull/2768)) -- Remove LegacyESVersion.V_7_2_ and V_7_3_ Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) -- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) -- Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) -- Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) -- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) -- Remove unused private methods ([#4926](https://github.com/opensearch-project/OpenSearch/pull/4926)) -- Remove LegacyESVersion.V_7_8_ and V_7_9_ Constants ([#4855](https://github.com/opensearch-project/OpenSearch/pull/4855)) -- Remove LegacyESVersion.V_7_6_ and V_7_7_ Constants ([#4837](https://github.com/opensearch-project/OpenSearch/pull/4837)) -- Remove LegacyESVersion.V_7_10_ Constants ([#5018](https://github.com/opensearch-project/OpenSearch/pull/5018)) -- Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) -- Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) - -### Fixed -- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) -- Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) -- Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) -- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) -- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) -- [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) -- Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) -- Fix error in RemoteSegmentStoreDirectory when debug logging is enabled 
([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) - -### Security - ## [Unreleased 2.x] ### Added - Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285)) @@ -112,6 +14,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Derived fields support to derive field values at query time without indexing ([#12569](https://github.com/opensearch-project/OpenSearch/pull/12569)) - Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) - Add cluster primary balance contraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656)) +- [Remote Store] Make translog transfer timeout configurable ([#12704](https://github.com/opensearch-project/OpenSearch/pull/12704)) +- Reject Resize index requests (i.e., split, shrink and clone) while DocRep to SegRep migration is in progress ([#12686](https://github.com/opensearch-project/OpenSearch/pull/12686)) +- Add support for more than one protocol for transport ([#12967](https://github.com/opensearch-project/OpenSearch/pull/12967)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) @@ -119,20 +24,29 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) - Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) - Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996), [#12998](https://github.com/opensearch-project/OpenSearch/pull/12998), [#12999](https://github.com/opensearch-project/OpenSearch/pull/12999)) +- Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.26.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `commons-codec:commons-codec` from 1.15 to 1.16.1 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump Apache Tika from 2.6.0 to 2.9.2 ([#12627](https://github.com/opensearch-project/OpenSearch/pull/12627)) +- Bump `com.gradle.enterprise` from 3.16.2 to 3.17 ([#13116](https://github.com/opensearch-project/OpenSearch/pull/13116)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) - Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) +- Update links to documentation in rest-api-spec ([#13043](https://github.com/opensearch-project/OpenSearch/pull/13043)) ### Deprecated ### Removed +- Remove handling of index.mapper.dynamic in AutoCreateIndex ([#13067](https://github.com/opensearch-project/OpenSearch/pull/13067)) ### Fixed +- Fix bulk API ignores ingest pipeline for upsert ([#12883](https://github.com/opensearch-project/OpenSearch/pull/12883)) - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) - Fix UOE While building Exists query for nested search_as_you_type field 
([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) +- Client with Java 8 runtime and Apache HttpClient 5 Transport fails with java.lang.NoSuchMethodError: java.nio.ByteBuffer.flip()Ljava/nio/ByteBuffer ([#13100](https://github.com/opensearch-project/OpenSearch/pull/13100)) +- Implement mark() and markSupported() in class FilterStreamInput ([#13098](https://github.com/opensearch-project/OpenSearch/pull/13098)) ### Security -[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD [Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4a1162cf2558b..bce6ca0d49294 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,18 +1,18 @@ - [Contributing to OpenSearch](#contributing-to-opensearch) - - [First Things First](#first-things-first) - - [Ways to Contribute](#ways-to-contribute) +- [First Things First](#first-things-first) +- [Ways to Contribute](#ways-to-contribute) - [Bug Reports](#bug-reports) - [Feature Requests](#feature-requests) - [Documentation Changes](#documentation-changes) - [Contributing Code](#contributing-code) - - [Developer Certificate of Origin](#developer-certificate-of-origin) - - [Changelog](#changelog) - - [Review Process](#review-process) - - [Troubleshooting Failing Builds](#troubleshooting-failing-builds) +- [Developer Certificate of Origin](#developer-certificate-of-origin) +- [Changelog](#changelog) +- [Review Process](#review-process) + - [Tips for Success](#tips-for-success) # Contributing to OpenSearch -OpenSearch is a community project that is built and maintained by people just like **you**. We're glad you're interested in helping out. There are several different ways you can do it, but before we talk about that, let's talk about how to get started. +OpenSearch is a community project built and maintained by people just like **you**. We're glad you're interested in helping out. There are several different ways you can do it, but before we talk about that, let's talk about how to get started. ## First Things First @@ -30,9 +30,9 @@ Ugh! Bugs! A bug is when software behaves in a way that you didn't expect and the developer didn't intend. To help us understand what's going on, we first want to make sure you're working from the latest version. Please make sure you're testing against the [latest version](https://github.com/opensearch-project/OpenSearch). -Once you've confirmed that the bug still exists in the latest version, you'll want to check to make sure it's not something we already know about on the [open issues GitHub page](https://github.com/opensearch-project/OpenSearch/issues). +Once you've confirmed that the bug still exists in the latest version, you'll want to check that the bug is not something we already know about. A good way to figure this out is to search for your bug on the [open issues GitHub page](https://github.com/opensearch-project/OpenSearch/issues). -If you've upgraded to the latest version and you can't find it in our open issues list, then you'll need to tell us how to reproduce it. To make the behavior as clear as possible, please provided your steps as `curl` commands which we can copy and paste into a terminal to run it locally, for example: +If you've upgraded to the latest version and you can't find it in our open issues list, then you'll need to tell us how to reproduce it. 
To make the behavior as clear as possible, please provide your steps as `curl` commands which we can copy and paste into a terminal to run them locally, for example: ```sh # delete the index curl -x PUT localhost:9200/test/test/1 -d '{ curl .... ``` -Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. The easier it is for us to recreate your problem, the faster it is likely to be fixed. +Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. The easier it is for us to recreate your problem, the faster it is likely to be fixed. It is almost always helpful to provide the basic details of your cluster configuration alongside your reproduction steps. ### Feature Requests -If you've thought of a way that OpenSearch could be better, we want to hear about it. We track feature requests using GitHub, so please feel free to open an issue which describes the feature you would like to see, why you need it, and how it should work. +If you've thought of a way that OpenSearch could be better, we want to hear about it. We track feature requests using GitHub, so please feel free to open an issue which describes the feature you would like to see, why you need it, and how it should work. After opening an issue, the fastest way to see your change made is to open a pull request following the requested changes you detailed in your issue. You can learn more about opening a pull request in the [contributing code section](#contributing-code). ### Documentation Changes @@ -146,12 +146,12 @@ Adding in the change is two step process: 2. Update the entry for your change in [`CHANGELOG.md`](CHANGELOG.md) and make sure that you reference the pull request there. ### Where should I put my CHANGELOG entry? -Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch will contain sections for the _next major_ and _next minor_ releases. Your entry should go into the section it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will likely be in that section. +Please review the [branching strategy](https://github.com/opensearch-project/.github/blob/main/RELEASING.md#opensearch-branching) document. The changelog on the `main` branch is split across **two files**: `CHANGELOG.md`, which corresponds to unreleased changes intended for the _next minor_ release, and `CHANGELOG-3.0.md`, which corresponds to unreleased changes intended for the _next major_ release. Your entry should go into the file corresponding to the version it is intended to be released in. In practice, most changes to `main` will be backported to the next minor release so most entries will be in the `CHANGELOG.md` file. The following examples assume the _next major_ release on main is 3.0, the _next minor_ release is 2.5, and the _current_ release is 2.4. -- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry). -- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` on main, do not backport. 
+- **Add a new feature to release in next minor:** Add a changelog entry to `[Unreleased 2.x]` in CHANGELOG.md on main, then backport to 2.x (including the changelog entry). +- **Introduce a breaking API change to release in next major:** Add a changelog entry to `[Unreleased 3.0]` in CHANGELOG-3.0.md on main, do not backport. - **Upgrade a dependency to fix a CVE:** Add a changelog entry to `[Unreleased 2.x]` on main, then backport to 2.x (including the changelog entry), then backport to 2.4 and ensure the changelog entry is added to `[Unreleased 2.4.1]`. ## Review Process @@ -164,13 +164,19 @@ If we accept the PR, a [maintainer](MAINTAINERS.md) will merge your change and u If we reject the PR, we will close the pull request with a comment explaining why. This decision isn't always final: if you feel we have misunderstood your intended change or otherwise think that we should reconsider then please continue the conversation with a comment on the PR and we'll do our best to address any further points you raise. -## Troubleshooting Failing Builds +### Tips for Success -The OpenSearch testing framework offers many capabilities but exhibits significant complexity (it does lot of randomization internally to cover as many edge cases and variations as possible). Unfortunately, this posses a challenge by making it harder to discover important issues/bugs in straightforward way and may lead to so called flaky tests - the tests which flip randomly from success to failure without any code changes. +We have a lot of mechanisms to help expedite the path to an accepted PR. Here are some tips for success: +1. *Minimize BWC guarantees*: The first PR review cycle heavily focuses on the public facing APIs. This is what we have to "guarantee" as non-breaking for [bwc across major versions](./DEVELOPER_GUIDE.md#backwards-compatibility). +2. *Do not copy non-compliant code*: Ensure that code is APLv2 compatible. This means that you have not copied any code from other sources unless that code is also APLv2 compatible. +3. *Utilize feature flags*: Features that are safeguarded behind feature flags are more likely to be merged and backported, as they come with an additional layer of protection. Refer to this [example PR](https://github.com/opensearch-project/OpenSearch/pull/4959) for implementation details. +4. *Use appropriate Java tags*: + - `@opensearch.internal`: Marks internal classes subject to rapid changes. + - `@opensearch.api`: Marks public-facing API classes with backward compatibility guarantees. + - `@opensearch.experimental`: Indicates rapidly changing [experimental code](./DEVELOPER_GUIDE.md#experimental-development). +5. *Employ sandbox for significant core changes*: Any new features or enhancements that make changes to core classes (e.g., search phases, codecs, or specialized Lucene APIs) are more likely to be merged if they are sandboxed. This can only be enabled on the Java CLI (`-Dsandbox.enabled=true`). +6. *Micro-benchmark critical path*: This is a lesser-known mechanism, but if you have critical path changes you're afraid will impact performance (the changes touch the garbage collector, heap, direct memory, or CPU) then including a [microbenchmark](https://github.com/opensearch-project/OpenSearch/tree/main/benchmarks) with your PR (and JFR or flamegraph results in the description) is a *GREAT IDEA* and will help expedite the review process. +7. 
*Test rigorously*: Ensure thorough testing ([OpenSearchTestCase](./test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java) for unit tests, [OpenSearchIntegTestCase](./test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java) for integration & cluster tests, [OpenSearchRestTestCase](./test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java) for testing REST endpoint interfaces, and yaml tests with [ClientYamlTestSuiteIT](./rest-api-spec/src/yamlRestTest/java/org/opensearch/test/rest/ClientYamlTestSuiteIT.java) for REST integration tests). + +In general, adding more guardrails to your changes increases the likelihood of swift PR acceptance. We can always relax these guardrails in smaller follow-up PRs. Reverting a GA feature is much more difficult. Check out the [DEVELOPER_GUIDE](./DEVELOPER_GUIDE.md#submitting-changes) for more useful tips. -If your pull request reports a failing test(s) on one of the checks, please: - look if there is an existing [issue](https://github.com/opensearch-project/OpenSearch/issues) reported for the test in question - if not, please make sure this is not caused by your changes, run the failing test(s) locally for some time - if you are sure the failure is not related, please open a new [bug](https://github.com/opensearch-project/OpenSearch/issues/new?assignees=&labels=bug%2C+untriaged&projects=&template=bug_template.md&title=%5BBUG%5D) with `flaky-test` label - add a comment referencing the issue(s) or bug report(s) to your pull request explaining the failing build(s) - as a bonus point, try to contribute by fixing the flaky test(s) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0562ecc6ee61b..c68cc0406d3a6 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -102,14 +102,14 @@ dependencies { api localGroovy() - api 'commons-codec:commons-codec:1.16.0' - api 'org.apache.commons:commons-compress:1.25.0' + api "commons-codec:commons-codec:${props.getProperty('commonscodec')}" + api "org.apache.commons:commons-compress:${props.getProperty('commonscompress')}" api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0' api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' - api 'commons-io:commons-io:2.15.1' + api "commons-io:commons-io:${props.getProperty('commonsio')}" api "net.java.dev.jna:jna:5.14.0" api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 5c9cd25bb79ad..c60f42dc1bb89 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -40,9 +40,10 @@ httpclient = 4.5.14 httpcore = 4.4.16 httpasyncclient = 4.1.5 commonslogging = 1.2 -commonscodec = 1.15 -commonslang = 3.13.0 -commonscompress = 1.24.0 +commonscodec = 1.16.1 +commonslang = 3.14.0 +commonscompress = 1.26.1 +commonsio = 2.16.0 # plugin dependencies aws = 2.20.86 reactivestreams = 1.0.4 diff --git a/client/rest/licenses/commons-codec-1.15.jar.sha1 b/client/rest/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/client/rest/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/rest/licenses/commons-codec-1.16.1.jar.sha1 b/client/rest/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 
0000000000000..6b8803089c6d7 --- /dev/null +++ b/client/rest/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java index 81fe77ddcfbed..4e6fd6f3d6f9d 100644 --- a/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java +++ b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java @@ -16,6 +16,7 @@ import org.apache.hc.core5.util.Asserts; import java.io.IOException; +import java.nio.Buffer; import java.nio.ByteBuffer; import java.nio.channels.Channels; import java.nio.channels.ReadableByteChannel; @@ -141,7 +142,7 @@ public void produce(final DataStreamChannel channel) throws IOException { } } if (byteBuffer.position() > 0) { - byteBuffer.flip(); + ((Buffer) byteBuffer).flip(); channel.write(byteBuffer); byteBuffer.compact(); } diff --git a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 b/client/sniffer/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/client/sniffer/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/client/sniffer/licenses/commons-codec-1.16.1.jar.sha1 b/client/sniffer/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/client/sniffer/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index f40fb1c4b0a9f..446dbaad8466e 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -44,7 +44,7 @@ dependencies { testRuntimeOnly("com.google.guava:guava:${versions.guava}") { transitive = false } - + api "commons-io:commons-io:${versions.commonsio}" implementation "org.apache.commons:commons-compress:${versions.commonscompress}" } @@ -104,5 +104,8 @@ thirdPartyAudit.ignoreMissingClasses( 'org.tukaani.xz.MemoryLimitException', 'org.tukaani.xz.UnsupportedOptionsException', 'org.tukaani.xz.XZ', - 'org.tukaani.xz.XZOutputStream' + 'org.tukaani.xz.XZOutputStream', + 'org.apache.commons.codec.digest.PureJavaCrc32C', + 'org.apache.commons.codec.digest.XXHash32', + 'org.apache.commons.lang3.reflect.FieldUtils' ) diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 deleted file mode 100644 index 23999d1bfbde4..0000000000000 --- a/distribution/tools/plugin-cli/licenses/commons-compress-1.24.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 new file mode 100644 index 0000000000000..912bda85de18a --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-compress-1.26.1.jar.sha1 @@ -0,0 +1 @@ +44331c1130c370e726a2e1a3e6fba6d2558ef04a \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-io-2.16.0.jar.sha1 b/distribution/tools/plugin-cli/licenses/commons-io-2.16.0.jar.sha1 new file mode 
100644 index 0000000000000..6a7b638719fa3 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-io-2.16.0.jar.sha1 @@ -0,0 +1 @@ +27875a7935f1ddcc13267eb6fae1f719e0409572 \ No newline at end of file diff --git a/distribution/tools/plugin-cli/licenses/commons-io-LICENSE.txt b/distribution/tools/plugin-cli/licenses/commons-io-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-io-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/distribution/tools/plugin-cli/licenses/commons-io-NOTICE.txt b/distribution/tools/plugin-cli/licenses/commons-io-NOTICE.txt new file mode 100644 index 0000000000000..a6b77d1eb6089 --- /dev/null +++ b/distribution/tools/plugin-cli/licenses/commons-io-NOTICE.txt @@ -0,0 +1,5 @@ +Apache Commons IO +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 56df46ae94d44..f312c484a4842 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -101,6 +101,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_12_1 = new Version(2120199, org.apache.lucene.util.Version.LUCENE_9_9_2); public static final Version V_2_13_0 = new Version(2130099, org.apache.lucene.util.Version.LUCENE_9_10_0); + public static final Version V_2_13_1 = new Version(2130199, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_2_14_0 = new Version(2140099, org.apache.lucene.util.Version.LUCENE_9_10_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0); public static final Version CURRENT = V_3_0_0; diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java index a6e49567ac7d5..ee67fd4f271a2 100644 --- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java +++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/FilterStreamInput.java @@ -80,6 +80,16 @@ public void reset() throws IOException { delegate.reset(); } + @Override + public void mark(int readlimit) { + delegate.mark(readlimit); + } + + @Override + public boolean markSupported() { + return delegate.markSupported(); + } + @Override public int read() throws IOException { return delegate.read(); diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java 
diff --git a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java
index a044586e095e3..ab6dfbc2feb25 100644
--- a/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java
+++ b/libs/core/src/test/java/org/opensearch/core/common/io/stream/FilterStreamInputTests.java
@@ -12,6 +12,9 @@
 import org.opensearch.core.common.bytes.BytesReference;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import static org.hamcrest.Matchers.is;
 
 /** test the FilterStreamInput using the same BaseStreamTests */
 public class FilterStreamInputTests extends BaseStreamTests {
@@ -21,4 +24,24 @@ protected StreamInput getStreamInput(BytesReference bytesReference) throws IOExc
         return new FilterStreamInput(StreamInput.wrap(br.bytes, br.offset, br.length)) {
         };
     }
+
+    public void testMarkAndReset() throws IOException {
+        FilterStreamInputTests filterStreamInputTests = new FilterStreamInputTests();
+
+        ByteBuffer buffer = ByteBuffer.wrap(new byte[20]);
+        for (int i = 0; i < buffer.limit(); i++) {
+            buffer.put((byte) i);
+        }
+        buffer.rewind();
+        BytesReference bytesReference = BytesReference.fromByteBuffer(buffer);
+        StreamInput streamInput = filterStreamInputTests.getStreamInput(bytesReference);
+        streamInput.read();
+        assertThat(streamInput.markSupported(), is(true));
+        streamInput.mark(-1);
+        int int1 = streamInput.read();
+        int int2 = streamInput.read();
+        streamInput.reset();
+        assertEquals(int1, streamInput.read());
+        assertEquals(int2, streamInput.read());
+    }
 }
diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml
index 2dfa17174b139..d7be48a92908c 100644
--- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml
+++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/70_bulk.yml
@@ -144,3 +144,26 @@ teardown:
 
   - is_false: _source.field1
   - match: {_source.field2: value2}
+
+# related issue: https://github.com/opensearch-project/OpenSearch/issues/12854
+---
+"Test bulk honors pipeline in update action with upsert":
+  - skip:
+      version: " - 2.13.99"
+      reason: "fixed in 2.14.0"
+
+  - do:
+      bulk:
+        refresh: true
+        body:
+          - '{"update": {"_index": "test_index", "_id": "test_id3", "pipeline": "pipeline1"}}'
+          - '{"upsert": {"f1": "v2", "f2": 47}, "doc": {"x": 1}}'
+
+  - match: { errors: false }
+  - match: { items.0.update.result: created }
+
+  - do:
+      get:
+        index: test_index
+        id: test_id3
+  - match: { _source: {"f1": "v2", "f2": 47, "field1": "value1"}}
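The new YAML test pins down the fix for issue 12854: when a bulk update action names a pipeline and the target document does not yet exist, the pipeline now runs on the upserted document, which is why field1 (set by pipeline1) appears in the final _source. Roughly the same scenario from the Java client side, as a hedged sketch (not code from this change; attaching the pipeline via upsertRequest().setPipeline(...) is an assumption about where it belongs, and imports of BulkRequest, UpdateRequest, WriteRequest, and java.util.Map are assumed):

    // Sketch: bulk update-with-upsert that should pass through an ingest pipeline.
    BulkRequest bulk = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
    UpdateRequest update = new UpdateRequest("test_index", "test_id3")
        .doc(Map.of("x", 1))                          // applied when test_id3 already exists
        .upsert(Map.of("f1", "v2", "f2", 47));        // indexed when it does not
    update.upsertRequest().setPipeline("pipeline1");  // assumed attach point for the pipeline
    bulk.add(update);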
"org.opensearch.ingest.txt")); map.put(IngestScript.CONTEXT, ingest); + // Functions available to derived fields + List derived = new ArrayList<>(Allowlist.BASE_ALLOWLISTS); + derived.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.derived.txt")); + map.put(DerivedFieldScript.CONTEXT, derived); + allowlists = map; } diff --git a/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.derived.txt b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.derived.txt new file mode 100644 index 0000000000000..9a3dd4894b286 --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/opensearch/painless/spi/org.opensearch.derived.txt @@ -0,0 +1,17 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. +# + +# This file contains an allowlist for functions to be used in derived field context + +class org.opensearch.script.DerivedFieldScript @no_import { +} + +static_import { + void emit(org.opensearch.script.DerivedFieldScript, Object) bound_to org.opensearch.script.ScriptEmitValues$EmitSingle + void emit(org.opensearch.script.DerivedFieldScript, double, double) bound_to org.opensearch.script.ScriptEmitValues$GeoPoint +} diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java new file mode 100644 index 0000000000000..2340e5b238ebb --- /dev/null +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java @@ -0,0 +1,227 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java
new file mode 100644
index 0000000000000..2340e5b238ebb
--- /dev/null
+++ b/modules/lang-painless/src/test/java/org/opensearch/painless/DerivedFieldScriptTests.java
@@ -0,0 +1,227 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.painless;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.memory.MemoryIndex;
+import org.opensearch.common.collect.Tuple;
+import org.opensearch.common.geo.GeoPoint;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.index.fielddata.IndexGeoPointFieldData;
+import org.opensearch.index.fielddata.IndexNumericFieldData;
+import org.opensearch.index.fielddata.LeafGeoPointFieldData;
+import org.opensearch.index.fielddata.LeafNumericFieldData;
+import org.opensearch.index.fielddata.MultiGeoPointValues;
+import org.opensearch.index.fielddata.SortedNumericDoubleValues;
+import org.opensearch.index.fielddata.plain.AbstractLeafGeoPointFieldData;
+import org.opensearch.index.fielddata.plain.LeafDoubleFieldData;
+import org.opensearch.index.mapper.GeoPointFieldMapper.GeoPointFieldType;
+import org.opensearch.index.mapper.MapperService;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType;
+import org.opensearch.index.mapper.NumberFieldMapper.NumberType;
+import org.opensearch.painless.spi.Allowlist;
+import org.opensearch.painless.spi.AllowlistLoader;
+import org.opensearch.script.DerivedFieldScript;
+import org.opensearch.script.ScriptContext;
+import org.opensearch.script.ScriptException;
+import org.opensearch.search.lookup.LeafSearchLookup;
+import org.opensearch.search.lookup.SearchLookup;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DerivedFieldScriptTests extends ScriptTestCase {
+
+    private static PainlessScriptEngine SCRIPT_ENGINE;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+
+        // Adding derived field script to the contexts for the script engine
+        Map<ScriptContext<?>, List<Allowlist>> contexts = newDefaultContexts();
+        List<Allowlist> allowlists = new ArrayList<>(Allowlist.BASE_ALLOWLISTS);
+        allowlists.add(AllowlistLoader.loadFromResourceFiles(Allowlist.class, "org.opensearch.derived.txt"));
+        contexts.put(DerivedFieldScript.CONTEXT, allowlists);
+
+        SCRIPT_ENGINE = new PainlessScriptEngine(Settings.EMPTY, contexts);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        SCRIPT_ENGINE = null;
+    }
+
+    @Override
+    protected PainlessScriptEngine getEngine() {
+        return SCRIPT_ENGINE;
+    }
+
+    private DerivedFieldScript.LeafFactory compile(String expression, SearchLookup lookup) {
+        DerivedFieldScript.Factory factory = SCRIPT_ENGINE.compile(
+            "derived_script_test",
+            expression,
+            DerivedFieldScript.CONTEXT,
+            Collections.emptyMap()
+        );
+        return factory.newFactory(Collections.emptyMap(), lookup);
+    }
+
+    public void testEmittingDoubleField() throws IOException {
+        // Mocking field value to be returned
+        NumberFieldType fieldType = new NumberFieldType("test_double_field", NumberType.DOUBLE);
+        MapperService mapperService = mock(MapperService.class);
+        when(mapperService.fieldType("test_double_field")).thenReturn(fieldType);
+
+        SortedNumericDoubleValues doubleValues = mock(SortedNumericDoubleValues.class);
+        when(doubleValues.docValueCount()).thenReturn(1);
+        when(doubleValues.advanceExact(anyInt())).thenReturn(true);
+        when(doubleValues.nextValue()).thenReturn(2.718);
+
+        LeafNumericFieldData atomicFieldData = mock(LeafDoubleFieldData.class); // SortedNumericDoubleFieldData
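+        // The stubbed chain below mirrors how doc values reach the script at runtime:
+        // SearchLookup asks the IndexNumericFieldData to load() per-leaf field data, and the
+        // SortedNumericDoubleValues it exposes back doc['test_double_field'].value.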
+        when(atomicFieldData.getDoubleValues()).thenReturn(doubleValues);
+
+        IndexNumericFieldData fieldData = mock(IndexNumericFieldData.class); // SortedNumericIndexFieldData
+        when(fieldData.getFieldName()).thenReturn("test_double_field");
+        when(fieldData.load(any())).thenReturn(atomicFieldData);
+
+        SearchLookup lookup = new SearchLookup(mapperService, (ignored, searchLookup) -> fieldData);
+
+        // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked
+        MemoryIndex index = new MemoryIndex();
+        LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0);
+
+        // Execute the script
+        DerivedFieldScript script = compile("emit(doc['test_double_field'].value)", lookup).newInstance(leafReaderContext);
+        script.setDocument(1);
+        script.execute();
+
+        List<Object> result = script.getEmittedValues();
+        assertEquals(List.of(2.718), result);
+    }
+
+    public void testEmittingGeoPoint() throws IOException {
+        // Mocking field value to be returned
+        GeoPointFieldType fieldType = new GeoPointFieldType("test_geo_field");
+        MapperService mapperService = mock(MapperService.class);
+        when(mapperService.fieldType("test_geo_field")).thenReturn(fieldType);
+
+        MultiGeoPointValues geoPointValues = mock(MultiGeoPointValues.class);
+        when(geoPointValues.docValueCount()).thenReturn(1);
+        when(geoPointValues.advanceExact(anyInt())).thenReturn(true);
+        when(geoPointValues.nextValue()).thenReturn(new GeoPoint(5, 8));
+
+        LeafGeoPointFieldData atomicFieldData = mock(AbstractLeafGeoPointFieldData.class); // LatLonPointDVLeafFieldData
+        when(atomicFieldData.getGeoPointValues()).thenReturn(geoPointValues);
+
+        IndexGeoPointFieldData fieldData = mock(IndexGeoPointFieldData.class);
+        when(fieldData.getFieldName()).thenReturn("test_geo_field");
+        when(fieldData.load(any())).thenReturn(atomicFieldData);
+
+        SearchLookup lookup = new SearchLookup(mapperService, (ignored, searchLookup) -> fieldData);
+
+        // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked
+        MemoryIndex index = new MemoryIndex();
+        LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0);
+
+        // Execute the script
+        DerivedFieldScript script = compile("emit(doc['test_geo_field'].value.getLat(), doc['test_geo_field'].value.getLon())", lookup)
+            .newInstance(leafReaderContext);
+        script.setDocument(1);
+        script.execute();
+
+        List<Object> result = script.getEmittedValues();
+        assertEquals(List.of(new Tuple<>(5.0, 8.0)), result);
+    }
+
+    public void testEmittingMultipleValues() throws IOException {
+        SearchLookup lookup = mock(SearchLookup.class);
+
+        // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked
+        MemoryIndex index = new MemoryIndex();
+        LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0);
+
+        LeafSearchLookup leafSearchLookup = mock(LeafSearchLookup.class);
+        when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafSearchLookup);
+
+        // Execute the script
+        DerivedFieldScript script = compile(
+            "def l = new ArrayList(); l.add('test'); l.add('multiple'); l.add('values'); for (String x : l) emit(x)",
+            lookup
+        ).newInstance(leafReaderContext);
+        script.setDocument(1);
+        script.execute();
+
+        List<Object> result = script.getEmittedValues();
+        assertEquals(List.of("test", "multiple", "values"), result);
+    }
+
+    public void testExceedingByteSizeLimit() throws IOException {
+        SearchLookup lookup = mock(SearchLookup.class);
+
+        // We don't need a real index, just need to construct a LeafReaderContext which cannot be mocked
+        MemoryIndex index = new MemoryIndex();
+        LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0);
+
+        LeafSearchLookup leafSearchLookup = mock(LeafSearchLookup.class);
+        when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafSearchLookup);
+
+        // Emitting a large string to exceed the byte size limit
+        DerivedFieldScript stringScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit('a' + i);", lookup).newInstance(
+            leafReaderContext
+        );
+        expectThrows(ScriptException.class, () -> {
+            stringScript.setDocument(1);
+            stringScript.execute();
+        });
+
+        // Emitting an integer to check byte size limit
+        DerivedFieldScript intScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(42)", lookup).newInstance(leafReaderContext);
+        expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> {
+            intScript.setDocument(1);
+            intScript.execute();
+        });
+
+        // Emitting a long to check byte size limit
+        DerivedFieldScript longScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(1234567890123456789L)", lookup).newInstance(
+            leafReaderContext
+        );
+        expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> {
+            longScript.setDocument(1);
+            longScript.execute();
+        });
+
+        // Emitting a double to check byte size limit
+        DerivedFieldScript doubleScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(3.14159)", lookup).newInstance(
+            leafReaderContext
+        );
+        expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> {
+            doubleScript.setDocument(1);
+            doubleScript.execute();
+        });
+
+        // Emitting a GeoPoint to check byte size limit
+        DerivedFieldScript geoPointScript = compile("for (int i = 0; i < 1024 * 1024; i++) emit(1.23, 4.56);", lookup).newInstance(
+            leafReaderContext
+        );
+        expectThrows(ScriptException.class, "Expected IllegalStateException for exceeding byte size limit", () -> {
+            geoPointScript.setDocument(1);
+            geoPointScript.execute();
+        });
+    }
+}
diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1
deleted file mode 100644
index 62d99837b87e1..0000000000000
--- a/plugins/analysis-phonetic/licenses/commons-codec-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/analysis-phonetic/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1 b/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1
deleted file mode 100644
index 62d99837b87e1..0000000000000
--- a/plugins/crypto-kms/licenses/commons-codec-1.15.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d
\ No newline at end of file
diff --git a/plugins/crypto-kms/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/crypto-kms/licenses/commons-codec-1.16.1.jar.sha1
new file mode 100644
index 0000000000000..6b8803089c6d7
--- /dev/null
+++ b/plugins/crypto-kms/licenses/commons-codec-1.16.1.jar.sha1
@@ -0,0 +1 @@
+47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 68bda0933daa7..7f34cec94499c 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -53,7 +53,7 @@ dependencies { api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-lang:commons-lang:2.6" - api "commons-io:commons-io:2.16.0" + api "commons-io:commons-io:${versions.commonsio}" api 'javax.mail:mail:1.4.7' api 'javax.inject:javax.inject:1' api "com.sun.jersey:jersey-client:${versions.jersey}" diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/discovery-azure-classic/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/discovery-ec2/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/discovery-ec2/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/discovery-gce/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/discovery-gce/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/discovery-gce/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index a931f45802318..4749aa911886d 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,10 +38,10 @@ opensearchplugin { } versions << [ - 'tika' : '2.6.0', - 'pdfbox': '2.0.27', - 'poi' : '5.2.3', - 'mime4j': '0.8.8' + 'tika' : '2.9.2', + 'pdfbox': '2.0.31', + 'poi' : '5.2.5', + 'mime4j': '0.8.11' ] dependencies { @@ -50,6 +50,16 @@ dependencies { api "org.apache.tika:tika-parsers:${versions.tika}" // Required for the various document parsers api 
"org.apache.tika:tika-parsers-standard-package:${versions.tika}" + api "org.apache.tika:tika-parser-apple-module:${versions.tika}" + api "org.apache.tika:tika-parser-html-module:${versions.tika}" + api "org.apache.tika:tika-parser-microsoft-module:${versions.tika}" + api "org.apache.tika:tika-parser-miscoffice-module:${versions.tika}" + api "org.apache.tika:tika-parser-pdf-module:${versions.tika}" + api "org.apache.tika:tika-parser-text-module:${versions.tika}" + api "org.apache.tika:tika-parser-xml-module:${versions.tika}" + // Utilities consumed by document parsers + api "org.apache.tika:tika-parser-xmp-commons:${versions.tika}" + api "org.apache.tika:tika-parser-zip-commons:${versions.tika}" // Required for language detection api "org.apache.tika:tika-langdetect-optimaize:${versions.tika}" // Optimaize libraries/dependencies @@ -57,7 +67,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.16.0' + api "commons-io:commons-io:${versions.commonsio}" api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.11.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.11.jar.sha1 new file mode 100644 index 0000000000000..82d9bf2617ce6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.11.jar.sha1 @@ -0,0 +1 @@ +6d1eb5f7b84eaa9d38fca13b761f01c693aef3da \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 deleted file mode 100644 index 77c36691d36b5..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7330de23c52f71617cbec7f1d2760dae32e687cd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1 new file mode 100644 index 0000000000000..7a494aba6a231 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.11.jar.sha1 @@ -0,0 +1 @@ +f0d42ab9a5832b5f5d05afc004b31245b838e0fc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 deleted file mode 100644 index fb9c5fed27162..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e76715563a6bd150f84ccb0adb920aec8faf4779 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 
b/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 deleted file mode 100644 index 23999d1bfbde4..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.24.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 new file mode 100644 index 0000000000000..912bda85de18a --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.26.1.jar.sha1 @@ -0,0 +1 @@ +44331c1130c370e726a2e1a3e6fba6d2558ef04a \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 deleted file mode 100644 index d0c2f2486ee1f..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-lang3-3.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-lang3-3.14.0.jar.sha1 new file mode 100644 index 0000000000000..d783e07e40902 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-lang3-3.14.0.jar.sha1 @@ -0,0 +1 @@ +1ed471194b02f2c6cb734a0cd6f6f107c673afae \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 deleted file mode 100644 index d578dffbfa3f6..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d08c064d18b2b149da937d15c0d1708cba03f29d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 new file mode 100644 index 0000000000000..d45d45a66e072 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.31.jar.sha1 @@ -0,0 +1 @@ +96999ecdb7324bf718b88724818fa62f81286c36 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 deleted file mode 100644 index 4f670b7f95e8c..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -416a9dfce3714116bfdf793b15368df04266845f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 new file mode 100644 index 0000000000000..fa256ed9a65d2 --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.31.jar.sha1 @@ -0,0 +1 @@ +29b25053099bc30784a766ccb821417e06f4b8a1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 deleted file mode 100644 index 3d8b3daf606ad..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2fb22ae74ad5aea6af1a9c64b9542f2ccf348604 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..0eca17726eb0b --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-5.2.5.jar.sha1 @@ -0,0 +1 @@ +7e00f6b2f76375fe89022d5a7db8acb71cbd55f5 \ No newline at end of file diff --git 
a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 deleted file mode 100644 index 8371593cf0841..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02efd11c940adb18c03eb9ce7ad88fc40ee6a196 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..6b14be4461425 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.5.jar.sha1 @@ -0,0 +1 @@ +df9f2c52371eeba24db8ea8cafa77285c3cc0742 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 deleted file mode 100644 index 5c6365876b7be..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -db113c8e9051b0ff967f4911fa20336c8325a7c5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..f9a473173a297 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.5.jar.sha1 @@ -0,0 +1 @@ +eaa61452d8f0d13080fbb4757a392f09f90e4c49 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 deleted file mode 100644 index 3c8f92498f1a4..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a7fce47e22b7fedb1b277347ff4fe36d6eda50d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 new file mode 100644 index 0000000000000..68665ddafd7d8 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.5.jar.sha1 @@ -0,0 +1 @@ +fc600cf765a49d73935a6e48a5b84f4abcdd0518 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 deleted file mode 100644 index c66c2f3f39401..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6ed6356dd4a9bd269d873f65494376685e6192e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..80635a63d29fe --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.9.2.jar.sha1 @@ -0,0 +1 @@ +796a21391780339e3d4862626339b49df170024e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 deleted file mode 100644 index e7bc59bb5ae49..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -72b784a7bdab0ffde005fa64d15e3f077331d6fc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..a4bb6d48c6a08 --- /dev/null +++ 
b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.9.2.jar.sha1 @@ -0,0 +1 @@ +7a48a287e464b456a85c79f318d7bad7db201518 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..dbaee880d1251 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-apple-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +758dac27c246c51b019562bab7e266d2da6a6e01 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..b4806746301ef --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-html-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +47f6a4c46b92616d14e82cd7ad4d05cb43077b83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..da1ae42bac652 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-microsoft-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +235a20823c02c699ce3d57f3d6b9550db05d91a9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..7ceed9e1643b8 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-miscoffice-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +7688a4220d07c32b505230479f957cd495c0bef2 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..e780c1b92d525 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-pdf-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +4d0f0e3f6eff184040402094f4fabbb3c5c7d09f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..6e56fcffc5f88 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-text-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +b3a93e538ba6cb4066aba96d629febf181ec9f92 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..27062077b92bf --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-xml-module-2.9.2.jar.sha1 @@ -0,0 +1 @@ +ff707716c0c4748ffeb21996aefa8d269b3eab5b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..396e2655b14db --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-xmp-commons-2.9.2.jar.sha1 @@ -0,0 +1 @@ +69104107ff85194df5acf682178128771863e442 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 new file mode 100644 
index 0000000000000..bda62033e4e8c --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parser-zip-commons-2.9.2.jar.sha1 @@ -0,0 +1 @@ +2fcea85a56f93a5c0cb81f3d6dd8673f3d81c598 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 deleted file mode 100644 index 83c0777fcbe8a..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -00980e70b1df13c1236b750f0ca1462edd5d7417 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 new file mode 100644 index 0000000000000..bb76974b6344e --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.9.2.jar.sha1 @@ -0,0 +1 @@ +c8408deb51fa617ef4e912b4d161712e695d3a29 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 deleted file mode 100644 index d0c2f2486ee1f..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-3.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.14.0.jar.sha1 new file mode 100644 index 0000000000000..d783e07e40902 --- /dev/null +++ b/plugins/repository-azure/licenses/commons-lang3-3.14.0.jar.sha1 @@ -0,0 +1 @@ +1ed471194b02f2c6cb734a0cd6f6f107c673afae \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/repository-gcs/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/repository-gcs/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6faf0383d3ba2..2c51bb4cbea53 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -75,7 +75,7 @@ dependencies { api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.10.1' - api 'commons-io:commons-io:2.16.0' + api "commons-io:commons-io:${versions.commonsio}" api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' diff --git a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git 
a/plugins/repository-hdfs/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 deleted file mode 100644 index 23999d1bfbde4..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-compress-1.24.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4b1b5a3d9573b2970fddab236102c0a4d27d35e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1 new file mode 100644 index 0000000000000..912bda85de18a --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-compress-1.26.1.jar.sha1 @@ -0,0 +1 @@ +44331c1130c370e726a2e1a3e6fba6d2558ef04a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 deleted file mode 100644 index 62d99837b87e1..0000000000000 --- a/plugins/repository-s3/licenses/commons-codec-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49d94806b6e3dc933dacbd8acb0fdbab8ebd1e5d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/commons-codec-1.16.1.jar.sha1 b/plugins/repository-s3/licenses/commons-codec-1.16.1.jar.sha1 new file mode 100644 index 0000000000000..6b8803089c6d7 --- /dev/null +++ b/plugins/repository-s3/licenses/commons-codec-1.16.1.jar.sha1 @@ -0,0 +1 @@ +47bd4d333fba53406f6c6c51884ddbca435c8862 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 25f361b40636e..1f23a09a047f2 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -285,7 +285,7 @@ public void readBlobAsync(String blobName, ActionListener listener) ); } } - listener.onResponse(new ReadContext(blobSize, blobPartInputStreamFutures, blobChecksum)); + listener.onResponse(new ReadContext.Builder(blobSize, blobPartInputStreamFutures).blobChecksum(blobChecksum).build()); } catch (Exception ex) { listener.onFailure(ex); } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java index 8c7e196d7c812..9e830c409a58b 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerMockClientTests.java @@ -30,7 +30,7 @@ import org.apache.lucene.store.IndexInput; import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.common.CheckedTriFunction; +import org.opensearch.common.CheckedConsumer; import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.stream.write.StreamContextSupplier; @@ -49,6 +49,7 @@ import org.opensearch.repositories.s3.async.AsyncTransferManager; import 
org.opensearch.test.OpenSearchTestCase; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import java.io.IOException; @@ -466,24 +467,30 @@ private void testWriteBlobByStreams(boolean expectException, boolean throwExcept exceptionRef.set(ex); countDownLatch.countDown(); }); - blobContainer.asyncBlobUpload(new WriteContext("write_blob_by_streams_max_retries", new StreamContextSupplier() { - @Override - public StreamContext supplyStreamContext(long partSize) { - return new StreamContext(new CheckedTriFunction() { - @Override - public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException { - InputStream inputStream = new OffsetRangeIndexInputStream(new ByteArrayIndexInput("desc", bytes), size, position); - openInputStreams.add(inputStream); - return new InputStreamContainer(inputStream, size, position); - } - }, partSize, calculateLastPartSize(bytes.length, partSize), calculateNumberOfParts(bytes.length, partSize)); - } - }, bytes.length, false, WritePriority.NORMAL, uploadSuccess -> { + + StreamContextSupplier streamContextSupplier = partSize -> new StreamContext((partNo, size, position) -> { + InputStream inputStream = new OffsetRangeIndexInputStream(new ByteArrayIndexInput("desc", bytes), size, position); + openInputStreams.add(inputStream); + return new InputStreamContainer(inputStream, size, position); + }, partSize, calculateLastPartSize(bytes.length, partSize), calculateNumberOfParts(bytes.length, partSize)); + + CheckedConsumer uploadFinalizer = uploadSuccess -> { assertTrue(uploadSuccess); if (throwExceptionOnFinalizeUpload) { throw new RuntimeException(); } - }, false, null), completionListener); + }; + + WriteContext writeContext = new WriteContext.Builder().fileName("write_blob_by_streams_max_retries") + .streamContextSupplier(streamContextSupplier) + .fileSize(bytes.length) + .failIfAlreadyExists(false) + .writePriority(WritePriority.NORMAL) + .uploadFinalizer(uploadFinalizer) + .doRemoteDataIntegrityCheck(false) + .build(); + + blobContainer.asyncBlobUpload(writeContext, completionListener); assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS)); // wait for completableFuture to finish @@ -516,24 +523,30 @@ private void testWriteBlobByStreamsLargeBlob(boolean expectException, boolean th countDownLatch.countDown(); }); List openInputStreams = new ArrayList<>(); - blobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() { - @Override - public StreamContext supplyStreamContext(long partSize) { - return new StreamContext(new CheckedTriFunction() { - @Override - public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException { - InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); - openInputStreams.add(inputStream); - return new InputStreamContainer(inputStream, size, position); - } - }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); - } - }, blobSize, false, WritePriority.NORMAL, uploadSuccess -> { + + StreamContextSupplier streamContextSupplier = partSize1 -> new StreamContext((partNo, size, position) -> { + InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); + openInputStreams.add(inputStream); + return new InputStreamContainer(inputStream, size, position); + }, partSize1, calculateLastPartSize(blobSize, partSize1), calculateNumberOfParts(blobSize, partSize1)); + + CheckedConsumer 
uploadFinalizer = uploadSuccess -> { assertTrue(uploadSuccess); if (throwExceptionOnFinalizeUpload) { throw new RuntimeException(); } - }, false, null), completionListener); + }; + + WriteContext writeContext = new WriteContext.Builder().fileName("write_large_blob") + .streamContextSupplier(streamContextSupplier) + .fileSize(blobSize) + .failIfAlreadyExists(false) + .writePriority(WritePriority.NORMAL) + .uploadFinalizer(uploadFinalizer) + .doRemoteDataIntegrityCheck(false) + .build(); + + blobContainer.asyncBlobUpload(writeContext, completionListener); assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS)); if (expectException || throwExceptionOnFinalizeUpload) { @@ -632,20 +645,23 @@ private void testLargeFilesRedirectedToSlowSyncClient(boolean expectException) t List openInputStreams = new ArrayList<>(); final S3BlobContainer s3BlobContainer = Mockito.spy(new S3BlobContainer(blobPath, blobStore)); - s3BlobContainer.asyncBlobUpload(new WriteContext("write_large_blob", new StreamContextSupplier() { - @Override - public StreamContext supplyStreamContext(long partSize) { - return new StreamContext(new CheckedTriFunction() { - @Override - public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException { - InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); - openInputStreams.add(inputStream); - return new InputStreamContainer(inputStream, size, position); - } - }, partSize, calculateLastPartSize(blobSize, partSize), calculateNumberOfParts(blobSize, partSize)); - } - }, blobSize, false, WritePriority.HIGH, uploadSuccess -> { assertTrue(uploadSuccess); }, false, null), completionListener); + StreamContextSupplier streamContextSupplier = partSize1 -> new StreamContext((partNo, size, position) -> { + InputStream inputStream = new OffsetRangeIndexInputStream(new ZeroIndexInput("desc", blobSize), size, position); + openInputStreams.add(inputStream); + return new InputStreamContainer(inputStream, size, position); + }, partSize1, calculateLastPartSize(blobSize, partSize1), calculateNumberOfParts(blobSize, partSize1)); + + WriteContext writeContext = new WriteContext.Builder().fileName("write_large_blob") + .streamContextSupplier(streamContextSupplier) + .fileSize(blobSize) + .failIfAlreadyExists(false) + .writePriority(WritePriority.HIGH) + .uploadFinalizer(Assert::assertTrue) + .doRemoteDataIntegrityCheck(false) + .build(); + + s3BlobContainer.asyncBlobUpload(writeContext, completionListener); assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS)); if (expectException) { assertNotNull(exceptionRef.get()); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java index ceab06bd051e9..8e25ba4d950ef 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -37,7 +37,6 @@ import org.apache.http.HttpStatus; import org.opensearch.cluster.metadata.RepositoryMetadata; -import org.opensearch.common.CheckedTriFunction; import org.opensearch.common.Nullable; import org.opensearch.common.StreamContext; import org.opensearch.common.SuppressForbidden; @@ -332,22 +331,24 @@ public void testWriteBlobByStreamsWithRetries() throws Exception { exceptionRef.set(ex); countDownLatch.countDown(); }); - 
blobContainer.asyncBlobUpload(new WriteContext("write_blob_by_streams_max_retries", new StreamContextSupplier() {
-            @Override
-            public StreamContext supplyStreamContext(long partSize) {
-                return new StreamContext(new CheckedTriFunction<Integer, Long, Long, InputStreamContainer>() {
-                    @Override
-                    public InputStreamContainer apply(Integer partNo, Long size, Long position) throws IOException {
-                        InputStream inputStream = new OffsetRangeIndexInputStream(new ByteArrayIndexInput("desc", bytes), size, position);
-                        openInputStreams.add(inputStream);
-                        return new InputStreamContainer(inputStream, size, position);
-                    }
-                }, partSize, calculateLastPartSize(bytes.length, partSize), calculateNumberOfParts(bytes.length, partSize));
-            }
-        }, bytes.length, false, WritePriority.NORMAL, Assert::assertTrue, false, null), completionListener);
+        StreamContextSupplier streamContextSupplier = partSize -> new StreamContext((partNo, size, position) -> {
+            InputStream inputStream = new OffsetRangeIndexInputStream(new ByteArrayIndexInput("desc", bytes), size, position);
+            openInputStreams.add(inputStream);
+            return new InputStreamContainer(inputStream, size, position);
+        }, partSize, calculateLastPartSize(bytes.length, partSize), calculateNumberOfParts(bytes.length, partSize));
+
+        WriteContext writeContext = new WriteContext.Builder().fileName("write_blob_by_streams_max_retries")
+            .streamContextSupplier(streamContextSupplier)
+            .fileSize(bytes.length)
+            .failIfAlreadyExists(false)
+            .writePriority(WritePriority.NORMAL)
+            .uploadFinalizer(Assert::assertTrue)
+            .doRemoteDataIntegrityCheck(false)
+            .build();
+
+        blobContainer.asyncBlobUpload(writeContext, completionListener);
 
         assertTrue(countDownLatch.await(5000, TimeUnit.SECONDS));
-        assertThat(countDown.isCountedDown(), is(true));
 
         openInputStreams.forEach(inputStream -> {
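The repository-s3 changes in this stretch swap the long positional WriteContext constructor for the fluent WriteContext.Builder, mirroring the ReadContext.Builder now used in S3BlobContainer.readBlobAsync above. A condensed sketch of the builder usage (illustrative only, not from this diff: totalBytes, lastPartSize, numberOfParts, completionListener, and the null-stream stand-in are placeholder values):

    // Illustrative: fluent construction of a WriteContext for an async multipart upload.
    WriteContext writeContext = new WriteContext.Builder()
        .fileName("example_blob")
        .streamContextSupplier(partSize -> new StreamContext(
            (partNo, size, position) -> new InputStreamContainer(InputStream.nullInputStream(), size, position),
            partSize, lastPartSize, numberOfParts))
        .fileSize(totalBytes)
        .failIfAlreadyExists(false)
        .writePriority(WritePriority.NORMAL)
        .uploadFinalizer(uploadSuccess -> { /* verify success, release resources */ })
        .doRemoteDataIntegrityCheck(false)
        .build();
    blobContainer.asyncBlobUpload(writeContext, completionListener);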
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json
index 1ba12b00c8178..b280452e313da 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json
@@ -1,7 +1,7 @@
 {
   "cat.aliases":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-alias.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-aliases/",
       "description":"Shows information about currently configured aliases to indices including filter and routing infos."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json
index 717c1c49808f6..a56d3e9e8a95e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.allocation.json
@@ -1,7 +1,7 @@
 {
   "cat.allocation":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-allocation.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-allocation/",
       "description":"Provides a snapshot of how many shards are allocated to each data node and how much disk space they are using."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json
index cd96038ad0693..3b999d98eb60d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.cluster_manager.json
@@ -1,7 +1,7 @@
 {
   "cat.cluster_manager":{
     "documentation":{
-      "url":"https://opensearch.org/docs/latest/opensearch/rest-api/cat/cat-master/",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-cluster_manager/",
       "description":"Returns information about the cluster-manager node."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json
index 8cfaddf8db83b..e85f399c29fd3 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json
@@ -1,7 +1,7 @@
 {
   "cat.count":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-count.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-count/",
       "description":"Provides quick access to the document count of the entire cluster, or individual indices."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json
index 9fbde4736b5ef..f2a3cfa8250cb 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json
@@ -1,7 +1,7 @@
 {
   "cat.fielddata":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-fielddata.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-field-data/",
       "description":"Shows how much heap memory is currently being used by fielddata on every data node in the cluster."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json
index 54ab6d6e5168c..c2f1771977cd5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.help.json
@@ -1,7 +1,7 @@
 {
   "cat.help":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/index/",
       "description":"Returns help for the Cat APIs."
     },
     "stability":"stable",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
index 2491ab309531d..aa36f8fd809aa 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.indices.json
@@ -1,7 +1,7 @@
 {
   "cat.indices":{
     "documentation":{
-      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-indices.html",
+      "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-indices/",
       "description":"Returns information about indices: number of primaries and replicas, document counts, disk size, ..."
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json index c8afa4cb17039..92a7e14b234ce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodeattrs.json @@ -1,7 +1,7 @@ { "cat.nodeattrs":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodeattrs.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-nodeattrs/", "description":"Returns information about custom node attributes." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json index abb13d015b4d5..63c70afcb9465 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.nodes.json @@ -1,7 +1,7 @@ { "cat.nodes":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-nodes.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-nodes/", "description":"Returns basic statistics about performance of cluster nodes." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json index 9c0edf8c53d90..d89ce8af72270 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.pending_tasks.json @@ -1,7 +1,7 @@ { "cat.pending_tasks":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-pending-tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-pending-tasks/", "description":"Returns a concise representation of the cluster pending tasks." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json index 0b5b39b01ee58..5dd76c0dec4e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.plugins.json @@ -1,7 +1,7 @@ { "cat.plugins":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-plugins.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-plugins/", "description":"Returns information about installed plugins across nodes node." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json index 7baf0b8ded609..a3dee81e73abc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json @@ -1,7 +1,7 @@ { "cat.recovery":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-recovery.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-recovery/", "description":"Returns information about index shard recoveries, both on-going completed." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json index 58960709a99bb..a8174bbd48e07 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.repositories.json @@ -1,7 +1,7 @@ { "cat.repositories":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-repositories.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-repositories/", "description":"Returns information about snapshot repositories registered in the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json index a815cd5b1101b..91d913d7c9340 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segment_replication.json @@ -1,7 +1,7 @@ { "cat.segment_replication":{ "documentation":{ - "url":"https://github.com/opensearch-project/documentation-website/issues/2627", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-segment-replication/", "description":"Returns information about both on-going and latest completed Segment Replication events" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json index 5107353c7b14f..f9b0de3e2eb50 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.segments.json @@ -1,7 +1,7 @@ { "cat.segments":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-segments.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-segments/", "description":"Provides low-level information about the segments in the shards of an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json index fab381a098e3f..05a04c4b580ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.shards.json @@ -1,7 +1,7 @@ { "cat.shards":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-shards.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-shards/", "description":"Provides a detailed view of shard allocation on nodes." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json index 1320207abfe75..c6eb24a50e435 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.snapshots.json @@ -1,7 +1,7 @@ { "cat.snapshots":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-snapshots/", "description":"Returns all snapshots in a specific repository." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json index 384668f839642..806b993849940 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.tasks.json @@ -1,7 +1,7 @@ { "cat.tasks":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-tasks/", "description":"Returns information about the tasks currently executing on one or more nodes in the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json index d45593b7bb2c8..2f4a5f6e40186 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.templates.json @@ -1,7 +1,7 @@ { "cat.templates":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-templates.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-templates/", "description":"Returns information about existing templates." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json index 1165703490d1a..e5bffbb43de5b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.thread_pool.json @@ -1,7 +1,7 @@ { "cat.thread_pool":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cat-thread-pool.html", + "url":"https://opensearch.org/docs/latest/api-reference/cat/cat-thread-pool/", "description":"Returns cluster-wide thread pool statistics per node.\nBy default the active, queue and rejected statistics are returned for all thread pools." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json index b0e50045cd7cd..ed158978e44a4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json @@ -1,7 +1,7 @@ { "clear_scroll":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-scroll-api.html", + "url":"https://opensearch.org/docs/latest/api-reference/scroll/", "description":"Explicitly clears the search context for a scroll." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json index e46218a781e1b..2f5985faa5f63 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.allocation_explain.json @@ -1,7 +1,7 @@ { "cluster.allocation_explain":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-allocation-explain.html", + "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-allocation/", "description":"Provides explanations for shard allocations in the cluster." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json index 43e14ad0e2dd8..737716cec2527 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_component_template.json @@ -1,7 +1,7 @@ { "cluster.delete_component_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates/#create-a-component-template", "description":"Deletes a component template" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json index 13ea101169e60..24e9a59baa742 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json @@ -1,7 +1,7 @@ { "cluster.delete_decommission_awareness": { "documentation": { - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-decommission/", "description": "Delete any existing decommission." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json index 2cd4081b645e8..a6b09e34e488e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_weighted_routing.json @@ -1,7 +1,7 @@ { "cluster.delete_weighted_routing": { "documentation": { - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/delete", + "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-awareness", "description": "Delete weighted shard routing weights" }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json index 302dea4ec31a7..feb760c2d7783 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_decommission_awareness.json @@ -1,7 +1,7 @@ { "cluster.get_decommission_awareness": { "documentation": { - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-decommission/", "description": "Get details and status of decommissioned attribute" }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json index c60230dbc43b3..df3e6adb6596d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_settings.json @@ -1,7 +1,7 @@ { 
"cluster.get_settings":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html", + "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-settings/", "description":"Returns cluster settings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json index 45eb3d2b62a84..c800314f1297e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.get_weighted_routing.json @@ -1,7 +1,7 @@ { "cluster.get_weighted_routing": { "documentation": { - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/get", + "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-awareness/", "description": "Fetches weighted shard routing weights" }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json index b96340d682546..db820bc4b2c85 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json @@ -1,7 +1,7 @@ { "cluster.health":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-health.html", + "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-health/", "description":"Returns basic information about the health of the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json index 05558bc7bfc50..5ac77f0f5eeb1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_component_template.json @@ -1,7 +1,7 @@ { "cluster.put_component_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-component-template.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates/#create-a-component-template", "description":"Creates or updates a component template" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json index bf4ffd454d9df..9bb0b4e96ff2b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_decommission_awareness.json @@ -1,7 +1,7 @@ { "cluster.put_decommission_awareness": { "documentation": { - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-decommission/", "description": "Decommissions an awareness attribute" }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json index 1e36acc51544d..eec2a24057095 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_settings.json @@ -1,7 +1,7 @@ { "cluster.put_settings":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-update-settings.html", + "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-settings/", "description":"Updates the cluster settings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json index 88498517ba336..f22402def2fc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.put_weighted_routing.json @@ -1,7 +1,7 @@ { "cluster.put_weighted_routing": { "documentation": { - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/weighted-routing/put", + "url": "https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-awareness", "description": "Updates weighted shard routing weights" }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json index 4eac0b55ce6f1..32315ec39f1e4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.remote_info.json @@ -1,7 +1,7 @@ { "cluster.remote_info":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-remote-info.html", - "description":"Returns the information about configured remote clusters." + "url":"https://opensearch.org/docs/latest/api-reference/remote-info/", + "description":"Returns information about configured remote clusters." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json index b43ab901785bd..b4c7240548a79 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.state.json @@ -1,7 +1,7 @@ { "cluster.state":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-state.html", - "description":"Returns a comprehensive information about the state of the cluster." + "url":"https://opensearch.org/docs/latest/api-reference/count/", + "description":"Returns comprehensive information about the state of the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json index f36db0979f4f7..aaaab17fb891a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.stats.json @@ -1,7 +1,7 @@ { "cluster.stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-stats.html", - "description":"Returns high-level overview of cluster statistics." + "url":"https://opensearch.org/docs/latest/api-reference/cluster-api/cluster-stats/", + "description":"Returns a high-level overview of cluster statistics."
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index 8cdb3db7c12cd..38eb435e29a9e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -1,7 +1,7 @@ { "count":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-count.html", + "url":"https://opensearch.org/docs/latest/api-reference/count/", "description":"Returns number of documents matching a query." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json index 767af84b82258..b3e678ace4751 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create.json @@ -1,7 +1,7 @@ { "create":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/index-document/", "description":"Creates a new document in the index.\n\nReturns a 409 response when a document with a same ID already exists in the index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json index d3a2104c01bc0..81fb3f7c3b973 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -2,7 +2,7 @@ { "create_pit":{ "documentation":{ - "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api", "description":"Creates point in time context." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json index 5d832fc794f4f..d0c4aaefbcadf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.delete_dangling_index.json @@ -1,7 +1,7 @@ { "dangling_indices.delete_dangling_index": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html", + "url": "https://opensearch.org/docs/latest/api-reference/index-apis/dangling-index/", "description": "Deletes the specified dangling index" }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json index 5b056e1fa145f..f6cedbe41ef53 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.import_dangling_index.json @@ -1,7 +1,7 @@ { "dangling_indices.import_dangling_index": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html", + "url": "https://opensearch.org/docs/latest/api-reference/index-apis/dangling-index/", "description": "Imports the specified dangling index" }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json index dfc21f56ddfac..0d14c8380c7fe 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/dangling_indices.list_dangling_indices.json @@ -1,7 +1,7 @@ { "dangling_indices.list_dangling_indices": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-gateway-dangling-indices.html", + "url": "https://opensearch.org/docs/latest/api-reference/index-apis/dangling-index/", "description": "Returns all dangling indices." }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index 76dceb455627f..668976d360999 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -1,7 +1,7 @@ { "delete":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/delete-document/", "description":"Removes a document from the index." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json index 5ff01aa746df9..1e329e3c27b5b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -1,7 +1,7 @@ { "delete_all_pits":{ "documentation":{ - "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api", "description":"Deletes all active point in time searches." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index 4c32974583aac..0c493a092e244 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -1,7 +1,7 @@ { "delete_by_query":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/delete-by-query/", "description":"Deletes documents matching the provided query." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json index 112bfc8a7d2e0..c9660fe2e4427 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query_rethrottle.json @@ -1,7 +1,7 @@ { "delete_by_query_rethrottle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/delete-by-query/", "description":"Changes the number of requests per second for a particular Delete By Query operation." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json index b54d9f76204f4..234001349d889 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -1,7 +1,7 @@ { "delete_pit":{ "documentation":{ - "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api", "description":"Deletes one or more point in time searches based on the IDs passed." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json index acaa389738606..b858f71eb2812 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_script.json @@ -1,7 +1,7 @@ { "delete_script":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "url":"https://opensearch.org/docs/latest/api-reference/script-apis/delete-script/", "description":"Deletes a script." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json index fd221b474a070..0b4f588a673ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists.json @@ -1,7 +1,7 @@ { "exists":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/", "description":"Returns information about whether a document exists in an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index bdbf818fb5d81..5c44f57c289b5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -1,7 +1,7 @@ { "exists_source":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/", "description":"Returns information about whether a document source exists in an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json index 7f630f7666f30..4ad263262fe14 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/explain.json @@ -1,7 +1,7 @@ { "explain":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-explain.html", + "url":"https://opensearch.org/docs/latest/api-reference/explain/", "description":"Returns information about why a specific matches (or doesn't match) a query." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 2ce77f17aff10..990ec4685fa54 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -1,7 +1,7 @@ { "get":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/", "description":"Returns a document." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json index 544a8cb11b002..9176d499e754b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json @@ -1,7 +1,7 @@ { "get_all_pits":{ "documentation":{ - "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "url":"https://opensearch.org/docs/latest/search-plugins/searching-data/point-in-time-api", "description":"Lists all active point in time searches." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json index aa770ee9d9f2e..c5034e48e6e65 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_context.json @@ -1,7 +1,7 @@ { "get_script_context":{ "documentation":{ - "url": "https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-contexts.html", + "url": "https://opensearch.org/docs/latest/api-reference/script-apis/get-script-language/", "description":"Returns all script contexts." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json index a5e06cb88901b..006f34a9762b0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_script_languages.json @@ -1,7 +1,7 @@ { "get_script_languages":{ "documentation":{ - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "url": "https://opensearch.org/docs/latest/api-reference/script-apis/get-script-language/", "description":"Returns available script types, languages and contexts" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index ad79678388590..ab269fcc0980f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -1,7 +1,7 @@ { "get_source":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-get.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/get-documents/", "description":"Returns the source of a document." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index b4865403331b0..a02e5bbd2c6e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -1,7 +1,7 @@ { "index":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-index_.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/index-document/", "description":"Creates or updates a document in an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json index aa8e84c1985d6..ddf6b93bba0c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.analyze.json @@ -1,7 +1,7 @@ { "indices.analyze":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-analyze.html", + "url":"https://opensearch.org/docs/latest/api-reference/analyze-apis/", "description":"Performs the analysis process on a text and return the tokens breakdown of the text." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json index 0c7eca8c8e6f5..09ff5af473335 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json @@ -1,7 +1,7 @@ { "indices.clear_cache":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/clear-index-cache/", "description":"Clears all or specific caches for one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json index 2d874f4933768..0872e1b0d6266 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json @@ -1,7 +1,7 @@ { "indices.clone": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clone-index.html", + "url": "https://opensearch.org/docs/latest/api-reference/index-apis/clone/", "description": "Clones an index" }, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json index 1182b73541f93..863080befd3d6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.close.json @@ -1,7 +1,7 @@ { "indices.close":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/close-index/", "description":"Closes an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json index 53ea4cbd80803..c35876c9e6f15 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.create.json @@ -1,7 +1,7 @@ { "indices.create":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-create-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/create-index/", "description":"Creates an index with optional settings and mappings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json index 67f90c48eb79f..4dea89d4189b1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.data_streams_stats.json @@ -1,7 +1,7 @@ { "indices.data_streams_stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "url":"https://opensearch.org/docs/latest/im-plugin/data-streams/", "description":"Provides statistics on operations happening in a data stream." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json index 53fdf44bb36a1..ea8a3f6b769e9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json @@ -1,7 +1,7 @@ { "indices.delete":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-delete-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/delete-index/", "description":"Deletes an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json index f824fb5207d46..e695fb4cfcf60 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_data_stream.json @@ -1,7 +1,7 @@ { "indices.delete_data_stream":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "url":"https://opensearch.org/docs/latest/im-plugin/data-streams/", "description":"Deletes a data stream." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json index 74dbb1822b64a..252ab47a8df6f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete_template.json @@ -1,7 +1,7 @@ { "indices.delete_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Deletes an index template." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json index 7539f44a81eed..7be004cd9c4ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists.json @@ -1,7 +1,7 @@ { "indices.exists":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-exists.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/exists/", "description":"Returns information about whether a particular index exists." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index 66e5ce92cbbe5..88d1c00076757 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -1,7 +1,7 @@ { "indices.exists_alias":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", "description":"Returns information about whether a particular alias exists." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json index c5312680fa880..8b050881240da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_index_template.json @@ -1,7 +1,7 @@ { "indices.exists_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Returns information about whether a particular index template exists." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json index 9796bdd9d21ff..08ab5cf81e57d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_template.json @@ -1,7 +1,7 @@ { "indices.exists_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Returns information about whether a particular index template exists." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json index 986bce55f41e5..a1e066c558cb2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.forcemerge.json @@ -1,7 +1,7 @@ { "indices.forcemerge":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/force-merge/", "description":"Performs the force merge operation on one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json index 0a43f6481d86d..ca701cc81b0c7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get.json @@ -1,7 +1,7 @@ { "indices.get":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/get-index/", "description":"Returns information about one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json index ce19186bea6a9..99f419e593a6f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_data_stream.json @@ -1,7 +1,7 @@ { "indices.get_data_stream":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "url":"https://opensearch.org/docs/latest/im-plugin/data-streams/", "description":"Returns data streams." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json index 0e71b6d395777..ef627502f5250 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_field_mapping.json @@ -1,7 +1,7 @@ { "indices.get_field_mapping":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-field-mapping.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/put-mapping/", "description":"Returns mapping for one or more fields." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json index fbd03f99d2547..3d10894a787f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_index_template.json @@ -1,7 +1,7 @@ { "indices.get_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Returns an index template." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json index 321bfaba4f941..5f401872c98c4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_mapping.json @@ -1,7 +1,7 @@ { "indices.get_mapping":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-mapping.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/put-mapping/", "description":"Returns mappings for one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json index 1bdaea01f87bf..b3db1ca1cbf90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_settings.json @@ -1,7 +1,7 @@ { "indices.get_settings":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-get-settings.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/get-settings/", "description":"Returns settings for one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json index 52aeb17913db4..7df19812f9d6b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.get_template.json @@ -1,7 +1,7 @@ { "indices.get_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Returns an index template." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json index f44fb04102a7f..e7fe207a254f6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.open.json @@ -1,7 +1,7 @@ { "indices.open":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-open-close.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/open-index/", "description":"Opens an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json index 00767afbaec04..c3ccd25da9f86 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_alias.json @@ -1,7 +1,7 @@ { "indices.put_alias":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", "description":"Creates or updates an alias." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json index a2ceb259a4376..439f96943db04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_index_template.json @@ -1,7 +1,7 @@ { "indices.put_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Creates or updates an index template." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json index c8b63d4e1cee1..2db333dc1e8ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_mapping.json @@ -1,7 +1,7 @@ { "indices.put_mapping":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-put-mapping.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/put-mapping/", "description":"Updates the index mappings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json index 3b1c230178bb8..bdedd5519076d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_template.json @@ -1,7 +1,7 @@ { "indices.put_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description":"Creates or updates an index template." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json index a20014a1444ec..941ea1127954d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json @@ -1,7 +1,7 @@ { "indices.shrink":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-shrink-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/shrink-index/", "description":"Allow to shrink an existing index into a new index with fewer primary shards." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json index 0e42ba6028a9f..1ddb4e54d8bbd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_index_template.json @@ -1,7 +1,7 @@ { "indices.simulate_index_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description": "Simulate matching the given index name against the index templates in the system" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json index 65b555082c3b1..04a33fe62e1ec 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.simulate_template.json @@ -1,7 +1,7 @@ { "indices.simulate_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-templates.html", + "url":"https://opensearch.org/docs/latest/im-plugin/index-templates", "description": "Simulate resolving the given template name or body" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json index d399bf9dbdb8a..af041f426b644 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json @@ -1,7 +1,7 @@ { "indices.split":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/split/", "description":"Allows you to split an existing index into a new index with more primary shards." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index 382bb9efde0ff..71ce6dbd443f0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -1,7 +1,7 @@ { "indices.stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-stats.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/stats/", "description":"Provides statistics on operations happening in an index." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json index c31cb8fe59c0f..467cc444b6e53 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.update_aliases.json @@ -1,7 +1,7 @@ { "indices.update_aliases":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-aliases.html", + "url":"https://opensearch.org/docs/latest/api-reference/index-apis/alias/", "description":"Updates index aliases." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json index 3e40136f556fa..2e615ece82c64 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.delete_pipeline.json @@ -1,7 +1,7 @@ { "ingest.delete_pipeline":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/delete-ingest/", "description":"Deletes a pipeline." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json index cde980e67c8c9..498aac4ca5a4a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.get_pipeline.json @@ -1,7 +1,7 @@ { "ingest.get_pipeline":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/get-ingest/", "description":"Returns a pipeline." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json index 5475905e7b99f..0043a32a55310 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.put_pipeline.json @@ -1,7 +1,7 @@ { "ingest.put_pipeline":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/put-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/create-ingest/", "description":"Creates or updates a pipeline." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json index 8122f7a0ffa19..6f6b153195255 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ingest.simulate.json @@ -1,7 +1,7 @@ { "ingest.simulate":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html", + "url":"https://opensearch.org/docs/latest/ingest-pipelines/simulate-ingest/", "description":"Allows to simulate a pipeline with example documents." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json index 3a3a6ebe1bff5..e4b1c2a798a41 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch.json @@ -1,7 +1,7 @@ { "msearch":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-multi-search.html", + "url":"https://opensearch.org/docs/latest/api-reference/multi-search/", "description":"Allows to execute several search operations in one request." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json index 7ac194f91bf56..42aa8a23b1510 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/msearch_template.json @@ -1,7 +1,7 @@ { "msearch_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html", + "url":"https://opensearch.org/docs/latest/api-reference/search-template/", "description":"Allows to execute several search template operations in one request." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json index 0830344dc4ad4..bbbe79b2693c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json @@ -1,7 +1,7 @@ { "nodes.hot_threads":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-hot-threads.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-hot-threads/", "description":"Returns information about hot threads on each node in the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json index 37279edd3106f..4dce8ec54635a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.info.json @@ -1,7 +1,7 @@ { "nodes.info":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-info.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-info/", "description":"Returns information about nodes in the cluster." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json index 25dc72b6cc037..6ecbf96c1925b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.reload_secure_settings.json @@ -1,7 +1,7 @@ { "nodes.reload_secure_settings":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/secure-settings.html#reloadable-secure-settings", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-reload-secure/", "description":"Reloads secure settings." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index cc1a9e8185093..a23b2e5428fb6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -1,7 +1,7 @@ { "nodes.stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-stats.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-stats/", "description":"Returns statistical information about nodes in the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json index 5acbf7a51116c..e23011ce432a8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.usage.json @@ -1,7 +1,7 @@ { "nodes.usage":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-usage.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-usage/", "description":"Returns low-level information about REST actions usage on nodes." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json index c8413d1476402..1c13ea41e3470 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/put_script.json @@ -1,7 +1,7 @@ { "put_script":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-scripting.html", + "url":"https://opensearch.org/docs/latest/api-reference/nodes-apis/nodes-usage/", "description":"Creates or updates a script." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json index eadf240192394..04d337a45ec3e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/rank_eval.json @@ -1,7 +1,7 @@ { "rank_eval":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-rank-eval.html", + "url":"https://opensearch.org/docs/latest/api-reference/rank-eval/", "description":"Allows to evaluate the quality of ranked search results over a set of typical search queries" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json index 2fbaf86cab616..e2cb76d9ab8ca 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json @@ -1,7 +1,7 @@ { "reindex":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html", + "url":"https://opensearch.org/docs/latest/im-plugin/reindex-data/", "description":"Allows to copy documents from one index to another, optionally filtering the source\ndocuments by a query, changing the destination index settings, or fetching the\ndocuments from a remote cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json index d91365b3c49a5..eafd1122f21c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json @@ -1,7 +1,7 @@ { "reindex_rethrottle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html", + "url":"https://opensearch.org/docs/latest/im-plugin/reindex-data/", "description":"Changes the number of requests per second for a particular Reindex operation." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json index 6af49f75b9f6e..4d8efb9411bff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.restore.json @@ -1,7 +1,7 @@ { "remote_store.restore":{ "documentation":{ - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/remote-store#restore", + "url": "https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/remote-store/index/#restoring-from-a-backup", "description":"Restores from remote store." 
}, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json index 437a4439bbcb5..5b456ca35d77a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote_store.stats.json @@ -1,7 +1,7 @@ { "remote_store.stats":{ "documentation":{ - "url": "https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/remote", + "url": "https://opensearch.org/docs/latest/tuning-your-cluster/availability-and-recovery/remote-store/remote-store-stats-api/", "description":"Stats for remote store." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json index c2c474edd9853..a16839aa83a7d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json @@ -1,7 +1,7 @@ { "render_search_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#_validating_templates", + "url":"https://opensearch.org/docs/latest/api-reference/search-template/", "description":"Allows to use the Mustache language to pre-render a search definition." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json index 9f761fb452ba1..168620c0d1f3c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scripts_painless_execute.json @@ -1,7 +1,7 @@ { "scripts_painless_execute":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/painless/master/painless-execute-api.html", + "url":"https://opensearch.org/docs/latest/api-reference/script-apis/exec-stored-script/", "description":"Allows an arbitrary script to be executed and a result to be returned" }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json index ea0cbe2675325..3a4987ff2f6f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/scroll.json @@ -1,7 +1,7 @@ { "scroll":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", + "url":"https://opensearch.org/docs/latest/api-reference/scroll/", "description":"Allows to retrieve a large numbers of results from a single search request." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index e78d49a67a98a..01120eb07250d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -1,7 +1,7 @@ { "search":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html", + "url":"https://opensearch.org/docs/latest/api-reference/search/", "description":"Returns results matching a query." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json index 1fa7060b974dc..260bf482822cc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.delete.json @@ -2,7 +2,7 @@ "search_pipeline.delete": { "documentation": { "description": "Deletes a search pipeline.", - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/search_pipelines/" + "url": "https://opensearch.org/docs/latest/search-plugins/search-pipelines/index/" }, "stability": "stable", "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json index 7cac6e7aa4bcf..05aec5d5de860 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.get.json @@ -2,7 +2,7 @@ "search_pipeline.get": { "documentation": { "description": "Returns a search pipeline", - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/search_pipelines/" + "url": "https://opensearch.org/docs/latest/search-plugins/search-pipelines/retrieving-search-pipeline/" }, "stability": "stable", "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json index b7375d36825a2..90d4baea4ecf2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_pipeline.put.json @@ -2,7 +2,7 @@ "search_pipeline.put": { "documentation": { "description": "Creates or updates a search pipeline.", - "url": "https://opensearch.org/docs/latest/opensearch/rest-api/search_pipelines/" + "url": "https://opensearch.org/docs/latest/search-plugins/search-pipelines/creating-search-pipeline/" }, "stability": "stable", "url": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index 4230b660523b8..00fa06bb96c7e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -1,7 +1,7 @@ { "search_template":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html", + "url":"https://opensearch.org/docs/latest/api-reference/search-template/", "description":"Allows to use the Mustache language to pre-render a search definition." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json index 05eb3309b11e6..826660ce58dea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json @@ -1,7 +1,7 @@ { "snapshot.cleanup_repository": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/clean-up-snapshot-repo-api.html", + "url": "https://opensearch.org/docs/latest/api-reference/snapshots/index/", "description": "Removes stale data from repository." 
}, "stability": "stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json index c79460fc30a48..ae83d9fa6497a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.clone.json @@ -1,7 +1,7 @@ { "snapshot.clone":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/index/", "description":"Clones indices from one snapshot into another snapshot in the same repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json index 64aaeaef9d897..9b4abd1b41a93 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create.json @@ -1,7 +1,7 @@ { "snapshot.create":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/create-snapshot/", "description":"Creates a snapshot in a repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json index 4965162bcd86c..02f7350495cfc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.create_repository.json @@ -1,7 +1,7 @@ { "snapshot.create_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/create-repository/", "description":"Creates a repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json index 2e21a08219942..205872dfa95ea 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete.json @@ -1,7 +1,7 @@ { "snapshot.delete":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/delete-snapshot/", "description":"Deletes a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json index 3fc22f969784c..61a8c2b5f8086 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.delete_repository.json @@ -1,7 +1,7 @@ { "snapshot.delete_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/delete-snapshot-repository/", "description":"Deletes a repository." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json index e084a997a61b1..41b7d728da63f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get.json @@ -1,7 +1,7 @@ { "snapshot.get":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot/", "description":"Returns information about a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json index cf03bab18c03f..dc3e4a91d2a77 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.get_repository.json @@ -1,7 +1,7 @@ { "snapshot.get_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot-repository/", "description":"Returns information about a repository." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json index 07148c7d261f4..401f612a33203 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json @@ -1,7 +1,7 @@ { "snapshot.restore":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/restore-snapshot/", "description":"Restores a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json index 4f22c24fd9a56..1ac6042941013 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.status.json @@ -1,7 +1,7 @@ { "snapshot.status":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/get-snapshot-status/", "description":"Returns information about the status of a snapshot." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json index 865eb15d11310..fc0fd400f8bbd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.verify_repository.json @@ -1,7 +1,7 @@ { "snapshot.verify_repository":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "url":"https://opensearch.org/docs/latest/api-reference/snapshots/verify-snapshot-repository/", "description":"Verifies a repository." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json index 32f90abab60f6..70dc99d7eb2da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json @@ -1,7 +1,7 @@ { "tasks.cancel":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/tasks/", "description":"Cancels a task, if it can be cancelled through an API." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json index 63646ae539de5..d10fe62aa84b8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json @@ -1,7 +1,7 @@ { "tasks.get":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/tasks/", "description":"Returns information about a task." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index 7137114c96bff..20a6cb5f7f7d6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -1,7 +1,7 @@ { "tasks.list":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/tasks.html", + "url":"https://opensearch.org/docs/latest/api-reference/tasks/", "description":"Returns a list of tasks." }, "stability":"experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index c8d1ed435756b..131036486e024 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -1,7 +1,7 @@ { "update":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/update-document/", "description":"Updates a document with a script or partial document." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 71a0c1fc8ad95..7f6cb797da861 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -1,7 +1,7 @@ { "update_by_query":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-update-by-query.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/update-by-query/", "description":"Performs an update on every document in the index without changing the source,\nfor example to pick up a mapping change." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json index bd70f6e1231c9..bc266ed7df68f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query_rethrottle.json @@ -1,7 +1,7 @@ { "update_by_query_rethrottle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html", + "url":"https://opensearch.org/docs/latest/api-reference/document-apis/update-by-query/", "description":"Changes the number of requests per second for a particular Update By Query operation." }, "stability":"stable", diff --git a/server/build.gradle b/server/build.gradle index 7d52849844aaa..cb48142a61159 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -387,7 +387,7 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { onlyModified = true failOnModification = true ignoreMissingClasses = true - annotationIncludes = ['@org.opensearch.common.annotation.PublicApi'] + annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi'] txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") dependsOn downloadSnapshot diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 52b4dad553180..ec5637cec6485 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -650,6 +650,7 @@ public void testCacheWithInvalidation() throws Exception { .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.refresh_interval", -1) ) .get() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java index 3df4ecff5250c..a2543f0592145 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/ReplicaToPrimaryPromotionIT.java @@ -56,6 +56,11 @@ protected int numberOfReplicas() { return 1; } + @Override + public boolean useRandomReplicationStrategy() { + return true; + } + public void testPromoteReplicaToPrimary() throws Exception { final String indexName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); createIndex(indexName); @@ -65,7 +70,7 @@ public void testPromoteReplicaToPrimary() throws Exception { try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) { waitForDocs(numOfDocs, indexer); } - refresh(indexName); + refreshAndWaitForReplication(indexName); } assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java new file mode 100644 index 0000000000000..de425ffc63816 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationAllocationDeciderIT.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; + +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteMigrationAllocationDeciderIT extends MigrationBaseTestCase { + + // When the primary is on doc rep node, existing replica copy can get allocated on excluded docrep node. + public void testFilterAllocationSkipsReplica() throws IOException { + addRemote = false; + List<String> docRepNodes = internalCluster().startNodes(3); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0") + .build() + ); + ensureGreen("test"); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + assertTrue( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", docRepNodes))) + .execute() + .actionGet() + .isAcknowledged() + ); + internalCluster().stopRandomDataNode(); + ensureGreen("test"); + } + + // When the primary is on remote node, new replica copy shouldn't get allocated on an excluded docrep node.
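+ // The test below moves the primary onto the remote node via reroute, excludes both initial docrep nodes by name, stops the replica's node, and expects the health call waiting for green to time out because the new replica copy cannot be placed on an excluded docrep node.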
+ public void testFilterAllocationSkipsReplicaOnExcludedNode() throws IOException { + addRemote = false; + List<String> nodes = internalCluster().startNodes(2); + createIndex( + "test", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "0") + .build() + ); + ensureGreen("test"); + addRemote = true; + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + String remoteNode = internalCluster().startNode(); + + client().admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand("test", 0, primaryNodeName("test"), remoteNode)) + .execute() + .actionGet(); + client().admin() + .cluster() + .prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) + .setWaitForEvents(Priority.LANGUID) + .setWaitForNoRelocatingShards(true) + .execute() + .actionGet(); + assertEquals(remoteNode, primaryNodeName("test")); + + assertTrue( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", String.join(",", nodes))) + .execute() + .actionGet() + .isAcknowledged() + ); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNodeName("test"))); + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(2)) + .execute() + .actionGet(); + assertTrue(clusterHealthResponse.isTimedOut()); + ensureYellow("test"); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index a31d203058565..640b83f194c1c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -19,7 +19,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java new file mode 100644 index 0000000000000..0548ce4a7955f --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/ResizeIndexMigrationTestCase.java @@ -0,0 +1,208 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a +
* compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.support.ActiveShardCount; +import org.opensearch.client.Client; +import org.opensearch.common.settings.Settings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.List; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class ResizeIndexMigrationTestCase extends MigrationBaseTestCase { + private static final String TEST_INDEX = "test_index"; + private final static String REMOTE_STORE_DIRECTION = "remote_store"; + private final static String DOC_REP_DIRECTION = "docrep"; + private final static String NONE_DIRECTION = "none"; + private final static String STRICT_MODE = "strict"; + private final static String MIXED_MODE = "mixed"; + + /* + * This test will verify the resize request failure, when cluster mode is mixed + * and index is on DocRep node, and migration to remote store is in progress. + * */ + public void testFailResizeIndexWhileDocRepToRemoteStoreMigration() throws Exception { + + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List<String> cmNodes = internalCluster().startNodes(1); + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), MIXED_MODE)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // Adding a non remote and a remote node + addRemote = false; + String nonRemoteNodeName = internalCluster().startNode(); + + addRemote = true; + String remoteNodeName = internalCluster().startNode(); + + logger.info("-->Create index on non-remote node and SETTING_REMOTE_STORE_ENABLED is false.
Resize should not happen"); + Settings.Builder builder = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + client.admin() + .indices() + .prepareCreate(TEST_INDEX) + .setSettings( + builder.put("index.number_of_shards", 10) + .put("index.number_of_replicas", 0) + .put("index.routing.allocation.include._name", nonRemoteNodeName) + .put("index.routing.allocation.exclude._name", remoteNodeName) + ) + .setWaitForActiveShards(ActiveShardCount.ALL) + .execute() + .actionGet(); + + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), REMOTE_STORE_DIRECTION)); + assertAcked(client.admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + ResizeType resizeType; + int resizeShardsNum; + String cause; + switch (randomIntBetween(0, 2)) { + case 0: + resizeType = ResizeType.SHRINK; + resizeShardsNum = 5; + cause = "shrink_index"; + break; + case 1: + resizeType = ResizeType.SPLIT; + resizeShardsNum = 20; + cause = "split_index"; + break; + default: + resizeType = ResizeType.CLONE; + resizeShardsNum = 10; + cause = "clone_index"; + } + + client.admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put("index.blocks.write", true)) + .execute() + .actionGet(); + + ensureGreen(TEST_INDEX); + + Settings.Builder resizeSettingsBuilder = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", resizeShardsNum) + .putNull("index.blocks.write"); + + IllegalStateException ex = expectThrows( + IllegalStateException.class, + () -> client().admin() + .indices() + .prepareResizeIndex(TEST_INDEX, "first_split") + .setResizeType(resizeType) + .setSettings(resizeSettingsBuilder.build()) + .get() + ); + assertEquals( + ex.getMessage(), + "Index " + resizeType + " is not allowed as remote migration mode is mixed" + " and index is remote store disabled" + ); + } + + /* + * This test will verify the resize request failure, when cluster mode is mixed + * and index is on Remote Store node, and migration to DocRep node is in progress. + * */ + public void testFailResizeIndexWhileRemoteStoreToDocRepMigration() throws Exception { + + addRemote = true; + internalCluster().setBootstrapClusterManagerNodeIndex(0); + List cmNodes = internalCluster().startNodes(1); + Client client = internalCluster().client(cmNodes.get(0)); + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), MIXED_MODE)); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // Adding a non remote and a remote node + String remoteNodeName = internalCluster().startNode(); + + addRemote = false; + String nonRemoteNodeName = internalCluster().startNode(); + + logger.info("-->Create index on remote node and SETTING_REMOTE_STORE_ENABLED is true. 
Resize should not happen"); + Settings.Builder builder = Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); + client.admin() + .indices() + .prepareCreate(TEST_INDEX) + .setSettings( + builder.put("index.number_of_shards", 10) + .put("index.number_of_replicas", 0) + .put("index.routing.allocation.include._name", remoteNodeName) + .put("index.routing.allocation.exclude._name", nonRemoteNodeName) + ) + .setWaitForActiveShards(ActiveShardCount.ALL) + .execute() + .actionGet(); + + updateSettingsRequest.persistentSettings(Settings.builder().put(MIGRATION_DIRECTION_SETTING.getKey(), DOC_REP_DIRECTION)); + assertAcked(client.admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + ResizeType resizeType; + int resizeShardsNum; + String cause; + switch (randomIntBetween(0, 2)) { + case 0: + resizeType = ResizeType.SHRINK; + resizeShardsNum = 5; + cause = "shrink_index"; + break; + case 1: + resizeType = ResizeType.SPLIT; + resizeShardsNum = 20; + cause = "split_index"; + break; + default: + resizeType = ResizeType.CLONE; + resizeShardsNum = 10; + cause = "clone_index"; + } + + client.admin() + .indices() + .prepareUpdateSettings(TEST_INDEX) + .setSettings(Settings.builder().put("index.blocks.write", true)) + .execute() + .actionGet(); + + ensureGreen(TEST_INDEX); + + Settings.Builder resizeSettingsBuilder = Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", resizeShardsNum) + .putNull("index.blocks.write"); + + IllegalStateException ex = expectThrows( + IllegalStateException.class, + () -> client().admin() + .indices() + .prepareResizeIndex(TEST_INDEX, "first_split") + .setResizeType(resizeType) + .setSettings(resizeSettingsBuilder.build()) + .get() + ); + assertEquals( + ex.getMessage(), + "Index " + resizeType + " is not allowed as remote migration mode is mixed" + " and index is remote store enabled" + ); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 181f242aecd09..f5f9d515f2712 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -20,6 +20,8 @@ import org.opensearch.client.Requests; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; @@ -47,6 +49,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.stream.Collectors; @@ -54,6 +57,10 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static 
org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; @@ -277,6 +284,11 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { String restoredIndexName1version1 = indexName1 + "-restored-1"; String restoredIndexName1version2 = indexName1 + "-restored-2"; + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .get(); createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); Client client = client(); Settings indexSettings = getIndexSettings(1, 0).build(); @@ -284,7 +296,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { indexDocuments(client, indexName1, randomIntBetween(5, 10)); ensureGreen(indexName1); - validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); + validatePathType(indexName1, PathType.FIXED); logger.info("--> snapshot"); SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); @@ -301,7 +313,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version1); - validatePathType(restoredIndexName1version1, PathType.FIXED, PathHashAlgorithm.FNV_1A); + validatePathType(restoredIndexName1version1, PathType.FIXED); client(clusterManagerNode).admin() .cluster() @@ -327,16 +339,50 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated - validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); + validatePathType(indexName1, PathType.FIXED); + + // Create Snapshot of index 2 + String snapshotName2 = "test-restore-snapshot2"; + snapshotInfo = createSnapshot(snapshotRepoName, snapshotName2, new ArrayList<>(List.of(indexName2))); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertTrue(snapshotInfo.successfulShards() > 0); + assertEquals(snapshotInfo.totalShards(), snapshotInfo.successfulShards()); + + // Update cluster settings to FIXED + client(clusterManagerNode).admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.FIXED)) + .get(); + + // Close index 2 + assertAcked(client().admin().indices().prepareClose(indexName2)); + restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .get(); + assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); + ensureGreen(indexName2); + + // Validating that custom data has not changed for testindex2 which was created before the cluster setting got updated + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); + } + + private void validatePathType(String index, PathType pathType) { + validatePathType(index, pathType, null); } - private void validatePathType(String index, PathType pathType, PathHashAlgorithm pathHashAlgorithm) { + private void validatePathType(String index, PathType pathType, @Nullable 
PathHashAlgorithm pathHashAlgorithm) { ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); // Validate that the remote_store custom data is present in index metadata for the created index. Map<String, String> remoteCustomData = state.metadata().index(index).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); assertNotNull(remoteCustomData); assertEquals(pathType.name(), remoteCustomData.get(PathType.NAME)); - assertEquals(pathHashAlgorithm.name(), remoteCustomData.get(PathHashAlgorithm.NAME)); + if (Objects.nonNull(pathHashAlgorithm)) { + assertEquals(pathHashAlgorithm.name(), remoteCustomData.get(PathHashAlgorithm.NAME)); + } } public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { @@ -440,12 +486,15 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { } void assertRemoteSegmentsAndTranslogUploaded(String idx) throws IOException { - String indexUUID = client().admin().indices().prepareGetSettings(idx).get().getSetting(idx, IndexMetadata.SETTING_INDEX_UUID); - - Path remoteTranslogMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/metadata"); - Path remoteTranslogDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/translog/data"); - Path segmentMetadataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/metadata"); - Path segmentDataPath = Path.of(String.valueOf(remoteRepoPath), indexUUID, "/0/segments/data"); + Client client = client(); + String path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", TRANSLOG, METADATA).buildAsString(); + Path remoteTranslogMetadataPath = Path.of(remoteRepoPath + "/" + path); + path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", TRANSLOG, DATA).buildAsString(); + Path remoteTranslogDataPath = Path.of(remoteRepoPath + "/" + path); + path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path segmentMetadataPath = Path.of(remoteRepoPath + "/" + path); + path = getShardLevelBlobPath(client, idx, new BlobPath(), "0", SEGMENTS, DATA).buildAsString(); + Path segmentDataPath = Path.of(remoteRepoPath + "/" + path); try ( Stream<Path> translogMetadata = Files.list(remoteTranslogMetadataPath); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index ba90cbe96e157..d7ad0daa43524 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -28,7 +28,6 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; @@ -57,11 +56,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; -import static
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; -import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -191,121 +187,6 @@ protected BulkResponse indexBulk(String indexName, int numDocs) { return client().bulk(bulkRequest).actionGet(); } - public static Settings remoteStoreClusterSettings(String name, Path path) { - return remoteStoreClusterSettings(name, path, name, path); - } - - public static Settings remoteStoreClusterSettings( - String segmentRepoName, - Path segmentRepoPath, - String segmentRepoType, - String translogRepoName, - Path translogRepoPath, - String translogRepoType - ) { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put( - buildRemoteStoreNodeAttributes( - segmentRepoName, - segmentRepoPath, - segmentRepoType, - translogRepoName, - translogRepoPath, - translogRepoType, - false - ) - ); - return settingsBuilder.build(); - } - - public static Settings remoteStoreClusterSettings( - String segmentRepoName, - Path segmentRepoPath, - String translogRepoName, - Path translogRepoPath - ) { - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); - return settingsBuilder.build(); - } - - public static Settings buildRemoteStoreNodeAttributes( - String segmentRepoName, - Path segmentRepoPath, - String translogRepoName, - Path translogRepoPath, - boolean withRateLimiterAttributes - ) { - return buildRemoteStoreNodeAttributes( - segmentRepoName, - segmentRepoPath, - ReloadableFsRepository.TYPE, - translogRepoName, - translogRepoPath, - ReloadableFsRepository.TYPE, - withRateLimiterAttributes - ); - } - - public static Settings buildRemoteStoreNodeAttributes( - String segmentRepoName, - Path segmentRepoPath, - String segmentRepoType, - String translogRepoName, - Path translogRepoPath, - String translogRepoType, - boolean withRateLimiterAttributes - ) { - String segmentRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - segmentRepoName - ); - String segmentRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - segmentRepoName - ); - String translogRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - translogRepoName - ); - String translogRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - translogRepoName - ); - String stateRepoTypeAttributeKey = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, - segmentRepoName - ); - String stateRepoSettingsAttributeKeyPrefix = String.format( - Locale.getDefault(), - "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, - segmentRepoName - ); - - Settings.Builder settings = Settings.builder() - .put("node.attr." 
+ REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) - .put(segmentRepoTypeAttributeKey, segmentRepoType) - .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) - .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) - .put(translogRepoTypeAttributeKey, translogRepoType) - .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath) - .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) - .put(stateRepoTypeAttributeKey, segmentRepoType) - .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); - - if (withRateLimiterAttributes) { - settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) - .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); - } - - return settings.build(); - } - Settings defaultIndexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 46e5b7aa28318..b767ffff05e3a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -23,6 +23,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.Priority; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.BufferedAsyncIOProcessor; @@ -57,7 +58,11 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -182,13 +187,8 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); - + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); // Delete is async.
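+ // getShardLevelBlobPath is assumed here to resolve the shard's blob path from the index's remote store path-type metadata, which is why these tests no longer hardcode the indexUUID/0/segments/metadata layout.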
@@ -212,12 +213,8 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, false, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); @@ -231,12 +228,8 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(4))); @@ -250,12 +243,8 @@ public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Excepti createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(12, 18); indexData(numberOfIterations, true, INDEX_NAME); - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, METADATA).buildAsString(); + Path indexPath = Path.of(segmentRepoPath + "/" + shardPath); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations + 1))); @@ -589,12 +579,8 @@ public void testFallbackToNodeToNodeSegmentCopy() throws Exception { flushAndRefresh(INDEX_NAME); // 3.
Delete data from remote segment store - String indexUUID = client().admin() - .indices() - .prepareGetSettings(INDEX_NAME) - .get() - .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path segmentDataPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/data"); + String shardPath = getShardLevelBlobPath(client(), INDEX_NAME, BlobPath.cleanPath(), "0", SEGMENTS, DATA).buildAsString(); + Path segmentDataPath = Path.of(segmentRepoPath + "/" + shardPath); try (Stream files = Files.list(segmentDataPath)) { files.forEach(p -> { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java index acdb21d072320..65016c4976157 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRefreshListenerIT.java @@ -11,6 +11,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchIntegTestCase; @@ -22,7 +23,10 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStorePressureSettings.REMOTE_REFRESH_SEGMENT_PRESSURE_ENABLED; +import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRefreshListenerIT extends AbstractRemoteStoreMockRepositoryIntegTestCase { @@ -45,8 +49,10 @@ public void testRemoteRefreshRetryOnFailure() throws Exception { IndicesStatsResponse response = client().admin().indices().stats(new IndicesStatsRequest()).get(); assertEquals(1, response.getShards().length); + String indexName = response.getShards()[0].getShardRouting().index().getName(); String indexUuid = response.getShards()[0].getShardRouting().index().getUUID(); - Path segmentDataRepoPath = location.resolve(String.format(Locale.ROOT, "%s/0/segments/data", indexUuid)); + String shardPath = getShardLevelBlobPath(client(), indexName, new BlobPath(), "0", SEGMENTS, DATA).buildAsString(); + Path segmentDataRepoPath = location.resolve(shardPath); String segmentDataLocalPath = String.format(Locale.ROOT, "%s/indices/%s/0/index", response.getShards()[0].getDataPath(), indexUuid); logger.info("--> Verify that the segment files are same on local and repository eventually"); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java index 36987ac2d4991..d45b4e3deb798 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java @@ -131,7 +131,7 @@ public void readBlobAsync(String blobName, ActionListener listener) InputStreamContainer 
blobPartStream = new InputStreamContainer(readBlob(blobName, offset, partSize), partSize, offset); blobPartStreams.add(() -> CompletableFuture.completedFuture(blobPartStream)); } - ReadContext blobReadContext = new ReadContext(contentLength, blobPartStreams, null); + ReadContext blobReadContext = new ReadContext.Builder(contentLength, blobPartStreams).build(); listener.onResponse(blobReadContext); } catch (Exception e) { listener.onFailure(e); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 78827849a8037..e688a4491b1a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -14,9 +14,14 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; @@ -27,6 +32,8 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.comparesEqualTo; @@ -307,7 +314,21 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { SnapshotInfo snapshotInfo1 = createFullSnapshot(snapshotRepoName, "snap1"); SnapshotInfo snapshotInfo2 = createFullSnapshot(snapshotRepoName, "snap2"); - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); + final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(REMOTE_REPO_NAME); + BlobPath shardLevelBlobPath = getShardLevelBlobPath( + client(), + remoteStoreEnabledIndexName, + remoteStoreRepository.basePath(), + "0", + SEGMENTS, + LOCK_FILES + ); + BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath); + String[] lockFiles; + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + lockFiles = lockDirectory.listAll(); + } assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); // delete remote store index @@ -320,7 +341,9 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { .get(); assertAcked(deleteSnapshotResponse); - lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + lockFiles = lockDirectory.listAll(); + } 
assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); assertTrue(lockFiles[0].contains(snapshotInfo2.snapshotId().getUUID())); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index ca4c16935c2b9..cb41325c18a22 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; @@ -57,6 +58,9 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.DocsStats; import org.opensearch.index.store.StoreStats; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -67,6 +71,7 @@ import java.util.function.IntFunction; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; /** * Main class to initiate resizing (shrink / split) an index into a new index @@ -140,8 +145,8 @@ protected void clusterManagerOperation( // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); - IndexMetadata indexMetadata = state.metadata().index(sourceIndex); + ClusterSettings clusterSettings = clusterService.getClusterSettings(); if (resizeRequest.getResizeType().equals(ResizeType.SHRINK) && state.metadata().isSegmentReplicationEnabled(sourceIndex) && indexMetadata != null @@ -161,7 +166,7 @@ protected void clusterManagerOperation( CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); return shard == null ? null : shard.getPrimary().getDocs(); - }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); + }, indicesStatsResponse.getPrimaries().store, clusterSettings, sourceIndex, targetIndex); if (indicesStatsResponse.getIndex(sourceIndex) .getTotal() @@ -200,7 +205,7 @@ protected void clusterManagerOperation( CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); return shard == null ? 
null : shard.getPrimary().getDocs(); - }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); + }, indicesStatsResponse.getPrimaries().store, clusterSettings, sourceIndex, targetIndex); createIndexService.createIndex( updateRequest, ActionListener.map( @@ -223,6 +228,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( final ClusterState state, final IntFunction perShardDocStats, final StoreStats primaryShardsStoreStats, + final ClusterSettings clusterSettings, String sourceIndexName, String targetIndexName ) { @@ -231,6 +237,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( if (metadata == null) { throw new IndexNotFoundException(sourceIndexName); } + validateRemoteMigrationModeSettings(resizeRequest.getResizeType(), metadata, clusterSettings); final Settings.Builder targetIndexSettingsBuilder = Settings.builder() .put(targetIndex.settings()) .normalizePrefix(IndexMetadata.INDEX_SETTING_PREFIX); @@ -368,4 +375,39 @@ protected static int calculateTargetIndexShardsNum( protected String getClusterManagerActionName(DiscoveryNode node) { return super.getClusterManagerActionName(node); } + + /** + * Reject resize request if cluster mode is [Mixed] and migration direction is [RemoteStore] and index is not on + * REMOTE_STORE_ENABLED node or [DocRep] and index is on REMOTE_STORE_ENABLED node. + * @param type resize type + * @param sourceIndexMetadata source index's metadata + * @param clusterSettings cluster settings + * @throws IllegalStateException if cluster mode is [Mixed] and migration direction is [RemoteStore] or [DocRep] and + * index's SETTING_REMOTE_STORE_ENABLED is not equal to the migration direction's value. + * For example, if migration direction is [RemoteStore] and index's SETTING_REMOTE_STORE_ENABLED + * is false, then throw IllegalStateException. If migration direction is [DocRep] and + * index's SETTING_REMOTE_STORE_ENABLED is true, then throw IllegalStateException. + */ + private static void validateRemoteMigrationModeSettings( + final ResizeType type, + IndexMetadata sourceIndexMetadata, + ClusterSettings clusterSettings + ) { + CompatibilityMode compatibilityMode = clusterSettings.get(RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING); + if (compatibilityMode == CompatibilityMode.MIXED) { + boolean isRemoteStoreEnabled = sourceIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false); + Direction migrationDirection = clusterSettings.get(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING); + boolean invalidConfiguration = (migrationDirection == Direction.REMOTE_STORE && isRemoteStoreEnabled == false) + || (migrationDirection == Direction.DOCREP && isRemoteStoreEnabled); + if (invalidConfiguration) { + throw new IllegalStateException( + "Index " + + type + + " is not allowed as remote migration mode is mixed" + + " and index is remote store " + + (isRemoteStoreEnabled ? 
"enabled" : "disabled") + ); + } + } + } } diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java index 3fadfe5f2cd6a..f705a218fb8e2 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java @@ -371,7 +371,7 @@ public void parse( } IndexRequest upsertRequest = updateRequest.upsertRequest(); if (upsertRequest != null) { - upsertRequest.setPipeline(defaultPipeline); + upsertRequest.setPipeline(pipeline); } updateRequestConsumer.accept(updateRequest); diff --git a/server/src/main/java/org/opensearch/action/support/AutoCreateIndex.java b/server/src/main/java/org/opensearch/action/support/AutoCreateIndex.java index 9e8cbd7bf40c2..ce0a8f84d066f 100644 --- a/server/src/main/java/org/opensearch/action/support/AutoCreateIndex.java +++ b/server/src/main/java/org/opensearch/action/support/AutoCreateIndex.java @@ -43,7 +43,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.common.Strings; import org.opensearch.index.IndexNotFoundException; -import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.SystemIndices; import java.util.ArrayList; @@ -64,8 +63,6 @@ public final class AutoCreateIndex { Property.NodeScope, Setting.Property.Dynamic ); - - private final boolean dynamicMappingDisabled; private final IndexNameExpressionResolver resolver; private final SystemIndices systemIndices; private volatile AutoCreate autoCreate; @@ -77,7 +74,6 @@ public AutoCreateIndex( SystemIndices systemIndices ) { this.resolver = resolver; - dynamicMappingDisabled = !MapperService.INDEX_MAPPER_DYNAMIC_SETTING.get(settings); this.systemIndices = systemIndices; this.autoCreate = AUTO_CREATE_INDEX_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(AUTO_CREATE_INDEX_SETTING, this::setAutoCreate); @@ -109,9 +105,6 @@ public boolean shouldAutoCreate(String index, ClusterState state) { if (autoCreate.autoCreateIndex == false) { throw new IndexNotFoundException("[" + AUTO_CREATE_INDEX_SETTING.getKey() + "] is [false]", index); } - if (dynamicMappingDisabled) { - throw new IndexNotFoundException("[" + MapperService.INDEX_MAPPER_DYNAMIC_SETTING.getKey() + "] is [false]", index); - } // matches not set, default value of "true" if (autoCreate.expressions.isEmpty()) { return true; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 451871b10d5eb..64bea79c9e47b 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -206,8 +206,9 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); + Supplier minNodeVersionSupplier = () -> clusterService.state().nodes().getMinNodeVersion(); remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings()) + ? 
new RemoteStorePathStrategyResolver(clusterService.getClusterSettings(), minNodeVersionSupplier) : null; } @@ -572,28 +573,23 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( * @param assertNullOldType flag to verify that the old remote store path type is null */ public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { - if (remoteStorePathStrategyResolver != null) { - // It is possible that remote custom data exists already. In such cases, we need to only update the path type - // in the remote store custom data map. - Map<String, String> existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); - Map<String, String> remoteCustomData = existingRemoteCustomData == null - ? new HashMap<>() - : new HashMap<>(existingRemoteCustomData); - // Determine the path type for use using the remoteStorePathResolver. - RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get(); - String oldPathType = remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name()); - String oldHashAlgorithm = remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name()); - assert !assertNullOldType || (Objects.isNull(oldPathType) && Objects.isNull(oldHashAlgorithm)); - logger.trace( - () -> new ParameterizedMessage( - "Added newPathStrategy={}, replaced oldPathType={} oldHashAlgorithm={}", - newPathStrategy, - oldPathType, - oldHashAlgorithm - ) - ); - tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); + if (remoteStorePathStrategyResolver == null) { + return; + } + // It is possible that remote custom data exists already. In such cases, we need to only update the path type + // in the remote store custom data map. + Map<String, String> existingCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + assert assertNullOldType == false || Objects.isNull(existingCustomData); + + // Determine the path type to use via the remoteStorePathStrategyResolver.
+ RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get(); + Map<String, String> remoteCustomData = new HashMap<>(); + remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name()); + if (Objects.nonNull(newPathStrategy.getHashAlgorithm())) { + remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name()); } + logger.trace(() -> new ParameterizedMessage("Added newStrategy={}, replaced oldStrategy={}", remoteCustomData, existingCustomData)); + tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); } private ClusterState applyCreateIndexRequestWithV1Templates( diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index 36149d014ea84..2c250f6a5d86e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; @@ -63,7 +62,6 @@ import java.util.Set; import java.util.function.Predicate; import java.util.stream.Collectors; -import java.util.stream.Stream; import static java.util.Collections.emptyMap; @@ -96,8 +94,8 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> { private volatile Map<AttributesKey, AttributesRoutings> initializingShardsByAttributes = emptyMap(); private final Object shardsByAttributeMutex = new Object(); private final Object shardsByWeightMutex = new Object(); - private volatile Map<WeightedRoutingKey, List<ShardRouting>> activeShardsByWeight = emptyMap(); - private volatile Map<WeightedRoutingKey, List<ShardRouting>> initializingShardsByWeight = emptyMap(); + private volatile Map<WeightedRoutingKey, WeightedShardRoutings> activeShardsByWeight = emptyMap(); + private volatile Map<WeightedRoutingKey, WeightedShardRoutings> initializingShardsByWeight = emptyMap(); private static final Logger logger = LogManager.getLogger(IndexShardRoutingTable.class); @@ -249,7 +247,7 @@ public List<ShardRouting> assignedShards() { return this.assignedShards; } - public Map<WeightedRoutingKey, List<ShardRouting>> getActiveShardsByWeight() { + public Map<WeightedRoutingKey, WeightedShardRoutings> getActiveShardsByWeight() { return activeShardsByWeight; } @@ -338,23 +336,7 @@ public ShardIterator activeInitializingShardsWeightedIt( // append shards for attribute value with weight zero, so that shard search requests can be tried on // shard copies in case of request failure from other attribute values.
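// Editor's note (illustrative, not part of the patch): with weights such as
// {zone-a: 1, zone-b: 1, zone-c: 0} and fail-open enabled, the iterator below first yields
// the zone-a/zone-b copies in weighted round-robin order and then appends the weight-zero
// zone-c copies, which the new code takes from the precomputed "without weight" bucket of
// WeightedShardRoutings instead of rebuilding them on every request.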
if (isFailOpenEnabled) { - try { - Stream<String> keys = weightedRouting.weights() - .entrySet() - .stream() - .filter(entry -> entry.getValue().intValue() == WeightedRoutingMetadata.WEIGHED_AWAY_WEIGHT) - .map(Map.Entry::getKey); - keys.forEach(key -> { - ShardIterator iterator = onlyNodeSelectorActiveInitializingShardsIt(weightedRouting.attributeName() + ":" + key, nodes); - while (iterator.remaining() > 0) { - ordered.add(iterator.nextOrNull()); - } - }); - } catch (IllegalArgumentException e) { - // this exception is thrown by {@link onlyNodeSelectorActiveInitializingShardsIt} in case count of shard - // copies found is zero - logger.debug("no shard copies found for shard id [{}] for node attribute with weight zero", shardId); - } + ordered.addAll(activeInitializingShardsWithoutWeights(weightedRouting, nodes, defaultWeight)); } return new PlainShardIterator(shardId, ordered); @@ -378,6 +360,18 @@ private List<ShardRouting> activeInitializingShardsWithWeights( return orderedListWithDistinctShards; } + private List<ShardRouting> activeInitializingShardsWithoutWeights( + WeightedRouting weightedRouting, + DiscoveryNodes nodes, + double defaultWeight + ) { + List<ShardRouting> ordered = new ArrayList<>(getActiveShardsWithoutWeight(weightedRouting, nodes, defaultWeight)); + if (!allInitializingShards.isEmpty()) { + ordered.addAll(getInitializingShardsWithoutWeight(weightedRouting, nodes, defaultWeight)); + } + return ordered.stream().distinct().collect(Collectors.toList()); + } + /** * Returns a list containing shard routings ordered using weighted round-robin scheduling. */ @@ -949,20 +943,60 @@ public int hashCode() { } } + /** + * Holder class for shard routing(s) which are classified and stored based on their weights. + * + * @opensearch.api + */ + @PublicApi(since = "2.14.0") + public static class WeightedShardRoutings { + private final List<ShardRouting> shardRoutingsWithWeight; + private final List<ShardRouting> shardRoutingWithoutWeight; + + public WeightedShardRoutings(List<ShardRouting> shardRoutingsWithWeight, List<ShardRouting> shardRoutingWithoutWeight) { + this.shardRoutingsWithWeight = Collections.unmodifiableList(shardRoutingsWithWeight); + this.shardRoutingWithoutWeight = Collections.unmodifiableList(shardRoutingWithoutWeight); + } + + public List<ShardRouting> getShardRoutingsWithWeight() { + return shardRoutingsWithWeight; + } + + public List<ShardRouting> getShardRoutingWithoutWeight() { + return shardRoutingWithoutWeight; + } + } + /** * * * Gets active shard routing from memory if available, else calculates and puts it in memory.
*/ private List<ShardRouting> getActiveShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); - List<ShardRouting> shardRoutings = activeShardsByWeight.get(key); - if (shardRoutings == null) { - synchronized (shardsByWeightMutex) { - shardRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight); - activeShardsByWeight = new MapBuilder<WeightedRoutingKey, List<ShardRouting>>().put(key, shardRoutings).immutableMap(); - } + if (activeShardsByWeight.get(key) == null) { + populateActiveShardWeightsMap(weightedRouting, nodes, defaultWeight); + } + return activeShardsByWeight.get(key).getShardRoutingsWithWeight(); + } + + private List<ShardRouting> getActiveShardsWithoutWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { + WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); + if (activeShardsByWeight.get(key) == null) { + populateActiveShardWeightsMap(weightedRouting, nodes, defaultWeight); + } + return activeShardsByWeight.get(key).getShardRoutingWithoutWeight(); + } + + private void populateActiveShardWeightsMap(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { + WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); + List<ShardRouting> weightedRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight); + List<ShardRouting> nonWeightedRoutings = activeShards.stream() + .filter(shard -> !weightedRoutings.contains(shard)) + .collect(Collectors.toUnmodifiableList()); + synchronized (shardsByWeightMutex) { + activeShardsByWeight = new MapBuilder<WeightedRoutingKey, WeightedShardRoutings>().put(key, new WeightedShardRoutings(weightedRoutings, nonWeightedRoutings)) + .immutableMap(); } - return shardRoutings; } /** @@ -971,14 +1005,34 @@ private List<ShardRouting> getInitializingShardsByWeight(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); - List<ShardRouting> shardRoutings = initializingShardsByWeight.get(key); - if (shardRoutings == null) { - synchronized (shardsByWeightMutex) { - shardRoutings = shardsOrderedByWeight(activeShards, weightedRouting, nodes, defaultWeight); - initializingShardsByWeight = new MapBuilder<WeightedRoutingKey, List<ShardRouting>>().put(key, shardRoutings).immutableMap(); - } + if (initializingShardsByWeight.get(key) == null) { + populateInitializingShardWeightsMap(weightedRouting, nodes, defaultWeight); + } + return initializingShardsByWeight.get(key).getShardRoutingsWithWeight(); + } + + private List<ShardRouting> getInitializingShardsWithoutWeight( + WeightedRouting weightedRouting, + DiscoveryNodes nodes, + double defaultWeight + ) { + WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); + if (initializingShardsByWeight.get(key) == null) { + populateInitializingShardWeightsMap(weightedRouting, nodes, defaultWeight); + } + return initializingShardsByWeight.get(key).getShardRoutingWithoutWeight(); + } + + private void populateInitializingShardWeightsMap(WeightedRouting weightedRouting, DiscoveryNodes nodes, double defaultWeight) { + WeightedRoutingKey key = new WeightedRoutingKey(weightedRouting); + List<ShardRouting> weightedRoutings = shardsOrderedByWeight(allInitializingShards, weightedRouting, nodes, defaultWeight); + List<ShardRouting> nonWeightedRoutings = allInitializingShards.stream() + .filter(shard -> !weightedRoutings.contains(shard)) + .collect(Collectors.toUnmodifiableList()); + synchronized (shardsByWeightMutex) { + initializingShardsByWeight = new MapBuilder<WeightedRoutingKey, WeightedShardRoutings>().put(key, new
WeightedShardRoutings(weightedRoutings, nonWeightedRoutings)) + .immutableMap(); } - return shardRoutings; } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java index 468fac08d2946..6f0e4fe90cfff 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRouting.java @@ -14,6 +14,7 @@ import org.opensearch.core.common.io.stream.Writeable; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -25,27 +26,26 @@ */ @PublicApi(since = "2.4.0") public class WeightedRouting implements Writeable { - private String attributeName; - private Map<String, Double> weights; + private final String attributeName; + private final Map<String, Double> weights; + private final int hashCode; public WeightedRouting() { - this.attributeName = ""; - this.weights = new HashMap<>(3); + this("", new HashMap<>(3)); } public WeightedRouting(String attributeName, Map<String, Double> weights) { this.attributeName = attributeName; - this.weights = weights; + this.weights = Collections.unmodifiableMap(weights); + this.hashCode = Objects.hash(this.attributeName, this.weights); } public WeightedRouting(WeightedRouting weightedRouting) { - this.attributeName = weightedRouting.attributeName(); - this.weights = weightedRouting.weights; + this(weightedRouting.attributeName(), weightedRouting.weights); } public WeightedRouting(StreamInput in) throws IOException { - attributeName = in.readString(); - weights = (Map<String, Double>) in.readGenericValue(); + this(in.readString(), (Map<String, Double>) in.readGenericValue()); } public boolean isSet() { @@ -70,7 +70,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(attributeName, weights); + return hashCode; } @Override diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index af4b2c61a95b1..d3200c1bc9d75 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -38,11 +38,13 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import java.util.Map; @@ -102,14 +104,32 @@ public class FilterAllocationDecider extends AllocationDecider { private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; private volatile DiscoveryNodeFilters clusterExcludeFilters; + private volatile RemoteStoreNodeService.Direction migrationDirection; + private volatile RemoteStoreNodeService.CompatibilityMode compatibilityMode; public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings)); setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings)); setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings)); + this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings); + this.compatibilityMode = RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(settings); + clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters, (a, b) -> {}); clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters, (a, b) -> {}); clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters, (a, b) -> {}); + clusterSettings.addSettingsUpdateConsumer(RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, this::setMigrationDirection); + clusterSettings.addSettingsUpdateConsumer( + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, + this::setCompatibilityMode + ); + } + + private void setMigrationDirection(RemoteStoreNodeService.Direction migrationDirection) { + this.migrationDirection = migrationDirection; + } + + private void setCompatibilityMode(RemoteStoreNodeService.CompatibilityMode compatibilityMode) { + this.compatibilityMode = compatibilityMode; } @Override @@ -127,10 +147,28 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing "initial allocation of the shrunken index is only allowed on nodes [%s] that hold a copy of every shard in the index"; return allocation.decision(Decision.NO, NAME, explanation, initialRecoveryFilters); } + + Decision decision = isRemoteStoreMigrationReplicaDecision(shardRouting, allocation); + if (decision != null) return decision; } return shouldFilter(shardRouting, node.node(), allocation); } + public Decision isRemoteStoreMigrationReplicaDecision(ShardRouting shardRouting, RoutingAllocation allocation) { + assert shardRouting.unassigned(); + boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(shardRouting.shardId(), allocation); + if (shardRouting.primary() == false + && shardRouting.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED + && (compatibilityMode.equals(RemoteStoreNodeService.CompatibilityMode.MIXED)) + && (migrationDirection.equals(RemoteStoreNodeService.Direction.REMOTE_STORE)) + && primaryOnRemote == false) { + String explanation = + "in remote store migration, allocation filters are not applicable for replica copies whose primary is on doc rep node"; + return allocation.decision(Decision.YES, NAME, explanation); + } + return null; + } + @Override public Decision canAllocate(IndexMetadata indexMetadata, RoutingNode node, RoutingAllocation allocation) { return shouldFilter(indexMetadata, node.node(), allocation); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java index 27ebe5390ea6d..7d40aacb71e25 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/RemoteStoreMigrationAllocationDecider.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.routing.allocation.RoutingAllocation; import 
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; import org.opensearch.node.remotestore.RemoteStoreNodeService.Direction; @@ -60,9 +61,8 @@ public class RemoteStoreMigrationAllocationDecider extends AllocationDecider { public static final String NAME = "remote_store_migration"; - private Direction migrationDirection; - private CompatibilityMode compatibilityMode; - private boolean remoteStoreBackedIndex; + private volatile Direction migrationDirection; + private volatile CompatibilityMode compatibilityMode; public RemoteStoreMigrationAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.migrationDirection = RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING.get(settings); @@ -106,9 +106,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // check for remote store backed indices IndexMetadata indexMetadata = allocation.metadata().getIndexSafe(shardRouting.index()); - if (IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.exists(indexMetadata.getSettings())) { - remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings()); - } + boolean remoteStoreBackedIndex = IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING.get(indexMetadata.getSettings()); if (remoteStoreBackedIndex && targetNode.isRemoteStoreNode() == false) { // allocations and relocations must be to a remote node String reason = String.format( @@ -133,15 +131,20 @@ private Decision primaryShardDecision(ShardRouting primaryShardRouting, Discover return allocation.decision(Decision.YES, NAME, getDecisionDetails(true, primaryShardRouting, targetNode, "")); } + // Checks if primary shard is on a remote node.
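+ // Editor's sketch of how FilterAllocationDecider uses this check during a mixed-mode
+ // migration toward the remote store (assuming the wiring added earlier in this patch):
+ //   unassigned replica, not a newly created index, primary still on docrep -> filters skipped (YES)
+ //   unassigned replica whose primary is already on a remote node           -> filters evaluated as usual
+ //   primaries, INDEX_CREATED replicas, DOCREP direction, or STRICT mode    -> filters evaluated as usual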
+ static boolean isPrimaryOnRemote(ShardId shardId, RoutingAllocation allocation) { + ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(shardId); + if (primaryShardRouting != null) { + DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId()); + return primaryShardNode.isRemoteStoreNode(); + } + return false; + } + private Decision replicaShardDecision(ShardRouting replicaShardRouting, DiscoveryNode targetNode, RoutingAllocation allocation) { if (targetNode.isRemoteStoreNode()) { - ShardRouting primaryShardRouting = allocation.routingNodes().activePrimary(replicaShardRouting.shardId()); - boolean primaryHasMigratedToRemote = false; - if (primaryShardRouting != null) { - DiscoveryNode primaryShardNode = allocation.nodes().getNodes().get(primaryShardRouting.currentNodeId()); - primaryHasMigratedToRemote = primaryShardNode.isRemoteStoreNode(); - } - if (primaryHasMigratedToRemote == false) { + boolean primaryOnRemote = RemoteStoreMigrationAllocationDecider.isPrimaryOnRemote(replicaShardRouting.shardId(), allocation); + if (primaryOnRemote == false) { return allocation.decision( Decision.NO, NAME, diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index 2e25a532b5abf..e16e75de7f27d 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -32,6 +32,7 @@ package org.opensearch.common.blobstore; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.action.ActionListener; import java.io.IOException; @@ -77,6 +78,20 @@ public interface BlobContainer { */ InputStream readBlob(String blobName) throws IOException; + /** + * Creates a new {@link BlobDownloadResponse} for the given blob name. + * + * @param blobName + * The name of the blob to read content and metadata for. + * @return The {@link BlobDownloadResponse} of the blob. + * @throws NoSuchFileException if the blob does not exist + * @throws IOException if the blob can not be read. + */ + @ExperimentalApi + default BlobDownloadResponse readBlobWithMetadata(String blobName) throws IOException { + throw new UnsupportedOperationException("readBlobWithMetadata is not implemented yet"); + } + /** * Creates a new {@link InputStream} that can be used to read the given blob starting from * a specific {@code position} in the blob. The {@code length} is an indication of the @@ -128,6 +143,36 @@ default long readBlobPreferredLength() { */ void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException; + /** + * Reads blob content from the input stream and writes it to the container in a new blob with the given name, and metadata. + * This method assumes the container does not already contain a blob of the same blobName. If a blob by the + * same name already exists, the operation will fail and an {@link IOException} will be thrown. + * + * @param blobName + * The name of the blob to write the contents of the input stream to. + * @param inputStream + * The input stream from which to retrieve the bytes to write to the blob. + * @param metadata + * The metadata to be associated with the blob upload. + * @param blobSize + * The size of the blob to be written, in bytes. It is implementation dependent whether + * this value is used in writing the blob to the repository.
+ * @param failIfAlreadyExists + * whether to throw a FileAlreadyExistsException if the given blob already exists + * @throws FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists + * @throws IOException if the input stream could not be read, or the target blob could not be written to. + */ + @ExperimentalApi + default void writeBlobWithMetadata( + String blobName, + InputStream inputStream, + Map<String, String> metadata, + long blobSize, + boolean failIfAlreadyExists + ) throws IOException { + throw new UnsupportedOperationException("writeBlobWithMetadata is not implemented yet"); + } + /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name, * using an atomic write operation if the implementation supports it. @@ -149,6 +194,38 @@ default long readBlobPreferredLength() { */ void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException; + /** + * Reads blob content from the input stream and writes it to the container in a new blob with the given name, and metadata + * using an atomic write operation if the implementation supports it. + * <p> + * This method assumes the container does not already contain a blob of the same blobName. If a blob by the + * same name already exists, the operation will fail and an {@link IOException} will be thrown. + * + * @param blobName + * The name of the blob to write the contents of the input stream to. + * @param inputStream + * The input stream from which to retrieve the bytes to write to the blob. + * @param metadata + * The metadata to be associated with the blob upload. + * @param blobSize + * The size of the blob to be written, in bytes. It is implementation dependent whether + * this value is used in writing the blob to the repository. + * @param failIfAlreadyExists + * whether to throw a FileAlreadyExistsException if the given blob already exists + * @throws FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists + * @throws IOException if the input stream could not be read, or the target blob could not be written to. + */ + @ExperimentalApi + default void writeBlobAtomicWithMetadata( + String blobName, + InputStream inputStream, + Map<String, String> metadata, + long blobSize, + boolean failIfAlreadyExists + ) throws IOException { + throw new UnsupportedOperationException("writeBlobAtomicWithMetadata is not implemented yet"); + } + /** * Deletes this container and all its contents from the repository. * diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobDownloadResponse.java b/server/src/main/java/org/opensearch/common/blobstore/BlobDownloadResponse.java new file mode 100644 index 0000000000000..97f3e4a16a76c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobDownloadResponse.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.blobstore; + +import java.io.InputStream; +import java.util.Map; + +/** + * Represents the response from a blob download operation, containing both the + * input stream of the blob content and the associated metadata. + * + * @opensearch.experimental + */ +public class BlobDownloadResponse { + + /** + * Downloaded blob InputStream + */ + private final InputStream inputStream; + + /** + * Metadata of the downloaded blob + */ + private final Map<String, String> metadata; + + public InputStream getInputStream() { + return inputStream; + } + + public Map<String, String> getMetadata() { + return metadata; + } + + public BlobDownloadResponse(InputStream inputStream, Map<String, String> metadata) { + this.inputStream = inputStream; + this.metadata = metadata; + } + +} diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java index 763594ed52977..6f3e8be7c28b8 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java @@ -79,6 +79,15 @@ public BlobPath add(String path) { return new BlobPath(Collections.unmodifiableList(paths)); } + /** + * Adds additional levels of paths to the existing path and returns a new {@link BlobPath} with the updated paths.
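+ * <p>
+ * Editor's sketch of the intended behavior (values are hypothetical):
+ * <pre>
+ * BlobPath base = BlobPath.cleanPath().add("indexUUID");
+ * BlobPath full = base.add(List.of("0", "segments", "data"));
+ * // full.buildAsString() returns "indexUUID/0/segments/data/"
+ * </pre>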
+ */ + public BlobPath add(Iterable<String> paths) { + List<String> updatedPaths = new ArrayList<>(this.paths); + paths.iterator().forEachRemaining(updatedPaths::add); + return new BlobPath(Collections.unmodifiableList(updatedPaths)); + } + public String buildAsString() { String p = String.join(SEPARATOR, paths); if (p.isEmpty() || p.endsWith(SEPARATOR)) { diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java index 1264551401b4c..36ee46c0fc2c8 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/read/ReadContext.java @@ -12,6 +12,7 @@ import org.opensearch.common.io.InputStreamContainer; import java.util.List; +import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.function.Supplier; @@ -25,23 +26,30 @@ public class ReadContext { private final long blobSize; private final List<StreamPartCreator> asyncPartStreams; private final String blobChecksum; + private final Map<String, String> metadata; - public ReadContext(long blobSize, List<StreamPartCreator> asyncPartStreams, String blobChecksum) { + private ReadContext(long blobSize, List<StreamPartCreator> asyncPartStreams, String blobChecksum, Map<String, String> metadata) { this.blobSize = blobSize; this.asyncPartStreams = asyncPartStreams; this.blobChecksum = blobChecksum; + this.metadata = metadata; } public ReadContext(ReadContext readContext) { this.blobSize = readContext.blobSize; this.asyncPartStreams = readContext.asyncPartStreams; this.blobChecksum = readContext.blobChecksum; + this.metadata = readContext.metadata; } public String getBlobChecksum() { return blobChecksum; } + public Map<String, String> getMetadata() { + return metadata; + } + public int getNumberOfParts() { return asyncPartStreams.size(); } @@ -64,7 +72,7 @@ public List<StreamPartCreator> getPartStreams() { @ExperimentalApi public interface StreamPartCreator extends Supplier<CompletableFuture<InputStreamContainer>> { /** - * Kicks off a async process to start streaming. + * Kicks off an async process to start streaming. * * @return When the returned future is completed, streaming has * just begun. Clients must fully consume the resulting stream. @@ -72,4 +80,36 @@ public interface StreamPartCreator extends Supplier<CompletableFuture<InputStreamContainer>> CompletableFuture<InputStreamContainer> get(); } + + /** + * Builder for {@link ReadContext}.
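+ * <p>
+ * Editor's sketch of typical usage (names are placeholders):
+ * <pre>
+ * ReadContext context = new ReadContext.Builder(blobSize, partStreams)
+ *     .blobChecksum(checksum)   // optional
+ *     .metadata(metadataMap)    // optional blob metadata, may be null
+ *     .build();
+ * </pre>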
+ * + * @opensearch.experimental + */ + public static class Builder { + private final long blobSize; + private final List<StreamPartCreator> asyncPartStreams; + private String blobChecksum; + private Map<String, String> metadata; + + public Builder(long blobSize, List<StreamPartCreator> asyncPartStreams) { + this.blobSize = blobSize; + this.asyncPartStreams = asyncPartStreams; + } + + public Builder blobChecksum(String blobChecksum) { + this.blobChecksum = blobChecksum; + return this; + } + + public Builder metadata(Map<String, String> metadata) { + this.metadata = metadata; + return this; + } + + public ReadContext build() { + return new ReadContext(blobSize, asyncPartStreams, blobChecksum, metadata); + } + + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java index e74462f82400d..d14800e82e495 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java +++ b/server/src/main/java/org/opensearch/common/blobstore/stream/write/WriteContext.java @@ -13,6 +13,7 @@ import org.opensearch.common.StreamContext; import java.io.IOException; +import java.util.Map; /** * WriteContext is used to encapsulate all data needed by BlobContainer#writeStreams * @@ -29,6 +30,7 @@ public class WriteContext { private final CheckedConsumer<Boolean, IOException> uploadFinalizer; private final boolean doRemoteDataIntegrityCheck; private final Long expectedChecksum; + private final Map<String, String> metadata; /** * Construct a new WriteContext object * @@ -41,7 +43,7 @@ * @param doRemoteDataIntegrityCheck A boolean to inform vendor plugins whether remote data integrity checks need to be done * @param expectedChecksum This parameter expected only when the vendor plugin is expected to do server side data integrity verification */ - public WriteContext( + private WriteContext( String fileName, StreamContextSupplier streamContextSupplier, long fileSize, @@ -49,7 +51,8 @@ WritePriority writePriority, CheckedConsumer<Boolean, IOException> uploadFinalizer, boolean doRemoteDataIntegrityCheck, - @Nullable Long expectedChecksum + @Nullable Long expectedChecksum, + Map<String, String> metadata ) { this.fileName = fileName; this.streamContextSupplier = streamContextSupplier; @@ -59,6 +62,7 @@ this.uploadFinalizer = uploadFinalizer; this.doRemoteDataIntegrityCheck = doRemoteDataIntegrityCheck; this.expectedChecksum = expectedChecksum; + this.metadata = metadata; } /** @@ -73,6 +77,7 @@ protected WriteContext(WriteContext writeContext) { this.uploadFinalizer = writeContext.uploadFinalizer; this.doRemoteDataIntegrityCheck = writeContext.doRemoteDataIntegrityCheck; this.expectedChecksum = writeContext.expectedChecksum; + this.metadata = writeContext.metadata; } /** @@ -131,4 +136,87 @@ public boolean doRemoteDataIntegrityCheck() { public Long getExpectedChecksum() { return expectedChecksum; } + + /** + * @return the upload metadata. + */ + public Map<String, String> getMetadata() { + return metadata; + } + + /** + * Builder for {@link WriteContext}.
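+ * <p>
+ * Editor's sketch of typical usage (values are placeholders; WritePriority.NORMAL is assumed to exist):
+ * <pre>
+ * WriteContext context = new WriteContext.Builder()
+ *     .fileName(remoteFileName)
+ *     .streamContextSupplier(streamSupplier)
+ *     .fileSize(contentLength)
+ *     .failIfAlreadyExists(true)
+ *     .writePriority(WritePriority.NORMAL)
+ *     .uploadFinalizer(finalizer)
+ *     .doRemoteDataIntegrityCheck(false)
+ *     .metadata(metadataMap)
+ *     .build();
+ * </pre>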
+ * + * @opensearch.internal + */ + public static class Builder { + private String fileName; + private StreamContextSupplier streamContextSupplier; + private long fileSize; + private boolean failIfAlreadyExists; + private WritePriority writePriority; + private CheckedConsumer<Boolean, IOException> uploadFinalizer; + private boolean doRemoteDataIntegrityCheck; + private Long expectedChecksum; + private Map<String, String> metadata; + + public Builder fileName(String fileName) { + this.fileName = fileName; + return this; + } + + public Builder streamContextSupplier(StreamContextSupplier streamContextSupplier) { + this.streamContextSupplier = streamContextSupplier; + return this; + } + + public Builder fileSize(long fileSize) { + this.fileSize = fileSize; + return this; + } + + public Builder writePriority(WritePriority writePriority) { + this.writePriority = writePriority; + return this; + } + + public Builder failIfAlreadyExists(boolean failIfAlreadyExists) { + this.failIfAlreadyExists = failIfAlreadyExists; + return this; + } + + public Builder uploadFinalizer(CheckedConsumer<Boolean, IOException> uploadFinalizer) { + this.uploadFinalizer = uploadFinalizer; + return this; + } + + public Builder doRemoteDataIntegrityCheck(boolean doRemoteDataIntegrityCheck) { + this.doRemoteDataIntegrityCheck = doRemoteDataIntegrityCheck; + return this; + } + + public Builder expectedChecksum(Long expectedChecksum) { + this.expectedChecksum = expectedChecksum; + return this; + } + + public Builder metadata(Map<String, String> metadata) { + this.metadata = metadata; + return this; + } + + public WriteContext build() { + return new WriteContext( + fileName, + streamContextSupplier, + fileSize, + failIfAlreadyExists, + writePriority, + uploadFinalizer, + doRemoteDataIntegrityCheck, + expectedChecksum, + metadata + ); + } + } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java index 2047c99d9e13b..cd2ef22327ebb 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/transfer/RemoteTransferContainer.java @@ -27,6 +27,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.util.Map; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -55,6 +56,7 @@ public class RemoteTransferContainer implements Closeable { private final OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier; private final boolean isRemoteDataIntegritySupported; private final AtomicBoolean readBlock = new AtomicBoolean(); + private final Map<String, String> metadata; private static final Logger log = LogManager.getLogger(RemoteTransferContainer.class); @@ -79,6 +81,43 @@ public RemoteTransferContainer( OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier, long expectedChecksum, boolean isRemoteDataIntegritySupported + ) { + this( + fileName, + remoteFileName, + contentLength, + failTransferIfFileExists, + writePriority, + offsetRangeInputStreamSupplier, + expectedChecksum, + isRemoteDataIntegritySupported, + null + ); + } + + /** + * Construct a new RemoteTransferContainer object with metadata.
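+ * <p>
+ * Editor's sketch (hypothetical values) of the metadata-carrying overload:
+ * <pre>
+ * RemoteTransferContainer container = new RemoteTransferContainer(
+ *     "segment_1", "segment_1__9a8b", length, true, WritePriority.NORMAL,
+ *     streamSupplier, checksum, true, Map.of("purpose", "snapshot"));
+ * WriteContext writeContext = container.createWriteContext();
+ * </pre>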
+ * + * @param fileName Name of the local file + * @param remoteFileName Name of the remote file + * @param contentLength Total content length of the file to be uploaded + * @param failTransferIfFileExists A boolean to determine if upload has to be failed if file exists + * @param writePriority The {@link WritePriority} of current upload + * @param offsetRangeInputStreamSupplier A supplier to create OffsetRangeInputStreams + * @param expectedChecksum The expected checksum value for the file being uploaded. This checksum will be used for local or remote data integrity checks + * @param isRemoteDataIntegritySupported A boolean to signify whether the remote repository supports server side data integrity verification + * @param metadata Object metadata to be store with the file. + */ + public RemoteTransferContainer( + String fileName, + String remoteFileName, + long contentLength, + boolean failTransferIfFileExists, + WritePriority writePriority, + OffsetRangeInputStreamSupplier offsetRangeInputStreamSupplier, + long expectedChecksum, + boolean isRemoteDataIntegritySupported, + Map metadata ) { this.fileName = fileName; this.remoteFileName = remoteFileName; @@ -88,22 +127,23 @@ public RemoteTransferContainer( this.offsetRangeInputStreamSupplier = offsetRangeInputStreamSupplier; this.expectedChecksum = expectedChecksum; this.isRemoteDataIntegritySupported = isRemoteDataIntegritySupported; + this.metadata = metadata; } /** * @return The {@link WriteContext} for the current upload */ public WriteContext createWriteContext() { - return new WriteContext( - remoteFileName, - this::supplyStreamContext, - contentLength, - failTransferIfFileExists, - writePriority, - this::finalizeUpload, - isRemoteDataIntegrityCheckPossible(), - isRemoteDataIntegrityCheckPossible() ? expectedChecksum : null - ); + return new WriteContext.Builder().fileName(remoteFileName) + .streamContextSupplier(this::supplyStreamContext) + .fileSize(contentLength) + .failIfAlreadyExists(failTransferIfFileExists) + .writePriority(writePriority) + .uploadFinalizer(this::finalizeUpload) + .doRemoteDataIntegrityCheck(isRemoteDataIntegrityCheckPossible()) + .expectedChecksum(isRemoteDataIntegrityCheckPossible() ? 
expectedChecksum : null) + .metadata(metadata) + .build(); } // package-private for testing diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ef42240f80e29..06664ed73e984 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -728,7 +728,8 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, - RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 82875564c1c07..388de65ca58a1 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -63,6 +63,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -764,6 +765,7 @@ public static IndexMergePolicy fromString(String text) { private volatile String defaultSearchPipeline; private final boolean widenIndexSortType; private final boolean assignedOnRemoteNode; + private final RemoteStorePathStrategy remoteStorePathStrategy; /** * The maximum age of a retention lease before it is considered expired. @@ -988,6 +990,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); + remoteStorePathStrategy = determineRemoteStorePathStrategy(); setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1908,15 +1911,19 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; } - public RemoteStorePathStrategy getRemoteStorePathStrategy() { + private RemoteStorePathStrategy determineRemoteStorePathStrategy() { Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); - if (remoteCustomData != null - && remoteCustomData.containsKey(PathType.NAME) - && remoteCustomData.containsKey(PathHashAlgorithm.NAME)) { + assert remoteCustomData == null || remoteCustomData.containsKey(PathType.NAME); + if (remoteCustomData != null && remoteCustomData.containsKey(PathType.NAME)) { PathType pathType = PathType.parseString(remoteCustomData.get(PathType.NAME)); - PathHashAlgorithm pathHashAlgorithm = PathHashAlgorithm.parseString(remoteCustomData.get(PathHashAlgorithm.NAME)); - return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + String hashAlgoStr = remoteCustomData.get(PathHashAlgorithm.NAME); + PathHashAlgorithm hashAlgorithm = Objects.nonNull(hashAlgoStr) ? 
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ef42240f80e29..06664ed73e984 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -728,7 +728,8 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, - RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING ) ) );
diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 82875564c1c07..388de65ca58a1 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -63,6 +63,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -764,6 +765,7 @@ public static IndexMergePolicy fromString(String text) { private volatile String defaultSearchPipeline; private final boolean widenIndexSortType; private final boolean assignedOnRemoteNode; + private final RemoteStorePathStrategy remoteStorePathStrategy; /** * The maximum age of a retention lease before it is considered expired. @@ -988,6 +990,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); + remoteStorePathStrategy = determineRemoteStorePathStrategy(); setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1908,15 +1911,19 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; } - public RemoteStorePathStrategy getRemoteStorePathStrategy() { + private RemoteStorePathStrategy determineRemoteStorePathStrategy() { Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); - if (remoteCustomData != null - && remoteCustomData.containsKey(PathType.NAME) - && remoteCustomData.containsKey(PathHashAlgorithm.NAME)) { + assert remoteCustomData == null || remoteCustomData.containsKey(PathType.NAME); + if (remoteCustomData != null && remoteCustomData.containsKey(PathType.NAME)) { PathType pathType = PathType.parseString(remoteCustomData.get(PathType.NAME)); - PathHashAlgorithm pathHashAlgorithm = PathHashAlgorithm.parseString(remoteCustomData.get(PathHashAlgorithm.NAME)); - return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + String hashAlgoStr = remoteCustomData.get(PathHashAlgorithm.NAME); + PathHashAlgorithm hashAlgorithm = Objects.nonNull(hashAlgoStr) ? PathHashAlgorithm.parseString(hashAlgoStr) : null; + return new RemoteStorePathStrategy(pathType, hashAlgorithm); } return new RemoteStorePathStrategy(PathType.FIXED); } + + public RemoteStorePathStrategy getRemoteStorePathStrategy() { + return remoteStorePathStrategy; + } }
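To make the new resolution rules concrete: the strategy is now computed once in the IndexSettings constructor and cached, and the hash-algorithm key may legitimately be absent from the custom data. The standalone helper below mirrors determineRemoteStorePathStrategy for illustration; it is not part of the patch.

```java
import java.util.Map;

import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
import org.opensearch.index.remote.RemoteStoreEnums.PathType;
import org.opensearch.index.remote.RemoteStorePathStrategy;

class PathStrategyResolutionSketch {
    // Mirrors IndexSettings#determineRemoteStorePathStrategy above.
    static RemoteStorePathStrategy resolve(Map<String, String> remoteCustomData) {
        if (remoteCustomData != null && remoteCustomData.containsKey(PathType.NAME)) {
            PathType pathType = PathType.parseString(remoteCustomData.get(PathType.NAME));
            String hashAlgoStr = remoteCustomData.get(PathHashAlgorithm.NAME);
            // The hash algorithm entry may be absent, e.g. for the FIXED path type.
            PathHashAlgorithm hashAlgorithm = hashAlgoStr != null ? PathHashAlgorithm.parseString(hashAlgoStr) : null;
            return new RemoteStorePathStrategy(pathType, hashAlgorithm);
        }
        // Indices created before this change carry no remote-store custom data: fall back to FIXED.
        return new RemoteStorePathStrategy(PathType.FIXED);
    }
}
```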
diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java index f3bf0c613415a..40aa2f9890965 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java @@ -31,8 +31,8 @@ public DerivedFieldValueFetcher(DerivedFieldScript.LeafFactory derivedFieldScrip @Override public List fetchValues(SourceLookup lookup) { derivedFieldScript.setDocument(lookup.docId()); - // TODO: remove List.of() when derivedFieldScript.execute() returns list of objects. - return List.of(derivedFieldScript.execute()); + derivedFieldScript.execute(); + return derivedFieldScript.getEmittedValues(); } public void setNextReader(LeafReaderContext context) {
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index 4e557d8c24431..b51abf19fc000 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -8,14 +8,19 @@ package org.opensearch.index.remote; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.hash.FNV1a; import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; +import java.util.HashMap; import java.util.Locale; +import java.util.Map; +import java.util.Objects; import java.util.Set; +import static java.util.Collections.unmodifiableMap; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; @@ -78,9 +83,10 @@ public String getName() { */ @PublicApi(since = "2.14.0") public enum PathType { - FIXED { + FIXED(0) { @Override public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + assert Objects.isNull(hashAlgorithm) : "hashAlgorithm is expected to be null with fixed remote store path type"; // Hash algorithm is not used in FIXED path type return pathInput.basePath() .add(pathInput.indexUUID()) .add(pathInput.shardId()) .add(pathInput.dataCategory().getName()) .add(pathInput.dataType().getName()); } @@ -94,12 +100,30 @@ boolean requiresHashAlgorithm() { return false; } }, - HASHED_PREFIX { + HASHED_PREFIX(1) { @Override public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { - // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise. - // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests. + assert Objects.nonNull(hashAlgorithm) : "hashAlgorithm is expected to be non-null"; + return BlobPath.cleanPath() + .add(hashAlgorithm.hash(pathInput)) + .add(pathInput.basePath()) + .add(pathInput.indexUUID()) + .add(pathInput.shardId()) + .add(pathInput.dataCategory().getName()) + .add(pathInput.dataType().getName()); + } + + @Override + boolean requiresHashAlgorithm() { + return true; + } + }, + HASHED_INFIX(2) { + @Override + public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + assert Objects.nonNull(hashAlgorithm) : "hashAlgorithm is expected to be non-null"; return pathInput.basePath() + .add(hashAlgorithm.hash(pathInput)) .add(pathInput.indexUUID()) .add(pathInput.shardId()) .add(pathInput.dataCategory().getName()) .add(pathInput.dataType().getName()); @@ -112,6 +136,40 @@ boolean requiresHashAlgorithm() { } }; + private final int code; + + PathType(int code) { + this.code = code; + } + + public int getCode() { + return code; + } + + private static final Map CODE_TO_ENUM; + + static { + PathType[] values = values(); + Map codeToStatus = new HashMap<>(values.length); + for (PathType value : values) { + int code = value.code; + if (codeToStatus.containsKey(code)) { + throw new IllegalStateException( + new ParameterizedMessage("{} has same code as {}", codeToStatus.get(code), value).getFormattedMessage() + ); + } + codeToStatus.put(code, value); + } + CODE_TO_ENUM = unmodifiableMap(codeToStatus); + } + + /** + * Turn a status code into a {@link PathType}. + */ + public static PathType fromCode(int code) { + return CODE_TO_ENUM.get(code); + } + /** * This method generates the path for the given path input which constitutes multiple fields and characteristics * of the data. @@ -131,7 +189,7 @@ public BlobPath path(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { return generatePath(pathInput, hashAlgorithm); } - abstract BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm); + protected abstract BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm); abstract boolean requiresHashAlgorithm(); @@ -158,16 +216,51 @@ public static PathType parseString(String pathType) { @PublicApi(since = "2.14.0") public enum PathHashAlgorithm { - FNV_1A { + FNV_1A(0) { @Override - long hash(PathInput pathInput) { + String hash(PathInput pathInput) { String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() .getName(); - return FNV1a.hash32(input); + long hash = FNV1a.hash64(input); + return RemoteStoreUtils.longToUrlBase64(hash); } }; - abstract long hash(PathInput pathInput); + private final int code; + + PathHashAlgorithm(int code) { + this.code = code; + } + + public int getCode() { + return code; + } + + private static final Map CODE_TO_ENUM; + + static { + PathHashAlgorithm[] values = values(); + Map codeToStatus = new HashMap<>(values.length); + for (PathHashAlgorithm value : values) { + int code = value.code; + if (codeToStatus.containsKey(code)) { + throw new IllegalStateException( + new ParameterizedMessage("{} has same code as {}", codeToStatus.get(code), value).getFormattedMessage() + ); + } + codeToStatus.put(code, value); + } + CODE_TO_ENUM = unmodifiableMap(codeToStatus); + } + + /** + * Turn a status code into a {@link PathHashAlgorithm}.
+ */ + public static PathHashAlgorithm fromCode(int code) { + return CODE_TO_ENUM.get(code); + } + + abstract String hash(PathInput pathInput); public static PathHashAlgorithm parseString(String pathHashAlgorithm) { try {
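The three path types differ only in where the hash segment lands, which the comments below restate; the hash string shown is a stand-in, not a real FNV-1a output. Note also that the body of RemoteStoreUtils.longToUrlBase64 is cut off in the RemoteStoreUtils.java hunk further down, so the implementation sketched here is a plausible reading based on the ByteBuffer and Base64 imports that hunk adds, not the patch's actual code.

```java
import java.nio.ByteBuffer;
import java.util.Base64;

class PathLayoutSketch {
    // Layouts produced by generatePath above ("A1b2C3d4E5f" stands in for the hash):
    //   FIXED         -> base_path/indexUUID/shardId/data_category/data_type
    //   HASHED_PREFIX -> A1b2C3d4E5f/base_path/indexUUID/shardId/data_category/data_type
    //   HASHED_INFIX  -> base_path/A1b2C3d4E5f/indexUUID/shardId/data_category/data_type

    // Assumed shape of RemoteStoreUtils.longToUrlBase64 (its body is elided in the hunk below):
    static String longToUrlBase64(long value) {
        byte[] bytes = ByteBuffer.allocate(Long.BYTES).putLong(value).array();
        // URL-safe Base64 without padding, so the hash can be embedded directly in a blob path.
        return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
    }
}
```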
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java index ce5a6748fd9d4..775f8fe19e4ef 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; @@ -36,11 +37,21 @@ public RemoteStorePathStrategy(PathType type) { } public RemoteStorePathStrategy(PathType type, PathHashAlgorithm hashAlgorithm) { - assert type.requiresHashAlgorithm() == false || Objects.nonNull(hashAlgorithm); - this.type = Objects.requireNonNull(type); + Objects.requireNonNull(type, "pathType can not be null"); + if (isCompatible(type, hashAlgorithm) == false) { + throw new IllegalArgumentException( + new ParameterizedMessage("pathType={} pathHashAlgorithm={} are incompatible", type, hashAlgorithm).getFormattedMessage() + ); + } + this.type = type; this.hashAlgorithm = hashAlgorithm; } + public static boolean isCompatible(PathType type, PathHashAlgorithm hashAlgorithm) { + return (type.requiresHashAlgorithm() == false && Objects.isNull(hashAlgorithm)) + || (type.requiresHashAlgorithm() && Objects.nonNull(hashAlgorithm)); + } + public PathType getType() { return type; } @@ -55,7 +66,7 @@ public String toString() { } public BlobPath generatePath(PathInput pathInput) { - return type.generatePath(pathInput, hashAlgorithm); + return type.path(pathInput, hashAlgorithm); } /**
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 20fc516132220..5b067115df781 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -8,11 +8,14 @@ package org.opensearch.index.remote; +import org.opensearch.Version; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.IndicesService; +import java.util.function.Supplier; + /** * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. * @@ -22,13 +25,22 @@ public class RemoteStorePathStrategyResolver { private volatile PathType type; - public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings) { + private final Supplier minNodeVersionSupplier; + + public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings, Supplier minNodeVersionSupplier) { + this.minNodeVersionSupplier = minNodeVersionSupplier; type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } public RemoteStorePathStrategy get() { - return new RemoteStorePathStrategy(type, PathHashAlgorithm.FNV_1A); + PathType pathType; + PathHashAlgorithm pathHashAlgorithm; + // Min node version check ensures that we are enabling the new prefix type only when all the nodes understand it. + pathType = Version.CURRENT.compareTo(minNodeVersionSupplier.get()) <= 0 ? type : PathType.FIXED; + // If the path type is fixed, hash algorithm is not applicable. + pathHashAlgorithm = pathType == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A; + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); } private void setType(PathType type) {
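The gating rule in get() above is worth restating: the configured path type only takes effect once the oldest node in the cluster is on a version that understands it; otherwise the resolver degrades to FIXED, and the hash algorithm is attached only for hashed types. A standalone paraphrase (illustrative, not project code):

```java
import java.util.function.Supplier;

import org.opensearch.Version;
import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
import org.opensearch.index.remote.RemoteStoreEnums.PathType;
import org.opensearch.index.remote.RemoteStorePathStrategy;

class ResolverSketch {
    // Paraphrases RemoteStorePathStrategyResolver#get() above.
    static RemoteStorePathStrategy resolve(PathType configured, Supplier<Version> minNodeVersion) {
        // Use the configured type only when every node already understands it.
        PathType effective = Version.CURRENT.compareTo(minNodeVersion.get()) <= 0 ? configured : PathType.FIXED;
        // FIXED takes no hash algorithm; hashed types default to FNV-1a.
        PathHashAlgorithm algorithm = effective == PathType.FIXED ? null : PathHashAlgorithm.FNV_1A;
        return new RemoteStorePathStrategy(effective, algorithm);
    }
}
```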
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java index b4c33d781af86..7d0743e70b6cb 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -10,7 +10,9 @@ import org.opensearch.common.collect.Tuple; +import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -101,4 +103,17 @@ public static void verifyNoMultipleWriters(List mdFiles, Function
diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java private final List fileNames; + private final PathType pathType; + private final PathHashAlgorithm pathHashAlgorithm; - static final String DEFAULT_VERSION = "1"; + static final String DEFAULT_VERSION = "2"; static final String NAME = "name"; static final String VERSION = "version"; static final String INDEX_VERSION = "index_version"; @@ -61,6 +68,8 @@ public class RemoteStoreShardShallowCopySnapshot implements ToXContentFragment, static final String TOTAL_FILE_COUNT = "number_of_files"; static final String TOTAL_SIZE = "total_size"; + static final String PATH_TYPE = "path_type"; + static final String PATH_HASH_ALGORITHM = "path_hash_algorithm"; private static final ParseField PARSE_NAME = new ParseField(NAME); private static final ParseField PARSE_VERSION = new ParseField(VERSION); @@ -75,6 +84,8 @@ public class RemoteStoreShardShallowCopySnapshot implements ToXContentFragment, private static final ParseField PARSE_REMOTE_STORE_REPOSITORY = new ParseField(REMOTE_STORE_REPOSITORY); private static final ParseField PARSE_REPOSITORY_BASE_PATH = new ParseField(REPOSITORY_BASE_PATH); private static final ParseField PARSE_FILE_NAMES = new ParseField(FILE_NAMES); + private static final ParseField PARSE_PATH_TYPE = new ParseField(PATH_TYPE); + private static final ParseField PARSE_PATH_HASH_ALGORITHM = new ParseField(PATH_HASH_ALGORITHM); /** * Serializes shard snapshot metadata info into JSON @@ -101,6 +112,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(fileName); } builder.endArray(); + // We null-check here because a cluster can contain indices created earlier that do not have the remote store + // path type and path hash algorithm in the custom data of their index metadata. + if (Objects.nonNull(pathType)) { + builder.field(PATH_TYPE, pathType.getCode()); + } + if (Objects.nonNull(pathHashAlgorithm)) { + builder.field(PATH_HASH_ALGORITHM, pathHashAlgorithm.getCode()); + } return builder; } @@ -116,34 +135,30 @@ public RemoteStoreShardShallowCopySnapshot( String indexUUID, String remoteStoreRepository, String repositoryBasePath, - List fileNames + List fileNames, + PathType pathType, + PathHashAlgorithm pathHashAlgorithm ) { - this.version = DEFAULT_VERSION; - verifyParameters( - version, + this( + DEFAULT_VERSION, snapshot, indexVersion, primaryTerm, commitGeneration, + startTime, + time, + totalFileCount, + totalSize, indexUUID, remoteStoreRepository, - repositoryBasePath + repositoryBasePath, + fileNames, + pathType, + pathHashAlgorithm ); - this.snapshot = snapshot; - this.indexVersion = indexVersion; - this.primaryTerm = primaryTerm; - this.commitGeneration = commitGeneration; - this.startTime = startTime; - this.time = time; - this.totalFileCount = totalFileCount; - this.totalSize = totalSize; - this.indexUUID = indexUUID; - this.remoteStoreRepository = remoteStoreRepository; - this.repositoryBasePath = repositoryBasePath; - this.fileNames = fileNames; } - private RemoteStoreShardShallowCopySnapshot( + RemoteStoreShardShallowCopySnapshot( String version, String snapshot, long indexVersion, @@ -156,7 +171,9 @@ private RemoteStoreShardShallowCopySnapshot( String indexUUID, String remoteStoreRepository, String repositoryBasePath, - List fileNames + List fileNames, + PathType pathType, + PathHashAlgorithm pathHashAlgorithm ) { verifyParameters( version, @@ -166,7 +183,9 @@ private RemoteStoreShardShallowCopySnapshot( commitGeneration, indexUUID, remoteStoreRepository, - repositoryBasePath + repositoryBasePath, + pathType, + pathHashAlgorithm ); this.version = version; this.snapshot = snapshot; @@ -181,6 +200,8 @@ private RemoteStoreShardShallowCopySnapshot( this.remoteStoreRepository = remoteStoreRepository; this.repositoryBasePath = repositoryBasePath; this.fileNames = fileNames; + this.pathType = pathType; + this.pathHashAlgorithm = pathHashAlgorithm; } /** @@ -203,6 +224,8 @@ public static RemoteStoreShardShallowCopySnapshot fromXContent(XContentParser pa long primaryTerm = -1; long commitGeneration = -1; List fileNames = new ArrayList<>(); + PathType pathType = null; + PathHashAlgorithm pathHashAlgorithm = null; if (parser.currentToken() == null) { // fresh parser? 
move to the first token parser.nextToken(); @@ -237,6 +260,10 @@ public static RemoteStoreShardShallowCopySnapshot fromXContent(XContentParser pa remoteStoreRepository = parser.text(); } else if (PARSE_REPOSITORY_BASE_PATH.match(currentFieldName, parser.getDeprecationHandler())) { repositoryBasePath = parser.text(); + } else if (PARSE_PATH_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + pathType = PathType.fromCode(parser.intValue()); + } else if (PARSE_PATH_HASH_ALGORITHM.match(currentFieldName, parser.getDeprecationHandler())) { + pathHashAlgorithm = PathHashAlgorithm.fromCode(parser.intValue()); } else { throw new OpenSearchParseException("unknown parameter [{}]", currentFieldName); } @@ -266,7 +293,9 @@ public static RemoteStoreShardShallowCopySnapshot fromXContent(XContentParser pa indexUUID, remoteStoreRepository, repositoryBasePath, - fileNames + fileNames, + pathType, + pathHashAlgorithm ); } @@ -380,38 +409,47 @@ private void verifyParameters( long commitGeneration, String indexUUID, String remoteStoreRepository, - String repositoryBasePath + String repositoryBasePath, + PathType pathType, + PathHashAlgorithm pathHashAlgorithm ) { - String exceptionStr = null; - if (version == null) { - exceptionStr = "Invalid Version Provided"; - } - if (snapshot == null) { - exceptionStr = "Invalid/Missing Snapshot Name"; - } - if (indexVersion < 0) { - exceptionStr = "Invalid Index Version"; - } - if (primaryTerm < 0) { - exceptionStr = "Invalid Primary Term"; - } - if (commitGeneration < 0) { - exceptionStr = "Invalid Commit Generation"; - } - if (indexUUID == null) { - exceptionStr = "Invalid/Missing Index UUID"; - } - if (remoteStoreRepository == null) { - exceptionStr = "Invalid/Missing Remote Store Repository"; - } - if (repositoryBasePath == null) { - exceptionStr = "Invalid/Missing Repository Base Path"; - } - if (exceptionStr != null) { + + throwExceptionIfInvalid(Objects.isNull(version), "Invalid Version Provided"); + throwExceptionIfInvalid(Objects.isNull(snapshot), "Invalid/Missing Snapshot Name"); + throwExceptionIfInvalid(indexVersion < 0, "Invalid Index Version"); + throwExceptionIfInvalid(primaryTerm < 0, "Invalid Primary Term"); + throwExceptionIfInvalid(commitGeneration < 0, "Invalid Commit Generation"); + throwExceptionIfInvalid(Objects.isNull(indexUUID), "Invalid/Missing Index UUID"); + throwExceptionIfInvalid(Objects.isNull(remoteStoreRepository), "Invalid/Missing Remote Store Repository"); + throwExceptionIfInvalid(Objects.isNull(repositoryBasePath), "Invalid/Missing Repository Base Path"); + throwExceptionIfInvalid( + isValidRemotePathConfiguration(version, pathType, pathHashAlgorithm) == false, + new ParameterizedMessage( + "Invalid combination of pathType={} pathHashAlgorithm={} for version={}", + pathType, + pathHashAlgorithm, + version + ).getFormattedMessage() + ); + } + + private void throwExceptionIfInvalid(boolean isInvalid, String exceptionStr) { + if (isInvalid) { throw new IllegalArgumentException(exceptionStr); } } + private boolean isValidRemotePathConfiguration(String version, PathType pathType, PathHashAlgorithm pathHashAlgorithm) { + switch (version) { + case "1": + return Objects.isNull(pathType) && Objects.isNull(pathHashAlgorithm); + case "2": + return Objects.nonNull(pathType) && RemoteStorePathStrategy.isCompatible(pathType, pathHashAlgorithm); + default: + return false; + } + } + /** * Creates a new instance which has a different name and zero incremental file counts but is identical to this instance in terms of the files * it 
references. @@ -433,7 +471,9 @@ public RemoteStoreShardShallowCopySnapshot asClone(String targetSnapshotName, lo indexUUID, remoteStoreRepository, repositoryBasePath, - fileNames + fileNames, + pathType, + pathHashAlgorithm ); } @@ -449,4 +489,63 @@ public IndexShardSnapshotStatus getIndexShardSnapshotStatus() { null ); // Not adding a real generation here as it doesn't matter to callers } + + public PathType getPathType() { + return pathType; + } + + public PathHashAlgorithm getPathHashAlgorithm() { + return pathHashAlgorithm; + } + + public RemoteStorePathStrategy getRemoteStorePathStrategy() { + if (Objects.nonNull(pathType)) { + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + } + return new RemoteStorePathStrategy(PathType.FIXED); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || getClass() != obj.getClass()) return false; + RemoteStoreShardShallowCopySnapshot that = (RemoteStoreShardShallowCopySnapshot) obj; + + return Objects.equals(this.snapshot, that.snapshot) + && Objects.equals(this.version, that.version) + && this.indexVersion == that.indexVersion + && this.startTime == that.startTime + && this.time == that.time + && this.totalFileCount == that.totalFileCount + && this.totalSize == that.totalSize + && this.primaryTerm == that.primaryTerm + && this.commitGeneration == that.commitGeneration + && Objects.equals(this.remoteStoreRepository, that.remoteStoreRepository) + && Objects.equals(this.repositoryBasePath, that.repositoryBasePath) + && Objects.equals(this.indexUUID, that.indexUUID) + && Objects.equals(this.fileNames, that.fileNames) + && Objects.equals(this.pathType, that.pathType) + && Objects.equals(this.pathHashAlgorithm, that.pathHashAlgorithm); + } + + @Override + public int hashCode() { + return Objects.hash( snapshot, version, indexVersion, startTime, time, totalFileCount, totalSize, primaryTerm, commitGeneration, remoteStoreRepository, repositoryBasePath, indexUUID, fileNames, pathType, pathHashAlgorithm ); + } }
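For illustration, a version-2 shard snapshot document now ends with the two integer codes, e.g. { ..., "path_type": 1, "path_hash_algorithm": 0 } for HASHED_PREFIX with FNV-1a, while version-1 documents omit both; on the read side, getRemoteStorePathStrategy() above degrades gracefully for legacy documents. A hedged restatement of that fallback:

```java
import java.util.Objects;

import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm;
import org.opensearch.index.remote.RemoteStoreEnums.PathType;
import org.opensearch.index.remote.RemoteStorePathStrategy;

class SnapshotPathStrategySketch {
    // Mirrors RemoteStoreShardShallowCopySnapshot#getRemoteStorePathStrategy above.
    static RemoteStorePathStrategy strategyFor(PathType pathType, PathHashAlgorithm pathHashAlgorithm) {
        if (Objects.nonNull(pathType)) {
            // Version-2 document: both codes were parsed from XContent.
            return new RemoteStorePathStrategy(pathType, pathHashAlgorithm);
        }
        // Version-1 (pre-2.14) document: no codes recorded, so assume the legacy fixed layout.
        return new RemoteStorePathStrategy(PathType.FIXED);
    }
}
```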
diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java index e100ffaabf13d..4599aa32325c1 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteBlobStoreInternalTranslogFactory.java @@ -9,6 +9,7 @@ package org.opensearch.index.translog; import org.opensearch.index.remote.RemoteTranslogTransferTracker; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; @@ -34,11 +35,14 @@ public class RemoteBlobStoreInternalTranslogFactory implements TranslogFactory { private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; + private final RemoteStoreSettings remoteStoreSettings; + public RemoteBlobStoreInternalTranslogFactory( Supplier repositoriesServiceSupplier, ThreadPool threadPool, String repositoryName, - RemoteTranslogTransferTracker remoteTranslogTransferTracker + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteStoreSettings remoteStoreSettings ) { Repository repository; try { @@ -49,6 +53,7 @@ public RemoteBlobStoreInternalTranslogFactory( this.repository = repository; this.threadPool = threadPool; this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + this.remoteStoreSettings = remoteStoreSettings; } @Override @@ -74,7 +79,8 @@ public Translog newTranslog( blobStoreRepository, threadPool, startedPrimarySupplier, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + remoteStoreSettings ); }
diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index bb7769eae1bf5..67799f0465c29 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -28,6 +28,7 @@ import org.opensearch.index.translog.transfer.TranslogTransferManager; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; @@ -100,7 +101,8 @@ public RemoteFsTranslog( BlobStoreRepository blobStoreRepository, ThreadPool threadPool, BooleanSupplier startedPrimarySupplier, - RemoteTranslogTransferTracker remoteTranslogTransferTracker + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteStoreSettings remoteStoreSettings ) throws IOException { super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier, persistedSequenceNumberConsumer); logger = Loggers.getLogger(getClass(), shardId); @@ -113,7 +115,8 @@ public RemoteFsTranslog( shardId, fileTransferTracker, remoteTranslogTransferTracker, - indexSettings().getRemoteStorePathStrategy() + indexSettings().getRemoteStorePathStrategy(), + remoteStoreSettings ); try { download(translogTransferManager, location, logger); @@ -163,6 +166,7 @@ public static void download( ThreadPool threadPool, Path location, RemoteStorePathStrategy pathStrategy, + RemoteStoreSettings remoteStoreSettings, Logger logger ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( @@ -181,7 +185,8 @@ public static void download( shardId, fileTransferTracker, remoteTranslogTransferTracker, - pathStrategy + pathStrategy, + remoteStoreSettings ); RemoteFsTranslog.download(translogTransferManager, location, logger); logger.trace(remoteTranslogTransferTracker.toString()); @@ -259,7 +264,8 @@ public static TranslogTransferManager buildTranslogTransferManager( ShardId shardId, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker tracker, - RemoteStorePathStrategy pathStrategy + RemoteStorePathStrategy pathStrategy, + RemoteStoreSettings remoteStoreSettings ) { assert Objects.nonNull(pathStrategy); String indexUUID = shardId.getIndex().getUUID(); @@ -281,7 +287,7 @@ public static TranslogTransferManager buildTranslogTransferManager( .build(); BlobPath mdPath = pathStrategy.generatePath(mdPathInput); BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); - return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker); + return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker, remoteStoreSettings); } @Override @@ -553,8 +559,13 @@ private void deleteStaleRemotePrimaryTerms() { } } - public static void cleanup(Repository 
repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy) - throws IOException { + public static void cleanup( + Repository repository, + ShardId shardId, + ThreadPool threadPool, + RemoteStorePathStrategy pathStrategy, + RemoteStoreSettings remoteStoreSettings + ) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; // We use a dummy stats tracker to ensure the flow doesn't break. @@ -567,7 +578,8 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th shardId, fileTransferTracker, remoteTranslogTransferTracker, - pathStrategy + pathStrategy, + remoteStoreSettings ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles();
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java index 82dd6301ef79f..a1d5041ff9aff 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/BlobStoreTransferService.java @@ -12,8 +12,10 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionRunnable; +import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobDownloadResponse; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; @@ -164,6 +166,12 @@ public InputStream downloadBlob(Iterable path, String fileName) throws I return blobStore.blobContainer((BlobPath) path).readBlob(fileName); } + @Override + @ExperimentalApi + public BlobDownloadResponse downloadBlobWithMetadata(Iterable path, String fileName) throws IOException { + return blobStore.blobContainer((BlobPath) path).readBlobWithMetadata(fileName); + } + @Override public void deleteBlobs(Iterable path, List fileNames) throws IOException { blobStore.blobContainer((BlobPath) path).deleteBlobsIgnoringIfNotExists(fileNames);
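A hedged sketch of how a caller might consume the new experimental download API. The accessor names on BlobDownloadResponse are assumptions (the hunks only show the type being returned), and the path and file name are placeholders:

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import org.opensearch.common.blobstore.BlobDownloadResponse;
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.index.translog.transfer.TransferService;

class DownloadWithMetadataSketch {
    static void fetch(TransferService transferService, BlobPath path) throws IOException {
        // "translog-42.tlog" is a placeholder file name.
        BlobDownloadResponse response = transferService.downloadBlobWithMetadata(path, "translog-42.tlog");
        try (InputStream stream = response.getInputStream()) {      // assumed accessor
            Map<String, String> metadata = response.getMetadata();  // assumed accessor
            // Consume the stream; metadata carries whatever was stored at upload time.
        }
    }
}
```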
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java index cfe833dde87eb..5fe8b02058383 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TransferService.java @@ -8,6 +8,8 @@ package org.opensearch.index.translog.transfer; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.blobstore.BlobDownloadResponse; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.stream.write.WritePriority; @@ -125,6 +127,16 @@ void uploadBlobs( */ InputStream downloadBlob(Iterable path, String fileName) throws IOException; + /** + * Downloads the blob along with its corresponding object metadata. + * @param path the remote path from where download should be made + * @param fileName the name of the file + * @return {@link BlobDownloadResponse} of the remote file + * @throws IOException if an I/O error occurs while reading the data + */ + @ExperimentalApi + BlobDownloadResponse downloadBlobWithMetadata(Iterable path, String fileName) throws IOException; + void listAllInSortedOrder(Iterable path, String filenamePrefix, int limit, ActionListener> listener); void listAllInSortedOrderAsync(
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index c9e07ca3ef8c1..1087244623b87 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -28,6 +28,7 @@ import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -60,9 +61,7 @@ public class TranslogTransferManager { private final BlobPath remoteMetadataTransferPath; private final FileTransferTracker fileTransferTracker; private final RemoteTranslogTransferTracker remoteTranslogTransferTracker; - - private static final long TRANSFER_TIMEOUT_IN_MILLIS = 30000; - + private final RemoteStoreSettings remoteStoreSettings; private static final int METADATA_FILES_TO_FETCH = 10; private final Logger logger; @@ -79,7 +78,8 @@ public TranslogTransferManager( BlobPath remoteDataTransferPath, BlobPath remoteMetadataTransferPath, FileTransferTracker fileTransferTracker, - RemoteTranslogTransferTracker remoteTranslogTransferTracker + RemoteTranslogTransferTracker remoteTranslogTransferTracker, + RemoteStoreSettings remoteStoreSettings ) { this.shardId = shardId; this.transferService = transferService; @@ -88,6 +88,7 @@ public TranslogTransferManager( this.fileTransferTracker = fileTransferTracker; this.logger = Loggers.getLogger(getClass(), shardId); this.remoteTranslogTransferTracker = remoteTranslogTransferTracker; + this.remoteStoreSettings = remoteStoreSettings; } public RemoteTranslogTransferTracker getRemoteTranslogTransferTracker() { @@ -151,7 +152,7 @@ public boolean transferSnapshot(TransferSnapshot transferSnapshot, TranslogTrans transferService.uploadBlobs(toUpload, blobPathMap, latchedActionListener, WritePriority.HIGH); try { - if (latch.await(TRANSFER_TIMEOUT_IN_MILLIS, TimeUnit.MILLISECONDS) == false) { + if (latch.await(remoteStoreSettings.getClusterRemoteTranslogTransferTimeout().millis(), TimeUnit.MILLISECONDS) == false) { Exception ex = new TranslogUploadFailedException( "Timed out waiting for transfer of snapshot " + transferSnapshot + " to complete" );
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 4f68c03913199..75130a4041b77 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -498,7 +498,8 @@ protected void closeInternal() { repositoriesServiceSupplier, threadPool, remoteStoreStatsTrackerFactory, - settings + settings, + remoteStoreSettings ); this.searchRequestStats = searchRequestStats; this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); @@ -528,7 +529,8 @@ private static BiFunction getTrans Supplier repositoriesServiceSupplier, ThreadPool threadPool, RemoteStoreStatsTrackerFactory 
remoteStoreStatsTrackerFactory, - Settings settings + Settings settings, + RemoteStoreSettings remoteStoreSettings ) { return (indexSettings, shardRouting) -> { if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { @@ -536,14 +538,16 @@ private static BiFunction getTrans repositoriesServiceSupplier, threadPool, indexSettings.getRemoteStoreTranslogRepository(), - remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) + remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()), + remoteStoreSettings ); } else if (isRemoteDataAttributePresent(settings) && shardRouting.primary()) { return new RemoteBlobStoreInternalTranslogFactory( repositoriesServiceSupplier, threadPool, RemoteStoreNodeAttribute.getRemoteStoreTranslogRepo(indexSettings.getNodeSettings()), - remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()) + remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardRouting.shardId()), + remoteStoreSettings ); } return new InternalTranslogFactory(); diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java index 5e6dba2b398db..7f2121093f8e8 100644 --- a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -54,8 +54,20 @@ public class RemoteStoreSettings { Property.Dynamic ); + /** + * Controls timeout value while uploading translog and checkpoint files to remote translog + */ + public static final Setting CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING = Setting.timeSetting( + "cluster.remote_store.translog.transfer_timeout", + TimeValue.timeValueSeconds(30), + TimeValue.timeValueSeconds(30), + Property.NodeScope, + Property.Dynamic + ); + private volatile TimeValue clusterRemoteTranslogBufferInterval; private volatile int minRemoteSegmentMetadataFiles; + private volatile TimeValue clusterRemoteTranslogTransferTimeout; public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); @@ -69,9 +81,14 @@ public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, this::setMinRemoteSegmentMetadataFiles ); + + this.clusterRemoteTranslogTransferTimeout = CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING, + this::setClusterRemoteTranslogTransferTimeout + ); } - // Exclusively for testing, please do not use it elsewhere. 
public TimeValue getClusterRemoteTranslogBufferInterval() { return clusterRemoteTranslogBufferInterval; } @@ -87,4 +104,12 @@ private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) public int getMinRemoteSegmentMetadataFiles() { return this.minRemoteSegmentMetadataFiles; } + + public TimeValue getClusterRemoteTranslogTransferTimeout() { + return clusterRemoteTranslogTransferTimeout; + } + + private void setClusterRemoteTranslogTransferTimeout(TimeValue clusterRemoteTranslogTransferTimeout) { + this.clusterRemoteTranslogTransferTimeout = clusterRemoteTranslogTransferTimeout; + } }
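The new timeout is node-scoped and dynamic, with both the default and the floor at 30 seconds, and TranslogTransferManager above now waits on it in place of the old hard-coded TRANSFER_TIMEOUT_IN_MILLIS constant. A hedged sketch of overriding and reading it (setting key as defined above; the 60-second value is arbitrary):

```java
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.indices.RemoteStoreSettings;

class TranslogTimeoutSketch {
    static TimeValue effectiveTimeout() {
        // Override the 30s default; values below the 30s floor are rejected by the setting.
        Settings nodeSettings = Settings.builder()
            .put("cluster.remote_store.translog.transfer_timeout", TimeValue.timeValueSeconds(60))
            .build();
        ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(nodeSettings, clusterSettings);
        return remoteStoreSettings.getClusterRemoteTranslogTransferTimeout(); // 60s here
    }
}
```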
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 9065e4e194e05..1a5701d9204ef 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -108,7 +108,6 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; @@ -672,8 +671,7 @@ public void cloneRemoteStoreIndexShardSnapshot( remoteStoreRepository, indexUUID, String.valueOf(shardId.shardId()), - new RemoteStorePathStrategy(PathType.FIXED) - // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + remStoreBasedShardMetadata.getRemoteStorePathStrategy() ); remoteStoreMetadataLockManger.cloneLock( FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(), @@ -1154,8 +1152,7 @@ protected void releaseRemoteStoreLockAndCleanup( remoteStoreRepoForIndex, indexUUID, shardId, - new RemoteStorePathStrategy(PathType.FIXED) - // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + remoteStoreShardShallowCopySnapshot.getRemoteStorePathStrategy() ); remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); @@ -1178,8 +1175,7 @@ protected void releaseRemoteStoreLockAndCleanup( indexUUID, new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), ThreadPool.Names.REMOTE_PURGE, - new RemoteStorePathStrategy(PathType.FIXED) - // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + remoteStoreShardShallowCopySnapshot.getRemoteStorePathStrategy() ); } } @@ -2694,6 +2690,7 @@ public void snapshotRemoteStoreIndexShard( // now create and write the commit point logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); try { + RemoteStorePathStrategy pathStrategy = store.indexSettings().getRemoteStorePathStrategy(); REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.write( new RemoteStoreShardShallowCopySnapshot( snapshotId.getName(), @@ -2707,7 +2704,9 @@ public void snapshotRemoteStoreIndexShard( store.indexSettings().getUUID(), store.indexSettings().getRemoteStoreRepository(), this.basePath().toString(), - fileNames + fileNames, + pathStrategy.getType(), + pathStrategy.getHashAlgorithm() ), shardContainer, snapshotId.getUUID(),
diff --git a/server/src/main/java/org/opensearch/script/DerivedFieldScript.java b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java index 7f5b991950ec6..e9988ec5aeef2 100644 --- a/server/src/main/java/org/opensearch/script/DerivedFieldScript.java +++ b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java @@ -9,14 +9,17 @@ package org.opensearch.script; import org.apache.lucene.index.LeafReaderContext; -import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.common.collect.Tuple; import org.opensearch.index.fielddata.ScriptDocValues; import org.opensearch.search.lookup.LeafSearchLookup; import org.opensearch.search.lookup.SearchLookup; import org.opensearch.search.lookup.SourceLookup; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; @@ -30,7 +33,7 @@ public abstract class DerivedFieldScript { public static final String[] PARAMETERS = {}; public static final ScriptContext CONTEXT = new ScriptContext<>("derived_field", Factory.class); - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); + private static final int MAX_BYTE_SIZE = 1024 * 1024; // Maximum allowed byte size (1 MB) private static final Map> PARAMS_FUNCTIONS = Map.of( "doc", @@ -49,16 +52,27 @@ public abstract class DerivedFieldScript { */ private final LeafSearchLookup leafLookup; + /** + * The field values emitted from the script. + */ + private List emittedValues; + + private int totalByteSize; + public DerivedFieldScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { Map parameters = new HashMap<>(params); this.leafLookup = lookup.getLeafSearchLookup(leafContext); parameters.putAll(leafLookup.asMap()); this.params = new DynamicMap(parameters, PARAMS_FUNCTIONS); + this.emittedValues = new ArrayList<>(); + this.totalByteSize = 0; } - protected DerivedFieldScript() { - params = null; - leafLookup = null; + public DerivedFieldScript() { + this.params = null; + this.leafLookup = null; + this.emittedValues = new ArrayList<>(); + this.totalByteSize = 0; } /** @@ -75,14 +89,54 @@ public Map> getDoc() { return leafLookup.doc(); } + /** + * Return the emitted values from the script execution. + */ + public List getEmittedValues() { + return emittedValues; + } + /** * Set the current document to run the script on next. + * Clears the emittedValues as well since they should be scoped per document. 
*/ public void setDocument(int docid) { + this.emittedValues = new ArrayList<>(); + this.totalByteSize = 0; leafLookup.setDocument(docid); } - public abstract Object execute(); + public void addEmittedValue(Object o) { + int byteSize = getObjectByteSize(o); + int newTotalByteSize = totalByteSize + byteSize; + if (newTotalByteSize <= MAX_BYTE_SIZE) { + emittedValues.add(o); + totalByteSize = newTotalByteSize; + } else { + throw new IllegalStateException("Exceeded maximum allowed byte size for emitted values"); + } + } + + private int getObjectByteSize(Object obj) { + if (obj instanceof String) { + return ((String) obj).getBytes(StandardCharsets.UTF_8).length; + } else if (obj instanceof Integer) { + return Integer.BYTES; + } else if (obj instanceof Long) { + return Long.BYTES; + } else if (obj instanceof Double) { + return Double.BYTES; + } else if (obj instanceof Boolean) { + return Byte.BYTES; // Assuming 1 byte for boolean + } else if (obj instanceof Tuple) { + // Assuming each element in the tuple is a double for GeoPoint case + return Double.BYTES * 2; + } else { + throw new IllegalArgumentException("Unsupported object type passed in emit()"); + } + } + + public void execute() {} /** * A factory to construct {@link DerivedFieldScript} instances. @@ -95,7 +149,6 @@ public interface LeafFactory { /** * A factory to construct stateful {@link DerivedFieldScript} factories for a specific index. - * * @opensearch.internal */ public interface Factory extends ScriptFactory {
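End to end, the execution flow above is: setDocument(docId) resets the per-document buffer and byte budget, the compiled script body emits values (routed to addEmittedValue through the ScriptEmitValues bindings defined just below), and the value fetcher collects getEmittedValues(); exceeding the 1 MB budget throws IllegalStateException. A hedged sketch of the engine-side flow:

```java
import java.util.List;

import org.opensearch.script.DerivedFieldScript;

class EmitFlowSketch {
    // `script` is a compiled DerivedFieldScript subclass whose execute() body
    // calls emit(...), which the bindings below forward to addEmittedValue(...).
    static List<Object> valuesForDoc(DerivedFieldScript script, int docId) {
        script.setDocument(docId);  // resets emittedValues and totalByteSize for this document
        script.execute();           // the script body emits zero or more values
        return script.getEmittedValues();
    }
}
```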

diff --git a/server/src/main/java/org/opensearch/script/ScriptEmitValues.java b/server/src/main/java/org/opensearch/script/ScriptEmitValues.java new file mode 100644 index 0000000000000..5d12f36442179 --- /dev/null +++ b/server/src/main/java/org/opensearch/script/ScriptEmitValues.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.script; + +import org.opensearch.common.collect.Tuple; + +/** + * Values that can be emitted in a derived field script context. + * + * The emit function can be called multiple times within a script definition, + * so it handles collecting the values over the course of the script execution. + */ +public final class ScriptEmitValues { + + /** + * Takes in a single value and emits it + * Could be a long, double, String, etc. + */ + public static final class EmitSingle { + + private final DerivedFieldScript derivedFieldScript; + + public EmitSingle(DerivedFieldScript derivedFieldScript) { + this.derivedFieldScript = derivedFieldScript; + } + + // TODO: Keeping this generic for the time being due to limitations with + // binding methods with the same name and arity. + // Ideally, we should have an emit signature per derived field type and try to scope + // that to the respective script execution so the other emits aren't allowed. + // One way to do this could be to create implementations of the DerivedFieldScript.LeafFactory + // per field type where they each define their own emit() method and then the engine that executes + // it can have custom compilation logic to perform class bindings on that emit implementation. + public void emit(Object val) { + derivedFieldScript.addEmittedValue(val); + } + + } + + /** + * Emits a GeoPoint value + */ + public static final class GeoPoint { + + private final DerivedFieldScript derivedFieldScript; + + public GeoPoint(DerivedFieldScript derivedFieldScript) { + this.derivedFieldScript = derivedFieldScript; + } + + public void emit(double lat, double lon) { + derivedFieldScript.addEmittedValue(new Tuple<>(lat, lon)); + } + + } + +}
diff --git a/server/src/main/java/org/opensearch/transport/Header.java b/server/src/main/java/org/opensearch/transport/Header.java index 57c1da6f46aec..ac30df8dda02c 100644 --- a/server/src/main/java/org/opensearch/transport/Header.java +++ b/server/src/main/java/org/opensearch/transport/Header.java @@ -75,11 +75,11 @@ public int getNetworkMessageSize() { return networkMessageSize; } - Version getVersion() { + public Version getVersion() { return version; } - long getRequestId() { + public long getRequestId() { return requestId; } @@ -87,7 +87,7 @@ byte getStatus() { return status; } - boolean isRequest() { + public boolean isRequest() { return TransportStatus.isRequest(status); } @@ -99,7 +99,7 @@ boolean isError() { return TransportStatus.isError(status); } - boolean isHandshake() { + public boolean isHandshake() { return TransportStatus.isHandshake(status); }
diff --git a/server/src/main/java/org/opensearch/transport/InboundBytesHandler.java b/server/src/main/java/org/opensearch/transport/InboundBytesHandler.java new file mode 100644 index 0000000000000..276891212e43f --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/InboundBytesHandler.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.bytes.ReleasableBytesReference; + +import java.io.Closeable; +import java.io.IOException; +import java.util.function.BiConsumer; + +/** + * Interface for handling inbound bytes. Can be implemented by different transport protocols. 
+ */ +public interface InboundBytesHandler extends Closeable { + + public void doHandleBytes( + TcpChannel channel, + ReleasableBytesReference reference, + BiConsumer messageHandler + ) throws IOException; + + public boolean canHandleBytes(ReleasableBytesReference reference); + + @Override + void close(); +}
diff --git a/server/src/main/java/org/opensearch/transport/InboundDecoder.java b/server/src/main/java/org/opensearch/transport/InboundDecoder.java index 82fc09a985446..d6b7a98e876b3 100644 --- a/server/src/main/java/org/opensearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/opensearch/transport/InboundDecoder.java @@ -50,8 +50,8 @@ */ public class InboundDecoder implements Releasable { - static final Object PING = new Object(); - static final Object END_CONTENT = new Object(); + public static final Object PING = new Object(); + public static final Object END_CONTENT = new Object(); private final Version version; private final PageCacheRecycler recycler;
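InboundBytesHandler is the seam that lets each wire protocol own its framing. A minimal hedged sketch of the contract (the BiConsumer's type parameters are assumed to be TcpChannel and ProtocolInboundMessage, matching the inboundMessage signature further down; real decoding logic is omitted):

```java
import java.io.IOException;
import java.util.function.BiConsumer;

import org.opensearch.common.bytes.ReleasableBytesReference;
import org.opensearch.transport.InboundBytesHandler;
import org.opensearch.transport.ProtocolInboundMessage;
import org.opensearch.transport.TcpChannel;

// Placeholder implementation to show the contract only.
class NoOpBytesHandler implements InboundBytesHandler {
    @Override
    public void doHandleBytes(
        TcpChannel channel,
        ReleasableBytesReference reference,
        BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler
    ) throws IOException {
        // Decode `reference` into protocol messages, then hand each one to messageHandler.
    }

    @Override
    public boolean canHandleBytes(ReleasableBytesReference reference) {
        return false; // a real handler inspects the leading bytes for its protocol
    }

    @Override
    public void close() {}
}
```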
diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index a8315c3cae4e0..6492900c49a0e 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -32,35 +32,14 @@ package org.opensearch.transport; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.BytesRef; -import org.opensearch.Version; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.common.io.stream.ByteBufferStreamInput; -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.transport.TransportAddress; -import org.opensearch.core.transport.TransportResponse; -import org.opensearch.telemetry.tracing.Span; -import org.opensearch.telemetry.tracing.SpanBuilder; -import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; -import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; -import java.io.EOFException; import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Collections; import java.util.Map; -import java.util.stream.Collectors; /** * Handler for inbound data @@ -69,21 +48,13 @@ */ public class InboundHandler { - private static final Logger logger = LogManager.getLogger(InboundHandler.class); - private final ThreadPool threadPool; - private final OutboundHandler outboundHandler; - private final NamedWriteableRegistry namedWriteableRegistry; - private final TransportHandshaker handshaker; - private final TransportKeepAlive keepAlive; - private final Transport.ResponseHandlers responseHandlers; - private final Transport.RequestHandlers requestHandlers; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; private volatile long slowLogThresholdMs = Long.MAX_VALUE; - private final Tracer tracer; + private final Map protocolMessageHandlers; InboundHandler( ThreadPool threadPool, @@ -96,13 +67,19 @@ public class InboundHandler { Tracer tracer ) { this.threadPool = threadPool; - this.outboundHandler = outboundHandler; - this.namedWriteableRegistry = namedWriteableRegistry; - this.handshaker = handshaker; - this.keepAlive = keepAlive; - this.requestHandlers = requestHandlers; - this.responseHandlers = responseHandlers; - this.tracer = tracer; + this.protocolMessageHandlers = Map.of( + NativeInboundMessage.NATIVE_PROTOCOL, + new NativeMessageHandler( + threadPool, + outboundHandler, + namedWriteableRegistry, + handshaker, + requestHandlers, + responseHandlers, + tracer, + keepAlive + ) + ); } void setMessageListener(TransportMessageListener listener) { @@ -117,377 +94,17 @@ void setSlowLogThreshold(TimeValue slowLogThreshold) { this.slowLogThresholdMs = slowLogThreshold.getMillis(); } - void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception { + void inboundMessage(TcpChannel channel, ProtocolInboundMessage message) throws Exception { final long startTime = threadPool.relativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); - TransportLogger.logInboundMessage(channel, message); - if (message.isPing()) { - keepAlive.receiveKeepAlive(channel); - } else { - messageReceived(channel, message, startTime); - } - } - - // Empty stream constant to avoid instantiating a new stream for empty messages. - private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); - - private void messageReceived(TcpChannel channel, InboundMessage message, long startTime) throws IOException { - final InetSocketAddress remoteAddress = channel.getRemoteAddress(); - final Header header = message.getHeader(); - assert header.needsToReadVariableHeader() == false; - ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext existing = threadContext.stashContext()) { - // Place the context with the headers from the message - threadContext.setHeaders(header.getHeaders()); - threadContext.putTransient("_remote_address", remoteAddress); - if (header.isRequest()) { - handleRequest(channel, header, message); - } else { - // Responses do not support short circuiting currently - assert message.isShortCircuit() == false; - final TransportResponseHandler handler; - long requestId = header.getRequestId(); - if (header.isHandshake()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - TransportResponseHandler theHandler = responseHandlers.onResponseReceived( - requestId, - messageListener - ); - if (theHandler == null && header.isError()) { - handler = handshaker.removeHandlerForHandshake(requestId); - } else { - handler = theHandler; - } - } - // ignore if its null, the service logs it - if (handler != null) { - final StreamInput streamInput; - if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { - streamInput = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(streamInput, header.getVersion()); - if (header.isError()) { - handlerResponseError(requestId, streamInput, handler); - } else { - handleResponse(requestId, remoteAddress, streamInput, handler); - } - } else { - assert header.isError() == false; - handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); - } - } - - } - } finally { - final long took = threadPool.relativeTimeInMillis() - startTime; - final long logThreshold = slowLogThresholdMs; - if 
(logThreshold > 0 && took > logThreshold) { - logger.warn( - "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", - message, - took, - logThreshold - ); - } - } - } - - private Map> extractHeaders(Map headers) { - return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); - } - - private void handleRequest(TcpChannel channel, Header header, InboundMessage message) throws IOException { - final String action = header.getActionName(); - final long requestId = header.getRequestId(); - final Version version = header.getVersion(); - final Map> headers = extractHeaders(header.getHeaders().v1()); - Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers); - try (SpanScope spanScope = tracer.withSpanInScope(span)) { - if (header.isHandshake()) { - messageListener.onRequestReceived(requestId, action); - // Cannot short circuit handshakes - assert message.isShortCircuit() == false; - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final TcpTransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); - try { - handshaker.handleHandshake(traceableTransportChannel, requestId, stream); - } catch (Exception e) { - if (Version.CURRENT.isCompatible(header.getVersion())) { - sendErrorResponse(action, traceableTransportChannel, e); - } else { - logger.warn( - new ParameterizedMessage( - "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", - channel, - header.getVersion() - ), - e - ); - channel.close(); - } - } - } else { - final TcpTransportChannel transportChannel = new TcpTransportChannel( - outboundHandler, - channel, - action, - requestId, - version, - header.getFeatures(), - header.isCompressed(), - header.isHandshake(), - message.takeBreakerReleaseControl() - ); - TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); - try { - messageListener.onRequestReceived(requestId, action); - if (message.isShortCircuit()) { - sendErrorResponse(action, traceableTransportChannel, message.getException()); - } else { - final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); - assertRemoteVersion(stream, header.getVersion()); - final RequestHandlerRegistry reg = requestHandlers.getHandler(action); - assert reg != null; - - final T request = newRequest(requestId, action, stream, reg); - request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); - checkStreamIsFullyConsumed(requestId, action, stream); - - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { - try { - reg.processMessageReceived(request, traceableTransportChannel); - } catch (Exception e) { - sendErrorResponse(reg.getAction(), traceableTransportChannel, e); - } - } else { - threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); - } - } - } catch (Exception e) { - sendErrorResponse(action, traceableTransportChannel, e); - } - } - } catch (Exception e) { - span.setError(e); - span.endSpan(); - throw e; - } - } - - /** - * 
Creates new request instance out of input stream. Throws IllegalStateException if the end of - * the stream was reached before the request is fully deserialized from the stream. - * @param transport request type - * @param requestId request identifier - * @param action action name - * @param stream stream - * @param reg request handler registry - * @return new request instance - * @throws IOException IOException - * @throws IllegalStateException IllegalStateException - */ - private T newRequest( - final long requestId, - final String action, - final StreamInput stream, - final RequestHandlerRegistry reg - ) throws IOException { - try { - return reg.newRequest(stream); - } catch (final EOFException e) { - // Another favor of (de)serialization issues is when stream contains less bytes than - // the request handler needs to deserialize the payload. - throw new IllegalStateException( - "Message fully read (request) but more data is expected for requestId [" - + requestId - + "], action [" - + action - + "]; resetting", - e - ); - } - } - - /** - * Checks if the stream is fully consumed and throws the exceptions if that is not the case. - * @param requestId request identifier - * @param action action name - * @param stream stream - * @throws IOException IOException - */ - private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { - // in case we throw an exception, i.e. when the limit is hit, we don't want to verify - final int nextByte = stream.read(); - - // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (request) for requestId [" - + requestId - + "], action [" - + action - + "], available [" - + stream.available() - + "]; resetting" - ); - } - } - - /** - * Checks if the stream is fully consumed and throws the exceptions if that is not the case. 
- * @param requestId request identifier - * @param handler response handler - * @param stream stream - * @param error "true" if response represents error, "false" otherwise - * @throws IOException IOException - */ - private void checkStreamIsFullyConsumed( - final long requestId, - final TransportResponseHandler handler, - final StreamInput stream, - final boolean error - ) throws IOException { - if (stream != EMPTY_STREAM_INPUT) { - // Check the entire message has been read - final int nextByte = stream.read(); - // calling read() is useful to make sure the message is fully read, even if there is an EOS marker - if (nextByte != -1) { - throw new IllegalStateException( - "Message not fully read (response) for requestId [" - + requestId - + "], handler [" - + handler - + "], error [" - + error - + "]; resetting" - ); - } - } - } - - private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { - try { - transportChannel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); - } - } - - private void handleResponse( - final long requestId, - InetSocketAddress remoteAddress, - final StreamInput stream, - final TransportResponseHandler handler - ) { - final T response; - try { - response = handler.read(stream); - response.remoteAddress(new TransportAddress(remoteAddress)); - checkStreamIsFullyConsumed(requestId, handler, stream, false); - } catch (Exception e) { - final Exception serializationException = new TransportSerializationException( - "Failed to deserialize response from handler [" + handler + "]", - e - ); - logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); - handleException(handler, serializationException); - return; - } - final String executor = handler.executor(); - if (ThreadPool.Names.SAME.equals(executor)) { - doHandleResponse(handler, response); - } else { - threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); - } + messageReceivedFromPipeline(channel, message, startTime); } - private void doHandleResponse(TransportResponseHandler handler, T response) { - try { - handler.handleResponse(response); - } catch (Exception e) { - handleException(handler, new ResponseHandlerFailureTransportException(e)); - } - } - - private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { - Exception error; - try { - error = stream.readException(); - checkStreamIsFullyConsumed(requestId, handler, stream, true); - } catch (Exception e) { - error = new TransportSerializationException( - "Failed to deserialize exception response from stream for handler [" + handler + "]", - e - ); - } - handleException(handler, error); - } - - private void handleException(final TransportResponseHandler handler, Throwable error) { - if (!(error instanceof RemoteTransportException)) { - error = new RemoteTransportException(error.getMessage(), error); - } - final RemoteTransportException rtx = (RemoteTransportException) error; - threadPool.executor(handler.executor()).execute(() -> { - try { - handler.handleException(rtx); - } catch (Exception e) { - logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); - } - }); - } - - private StreamInput namedWriteableStream(StreamInput delegate) { - return new 
NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); - } - - static void assertRemoteVersion(StreamInput in, Version version) { - assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; - } - - /** - * Internal request handler - * - * @opensearch.internal - */ - private static class RequestHandler extends AbstractRunnable { - private final RequestHandlerRegistry reg; - private final T request; - private final TransportChannel transportChannel; - - RequestHandler(RequestHandlerRegistry reg, T request, TransportChannel transportChannel) { - this.reg = reg; - this.request = request; - this.transportChannel = transportChannel; - } - - @Override - protected void doRun() throws Exception { - reg.processMessageReceived(request, transportChannel); - } - - @Override - public boolean isForceExecution() { - return reg.isForceExecution(); - } - - @Override - public void onFailure(Exception e) { - sendErrorResponse(reg.getAction(), transportChannel, e); + private void messageReceivedFromPipeline(TcpChannel channel, ProtocolInboundMessage message, long startTime) throws IOException { + ProtocolMessageHandler protocolMessageHandler = protocolMessageHandlers.get(message.getProtocol()); + if (protocolMessageHandler == null) { + throw new IllegalStateException("No protocol message handler found for protocol: " + message.getProtocol()); } + protocolMessageHandler.messageReceived(channel, message, startTime, slowLogThresholdMs, messageListener); } } diff --git a/server/src/main/java/org/opensearch/transport/InboundMessage.java b/server/src/main/java/org/opensearch/transport/InboundMessage.java index 71c4d6973505d..5c68257557061 100644 --- a/server/src/main/java/org/opensearch/transport/InboundMessage.java +++ b/server/src/main/java/org/opensearch/transport/InboundMessage.java @@ -32,105 +32,77 @@ package org.opensearch.transport; -import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.annotation.DeprecatedApi; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; -import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.transport.nativeprotocol.NativeInboundMessage; import java.io.IOException; /** * Inbound data as a message - * + * This api is deprecated, please use {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage} instead. 
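+ * All operations delegate to an internal {@link org.opensearch.transport.nativeprotocol.NativeInboundMessage}, so existing callers keep working while new code can target the protocol-agnostic {@link ProtocolInboundMessage} API.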
* @opensearch.api */ -@PublicApi(since = "1.0.0") -public class InboundMessage implements Releasable { +@DeprecatedApi(since = "2.14.0") +public class InboundMessage implements Releasable, ProtocolInboundMessage { - private final Header header; - private final ReleasableBytesReference content; - private final Exception exception; - private final boolean isPing; - private Releasable breakerRelease; - private StreamInput streamInput; + private final NativeInboundMessage nativeInboundMessage; public InboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { - this.header = header; - this.content = content; - this.breakerRelease = breakerRelease; - this.exception = null; - this.isPing = false; + this.nativeInboundMessage = new NativeInboundMessage(header, content, breakerRelease); } public InboundMessage(Header header, Exception exception) { - this.header = header; - this.content = null; - this.breakerRelease = null; - this.exception = exception; - this.isPing = false; + this.nativeInboundMessage = new NativeInboundMessage(header, exception); } public InboundMessage(Header header, boolean isPing) { - this.header = header; - this.content = null; - this.breakerRelease = null; - this.exception = null; - this.isPing = isPing; + this.nativeInboundMessage = new NativeInboundMessage(header, isPing); } public Header getHeader() { - return header; + return this.nativeInboundMessage.getHeader(); } public int getContentLength() { - if (content == null) { - return 0; - } else { - return content.length(); - } + return this.nativeInboundMessage.getContentLength(); } public Exception getException() { - return exception; + return this.nativeInboundMessage.getException(); } public boolean isPing() { - return isPing; + return this.nativeInboundMessage.isPing(); } public boolean isShortCircuit() { - return exception != null; + return this.nativeInboundMessage.getException() != null; } public Releasable takeBreakerReleaseControl() { - final Releasable toReturn = breakerRelease; - breakerRelease = null; - if (toReturn != null) { - return toReturn; - } else { - return () -> {}; - } + return this.nativeInboundMessage.takeBreakerReleaseControl(); } public StreamInput openOrGetStreamInput() throws IOException { - assert isPing == false && content != null; - if (streamInput == null) { - streamInput = content.streamInput(); - streamInput.setVersion(header.getVersion()); - } - return streamInput; + return this.nativeInboundMessage.openOrGetStreamInput(); } @Override public void close() { - IOUtils.closeWhileHandlingException(streamInput); - Releasables.closeWhileHandlingException(content, breakerRelease); + this.nativeInboundMessage.close(); } @Override public String toString() { - return "InboundMessage{" + header + "}"; + return this.nativeInboundMessage.toString(); } + + @Override + public String getProtocol() { + return this.nativeInboundMessage.getProtocol(); + } + } diff --git a/server/src/main/java/org/opensearch/transport/InboundPipeline.java b/server/src/main/java/org/opensearch/transport/InboundPipeline.java index dd4690e5e6abf..5cee3bb975223 100644 --- a/server/src/main/java/org/opensearch/transport/InboundPipeline.java +++ b/server/src/main/java/org/opensearch/transport/InboundPipeline.java @@ -38,11 +38,11 @@ import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.breaker.CircuitBreaker; -import org.opensearch.core.common.bytes.CompositeBytesReference; +import 
org.opensearch.transport.nativeprotocol.NativeInboundBytesHandler; import java.io.IOException; import java.util.ArrayDeque; -import java.util.ArrayList; +import java.util.List; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongSupplier; @@ -55,17 +55,16 @@ */ public class InboundPipeline implements Releasable { - private static final ThreadLocal> fragmentList = ThreadLocal.withInitial(ArrayList::new); - private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); - private final LongSupplier relativeTimeInMillis; private final StatsTracker statsTracker; private final InboundDecoder decoder; private final InboundAggregator aggregator; - private final BiConsumer messageHandler; private Exception uncaughtException; private final ArrayDeque pending = new ArrayDeque<>(2); private boolean isClosed = false; + private final BiConsumer messageHandler; + private final List protocolBytesHandlers; + private InboundBytesHandler currentHandler; public InboundPipeline( Version version, @@ -74,7 +73,7 @@ public InboundPipeline( LongSupplier relativeTimeInMillis, Supplier circuitBreaker, Function> registryFunction, - BiConsumer messageHandler + BiConsumer messageHandler ) { this( statsTracker, @@ -90,18 +89,23 @@ public InboundPipeline( LongSupplier relativeTimeInMillis, InboundDecoder decoder, InboundAggregator aggregator, - BiConsumer messageHandler + BiConsumer messageHandler ) { this.relativeTimeInMillis = relativeTimeInMillis; this.statsTracker = statsTracker; this.decoder = decoder; this.aggregator = aggregator; + this.protocolBytesHandlers = List.of(new NativeInboundBytesHandler(pending, decoder, aggregator, statsTracker)); this.messageHandler = messageHandler; } @Override public void close() { isClosed = true; + if (currentHandler != null) { + currentHandler.close(); + currentHandler = null; + } Releasables.closeWhileHandlingException(decoder, aggregator); Releasables.closeWhileHandlingException(pending); pending.clear(); @@ -124,95 +128,21 @@ public void doHandleBytes(TcpChannel channel, ReleasableBytesReference reference statsTracker.markBytesRead(reference.length()); pending.add(reference.retain()); - final ArrayList fragments = fragmentList.get(); - boolean continueHandling = true; - - while (continueHandling && isClosed == false) { - boolean continueDecoding = true; - while (continueDecoding && pending.isEmpty() == false) { - try (ReleasableBytesReference toDecode = getPendingBytes()) { - final int bytesDecoded = decoder.decode(toDecode, fragments::add); - if (bytesDecoded != 0) { - releasePendingBytes(bytesDecoded); - if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { - continueDecoding = false; - } - } else { - continueDecoding = false; - } - } - } - - if (fragments.isEmpty()) { - continueHandling = false; - } else { - try { - forwardFragments(channel, fragments); - } finally { - for (Object fragment : fragments) { - if (fragment instanceof ReleasableBytesReference) { - ((ReleasableBytesReference) fragment).close(); - } - } - fragments.clear(); - } - } - } - } - - private void forwardFragments(TcpChannel channel, ArrayList fragments) throws IOException { - for (Object fragment : fragments) { - if (fragment instanceof Header) { - assert aggregator.isAggregating() == false; - aggregator.headerReceived((Header) fragment); - } else if (fragment == InboundDecoder.PING) { - assert aggregator.isAggregating() == false; - messageHandler.accept(channel, PING_MESSAGE); - } else if (fragment 
== InboundDecoder.END_CONTENT) { - assert aggregator.isAggregating(); - try (InboundMessage aggregated = aggregator.finishAggregation()) { - statsTracker.markMessageReceived(); - messageHandler.accept(channel, aggregated); + // If we don't have a current handler, we should try to find one based on the protocol of the incoming bytes. + if (currentHandler == null) { + for (InboundBytesHandler handler : protocolBytesHandlers) { + if (handler.canHandleBytes(reference)) { + currentHandler = handler; + break; } - } else { - assert aggregator.isAggregating(); - assert fragment instanceof ReleasableBytesReference; - aggregator.aggregate((ReleasableBytesReference) fragment); } } - } - private boolean endOfMessage(Object fragment) { - return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; - } - - private ReleasableBytesReference getPendingBytes() { - if (pending.size() == 1) { - return pending.peekFirst().retain(); + // If we have a current handler determined based on protocol, we should continue to use it for the fragmented bytes. + if (currentHandler != null) { + currentHandler.doHandleBytes(channel, reference, messageHandler); } else { - final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; - int index = 0; - for (ReleasableBytesReference pendingReference : pending) { - bytesReferences[index] = pendingReference.retain(); - ++index; - } - final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); - return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); - } - } - - private void releasePendingBytes(int bytesConsumed) { - int bytesToRelease = bytesConsumed; - while (bytesToRelease != 0) { - try (ReleasableBytesReference reference = pending.pollFirst()) { - assert reference != null; - if (bytesToRelease < reference.length()) { - pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); - bytesToRelease -= bytesToRelease; - } else { - bytesToRelease -= reference.length(); - } - } + throw new IllegalStateException("No bytes handler found for the incoming transport protocol"); } } } diff --git a/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java new file mode 100644 index 0000000000000..861b95a8098f2 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/NativeMessageHandler.java @@ -0,0 +1,494 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.common.io.stream.ByteBufferStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.TransportAddress; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.telemetry.tracing.Span; +import org.opensearch.telemetry.tracing.SpanBuilder; +import org.opensearch.telemetry.tracing.SpanScope; +import org.opensearch.telemetry.tracing.Tracer; +import org.opensearch.telemetry.tracing.channels.TraceableTcpTransportChannel; +import org.opensearch.threadpool.ThreadPool; + +import java.io.EOFException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Native handler for inbound data + * + * @opensearch.internal + */ +public class NativeMessageHandler implements ProtocolMessageHandler { + + private static final Logger logger = LogManager.getLogger(NativeMessageHandler.class); + + private final ThreadPool threadPool; + private final OutboundHandler outboundHandler; + private final NamedWriteableRegistry namedWriteableRegistry; + private final TransportHandshaker handshaker; + private final TransportKeepAlive keepAlive; + private final Transport.ResponseHandlers responseHandlers; + private final Transport.RequestHandlers requestHandlers; + + private final Tracer tracer; + + NativeMessageHandler( + ThreadPool threadPool, + OutboundHandler outboundHandler, + NamedWriteableRegistry namedWriteableRegistry, + TransportHandshaker handshaker, + Transport.RequestHandlers requestHandlers, + Transport.ResponseHandlers responseHandlers, + Tracer tracer, + TransportKeepAlive keepAlive + ) { + this.threadPool = threadPool; + this.outboundHandler = outboundHandler; + this.namedWriteableRegistry = namedWriteableRegistry; + this.handshaker = handshaker; + this.requestHandlers = requestHandlers; + this.responseHandlers = responseHandlers; + this.tracer = tracer; + this.keepAlive = keepAlive; + } + + // Empty stream constant to avoid instantiating a new stream for empty messages. 
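+ // Zero-length responses from nodes on the current wire version are handed this shared instance instead of opening a stream over empty content.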
+ private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); + + @Override + public void messageReceived( + TcpChannel channel, + ProtocolInboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException { + InboundMessage inboundMessage = (InboundMessage) message; + TransportLogger.logInboundMessage(channel, inboundMessage); + if (inboundMessage.isPing()) { + keepAlive.receiveKeepAlive(channel); + } else { + handleMessage(channel, inboundMessage, startTime, slowLogThresholdMs, messageListener); + } + } + + private void handleMessage( + TcpChannel channel, + InboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException { + final InetSocketAddress remoteAddress = channel.getRemoteAddress(); + final Header header = message.getHeader(); + assert header.needsToReadVariableHeader() == false; + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + // Place the context with the headers from the message + threadContext.setHeaders(header.getHeaders()); + threadContext.putTransient("_remote_address", remoteAddress); + if (header.isRequest()) { + handleRequest(channel, header, message, messageListener); + } else { + // Responses do not support short circuiting currently + assert message.isShortCircuit() == false; + final TransportResponseHandler<? extends TransportResponse> handler; + long requestId = header.getRequestId(); + if (header.isHandshake()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + TransportResponseHandler<? extends TransportResponse> theHandler = responseHandlers.onResponseReceived( + requestId, + messageListener + ); + if (theHandler == null && header.isError()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + handler = theHandler; + } + } + // ignore if it's null; the service logs it + if (handler != null) { + final StreamInput streamInput; + if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { + streamInput = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(streamInput, header.getVersion()); + if (header.isError()) { + handlerResponseError(requestId, streamInput, handler); + } else { + handleResponse(requestId, remoteAddress, streamInput, handler); + } + } else { + assert header.isError() == false; + handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); + } + } + + } + } finally { + final long took = threadPool.relativeTimeInMillis() - startTime; + final long logThreshold = slowLogThresholdMs; + if (logThreshold > 0 && took > logThreshold) { + logger.warn( + "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", + message, + took, + logThreshold + ); + } + } + } + + private Map<String, Collection<String>> extractHeaders(Map<String, String> headers) { + return headers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> Collections.singleton(e.getValue()))); + } + + private <T extends TransportRequest> void handleRequest( + TcpChannel channel, + Header header, + InboundMessage message, + TransportMessageListener messageListener + ) throws IOException { + final String action = header.getActionName(); + final long requestId = header.getRequestId(); + final Version version = header.getVersion(); + final Map<String, Collection<String>> headers = extractHeaders(header.getHeaders().v1()); + Span span = tracer.startSpan(SpanBuilder.from(action, channel), headers);
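+ // Both the handshake and regular request paths below run inside this span's scope; the span is passed to TraceableTcpTransportChannel so it can be completed when the response is sent back on the channel.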
+ try (SpanScope spanScope = tracer.withSpanInScope(span)) { + if (header.isHandshake()) { + messageListener.onRequestReceived(requestId, action); + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + handshaker.handleHandshake(traceableTransportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, traceableTransportChannel, e); + } else { + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); + } + } + } else { + final TcpTransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + channel, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + TransportChannel traceableTransportChannel = TraceableTcpTransportChannel.create(transportChannel, span, tracer); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + sendErrorResponse(action, traceableTransportChannel, message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, traceableTransportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), traceableTransportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, traceableTransportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, traceableTransportChannel, e); + } + } + } catch (Exception e) { + span.setError(e); + span.endSpan(); + throw e; + } + } + + /** + * Creates new request instance out of input stream. Throws IllegalStateException if the end of + * the stream was reached before the request is fully deserialized from the stream. 
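+ * Surfacing the truncation as an IllegalStateException gives callers a protocol-level error instead of an opaque EOFException.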
+ * @param <T> transport request type + * @param requestId request identifier + * @param action action name + * @param stream stream + * @param reg request handler registry + * @return new request instance + * @throws IOException IOException + * @throws IllegalStateException IllegalStateException + */ + private <T extends TransportRequest> T newRequest( + final long requestId, + final String action, + final StreamInput stream, + final RequestHandlerRegistry<T> reg + ) throws IOException { + try { + return reg.newRequest(stream); + } catch (final EOFException e) { + // Another flavor of (de)serialization issue is when the stream contains fewer bytes than + // the request handler needs to deserialize the payload. + throw new IllegalStateException( + "Message fully read (request) but more data is expected for requestId [" + + requestId + + "], action [" + + action + + "]; resetting", + e + ); + } + } + + /** + * Checks if the stream is fully consumed and throws an exception if that is not the case. + * @param requestId request identifier + * @param action action name + * @param stream stream + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + final int nextByte = stream.read(); + + // calling read() is useful to make sure the message is fully read, even if there is some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (request) for requestId [" + + requestId + + "], action [" + + action + + "], available [" + + stream.available() + + "]; resetting" + ); + } + } + + /** + * Checks if the stream is fully consumed and throws an exception if that is not the case.
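+ * Unlike the request-side variant above, this check is skipped for the shared EMPTY_STREAM_INPUT instance.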
+ * @param requestId request identifier + * @param handler response handler + * @param stream stream + * @param error "true" if response represents error, "false" otherwise + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed( + final long requestId, + final TransportResponseHandler handler, + final StreamInput stream, + final boolean error + ) throws IOException { + if (stream != EMPTY_STREAM_INPUT) { + // Check the entire message has been read + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (response) for requestId [" + + requestId + + "], handler [" + + handler + + "], error [" + + error + + "]; resetting" + ); + } + } + } + + private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { + try { + transportChannel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); + } + } + + private void handleResponse( + final long requestId, + InetSocketAddress remoteAddress, + final StreamInput stream, + final TransportResponseHandler handler + ) { + final T response; + try { + response = handler.read(stream); + response.remoteAddress(new TransportAddress(remoteAddress)); + checkStreamIsFullyConsumed(requestId, handler, stream, false); + } catch (Exception e) { + final Exception serializationException = new TransportSerializationException( + "Failed to deserialize response from handler [" + handler + "]", + e + ); + logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); + handleException(handler, serializationException); + return; + } + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + doHandleResponse(handler, response); + } else { + threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); + } + } + + private void doHandleResponse(TransportResponseHandler handler, T response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException(e)); + } + } + + private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { + Exception error; + try { + error = stream.readException(); + checkStreamIsFullyConsumed(requestId, handler, stream, true); + } catch (Exception e) { + error = new TransportSerializationException( + "Failed to deserialize exception response from stream for handler [" + handler + "]", + e + ); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + } + }); + } + + private StreamInput namedWriteableStream(StreamInput delegate) { + return new NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); + } + + static void 
assertRemoteVersion(StreamInput in, Version version) { + assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; + } + + /** + * Internal request handler + * + * @opensearch.internal + */ + private static class RequestHandler<T extends TransportRequest> extends AbstractRunnable { + private final RequestHandlerRegistry<T> reg; + private final T request; + private final TransportChannel transportChannel; + + RequestHandler(RequestHandlerRegistry<T> reg, T request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } + } + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java b/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java new file mode 100644 index 0000000000000..43c2d5ffe4c96 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtocolInboundMessage.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.annotation.PublicApi; + +/** + * Interface for inbound data as a message. + * Different implementations are used for different protocols. + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public interface ProtocolInboundMessage { + + /** + * @return the protocol used to encode this message + */ + public String getProtocol(); + +} diff --git a/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java b/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java new file mode 100644 index 0000000000000..714d91d1e74c7 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/ProtocolMessageHandler.java @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import java.io.IOException; + +/** + * Interface for message handlers based on transport protocol. + * + * @opensearch.internal + */ +public interface ProtocolMessageHandler { + + public void messageReceived( + TcpChannel channel, + ProtocolInboundMessage message, + long startTime, + long slowLogThresholdMs, + TransportMessageListener messageListener + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 7d45152089f37..e32bba5e836d3 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -762,12 +762,24 @@ protected void serverAcceptedChannel(TcpChannel channel) { protected abstract void stopInternal(); /** + * @deprecated use {@link #inboundMessage(TcpChannel, ProtocolInboundMessage)} * Handles inbound message that has been decoded.
* * @param channel the channel the message is from * @param message the message */ + @Deprecated(since = "2.14.0", forRemoval = true) public void inboundMessage(TcpChannel channel, InboundMessage message) { + inboundMessage(channel, (ProtocolInboundMessage) message); + } + + /** + * Handles inbound message that has been decoded. + * + * @param channel the channel the message is from + * @param message the message + */ + public void inboundMessage(TcpChannel channel, ProtocolInboundMessage message) { try { inboundHandler.inboundMessage(channel, message); } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java new file mode 100644 index 0000000000000..a8a4c0da7ec0f --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundBytesHandler.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport.nativeprotocol; + +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.core.common.bytes.CompositeBytesReference; +import org.opensearch.transport.Header; +import org.opensearch.transport.InboundAggregator; +import org.opensearch.transport.InboundBytesHandler; +import org.opensearch.transport.InboundDecoder; +import org.opensearch.transport.InboundMessage; +import org.opensearch.transport.ProtocolInboundMessage; +import org.opensearch.transport.StatsTracker; +import org.opensearch.transport.TcpChannel; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.function.BiConsumer; + +/** + * Handler for inbound bytes for the native protocol. 
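+ * Decodes pending byte references into header, ping, and content fragments, feeds them to the aggregator, and forwards each completed message to the supplied handler.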
+ */ +public class NativeInboundBytesHandler implements InboundBytesHandler { + + private static final ThreadLocal> fragmentList = ThreadLocal.withInitial(ArrayList::new); + private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); + + private final ArrayDeque pending; + private final InboundDecoder decoder; + private final InboundAggregator aggregator; + private final StatsTracker statsTracker; + private boolean isClosed = false; + + public NativeInboundBytesHandler( + ArrayDeque pending, + InboundDecoder decoder, + InboundAggregator aggregator, + StatsTracker statsTracker + ) { + this.pending = pending; + this.decoder = decoder; + this.aggregator = aggregator; + this.statsTracker = statsTracker; + } + + @Override + public void close() { + isClosed = true; + } + + @Override + public boolean canHandleBytes(ReleasableBytesReference reference) { + return true; + } + + @Override + public void doHandleBytes( + TcpChannel channel, + ReleasableBytesReference reference, + BiConsumer messageHandler + ) throws IOException { + final ArrayList fragments = fragmentList.get(); + boolean continueHandling = true; + + while (continueHandling && isClosed == false) { + boolean continueDecoding = true; + while (continueDecoding && pending.isEmpty() == false) { + try (ReleasableBytesReference toDecode = getPendingBytes()) { + final int bytesDecoded = decoder.decode(toDecode, fragments::add); + if (bytesDecoded != 0) { + releasePendingBytes(bytesDecoded); + if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { + continueDecoding = false; + } + } else { + continueDecoding = false; + } + } + } + + if (fragments.isEmpty()) { + continueHandling = false; + } else { + try { + forwardFragments(channel, fragments, messageHandler); + } finally { + for (Object fragment : fragments) { + if (fragment instanceof ReleasableBytesReference) { + ((ReleasableBytesReference) fragment).close(); + } + } + fragments.clear(); + } + } + } + } + + private ReleasableBytesReference getPendingBytes() { + if (pending.size() == 1) { + return pending.peekFirst().retain(); + } else { + final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; + int index = 0; + for (ReleasableBytesReference pendingReference : pending) { + bytesReferences[index] = pendingReference.retain(); + ++index; + } + final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); + return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); + } + } + + private void releasePendingBytes(int bytesConsumed) { + int bytesToRelease = bytesConsumed; + while (bytesToRelease != 0) { + try (ReleasableBytesReference reference = pending.pollFirst()) { + assert reference != null; + if (bytesToRelease < reference.length()) { + pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); + bytesToRelease -= bytesToRelease; + } else { + bytesToRelease -= reference.length(); + } + } + } + } + + private boolean endOfMessage(Object fragment) { + return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; + } + + private void forwardFragments( + TcpChannel channel, + ArrayList fragments, + BiConsumer messageHandler + ) throws IOException { + for (Object fragment : fragments) { + if (fragment instanceof Header) { + assert aggregator.isAggregating() == false; + aggregator.headerReceived((Header) fragment); + } else if (fragment == 
InboundDecoder.PING) { + assert aggregator.isAggregating() == false; + messageHandler.accept(channel, PING_MESSAGE); + } else if (fragment == InboundDecoder.END_CONTENT) { + assert aggregator.isAggregating(); + try (InboundMessage aggregated = aggregator.finishAggregation()) { + statsTracker.markMessageReceived(); + messageHandler.accept(channel, aggregated); + } + } else { + assert aggregator.isAggregating(); + assert fragment instanceof ReleasableBytesReference; + aggregator.aggregate((ReleasableBytesReference) fragment); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java new file mode 100644 index 0000000000000..1143f129b6319 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/NativeInboundMessage.java @@ -0,0 +1,149 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.transport.nativeprotocol; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.transport.Header; +import org.opensearch.transport.ProtocolInboundMessage; + +import java.io.IOException; + +/** + * Inbound data as a message + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class NativeInboundMessage implements Releasable, ProtocolInboundMessage { + + /** + * The protocol used to encode this message + */ + public static String NATIVE_PROTOCOL = "native"; + + private final Header header; + private final ReleasableBytesReference content; + private final Exception exception; + private final boolean isPing; + private Releasable breakerRelease; + private StreamInput streamInput; + + public NativeInboundMessage(Header header, ReleasableBytesReference content, Releasable breakerRelease) { + this.header = header; + this.content = content; + this.breakerRelease = breakerRelease; + this.exception = null; + this.isPing = false; + } + + public NativeInboundMessage(Header header, Exception exception) { + this.header = header; + this.content = null; + this.breakerRelease = null; + this.exception = exception; + this.isPing = false; + } + + public NativeInboundMessage(Header header, boolean isPing) { + this.header = header; + this.content = null; + this.breakerRelease = null; + this.exception = null; + this.isPing = isPing; + } + + @Override + public String getProtocol() { + return NATIVE_PROTOCOL; + } + + public Header getHeader() { + return header; + } + + public int getContentLength() { + if (content == null) { + return 0; + } else { + return content.length(); + } + } + + public Exception getException() { + return exception; + } + + public boolean isPing() { + return isPing; + } + + public boolean isShortCircuit() { + return exception != null; + } + + public Releasable takeBreakerReleaseControl() { + final Releasable toReturn = breakerRelease; + breakerRelease = null; + if (toReturn != null) { + return toReturn; + } else { + return () -> {}; + } + } + + public StreamInput openOrGetStreamInput() throws IOException { + assert isPing == false && content != null; + if (streamInput == null) { + streamInput = content.streamInput(); + streamInput.setVersion(header.getVersion()); + } + return streamInput; + } + + @Override + public void close() { + IOUtils.closeWhileHandlingException(streamInput); + Releasables.closeWhileHandlingException(content, breakerRelease); + } + + @Override + public String toString() { + return "InboundMessage{" + header + "}"; + } + +} diff --git a/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java b/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java new file mode 100644 index 0000000000000..84f6d7d0ec5c2 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/nativeprotocol/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Native transport protocol package. 
*/ +package org.opensearch.transport.nativeprotocol; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java index 848df5f8e4979..5bab2ceca0988 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -51,10 +51,13 @@ import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; import org.opensearch.index.store.StoreStats; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -65,7 +68,12 @@ import java.util.Set; import static java.util.Collections.emptyMap; -import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.hamcrest.CoreMatchers.equalTo; public class TransportResizeActionTests extends OpenSearchTestCase { @@ -95,6 +103,19 @@ private ClusterState createClusterState(String name, int numShards, int numRepli return clusterState; } + private ClusterSettings createClusterSettings( + CompatibilityMode compatibilityMode, + RemoteStoreNodeService.Direction migrationDirection + ) { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + clusterSettings.applySettings( + (Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), compatibilityMode) + .put(MIGRATION_DIRECTION_SETTING.getKey(), migrationDirection)).build() + ); + return clusterSettings; + } + public void testErrorCondition() { ClusterState state = createClusterState( "source", @@ -102,6 +123,7 @@ public void testErrorCondition() { randomIntBetween(0, 10), Settings.builder().put("index.blocks.write", true).build() ); + ClusterSettings clusterSettings = createClusterSettings(CompatibilityMode.STRICT, RemoteStoreNodeService.Direction.NONE); assertTrue( expectThrows( IllegalStateException.class, @@ -110,6 +132,7 @@ public void testErrorCondition() { state, (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ) @@ -125,6 +148,7 @@ public void testErrorCondition() { clusterState, (i) -> i == 2 || i == 3 ? 
new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ); @@ -144,6 +168,7 @@ public void testErrorCondition() { clusterState, (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ); @@ -173,6 +198,7 @@ public void testErrorCondition() { clusterState, (i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ); @@ -189,7 +215,7 @@ public void testPassNumRoutingShards() { EmptyClusterInfoService.INSTANCE, EmptySnapshotsInfoService.INSTANCE ); - + ClusterSettings clusterSettings = createClusterSettings(CompatibilityMode.STRICT, RemoteStoreNodeService.Direction.NONE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard @@ -204,6 +230,7 @@ public void testPassNumRoutingShards() { clusterState, null, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ); @@ -217,6 +244,7 @@ public void testPassNumRoutingShards() { clusterState, null, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ); @@ -235,6 +263,7 @@ public void testPassNumRoutingShardsAndFail() { EmptySnapshotsInfoService.INSTANCE ); + ClusterSettings clusterSettings = createClusterSettings(CompatibilityMode.STRICT, RemoteStoreNodeService.Direction.NONE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard @@ -249,6 +278,7 @@ public void testPassNumRoutingShardsAndFail() { clusterState, null, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ); @@ -265,6 +295,7 @@ public void testPassNumRoutingShardsAndFail() { finalState, null, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, "source", "target" ) @@ -286,6 +317,7 @@ public void testShrinkIndexSettings() { EmptySnapshotsInfoService.INSTANCE ); + ClusterSettings clusterSettings = createClusterSettings(CompatibilityMode.STRICT, RemoteStoreNodeService.Direction.NONE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard @@ -301,6 +333,7 @@ public void testShrinkIndexSettings() { clusterState, (i) -> stats, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, indexName, "target" ); @@ -325,6 +358,8 @@ public void testShrinkWithMaxShardSize() { EmptyClusterInfoService.INSTANCE, EmptySnapshotsInfoService.INSTANCE ); + + ClusterSettings clusterSettings = createClusterSettings(CompatibilityMode.STRICT, RemoteStoreNodeService.Direction.NONE); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); // now we start the shard @@ -345,6 +380,7 @@ public void testShrinkWithMaxShardSize() { clusterState, (i) -> stats, new StoreStats(100, between(1, 10000)), + clusterSettings, indexName, "target" ); @@ -366,6 +402,7 @@ public void testShrinkWithMaxShardSize() { 
clusterState, (i) -> stats, new StoreStats(100, between(1, 10000)), + clusterSettings, indexName, "target" ); @@ -387,6 +424,7 @@ public void testShrinkWithMaxShardSize() { clusterState, (i) -> stats, new StoreStats(100, between(1, 10000)), + clusterSettings, indexName, "target" ); @@ -477,6 +515,7 @@ public void testIndexBlocks() { createClusterState(indexName, 10, 0, 40, Settings.builder().put("index.blocks.read_only", true).build()) ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + ClusterSettings clusterSettings = createClusterSettings(CompatibilityMode.STRICT, RemoteStoreNodeService.Direction.NONE); // Target index will be blocked by [index.blocks.read_only=true] copied from the source index ResizeRequest resizeRequest = new ResizeRequest("target", indexName); ResizeType resizeType; @@ -500,6 +539,7 @@ public void testIndexBlocks() { finalState, null, new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, indexName, "target" ) @@ -551,6 +591,7 @@ public void testIndexBlocks() { clusterState, (i) -> stats, new StoreStats(100, between(1, 10000)), + clusterSettings, indexName, "target" ); @@ -561,6 +602,127 @@ public void testIndexBlocks() { assertEquals(request.waitForActiveShards(), activeShardCount); } + public void testResizeFailuresDuringMigration() { + // We keep all other resize request settings valid, + // so we only test for failures caused by cluster setting validation during migration. + final Settings directionEnabledNodeSettings = Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build(); + FeatureFlags.initializeFeatureFlags(directionEnabledNodeSettings); + boolean isRemoteStoreEnabled = randomBoolean(); + CompatibilityMode compatibilityMode = randomFrom(CompatibilityMode.values()); + RemoteStoreNodeService.Direction migrationDirection = randomFrom(RemoteStoreNodeService.Direction.values()); + // If not mixed mode, then migration direction is NONE.
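+ // (A direction other than NONE is only meaningful in mixed mode, so the randomized pick is constrained accordingly.)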
+ if (!compatibilityMode.equals(CompatibilityMode.MIXED)) { + migrationDirection = RemoteStoreNodeService.Direction.NONE; + } + ClusterSettings clusterSettings = createClusterSettings(compatibilityMode, migrationDirection); + + ClusterState clusterState = ClusterState.builder( + createClusterState( + "source", + 10, + 0, + 40, + Settings.builder().put("index.blocks.write", true).put(SETTING_REMOTE_STORE_ENABLED, isRemoteStoreEnabled).build() + ) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + AllocationService service = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + + RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + // now we start the shard + routingTable = OpenSearchAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / 10), between(1, 1000), between(1, 10000)); + ResizeRequest resizeRequest = new ResizeRequest("target", "source"); + ResizeType resizeType; + int expectedShardsNum; + String cause; + switch (randomIntBetween(0, 2)) { + case 0: + resizeType = ResizeType.SHRINK; + expectedShardsNum = 5; + cause = "shrink_index"; + break; + case 1: + resizeType = ResizeType.SPLIT; + expectedShardsNum = 20; + cause = "split_index"; + break; + default: + resizeType = ResizeType.CLONE; + expectedShardsNum = 10; + cause = "clone_index"; + } + resizeRequest.setResizeType(resizeType); + resizeRequest.getTargetIndexRequest() + .settings(Settings.builder().put("index.number_of_shards", expectedShardsNum).put("index.blocks.read_only", false).build()); + final ActiveShardCount activeShardCount = randomBoolean() ? ActiveShardCount.ALL : ActiveShardCount.ONE; + resizeRequest.setWaitForActiveShards(activeShardCount); + + if (compatibilityMode == CompatibilityMode.MIXED) { + if ((migrationDirection == RemoteStoreNodeService.Direction.REMOTE_STORE && isRemoteStoreEnabled == false) + || migrationDirection == RemoteStoreNodeService.Direction.DOCREP && isRemoteStoreEnabled == true) { + ClusterState finalState = clusterState; + IllegalStateException ise = expectThrows( + IllegalStateException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + (i) -> stats, + new StoreStats(between(1, 10000), between(1, 10000)), + clusterSettings, + "source", + "target" + ) + ); + assertEquals( + ise.getMessage(), + "Index " + + resizeType + + " is not allowed as remote migration mode is mixed" + + " and index is remote store " + + (isRemoteStoreEnabled ? 
"enabled" : "disabled") + ); + } else { + CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + clusterSettings, + "source", + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals("source", request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals(cause, request.cause()); + assertEquals(request.waitForActiveShards(), activeShardCount); + } + } else { + CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + clusterSettings, + "source", + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals("source", request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals(cause, request.cause()); + assertEquals(request.waitForActiveShards(), activeShardCount); + } + } + private DiscoveryNode newNode(String nodeId) { final Set roles = Collections.unmodifiableSet( new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)) diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java index 4f07c098b0869..9625475607911 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java @@ -112,6 +112,13 @@ public void testUpdateRequest() throws IOException { parser.parse(request, "foo", null, null, null, true, false, MediaTypeRegistry.JSON, req -> fail(), updateRequest -> { assertFalse(updateRequest.isRequireAlias()); }, req -> fail()); + + request = new BytesArray( + "{ \"update\":{ \"_id\": \"bar\", \"require_alias\": false, \"pipeline\": \"testPipeline\" } }\n{\"upsert\": {\"x\": 1}}\n" + ); + parser.parse(request, "foo", null, null, null, true, false, MediaTypeRegistry.JSON, req -> fail(), updateRequest -> { + assertEquals(updateRequest.upsertRequest().getPipeline(), "testPipeline"); + }, req -> fail()); } public void testBarfOnLackOfTrailingNewline() { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index a2f19b8c694d0..fa71b77648d35 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -1593,12 +1593,9 @@ public void testRemoteCustomData() { // Case 2 - cluster.remote_store.index.path.prefix.optimised=fixed (default value) indexMetadata = testRemoteCustomData(true, PathType.FIXED); - validateRemoteCustomData(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathType.NAME, PathType.FIXED.name()); - validateRemoteCustomData( - indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), - PathHashAlgorithm.NAME, - PathHashAlgorithm.FNV_1A.name() - ); + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + validateRemoteCustomData(remoteCustomData, PathType.NAME, PathType.FIXED.name()); + 
assertNull(remoteCustomData.get(PathHashAlgorithm.NAME)); // Case 3 - cluster.remote_store.index.path.prefix.optimised=hashed_prefix indexMetadata = testRemoteCustomData(true, PathType.HASHED_PREFIX); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java index a8282faaddced..e8273d294f24f 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; @@ -50,6 +51,8 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -61,6 +64,9 @@ import static org.opensearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED; +import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; public class FilterAllocationDeciderTests extends OpenSearchAllocationTestCase { @@ -406,4 +412,57 @@ public void testWildcardIPFilter() { "test ip validation" ); } + + public void testMixedModeRemoteStoreAllocation() { + // In a mixed-mode cluster migrating towards remote store, the filter allocation decider + // is not considered when creating replicas of the cluster's existing indices + FeatureFlags.initializeFeatureFlags(Settings.builder().put(REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build()); + ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings initialSettings = Settings.builder() + .put("cluster.routing.allocation.exclude._id", "node2") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), RemoteStoreNodeService.CompatibilityMode.MIXED) + .put(MIGRATION_DIRECTION_SETTING.getKey(), RemoteStoreNodeService.Direction.REMOTE_STORE) + .build(); + + FilterAllocationDecider filterAllocationDecider = new FilterAllocationDecider(initialSettings, clusterSettings); + AllocationDeciders allocationDeciders = new AllocationDeciders( + Arrays.asList( + filterAllocationDecider, + new SameShardAllocationDecider(Settings.EMPTY, clusterSettings), + new ReplicaAfterPrimaryActiveAllocationDecider() + ) + ); + AllocationService service = new AllocationService( + allocationDeciders, + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), +
EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + ClusterState state = createInitialClusterState(service, Settings.EMPTY, Settings.EMPTY); + RoutingTable routingTable = state.routingTable(); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, state.getRoutingNodes(), state, null, null, 0); + allocation.debugDecision(true); + ShardRouting sr = ShardRouting.newUnassigned( + routingTable.index("sourceIndex").shard(0).shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "") + ); + Decision.Single decision = (Decision.Single) filterAllocationDecider.canAllocate( + sr, + state.getRoutingNodes().node("node2"), + allocation + ); + assertEquals(decision.toString(), Type.YES, decision.type()); + + sr = ShardRouting.newUnassigned( + routingTable.index("sourceIndex").shard(0).shardId(), + false, + RecoverySource.PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "") + ); + decision = (Decision.Single) filterAllocationDecider.canAllocate(sr, state.getRoutingNodes().node("node2"), allocation); + assertEquals(decision.toString(), Type.NO, decision.type()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java index 86adfc1279d9a..190ad3283dcfc 100644 --- a/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/structure/RoutingIteratorTests.java @@ -700,9 +700,18 @@ public void testWeightedRoutingWithDifferentWeights() { .shard(0) .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); assertEquals(1, shardIterator.size()); - shardRouting = shardIterator.nextOrNull(); - assertNotNull(shardRouting); - assertFalse(Arrays.asList("node2", "node1").contains(shardRouting.currentNodeId())); + assertEquals("node3", shardIterator.nextOrNull().currentNodeId()); + + weights = Map.of("zone1", -1.0, "zone2", 0.0, "zone3", 1.0); + weightedRouting = new WeightedRouting("zone", weights); + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); + assertEquals(3, shardIterator.size()); + assertEquals("node3", shardIterator.nextOrNull().currentNodeId()); + assertNotEquals("node3", shardIterator.nextOrNull().currentNodeId()); + assertNotEquals("node3", shardIterator.nextOrNull().currentNodeId()); weights = Map.of("zone1", 3.0, "zone2", 2.0, "zone3", 0.0); weightedRouting = new WeightedRouting("zone", weights); @@ -711,8 +720,138 @@ public void testWeightedRoutingWithDifferentWeights() { .shard(0) .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); assertEquals(3, shardIterator.size()); - shardRouting = shardIterator.nextOrNull(); - assertNotNull(shardRouting); + assertNotEquals("node3", shardIterator.nextOrNull().currentNodeId()); + assertNotEquals("node3", shardIterator.nextOrNull().currentNodeId()); + assertEquals("node3", shardIterator.nextOrNull().currentNodeId()); + } finally { + terminate(threadPool); + } + } + + public void testWeightedRoutingWithInitializingShards() { + TestThreadPool threadPool = null; + try { + Settings.Builder settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put("cluster.routing.allocation.awareness.attributes", 
"zone"); + AllocationService strategy = createAllocationService(settings.build()); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .build(); + + RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(routingTable) + .build(); + + threadPool = new TestThreadPool("testThatOnlyNodesSupport"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + + Map node1Attributes = new HashMap<>(); + node1Attributes.put("zone", "zone1"); + Map node2Attributes = new HashMap<>(); + node2Attributes.put("zone", "zone2"); + Map node3Attributes = new HashMap<>(); + node3Attributes.put("zone", "zone3"); + + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(newNode("node1", unmodifiableMap(node1Attributes))) + .add(newNode("node2", unmodifiableMap(node2Attributes))) + .add(newNode("node3", unmodifiableMap(node3Attributes))) + .localNodeId("node1") + .build(); + clusterState = ClusterState.builder(clusterState).nodes(nodes).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + + // Making the first shard as active + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + // Making the second shard as active + clusterState = startRandomInitializingShard(clusterState, strategy); + + String[] startedNodes = new String[2]; + String[] startedZones = new String[2]; + String initializingNode = null; + String initializingZone = null; + int i = 0; + for (ShardRouting shard : clusterState.routingTable().allShards()) { + if (shard.initializing()) { + initializingNode = shard.currentNodeId(); + initializingZone = nodes.resolveNode(shard.currentNodeId()).getAttributes().get("zone"); + + } else { + startedNodes[i] = shard.currentNodeId(); + startedZones[i++] = nodes.resolveNode(shard.currentNodeId()).getAttributes().get("zone"); + } + } + + Map weights = Map.of(startedZones[0], 1.0, initializingZone, 1.0, startedZones[1], 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + // With fail open enabled set to false, we expect 2 shard routing, first one started, followed by initializing + ShardIterator shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); + + assertEquals(2, shardIterator.size()); + assertEquals(startedNodes[0], shardIterator.nextOrNull().currentNodeId()); + assertEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + + // With fail open enabled set to true, we expect 3 shard routing, first one started, followed by initializing, third one started + // with zero weight + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); + + assertEquals(3, shardIterator.size()); + assertEquals(startedNodes[0], shardIterator.nextOrNull().currentNodeId()); + assertEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + assertEquals(startedNodes[1], shardIterator.nextOrNull().currentNodeId()); + + weights = Map.of(initializingZone, 1.0, startedZones[0], 0.0, startedZones[1], 0.0); + weightedRouting = new WeightedRouting("zone", weights); + + // only initializing shard has weight 
with fail open false + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); + assertEquals(1, shardIterator.size()); + assertEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + + // only the initializing shard's zone has weight with fail open true + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); + assertEquals(3, shardIterator.size()); + assertEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + assertNotEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + assertNotEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + + weights = Map.of(initializingZone, 0.0, startedZones[0], 1.0, startedZones[1], 0.0); + weightedRouting = new WeightedRouting("zone", weights); + + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, false, null); + assertEquals(1, shardIterator.size()); + assertEquals(startedNodes[0], shardIterator.nextOrNull().currentNodeId()); + + shardIterator = clusterState.routingTable() + .index("test") + .shard(0) + .activeInitializingShardsWeightedIt(weightedRouting, clusterState.nodes(), 1, true, null); + assertEquals(3, shardIterator.size()); + assertEquals(startedNodes[0], shardIterator.nextOrNull().currentNodeId()); + assertEquals(startedNodes[1], shardIterator.nextOrNull().currentNodeId()); + assertEquals(initializingNode, shardIterator.nextOrNull().currentNodeId()); + } finally { terminate(threadPool); } diff --git a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java index 1780819390052..aee4ae40d16a5 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainerTests.java @@ -57,7 +57,7 @@ public void testReadBlobAsync() throws Exception { final ListenerTestUtils.CountingCompletionListener<ReadContext> completionListener = new ListenerTestUtils.CountingCompletionListener<>(); final CompletableFuture<InputStreamContainer> streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); - final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); + final ReadContext readContext = new ReadContext.Builder(size, List.of(() -> streamContainerFuture)).build(); Mockito.doAnswer(invocation -> { ActionListener<ReadContext> readContextActionListener = invocation.getArgument(1); @@ -103,7 +103,7 @@ public void testReadBlobAsyncException() throws Exception { final ListenerTestUtils.CountingCompletionListener<ReadContext> completionListener = new ListenerTestUtils.CountingCompletionListener<>(); final CompletableFuture<InputStreamContainer> streamContainerFuture = CompletableFuture.completedFuture(inputStreamContainer); - final ReadContext readContext = new ReadContext(size, List.of(() -> streamContainerFuture), null); + final ReadContext readContext = new ReadContext.Builder(size, List.of(() -> streamContainerFuture)).build(); Mockito.doAnswer(invocation -> { ActionListener<ReadContext> readContextActionListener = invocation.getArgument(1); diff --git
a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java index 0163c2275e7f4..c47874f3ba294 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -78,7 +78,7 @@ public void testReadContextListener() throws InterruptedException, IOException { UnaryOperator.identity(), MAX_CONCURRENT_STREAMS ); - ReadContext readContext = new ReadContext((long) PART_SIZE * NUMBER_OF_PARTS, blobPartStreams, null); + ReadContext readContext = new ReadContext.Builder((long) PART_SIZE * NUMBER_OF_PARTS, blobPartStreams).build(); readContextListener.onResponse(readContext); countDownLatch.await(); @@ -125,7 +125,7 @@ public int available() { threadPool.generic() ) ); - ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + ReadContext readContext = new ReadContext.Builder((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams).build(); readContextListener.onResponse(readContext); countDownLatch.await(); @@ -178,7 +178,7 @@ public int read(byte[] b) throws IOException { threadPool.generic() ) ); - ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS + 1, blobPartStreams, null); + ReadContext readContext = new ReadContext.Builder((long) (PART_SIZE + 1) * NUMBER_OF_PARTS + 1, blobPartStreams).build(); readContextListener.onResponse(readContext); countDownLatch.await(); @@ -203,7 +203,7 @@ public void testWriteToTempFile_alreadyExists_replacesFile() throws Exception { UnaryOperator.identity(), MAX_CONCURRENT_STREAMS ); - ReadContext readContext = new ReadContext((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams, null); + ReadContext readContext = new ReadContext.Builder((long) (PART_SIZE + 1) * NUMBER_OF_PARTS, blobPartStreams).build(); readContextListener.onResponse(readContext); countDownLatch.await(); diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 82d7ab06f126b..4ce4936c047d9 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -237,7 +237,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { repositoriesServiceReference::get, threadPool, indexSettings.getRemoteStoreTranslogRepository(), - new RemoteTranslogTransferTracker(shardRouting.shardId(), 10) + new RemoteTranslogTransferTracker(shardRouting.shardId(), 10), + DefaultRemoteStoreSettings.INSTANCE ); } return new InternalTranslogFactory(); diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java index 64f4f1f3e083e..bd6d7b88ade28 100644 --- a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java @@ -197,8 +197,8 @@ public void setDocument(int docId) { } @Override - public Object execute() { - return raw_requests[docId][scriptIndex[0]]; + public void execute() { + addEmittedValue(raw_requests[docId][scriptIndex[0]]); } }; diff --git 
a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java index 18d117fa8c0f5..1bb303a874b9a 100644 --- a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java +++ b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java @@ -67,8 +67,8 @@ public void testDerivedField() throws IOException { when(searchLookup.getLeafSearchLookup(ctx)).thenReturn(leafLookup); return new DerivedFieldScript(params, lookup, ctx) { @Override - public Object execute() { - return raw_requests[sourceLookup.docId()][2]; + public void execute() { + addEmittedValue(raw_requests[sourceLookup.docId()][2]); } }; }; diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index 33008bee1a392..fe5635063f783 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; import org.opensearch.index.remote.RemoteStoreEnums.DataType; @@ -24,7 +25,10 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_INFIX; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.index.remote.RemoteStoreEnums.PathType.parseString; public class RemoteStoreEnumsTests extends OpenSearchTestCase { @@ -136,6 +140,370 @@ public void testGeneratePathForFixedType() { assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); } + public void testGeneratePathForHashedPrefixType() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId; + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + 
.dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("DgSI70IciXs/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/data/", result.buildAsString()); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("oKU5SjILiy4/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/translog/metadata/", result.buildAsString()); + + // Translog Lock files - This is a negative case where the assertion will trip. + dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_PREFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("AUBRfCIuWdk/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/data/", result.buildAsString()); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("erwR-G735Uw/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/metadata/", result.buildAsString()); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + 
.shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertTrue( + result.buildAsString() + .startsWith(String.join(SEPARATOR, FNV_1A.hash(pathInput), basePath, dataCategory.getName(), dataType.getName())) + ); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_PREFIX.path(pathInput, FNV_1A); + assertEquals("KeYDIk0mJXI/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/k2ijhe877d7yuhx7/10/segments/lock_files/", result.buildAsString()); + } + + public void testGeneratePathForHashedInfixType() { + BlobPath blobPath = new BlobPath(); + List pathList = getPathList(); + for (String path : pathList) { + blobPath = blobPath.add(path); + } + + String indexUUID = randomAlphaOfLength(10); + String shardId = String.valueOf(randomInt(100)); + DataCategory dataCategory = TRANSLOG; + DataType dataType = DATA; + + String basePath = getPath(pathList); + basePath = basePath.length() == 0 ? basePath : basePath.substring(0, basePath.length() - 1); + // Translog Data + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = HASHED_INFIX.path(pathInput, FNV_1A); + String expected = derivePath(basePath, pathInput); + String actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + BlobPath fixedBlobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String fixedIndexUUID = "k2ijhe877d7yuhx7"; + String fixedShardId = "10"; + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/DgSI70IciXs/k2ijhe877d7yuhx7/10/translog/data/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Translog Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/oKU5SjILiy4/k2ijhe877d7yuhx7/10/translog/metadata/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Translog Lock files - This is a negative case where the assertion will trip. 
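// The assertion trips because lock_files is a segment-only data type; the translog category
// supports only data and metadata. Assuming the guard inside PathType.path(...) mirrors the
// enum's supported-type sets (the method name and message below are an assumption, not a quote
// from the production code), it is roughly:
//
//     assert dataCategory.isSupportedDataType(dataType)
//         : "category:" + dataCategory + " doesn't support data type:" + dataType;
//
// Passing a null hash algorithm for a hashed path type would trip a similar assertion.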
+ dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_INFIX.path(finalPathInput, null)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> HASHED_INFIX.path(finalPathInput, null)); + + // Segment Data + dataCategory = SEGMENTS; + dataType = DATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/AUBRfCIuWdk/k2ijhe877d7yuhx7/10/segments/data/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Segment Metadata + dataType = METADATA; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/erwR-G735Uw/k2ijhe877d7yuhx7/10/segments/metadata/"; + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // Segment Lockfiles + dataType = LOCK_FILES; + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = derivePath(basePath, pathInput); + actual = result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + + // assert with exact value for known base path + pathInput = PathInput.builder() + .basePath(fixedBlobPath) + .indexUUID(fixedIndexUUID) + .shardId(fixedShardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = HASHED_INFIX.path(pathInput, FNV_1A); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/KeYDIk0mJXI/k2ijhe877d7yuhx7/10/segments/lock_files/"; + actual = 
result.buildAsString(); + assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); + } + + private String derivePath(String basePath, PathInput pathInput) { + return "".equals(basePath) + ? String.join( + SEPARATOR, + FNV_1A.hash(pathInput), + pathInput.indexUUID(), + pathInput.shardId(), + pathInput.dataCategory().getName(), + pathInput.dataType().getName() + ) + : String.join( + SEPARATOR, + basePath, + FNV_1A.hash(pathInput), + pathInput.indexUUID(), + pathInput.shardId(), + pathInput.dataCategory().getName(), + pathInput.dataType().getName() + ); + } + private List<String> getPathList() { List<String> pathList = new ArrayList<>(); int length = randomIntBetween(0, 5); @@ -152,5 +520,4 @@ private String getPath(List<String> pathList) { } return p + SEPARATOR; } - } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java new file mode 100644 index 0000000000000..9d4b41f5c395f --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyResolverTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.Version; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.test.OpenSearchTestCase; + +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING; + +public class RemoteStorePathStrategyResolverTests extends OpenSearchTestCase { + + public void testGetMinVersionOlder() { + Settings settings = Settings.builder() + .put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values())) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.V_2_13_0); + assertEquals(PathType.FIXED, resolver.get().getType()); + assertNull(resolver.get().getHashAlgorithm()); + } + + public void testGetMinVersionNewer() { + PathType pathType = randomFrom(PathType.values()); + Settings settings = Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType).build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + RemoteStorePathStrategyResolver resolver = new RemoteStorePathStrategyResolver(clusterSettings, () -> Version.CURRENT); + assertEquals(pathType, resolver.get().getType()); + if (pathType.requiresHashAlgorithm()) { + assertNotNull(resolver.get().getHashAlgorithm()); + } else { + assertNull(resolver.get().getHashAlgorithm()); + } + + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java index d3c7d754d6b61..34074861f2764 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -17,8 +17,10 @@ import java.util.ArrayList; import java.util.LinkedList; import
java.util.List; +import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreUtils.longToUrlBase64; import static org.opensearch.index.remote.RemoteStoreUtils.verifyNoMultipleWriters; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX; import static org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR; @@ -179,4 +181,32 @@ public void testVerifyMultipleWriters_Translog() throws InterruptedException { ); } + public void testLongToBase64() { + Map longToExpectedBase64String = Map.of( + -5537941589147079860L, + "syVHd0gGq0w", + -5878421770170594047L, + "rmumi5UPDQE", + -5147010836697060622L, + "uJIk6f-V6vI", + 937096430362711837L, + "DQE8PQwOVx0", + 8422273604115462710L, + "dOHtOEZzejY", + -2528761975013221124L, + "3OgIYbXSXPw", + -5512387536280560513L, + "s4AQvdu03H8", + -5749656451579835857L, + "sDUd65cNCi8", + 5569654857969679538L, + "TUtjlYLPvLI", + -1563884000447039930L, + "6kv3yZNv9kY" + ); + for (Map.Entry entry : longToExpectedBase64String.entrySet()) { + assertEquals(entry.getValue(), longToUrlBase64(entry.getKey())); + assertEquals(11, entry.getValue().length()); + } + } } diff --git a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java index 38c4bb781ce06..e3259a3097278 100644 --- a/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java +++ b/server/src/test/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshotTests.java @@ -14,6 +14,8 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -40,7 +42,10 @@ public void testToXContent() throws IOException { String repositoryBasePath = "test-repo-basepath"; List fileNames = new ArrayList<>(5); fileNames.addAll(Arrays.asList("file1", "file2", "file3", "file4", "file5")); + + // Case 1 - Without remote path type fields RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "1", snapshot, indexVersion, primaryTerm, @@ -52,7 +57,9 @@ public void testToXContent() throws IOException { indexUUID, remoteStoreRepository, repositoryBasePath, - fileNames + fileNames, + null, + null ); String actual; try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { @@ -66,6 +73,67 @@ public void testToXContent() throws IOException { + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"]}"; assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 2 - with just fixed type + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.FIXED, + null + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { 
+ builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; + + // Case 3 - with just hashed prefix type and hash algorithm + shardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A + ); + try (XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder()) { + builder.startObject(); + shardShallowCopySnapshot.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + actual = builder.toString(); + } + + expectedXContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1" + + ",\"path_hash_algorithm\":0}"; + assert Objects.equals(actual, expectedXContent) : "xContent is " + actual; } public void testFromXContent() throws IOException { @@ -83,6 +151,7 @@ public void testFromXContent() throws IOException { List fileNames = new ArrayList<>(5); fileNames.addAll(Arrays.asList("file1", "file2", "file3", "file4", "file5")); RemoteStoreShardShallowCopySnapshot expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "1", snapshot, indexVersion, primaryTerm, @@ -94,7 +163,9 @@ public void testFromXContent() throws IOException { indexUUID, remoteStoreRepository, repositoryBasePath, - fileNames + fileNames, + null, + null ); String xContent = "{\"version\":\"1\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" @@ -102,22 +173,66 @@ public void testFromXContent() throws IOException { + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"]}"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); - assertEquals(actualShardShallowCopySnapshot.snapshot(), expectedShardShallowCopySnapshot.snapshot()); - assertEquals( - actualShardShallowCopySnapshot.getRemoteStoreRepository(), - expectedShardShallowCopySnapshot.getRemoteStoreRepository() - ); - assertEquals(actualShardShallowCopySnapshot.getCommitGeneration(), expectedShardShallowCopySnapshot.getCommitGeneration()); - assertEquals(actualShardShallowCopySnapshot.getPrimaryTerm(), expectedShardShallowCopySnapshot.getPrimaryTerm()); - 
assertEquals(actualShardShallowCopySnapshot.startTime(), expectedShardShallowCopySnapshot.startTime()); - assertEquals(actualShardShallowCopySnapshot.time(), expectedShardShallowCopySnapshot.time()); - assertEquals(actualShardShallowCopySnapshot.totalSize(), expectedShardShallowCopySnapshot.totalSize()); - assertEquals(actualShardShallowCopySnapshot.totalFileCount(), expectedShardShallowCopySnapshot.totalFileCount()); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.FIXED + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":0}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.FIXED, + null + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); + } + + // with pathType=PathType.HASHED_PREFIX and pathHashAlgorithm=PathHashAlgorithm.FNV_1A + xContent = "{\"version\":\"2\",\"name\":\"test-snapshot\",\"index_version\":1,\"start_time\":123,\"time\":123," + + "\"number_of_files\":5,\"total_size\":5,\"index_uuid\":\"syzhajds-ashdlfj\",\"remote_store_repository\":" + + "\"test-rs-repository\",\"commit_generation\":5,\"primary_term\":3,\"remote_store_repository_base_path\":" + + "\"test-repo-basepath\",\"file_names\":[\"file1\",\"file2\",\"file3\",\"file4\",\"file5\"],\"path_type\":1,\"path_hash_algorithm\":0}"; + expectedShardShallowCopySnapshot = new RemoteStoreShardShallowCopySnapshot( + "2", + snapshot, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames, + PathType.HASHED_PREFIX, + PathHashAlgorithm.FNV_1A + ); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { + RemoteStoreShardShallowCopySnapshot actualShardShallowCopySnapshot = RemoteStoreShardShallowCopySnapshot.fromXContent(parser); + assert Objects.equals(expectedShardShallowCopySnapshot, actualShardShallowCopySnapshot); } } public void testFromXContentInvalid() throws IOException { - final int iters = scaledRandomIntBetween(1, 10); + final int iters = 14; for (int iter = 0; iter < iters; iter++) { String snapshot = "test-snapshot"; long indexVersion = 1; @@ -133,10 +248,11 @@ public void testFromXContentInvalid() throws IOException { List fileNames = new ArrayList<>(5); fileNames.addAll(Arrays.asList("file1", "file2", "file3", "file4", "file5")); String failure = null; - String version = RemoteStoreShardShallowCopySnapshot.DEFAULT_VERSION; - long length = Math.max(0, Math.abs(randomLong())); + String version = "1"; + PathType pathType = null; + PathHashAlgorithm pathHashAlgorithm = null; // random corruption - switch (randomIntBetween(0, 8)) { + 
switch (iter) { case 0: snapshot = null; failure = "Invalid/Missing Snapshot Name"; @@ -170,6 +286,31 @@ public void testFromXContentInvalid() throws IOException { failure = "Invalid Version Provided"; break; case 8: + version = "2"; + failure = "Invalid combination of pathType=null pathHashAlgorithm=null for version=2"; + break; + case 9: + version = "1"; + pathType = PathType.FIXED; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=null for version=1"; + break; + case 10: + version = "1"; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + failure = "Invalid combination of pathType=null pathHashAlgorithm=FNV_1A for version=1"; + break; + case 11: + version = "2"; + pathType = PathType.FIXED; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + failure = "Invalid combination of pathType=FIXED pathHashAlgorithm=FNV_1A for version=2"; + break; + case 12: + version = "2"; + pathType = PathType.HASHED_PREFIX; + pathHashAlgorithm = PathHashAlgorithm.FNV_1A; + break; + case 13: break; default: fail("shouldn't be here"); @@ -194,6 +335,14 @@ public void testFromXContentInvalid() throws IOException { builder.value(fileName); } builder.endArray(); + // We are handling NP check since a cluster can have indexes created earlier which do not have remote store + // path type and path hash algorithm in its custom data in index metadata. + if (Objects.nonNull(pathType)) { + builder.field(RemoteStoreShardShallowCopySnapshot.PATH_TYPE, pathType.getCode()); + } + if (Objects.nonNull(pathHashAlgorithm)) { + builder.field(RemoteStoreShardShallowCopySnapshot.PATH_HASH_ALGORITHM, pathHashAlgorithm.getCode()); + } builder.endObject(); byte[] xContent = BytesReference.toBytes(BytesReference.bytes(builder)); @@ -211,7 +360,8 @@ public void testFromXContentInvalid() throws IOException { assertEquals(remoteStoreShardShallowCopySnapshot.startTime(), startTime); assertEquals(remoteStoreShardShallowCopySnapshot.time(), time); assertEquals(remoteStoreShardShallowCopySnapshot.totalSize(), totalSize); - assertEquals(remoteStoreShardShallowCopySnapshot.totalFileCount(), totalFileCount); + assertEquals(remoteStoreShardShallowCopySnapshot.getPathType(), pathType); + assertEquals(remoteStoreShardShallowCopySnapshot.getPathHashAlgorithm(), pathHashAlgorithm); } else { try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { parser.nextToken(); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 11b4eb078226f..44ddd2de9d007 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -704,9 +704,9 @@ public void testCleanupAsync() throws Exception { String repositoryName = "test-repository"; String indexUUID = "test-idx-uuid"; ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); - RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy( - randomFrom(PathType.values()), - randomFrom(PathHashAlgorithm.values()) + RemoteStorePathStrategy pathStrategy = randomFrom( + new RemoteStorePathStrategy(PathType.FIXED), + new RemoteStorePathStrategy(PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A) ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java 
b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 70800d4fd423a..0e3854f65135f 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -50,6 +50,7 @@ import org.opensearch.index.translog.transfer.TranslogTransferManager; import org.opensearch.index.translog.transfer.TranslogTransferMetadata; import org.opensearch.index.translog.transfer.TranslogUploadFailedException; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -188,7 +189,8 @@ private RemoteFsTranslog create(Path path, BlobStoreRepository repository, Strin repository, threadPool, primaryMode::get, - new RemoteTranslogTransferTracker(shardId, 10) + new RemoteTranslogTransferTracker(shardId, 10), + DefaultRemoteStoreSettings.INSTANCE ); } @@ -459,7 +461,8 @@ public void testExtraGenToKeep() throws Exception { repository, threadPool, () -> Boolean.TRUE, - new RemoteTranslogTransferTracker(shardId, 10) + new RemoteTranslogTransferTracker(shardId, 10), + DefaultRemoteStoreSettings.INSTANCE ) { @Override ChannelFactory getChannelFactory() { @@ -1508,7 +1511,8 @@ public void testTranslogWriterCanFlushInAddOrReadCall() throws IOException { repository, threadPool, () -> Boolean.TRUE, - new RemoteTranslogTransferTracker(shardId, 10) + new RemoteTranslogTransferTracker(shardId, 10), + DefaultRemoteStoreSettings.INSTANCE ) { @Override ChannelFactory getChannelFactory() { @@ -1616,7 +1620,8 @@ public void force(boolean metaData) throws IOException { repository, threadPool, () -> Boolean.TRUE, - new RemoteTranslogTransferTracker(shardId, 10) + new RemoteTranslogTransferTracker(shardId, 10), + DefaultRemoteStoreSettings.INSTANCE ) { @Override ChannelFactory getChannelFactory() { diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 49719017ce736..81ae479d018b0 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -18,6 +18,7 @@ import org.opensearch.common.blobstore.stream.write.WritePriority; import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; @@ -27,6 +28,8 @@ import org.opensearch.index.translog.transfer.FileSnapshot.TransferFileSnapshot; import org.opensearch.index.translog.transfer.FileSnapshot.TranslogFileSnapshot; import org.opensearch.index.translog.transfer.listener.TranslogTransferListener; +import org.opensearch.indices.DefaultRemoteStoreSettings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -100,7 +103,8 @@ public void setUp() throws Exception { remoteBaseTransferPath.add(TRANSLOG.getName()), remoteBaseTransferPath.add(METADATA.getName()), tracker, - remoteTranslogTransferTracker + 
remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE ); delayForBlobDownload = 1; @@ -165,7 +169,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { remoteBaseTransferPath.add(TRANSLOG.getName()), remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE ); assertTrue(translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -188,20 +193,36 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) { public void testTransferSnapshotOnUploadTimeout() throws Exception { doAnswer(invocationOnMock -> { - Thread.sleep(31 * 1000); + Set<TransferFileSnapshot> transferFileSnapshots = invocationOnMock.getArgument(0); + ActionListener<TransferFileSnapshot> listener = invocationOnMock.getArgument(2); + Runnable runnable = () -> { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + for (TransferFileSnapshot transferFileSnapshot : transferFileSnapshots) { + listener.onResponse(transferFileSnapshot); + } + }; + Thread t = new Thread(runnable); + t.start(); return null; }).when(transferService).uploadBlobs(anySet(), anyMap(), any(ActionListener.class), any(WritePriority.class)); FileTransferTracker fileTransferTracker = new FileTransferTracker( new ShardId("index", "indexUUid", 0), remoteTranslogTransferTracker ); + RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); + when(remoteStoreSettings.getClusterRemoteTranslogTransferTimeout()).thenReturn(new TimeValue(1)); TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, remoteBaseTransferPath.add(TRANSLOG.getName()), remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + remoteStoreSettings ); SetOnce<Exception> exception = new SetOnce<>(); translogTransferManager.transferSnapshot(createTransferSnapshot(), new TranslogTransferListener() { @@ -243,7 +264,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { remoteBaseTransferPath.add(TRANSLOG.getName()), remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + DefaultRemoteStoreSettings.INSTANCE ); SetOnce<Exception> exception = new SetOnce<>(); @@ -336,14 +358,6 @@ public String toString() { } public void testReadMetadataNoFile() throws IOException { - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath.add(TRANSLOG.getName()), - remoteBaseTransferPath.add(METADATA.getName()), - null, - remoteTranslogTransferTracker - ); doAnswer(invocation -> { LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); List<BlobMetadata> bmList = new LinkedList<>(); @@ -358,14 +372,6 @@ public void testReadMetadataNoFile() throws IOException { // This should happen most of the time - public void testReadMetadataFile() throws IOException { - TranslogTransferManager translogTransferManager = new TranslogTransferManager( - shardId, - transferService, - remoteBaseTransferPath.add(TRANSLOG.getName()), - remoteBaseTransferPath.add(METADATA.getName()), - null, - remoteTranslogTransferTracker - ); TranslogTransferMetadata metadata1 = new TranslogTransferMetadata(1, 1, 1, 2); String mdFilename1 = metadata1.getFileName(); @@ -395,14 +401,6 @@ public void testReadMetadataFile() throws IOException
@@ -243,7 +264,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception {
             remoteBaseTransferPath.add(TRANSLOG.getName()),
             remoteBaseTransferPath.add(METADATA.getName()),
             fileTransferTracker,
-            remoteTranslogTransferTracker
+            remoteTranslogTransferTracker,
+            DefaultRemoteStoreSettings.INSTANCE
         );
         SetOnce<Exception> exception = new SetOnce<>();
@@ -336,14 +358,6 @@ public String toString() {
     }

     public void testReadMetadataNoFile() throws IOException {
-        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
-            shardId,
-            transferService,
-            remoteBaseTransferPath.add(TRANSLOG.getName()),
-            remoteBaseTransferPath.add(METADATA.getName()),
-            null,
-            remoteTranslogTransferTracker
-        );
         doAnswer(invocation -> {
             LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3);
             List<BlobMetadata> bmList = new LinkedList<>();
@@ -358,14 +372,6 @@ public void testReadMetadataNoFile() throws IOException {

     // This should happen most of the time
     public void testReadMetadataFile() throws IOException {
-        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
-            shardId,
-            transferService,
-            remoteBaseTransferPath.add(TRANSLOG.getName()),
-            remoteBaseTransferPath.add(METADATA.getName()),
-            null,
-            remoteTranslogTransferTracker
-        );
         TranslogTransferMetadata metadata1 = new TranslogTransferMetadata(1, 1, 1, 2);
         String mdFilename1 = metadata1.getFileName();
@@ -395,14 +401,6 @@ public void testReadMetadataFile() throws IOException {
     }

     public void testReadMetadataReadException() throws IOException {
-        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
-            shardId,
-            transferService,
-            remoteBaseTransferPath.add(TRANSLOG.getName()),
-            remoteBaseTransferPath.add(METADATA.getName()),
-            null,
-            remoteTranslogTransferTracker
-        );
         TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2);
         String mdFilename = tm.getFileName();
@@ -432,15 +430,6 @@ public void testMetadataFileNameOrder() throws IOException {
     }

     public void testReadMetadataListException() throws IOException {
-        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
-            shardId,
-            transferService,
-            remoteBaseTransferPath.add(TRANSLOG.getName()),
-            remoteBaseTransferPath.add(METADATA.getName()),
-            null,
-            remoteTranslogTransferTracker
-        );
-
         doAnswer(invocation -> {
             LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3);
             latchedActionListener.onFailure(new IOException("Issue while listing"));
@@ -512,7 +501,8 @@ public void testDeleteTranslogSuccess() throws Exception {
             remoteBaseTransferPath.add(TRANSLOG.getName()),
             remoteBaseTransferPath.add(METADATA.getName()),
             tracker,
-            remoteTranslogTransferTracker
+            remoteTranslogTransferTracker,
+            DefaultRemoteStoreSettings.INSTANCE
         );
         String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp";
         tracker.add(translogFile, true);
@@ -526,14 +516,6 @@ public void testDeleteTranslogSuccess() throws Exception {
     }

     public void testDeleteStaleTranslogMetadata() {
-        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
-            shardId,
-            transferService,
-            remoteBaseTransferPath.add(TRANSLOG.getName()),
-            remoteBaseTransferPath.add(METADATA.getName()),
-            null,
-            remoteTranslogTransferTracker
-        );
         String tm1 = new TranslogTransferMetadata(1, 1, 1, 2).getFileName();
         String tm2 = new TranslogTransferMetadata(1, 2, 1, 2).getFileName();
         String tm3 = new TranslogTransferMetadata(2, 3, 1, 2).getFileName();
@@ -584,7 +566,8 @@ public void testDeleteTranslogFailure() throws Exception {
             remoteBaseTransferPath.add(TRANSLOG.getName()),
             remoteBaseTransferPath.add(METADATA.getName()),
             tracker,
-            remoteTranslogTransferTracker
+            remoteTranslogTransferTracker,
+            DefaultRemoteStoreSettings.INSTANCE
         );
         String translogFile = "translog-19.tlog", checkpointFile = "translog-19.ckp";
         tracker.add(translogFile, true);
@@ -622,14 +605,6 @@ public void testGetPrimaryTermAndGeneration() {
     }

     public void testMetadataConflict() throws InterruptedException {
-        TranslogTransferManager translogTransferManager = new TranslogTransferManager(
-            shardId,
-            transferService,
-            remoteBaseTransferPath.add(TRANSLOG.getName()),
-            remoteBaseTransferPath.add(METADATA.getName()),
-            null,
-            remoteTranslogTransferTracker
-        );
         TranslogTransferMetadata tm = new TranslogTransferMetadata(1, 1, 1, 2, "node--1");
         String mdFilename = tm.getFileName();
         long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count();
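Note the assertion idiom in testMetadataConflict: it counts occurrences of the metadata separator's first character in a generated file name. A self-contained sketch of that counting idiom; the "__" separator value and the sample file name are assumptions for illustration:

```java
public class SeparatorCountSketch {
    // Assumed to mirror TranslogTransferMetadata.METADATA_SEPARATOR; value is illustrative.
    private static final String METADATA_SEPARATOR = "__";

    public static void main(String[] args) {
        // Illustrative metadata file name with separator-delimited fields.
        String mdFilename = "metadata__1__1__123456__node--1__1";
        // Count separator occurrences via an IntStream over the chars, as the test does.
        long count = mdFilename.chars().filter(ch -> ch == METADATA_SEPARATOR.charAt(0)).count();
        System.out.println(count); // each "__" contributes two matching chars
    }
}
```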
diff --git a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
index 3809a44e55901..8a77d97f88d67 100644
--- a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
+++ b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java
@@ -10,8 +10,11 @@
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
 import org.opensearch.test.OpenSearchTestCase;

+import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING;
+
 public class RemoteStoreSettingsDynamicUpdateTests extends OpenSearchTestCase {
     private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
     private final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(Settings.EMPTY, clusterSettings);
@@ -66,4 +69,31 @@ public void testSegmentMetadataRetention() {
         );
         assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles());
     }
+
+    public void testClusterRemoteTranslogTransferTimeout() {
+        // Test default value
+        assertEquals(TimeValue.timeValueSeconds(30), remoteStoreSettings.getClusterRemoteTranslogTransferTimeout());
+
+        // Test override with valid value
+        clusterSettings.applySettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.getKey(), "40s").build());
+        assertEquals(TimeValue.timeValueSeconds(40), remoteStoreSettings.getClusterRemoteTranslogTransferTimeout());
+
+        // Test override with value less than minimum
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> clusterSettings.applySettings(
+                Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.getKey(), "10s").build()
+            )
+        );
+        assertEquals(TimeValue.timeValueSeconds(40), remoteStoreSettings.getClusterRemoteTranslogTransferTimeout());
+
+        // Test override with invalid time value
+        assertThrows(
+            IllegalArgumentException.class,
+            () -> clusterSettings.applySettings(
+                Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING.getKey(), "123").build()
+            )
+        );
+        assertEquals(TimeValue.timeValueSeconds(40), remoteStoreSettings.getClusterRemoteTranslogTransferTimeout());
+    }
 }
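The new test expects a 30s default, a lower bound that rejects "10s", and strict unit parsing that rejects a bare "123". A plausible declaration using the standard Setting.timeSetting factory; the exact key and floor used in production are assumptions here:

```java
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.common.unit.TimeValue;

public final class TranslogTimeoutSettingSketch {
    // Sketch only: a dynamic, node-scoped time setting with a 30s default and a
    // lower bound, so "10s" fails range validation and "123" fails unit parsing.
    public static final Setting<TimeValue> CLUSTER_REMOTE_TRANSLOG_TRANSFER_TIMEOUT_SETTING = Setting.timeSetting(
        "cluster.remote_store.translog.transfer_timeout", // assumed key
        TimeValue.timeValueSeconds(30),                   // default asserted by the test
        TimeValue.timeValueSeconds(30),                   // assumed minimum; "10s" must throw
        Property.NodeScope,
        Property.Dynamic
    );
}
```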
diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java
index 57c126b85ff70..29ffb94ce8bf4 100644
--- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java
+++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java
@@ -43,6 +43,8 @@
 import java.util.List;
 import java.util.Map;

+import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.is;
@@ -56,14 +58,16 @@ protected Collection<Class<? extends Plugin>> getPlugins() {
     }

     protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException {
-        String indexUUID = client().admin()
-            .indices()
-            .prepareGetSettings(remoteStoreIndex)
-            .get()
-            .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID);
         final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
         final BlobStoreRepository remoteStorerepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository);
-        BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files");
+        BlobPath shardLevelBlobPath = getShardLevelBlobPath(
+            client(),
+            remoteStoreIndex,
+            remoteStorerepository.basePath(),
+            "0",
+            SEGMENTS,
+            LOCK_FILES
+        );
         BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath);
         try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) {
             return Arrays.stream(lockDirectory.listAll())
diff --git a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java
index e002297911788..0d171e17e70e1 100644
--- a/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java
+++ b/server/src/test/java/org/opensearch/transport/InboundHandlerTests.java
@@ -275,11 +275,11 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws
         // response so we must just close the connection on an error. To avoid the failure disappearing into a black hole we at least log
         // it.
-        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) {
+        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(NativeMessageHandler.class))) {
             mockAppender.addExpectation(
                 new MockLogAppender.SeenEventExpectation(
                     "expected message",
-                    InboundHandler.class.getCanonicalName(),
+                    NativeMessageHandler.class.getCanonicalName(),
                     Level.WARN,
                     "could not send error response to handshake"
                 )
@@ -308,11 +308,11 @@ public void testClosesChannelOnErrorInHandshakeWithIncompatibleVersion() throws
     }

     public void testLogsSlowInboundProcessing() throws Exception {
-        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(InboundHandler.class))) {
+        try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(NativeMessageHandler.class))) {
             mockAppender.addExpectation(
                 new MockLogAppender.SeenEventExpectation(
                     "expected message",
-                    InboundHandler.class.getCanonicalName(),
+                    NativeMessageHandler.class.getCanonicalName(),
                     Level.WARN,
                     "handling inbound transport message "
                 )
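These hunks only retarget the expected logger from InboundHandler to NativeMessageHandler, which now emits the warnings. For reference, a sketch of the MockLogAppender pattern the tests use, made self-contained by emitting the warning directly instead of forcing a failed handshake:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.opensearch.test.MockLogAppender;
import org.opensearch.test.OpenSearchTestCase;

public class LogExpectationSketchTests extends OpenSearchTestCase {
    public void testWarnIsLogged() throws Exception {
        String loggerName = "org.opensearch.transport.NativeMessageHandler";
        // Attach a mock appender to the logger the updated tests listen on.
        try (MockLogAppender appender = MockLogAppender.createForLoggers(LogManager.getLogger(loggerName))) {
            appender.addExpectation(
                new MockLogAppender.SeenEventExpectation(
                    "expected message",  // label reported on failure
                    loggerName,          // logger that must emit the event
                    Level.WARN,
                    "could not send error response to handshake"
                )
            );
            // Emit the warning directly so the sketch runs standalone; the real
            // tests trigger it through an incompatible-version handshake.
            LogManager.getLogger(loggerName).warn("could not send error response to handshake");
            appender.assertAllExpectationsMatched(); // fails if no event matched
        }
    }
}
```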
diff --git a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java
index ae4b537223394..2dfe8a0dd8590 100644
--- a/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java
+++ b/server/src/test/java/org/opensearch/transport/InboundPipelineTests.java
@@ -72,24 +72,25 @@ public void testPipelineHandling() throws IOException {
         final List<Tuple<MessageData, Exception>> expected = new ArrayList<>();
         final List<Tuple<MessageData, Exception>> actual = new ArrayList<>();
         final List<ReleasableBytesReference> toRelease = new ArrayList<>();
-        final BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {
+        final BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {
             try {
-                final Header header = m.getHeader();
+                InboundMessage message = (InboundMessage) m;
+                final Header header = message.getHeader();
                 final MessageData actualData;
                 final Version version = header.getVersion();
                 final boolean isRequest = header.isRequest();
                 final long requestId = header.getRequestId();
                 final boolean isCompressed = header.isCompressed();
-                if (m.isShortCircuit()) {
+                if (message.isShortCircuit()) {
                     actualData = new MessageData(version, requestId, isRequest, isCompressed, header.getActionName(), null);
                 } else if (isRequest) {
-                    final TestRequest request = new TestRequest(m.openOrGetStreamInput());
+                    final TestRequest request = new TestRequest(message.openOrGetStreamInput());
                     actualData = new MessageData(version, requestId, isRequest, isCompressed, header.getActionName(), request.value);
                 } else {
-                    final TestResponse response = new TestResponse(m.openOrGetStreamInput());
+                    final TestResponse response = new TestResponse(message.openOrGetStreamInput());
                     actualData = new MessageData(version, requestId, isRequest, isCompressed, null, response.value);
                 }
-                actual.add(new Tuple<>(actualData, m.getException()));
+                actual.add(new Tuple<>(actualData, message.getException()));
             } catch (IOException e) {
                 throw new AssertionError(e);
             }
@@ -214,7 +215,7 @@ public void testPipelineHandling() throws IOException {
     }

     public void testDecodeExceptionIsPropagated() throws IOException {
-        BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {};
+        BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {};
         final StatsTracker statsTracker = new StatsTracker();
         final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime());
         final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE);
@@ -268,7 +269,7 @@ public void testDecodeExceptionIsPropagated() throws IOException {
     }

     public void testEnsureBodyIsNotPrematurelyReleased() throws IOException {
-        BiConsumer<TcpChannel, InboundMessage> messageHandler = (c, m) -> {};
+        BiConsumer<TcpChannel, ProtocolInboundMessage> messageHandler = (c, m) -> {};
         final StatsTracker statsTracker = new StatsTracker();
         final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime());
         final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, PageCacheRecycler.NON_RECYCLING_INSTANCE);
diff --git a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java
index ff99435f765d8..36ba409a2de03 100644
--- a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java
+++ b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java
@@ -97,8 +97,9 @@ public void setUp() throws Exception {
         final InboundAggregator aggregator = new InboundAggregator(breaker, (Predicate<String>) action -> true);
         pipeline = new InboundPipeline(statsTracker, millisSupplier, decoder, aggregator, (c, m) -> {
             try (BytesStreamOutput streamOutput = new BytesStreamOutput()) {
-                Streams.copy(m.openOrGetStreamInput(), streamOutput);
-                message.set(new Tuple<>(m.getHeader(), streamOutput.bytes()));
+                InboundMessage m1 = (InboundMessage) m;
+                Streams.copy(m1.openOrGetStreamInput(), streamOutput);
+                message.set(new Tuple<>(m1.getHeader(), streamOutput.bytes()));
             } catch (IOException e) {
                 throw new AssertionError(e);
             }
diff --git a/settings.gradle b/settings.gradle
index 8fbf32504215b..ccc239ff17062 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -10,7 +10,7 @@
  */

 plugins {
-  id "com.gradle.enterprise" version "3.16.2"
+  id "com.gradle.enterprise" version "3.17"
 }

 ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE')
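The pipeline's message callback is now typed against the protocol-agnostic ProtocolInboundMessage, so tests that need native-transport details cast back to InboundMessage. A tiny self-contained sketch of that widen-then-narrow pattern, with hypothetical stand-ins for the transport types:

```java
import java.util.function.BiConsumer;

public class ProtocolNarrowingSketch {
    // Hypothetical stand-ins: a marker interface for any protocol's inbound
    // message, and the native implementation of it.
    interface ProtocolInboundMessage {}

    static class InboundMessage implements ProtocolInboundMessage {
        String header() {
            return "native-header";
        }
    }

    public static void main(String[] args) {
        // Handlers are declared against the widened interface type...
        BiConsumer<String, ProtocolInboundMessage> handler = (channel, m) -> {
            // ...and native-protocol code narrows back to the concrete type,
            // mirroring the (InboundMessage) casts in the updated tests.
            InboundMessage message = (InboundMessage) m;
            System.out.println(channel + " -> " + message.header());
        };
        handler.accept("channel-1", new InboundMessage());
    }
}
```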
The shard will will be put in its proper path under the
+     * creates a new initializing shard. The shard will be put in its proper path under the
      * supplied node id.
      *
      * @param shardId the shard id to use
@@ -441,7 +441,7 @@ protected IndexShard newShard(
     }

     /**
-     * creates a new initializing shard. The shard will will be put in its proper path under the
+     * creates a new initializing shard. The shard will be put in its proper path under the
      * current node id the shard is assigned to.
      *
      * @param routing shard routing to use
@@ -459,7 +459,7 @@ protected IndexShard newShard(
     }

     /**
-     * creates a new initializing shard. The shard will will be put in its proper path under the
+     * creates a new initializing shard. The shard will be put in its proper path under the
      * current node id the shard is assigned to.
      * @param routing shard routing to use
      * @param indexMetadata indexMetadata for the shard, including any mapping
@@ -498,7 +498,7 @@ protected IndexShard newShard(
     }

     /**
-     * creates a new initializing shard. The shard will will be put in its proper path under the
+     * creates a new initializing shard. The shard will be put in its proper path under the
      * current node id the shard is assigned to.
      * @param routing shard routing to use
      * @param shardPath path to use for shard data
@@ -682,7 +682,8 @@ protected IndexShard newShard(
                     () -> mockRepoSvc,
                     threadPool,
                     settings.getRemoteStoreTranslogRepository(),
-                    new RemoteTranslogTransferTracker(shardRouting.shardId(), 20)
+                    new RemoteTranslogTransferTracker(shardRouting.shardId(), 20),
+                    DefaultRemoteStoreSettings.INSTANCE
                 );
             }
             return new InternalTranslogFactory();
diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java
index 8c7e9718eb0cd..456b55883f91e 100644
--- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java
+++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java
@@ -288,10 +288,10 @@ public double execute(Map<String, Object> params1, double[] values) {
                 ctx
             ) {
                 @Override
-                public Object execute() {
+                public void execute() {
                     Map<String, Object> vars = new HashMap<>(derivedFieldsParams);
                     vars.put("params", derivedFieldsParams);
-                    return script.apply(vars);
+                    script.apply(vars);
                 }
             };
             return context.factoryClazz.cast(factory);
diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
index 0ee889af5ce1a..ce76914882150 100644
--- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -101,6 +101,8 @@
 import java.util.function.Function;
 import java.util.function.Predicate;

+import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES;
 import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
@@ -559,19 +561,16 @@ protected void assertDocCount(String index, long count) {
     }

     protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName) throws IOException {
-        String indexUUID = client().admin()
-            .indices()
-            .prepareGetSettings(remoteStoreIndex)
-            .get()
-            .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID);
-        return getLockFilesInRemoteStore(remoteStoreIndex, remoteStoreRepositoryName, indexUUID);
-    }
-
-    protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName, String indexUUID)
-        throws IOException {
         final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class);
         final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName);
-        BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files");
+        BlobPath shardLevelBlobPath = getShardLevelBlobPath(
+            client(),
+            remoteStoreIndex,
+            remoteStoreRepository.basePath(),
+            "0",
+            SEGMENTS,
+            LOCK_FILES
+        );
         BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath);
         try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) {
             return lockDirectory.listAll();
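Both test helpers previously hard-coded the basePath/indexUUID/0/segments/lock_files layout, which is only correct for the FIXED path type; they now resolve the path through the shared helper shown later in this changeset. A sketch of the old fixed composition for contrast; the repository path and UUID are illustrative:

```java
import org.opensearch.common.blobstore.BlobPath;

public class FixedLockFilePathSketch {
    public static void main(String[] args) {
        // Illustrative values; real tests take these from the repository and index.
        BlobPath basePath = BlobPath.cleanPath().add("repo-base");
        String indexUUID = "0aB1cD2eF3gH";
        // The layout both helpers used to hard-code; valid only for PathType.FIXED.
        BlobPath fixed = basePath.add(indexUUID).add("0").add("segments").add("lock_files");
        System.out.println(fixed.buildAsString()); // repo-base/0aB1cD2eF3gH/0/segments/lock_files/
    }
}
```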
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 664314245530e..c26c3f8d21380 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -135,6 +135,7 @@
 import org.opensearch.index.engine.Segment;
 import org.opensearch.index.mapper.CompletionFieldMapper;
 import org.opensearch.index.mapper.MockFieldFilterPlugin;
+import org.opensearch.index.remote.RemoteStoreEnums.PathType;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.translog.Translog;
@@ -210,6 +211,7 @@
 import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING;
 import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING;
 import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING;
 import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY;
 import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX;
@@ -377,6 +379,14 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {
      */
     public static final String TESTS_CLUSTER_NAME = "tests.clustername";

+    protected static final String REMOTE_BACKED_STORAGE_REPOSITORY_NAME = "test-remote-store-repo";
+
+    private Path remoteStoreRepositoryPath;
+
+    private ReplicationType randomReplicationType;
+
+    private String randomStorageType;
+
     @BeforeClass
     public static void beforeClass() throws Exception {
         testClusterRule.beforeClass();
@@ -1894,11 +1904,19 @@ protected Settings nodeSettings(int nodeOrdinal) {
             builder.put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true);
         }

-        // Randomly set a replication strategy for the node. Replication Strategy can still be manually overridden by subclass if needed.
+        // Randomly set a Replication Strategy and storage type for the node. Both Replication Strategy and Storage Type can still be
+        // manually overridden by subclass if needed.
         if (useRandomReplicationStrategy()) {
-            ReplicationType replicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT;
-            logger.info("Randomly using Replication Strategy as {}.", replicationType.toString());
-            builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), replicationType);
+            if (randomReplicationType.equals(ReplicationType.SEGMENT) && randomStorageType.equals("REMOTE_STORE")) {
+                logger.info("Randomly using Replication Strategy as {} and Storage Type as {}.", randomReplicationType, randomStorageType);
+                if (remoteStoreRepositoryPath == null) {
+                    remoteStoreRepositoryPath = randomRepoPath().toAbsolutePath();
+                }
+                builder.put(remoteStoreClusterSettings(REMOTE_BACKED_STORAGE_REPOSITORY_NAME, remoteStoreRepositoryPath));
+            } else {
+                logger.info("Randomly using Replication Strategy as {} and Storage Type as {}.", randomReplicationType, randomStorageType);
+                builder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), randomReplicationType);
+            }
         }
         return builder.build();
     }
@@ -1951,6 +1969,14 @@ protected boolean ignoreExternalCluster() {
     }

     protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException {
+        if (useRandomReplicationStrategy()) {
+            randomReplicationType = randomBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT;
+            if (randomReplicationType.equals(ReplicationType.SEGMENT)) {
+                randomStorageType = randomBoolean() ? "REMOTE_STORE" : "LOCAL";
+            } else {
+                randomStorageType = "LOCAL";
+            }
+        }
         String clusterAddresses = System.getProperty(TESTS_CLUSTER);
         if (Strings.hasLength(clusterAddresses) && ignoreExternalCluster() == false) {
             if (scope == Scope.TEST) {
@@ -2538,7 +2564,7 @@ public static Settings buildRemoteStoreNodeAttributes(
         );
     }

-    public static Settings buildRemoteStoreNodeAttributes(
+    private static Settings buildRemoteStoreNodeAttributes(
         String segmentRepoName,
         Path segmentRepoPath,
         String segmentRepoType,
@@ -2593,6 +2619,7 @@ public static Settings buildRemoteStoreNodeAttributes(
             settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean())
                 .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES);
         }
+        settings.put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), randomFrom(PathType.values()));
         return settings.build();
     }
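The randomization is deliberately split in two: buildTestCluster picks one replication-type and storage-type combination per test cluster, and nodeSettings then applies that same choice to every node, so nodes never disagree. A self-contained sketch of that two-phase shape, with plain java.util.Random standing in for the framework's seeded randomness:

```java
import java.util.Random;

public class RandomizedClusterConfigSketch {
    private static final Random RANDOM = new Random();

    enum ReplicationType { DOCUMENT, SEGMENT }

    // Chosen once per test cluster so every node sees the same combination.
    static ReplicationType replicationType;
    static String storageType;

    static void chooseOnce() {
        replicationType = RANDOM.nextBoolean() ? ReplicationType.DOCUMENT : ReplicationType.SEGMENT;
        // Remote-backed storage is only paired with segment replication.
        storageType = replicationType == ReplicationType.SEGMENT && RANDOM.nextBoolean() ? "REMOTE_STORE" : "LOCAL";
    }

    // Applied per node, mirroring nodeSettings(int) above.
    static String nodeSettings(int nodeOrdinal) {
        if (replicationType == ReplicationType.SEGMENT && "REMOTE_STORE".equals(storageType)) {
            return "node-" + nodeOrdinal + ": remote store settings, shared repo path";
        }
        return "node-" + nodeOrdinal + ": cluster.replication.type=" + replicationType;
    }

    public static void main(String[] args) {
        chooseOnce();
        for (int i = 0; i < 3; i++) {
            System.out.println(nodeSettings(i)); // identical choice on every node
        }
    }
}
```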
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java
index f381ebdb64fc2..5a3f3b5a07a8d 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java
@@ -63,14 +63,17 @@
 import org.apache.lucene.tests.util.TimeUnits;
 import org.opensearch.Version;
 import org.opensearch.bootstrap.BootstrapForTesting;
+import org.opensearch.client.Client;
 import org.opensearch.client.Requests;
 import org.opensearch.cluster.ClusterModule;
+import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.coordination.PersistedStateRegistry;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.common.CheckedRunnable;
 import org.opensearch.common.Numbers;
 import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.io.PathUtils;
 import org.opensearch.common.io.PathUtilsForTesting;
 import org.opensearch.common.io.stream.BytesStreamOutput;
@@ -120,6 +123,8 @@
 import org.opensearch.index.analysis.NamedAnalyzer;
 import org.opensearch.index.analysis.TokenFilterFactory;
 import org.opensearch.index.analysis.TokenizerFactory;
+import org.opensearch.index.remote.RemoteStoreEnums;
+import org.opensearch.index.remote.RemoteStorePathStrategy;
 import org.opensearch.index.store.remote.filecache.FileCache;
 import org.opensearch.indices.analysis.AnalysisModule;
 import org.opensearch.monitor.jvm.JvmInfo;
@@ -1797,4 +1802,39 @@ protected static InetAddress randomIp(boolean v4) {
             throw new AssertionError();
         }
     }
+
+    public static BlobPath getShardLevelBlobPath(
+        Client client,
+        String remoteStoreIndex,
+        BlobPath basePath,
+        String shardId,
+        RemoteStoreEnums.DataCategory dataCategory,
+        RemoteStoreEnums.DataType dataType
+    ) {
+        String indexUUID = client.admin()
+            .indices()
+            .prepareGetSettings(remoteStoreIndex)
+            .get()
+            .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID);
+        ClusterState state = client.admin().cluster().prepareState().execute().actionGet().getState();
+        Map<String, String> remoteCustomData = state.metadata()
+            .index(remoteStoreIndex)
+            .getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
+        RemoteStoreEnums.PathType type = Objects.isNull(remoteCustomData)
+            ? RemoteStoreEnums.PathType.FIXED
+            : RemoteStoreEnums.PathType.valueOf(remoteCustomData.get(RemoteStoreEnums.PathType.NAME));
+        RemoteStoreEnums.PathHashAlgorithm hashAlgorithm = Objects.nonNull(remoteCustomData)
+            ? remoteCustomData.containsKey(RemoteStoreEnums.PathHashAlgorithm.NAME)
+                ? RemoteStoreEnums.PathHashAlgorithm.valueOf(remoteCustomData.get(RemoteStoreEnums.PathHashAlgorithm.NAME))
+                : null
+            : null;
+        RemoteStorePathStrategy.PathInput pathInput = RemoteStorePathStrategy.PathInput.builder()
+            .basePath(basePath)
+            .indexUUID(indexUUID)
+            .shardId(shardId)
+            .dataCategory(dataCategory)
+            .dataType(dataType)
+            .build();
+        return type.path(pathInput, hashAlgorithm);
+    }
 }
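A sketched call site for the new helper: given a test Client and the repository's base path, it resolves where shard 0's segment lock files live regardless of the path type the cluster chose. The index name and wrapper class are hypothetical:

```java
import org.opensearch.client.Client;
import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.index.remote.RemoteStoreEnums;
import org.opensearch.repositories.blobstore.BlobStoreRepository;

import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath;

public class ShardBlobPathUsageSketch {
    // Resolves the lock-file location for shard 0 of a remote-store-backed
    // index, honoring whatever path strategy the index was created with.
    static BlobPath lockFilePath(Client client, BlobStoreRepository repository) {
        return getShardLevelBlobPath(
            client,
            "test-idx",            // hypothetical index name
            repository.basePath(), // repository's configured base path
            "0",                   // shard id
            RemoteStoreEnums.DataCategory.SEGMENTS,
            RemoteStoreEnums.DataType.LOCK_FILES
        );
    }
}
```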