From d09ef68549fcc7422e4d6bc0f0537c9abcfc81f1 Mon Sep 17 00:00:00 2001 From: Chesnay Schepler Date: Wed, 1 Sep 2021 09:41:17 +0200 Subject: [PATCH] [FLINK-24099][docs] Refer to nightlies.apache.org --- README.md | 2 +- docs/config.toml | 42 +++++++++---------- docs/content.zh/_index.md | 2 +- .../docs/deployment/filesystems/s3.md | 2 +- .../docs/deployment/memory/mem_migration.md | 2 +- .../serialization/types_serialization.md | 2 +- .../docs/libs/state_processor_api.md | 2 +- docs/content.zh/release-notes/flink-1.11.md | 32 +++++++------- docs/content.zh/release-notes/flink-1.13.md | 2 +- docs/content.zh/release-notes/flink-1.7.md | 2 +- docs/content.zh/release-notes/flink-1.8.md | 2 +- docs/content.zh/release-notes/flink-1.9.md | 4 +- docs/content/_index.md | 2 +- .../docs/connectors/datastream/kafka.md | 4 +- .../content/docs/deployment/filesystems/s3.md | 2 +- .../docs/deployment/memory/mem_migration.md | 2 +- .../serialization/types_serialization.md | 2 +- docs/content/docs/libs/state_processor_api.md | 2 +- docs/content/release-notes/flink-1.11.md | 32 +++++++------- docs/content/release-notes/flink-1.13.md | 2 +- docs/content/release-notes/flink-1.7.md | 2 +- docs/content/release-notes/flink-1.8.md | 2 +- docs/content/release-notes/flink-1.9.md | 4 +- .../partials/docs/inject/content-before.html | 4 +- .../kubernetes_config_configuration.html | 2 +- .../client/cli/DynamicPropertiesUtil.java | 2 +- .../org/apache/flink/core/fs/FileSystem.java | 4 +- .../EmptyFieldsCountAccumulator.java | 2 +- .../org/apache/flink/api/java/DataSet.java | 2 +- .../KubernetesConfigOptions.java | 2 +- flink-python/README.md | 6 +-- .../mixing_use_of_datastream_and_table.py | 2 +- .../pyflink/examples/table/multi_sink.py | 2 +- .../examples/table/pandas/pandas_udaf.py | 2 +- .../examples/table/process_json_data.py | 2 +- .../table/process_json_data_with_udf.py | 2 +- .../examples/table/windowing/over_window.py | 2 +- .../table/windowing/session_window.py | 2 +- 
.../table/windowing/sliding_window.py | 2 +- .../examples/table/windowing/tumble_window.py | 2 +- .../pyflink/examples/table/word_count.py | 2 +- flink-python/pyflink/table/descriptors.py | 2 +- .../AbstractBroadcastStateTransformation.java | 2 +- .../flink/table/client/cli/CliStrings.java | 2 +- .../resources/sql-client-help-command.out | 2 +- .../src/test/resources/sql/misc.q | 2 +- tools/releasing/create_snapshot_branch.sh | 2 +- 47 files changed, 104 insertions(+), 104 deletions(-) diff --git a/README.md b/README.md index 66c2b4ff5ec5d..c0cd28915e9d7 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ The IntelliJ IDE supports Maven out of the box and offers a plugin for Scala dev * IntelliJ download: [https://www.jetbrains.com/idea/](https://www.jetbrains.com/idea/) * IntelliJ Scala Plugin: [https://plugins.jetbrains.com/plugin/?id=1347](https://plugins.jetbrains.com/plugin/?id=1347) -Check out our [Setting up IntelliJ](https://ci.apache.org/projects/flink/flink-docs-master/flinkDev/ide_setup.html#intellij-idea) guide for details. +Check out our [Setting up IntelliJ](https://nightlies.apache.org/flink/flink-docs-master/flinkDev/ide_setup.html#intellij-idea) guide for details. ### Eclipse Scala IDE diff --git a/docs/config.toml b/docs/config.toml index 366e9329fd665..9659c10306efc 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-baseURL = '//ci.apache.org/projects/flink/flink-docs-master' +baseURL = '//nightlies.apache.org/flink/flink-docs-master' languageCode = "en-us" title = "Apache Flink" enableGitInfo = false @@ -60,36 +60,36 @@ pygmentsUseClasses = true ZhDownloadPage = "//flink.apache.org/zh/downloads.html" - JavaDocs = "//ci.apache.org/projects/flink/flink-docs-master/api/java/" + JavaDocs = "//nightlies.apache.org/flink/flink-docs-master/api/java/" - ScalaDocs = "//ci.apache.org/projects/flink/flink-docs-master/api/scala/index.html#org.apache.flink.api.scala.package" + ScalaDocs = "//nightlies.apache.org/flink/flink-docs-master/api/scala/index.html#org.apache.flink.api.scala.package" - PyDocs = "//ci.apache.org/projects/flink/flink-docs-master/api/python/" + PyDocs = "//nightlies.apache.org/flink/flink-docs-master/api/python/" # External links at the bottom # of the menu MenuLinks = [ ["Project Homepage", "//flink.apache.org"], - ["JavaDocs", "//ci.apache.org/projects/flink/flink-docs-master/api/java/"], - ["ScalaDocs", "//ci.apache.org/projects/flink/flink-docs-master/api/scala/index.html#org.apache.flink.api.scala.package"], - ["PyDocs", "//ci.apache.org/projects/flink/flink-docs-master/api/python/"] + ["JavaDocs", "//nightlies.apache.org/flink/flink-docs-master/api/java/"], + ["ScalaDocs", "//nightlies.apache.org/flink/flink-docs-master/api/scala/index.html#org.apache.flink.api.scala.package"], + ["PyDocs", "//nightlies.apache.org/flink/flink-docs-master/api/python/"] ] PreviousDocs = [ - ["1.13", "http://ci.apache.org/projects/flink/flink-docs-release-1.13"], - ["1.12", "http://ci.apache.org/projects/flink/flink-docs-release-1.12"], - ["1.11", "http://ci.apache.org/projects/flink/flink-docs-release-1.11"], - ["1.10", "http://ci.apache.org/projects/flink/flink-docs-release-1.10"], - ["1.9", "http://ci.apache.org/projects/flink/flink-docs-release-1.9"], - ["1.8", "http://ci.apache.org/projects/flink/flink-docs-release-1.8"], - ["1.7", 
"http://ci.apache.org/projects/flink/flink-docs-release-1.7"], - ["1.6", "http://ci.apache.org/projects/flink/flink-docs-release-1.6"], - ["1.5", "http://ci.apache.org/projects/flink/flink-docs-release-1.5"], - ["1.4", "http://ci.apache.org/projects/flink/flink-docs-release-1.4"], - ["1.3", "http://ci.apache.org/projects/flink/flink-docs-release-1.3"], - ["1.2", "http://ci.apache.org/projects/flink/flink-docs-release-1.2"], - ["1.1", "http://ci.apache.org/projects/flink/flink-docs-release-1.1"], - ["1.0", "http://ci.apache.org/projects/flink/flink-docs-release-1.0"] + ["1.13", "http://nightlies.apache.org/flink/flink-docs-release-1.13"], + ["1.12", "http://nightlies.apache.org/flink/flink-docs-release-1.12"], + ["1.11", "http://nightlies.apache.org/flink/flink-docs-release-1.11"], + ["1.10", "http://nightlies.apache.org/flink/flink-docs-release-1.10"], + ["1.9", "http://nightlies.apache.org/flink/flink-docs-release-1.9"], + ["1.8", "http://nightlies.apache.org/flink/flink-docs-release-1.8"], + ["1.7", "http://nightlies.apache.org/flink/flink-docs-release-1.7"], + ["1.6", "http://nightlies.apache.org/flink/flink-docs-release-1.6"], + ["1.5", "http://nightlies.apache.org/flink/flink-docs-release-1.5"], + ["1.4", "http://nightlies.apache.org/flink/flink-docs-release-1.4"], + ["1.3", "http://nightlies.apache.org/flink/flink-docs-release-1.3"], + ["1.2", "http://nightlies.apache.org/flink/flink-docs-release-1.2"], + ["1.1", "http://nightlies.apache.org/flink/flink-docs-release-1.1"], + ["1.0", "http://nightlies.apache.org/flink/flink-docs-release-1.0"] ] [markup] diff --git a/docs/content.zh/_index.md b/docs/content.zh/_index.md index 1970d28620f4a..782b36a9b4c1a 100644 --- a/docs/content.zh/_index.md +++ b/docs/content.zh/_index.md @@ -63,7 +63,7 @@ under the License. 
{{< columns >}} * [DataStream API]({{< ref "docs/dev/datastream/overview" >}}) * [Table API & SQL]({{< ref "docs/dev/table/overview" >}}) -* [Stateful Functions](https://ci.apache.org/projects/flink/flink-statefun-docs-stable/) +* [Stateful Functions](https://nightlies.apache.org/flink/flink-statefun-docs-stable/) <---> diff --git a/docs/content.zh/docs/deployment/filesystems/s3.md b/docs/content.zh/docs/deployment/filesystems/s3.md index 270bbad4f07b6..48200506766ed 100644 --- a/docs/content.zh/docs/deployment/filesystems/s3.md +++ b/docs/content.zh/docs/deployment/filesystems/s3.md @@ -125,7 +125,7 @@ s3.path.style.access: true 如果熵注入被启用,路径中配置好的字串将会被随机字符所替换。例如路径 `s3://my-bucket/checkpoints/_entropy_/dashboard-job/` 将会被替换成类似于 `s3://my-bucket/checkpoints/gf36ikvg/dashboard-job/` 的路径。 **这仅在使用熵注入选项创建文件时启用!** -否则将完全删除文件路径中的 entropy key。更多细节请参见 [FileSystem.create(Path, WriteOption)](https://ci.apache.org/projects/flink/flink-docs-release-1.6/api/java/org/apache/flink/core/fs/FileSystem.html#create-org.apache.flink.core.fs.Path-org.apache.flink.core.fs.FileSystem.WriteOptions-)。 +否则将完全删除文件路径中的 entropy key。更多细节请参见 [FileSystem.create(Path, WriteOption)](https://nightlies.apache.org/flink/flink-docs-release-1.6/api/java/org/apache/flink/core/fs/FileSystem.html#create-org.apache.flink.core.fs.Path-org.apache.flink.core.fs.FileSystem.WriteOptions-)。 {{< hint info >}} 目前 Flink 运行时仅对 checkpoint 数据文件使用熵注入选项。所有其他文件包括 checkpoint 元数据与外部 URI 都不使用熵注入,以保证 checkpoint URI 的可预测性。 diff --git a/docs/content.zh/docs/deployment/memory/mem_migration.md b/docs/content.zh/docs/deployment/memory/mem_migration.md index 2c9951f32e533..ceb78d3b388af 100644 --- a/docs/content.zh/docs/deployment/memory/mem_migration.md +++ b/docs/content.zh/docs/deployment/memory/mem_migration.md @@ -29,7 +29,7 @@ under the License. 
在 *1.10* 和 *1.11* 版本中,Flink 分别对 [TaskManager]({{< ref "docs/deployment/memory/mem_setup_tm" >}}) 和 [JobManager]({{< ref "docs/deployment/memory/mem_setup_jobmanager" >}}) 的内存配置方法做出了较大的改变。 部分配置参数被移除了,或是语义上发生了变化。 -本篇升级指南将介绍如何将 [*Flink 1.9 及以前版本*](https://ci.apache.org/projects/flink/flink-docs-release-1.9/ops/mem_setup.html)的 TaskManager 内存配置升级到 *Flink 1.10 及以后版本*, +本篇升级指南将介绍如何将 [*Flink 1.9 及以前版本*](https://nightlies.apache.org/flink/flink-docs-release-1.9/ops/mem_setup.html)的 TaskManager 内存配置升级到 *Flink 1.10 及以后版本*, 以及如何将 *Flink 1.10 及以前版本*的 JobManager 内存配置升级到 *Flink 1.11 及以后版本*。 * toc diff --git a/docs/content.zh/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md b/docs/content.zh/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md index 4898fc6267ea6..ee1055ecf1479 100644 --- a/docs/content.zh/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md +++ b/docs/content.zh/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md @@ -111,7 +111,7 @@ Java and Scala classes are treated by Flink as a special POJO data type if they POJOs are generally represented with a `PojoTypeInfo` and serialized with the `PojoSerializer` (using [Kryo](https://github.com/EsotericSoftware/kryo) as configurable fallback). The exception is when the POJOs are actually Avro types (Avro Specific Records) or produced as "Avro Reflect Types". In that case the POJO's are represented by an `AvroTypeInfo` and serialized with the `AvroSerializer`. -You can also register your own custom serializer if required; see [Serialization](https://ci.apache.org/projects/flink/flink-docs-stable/dev/types_serialization.html#serialization-of-pojo-types) for further information. +You can also register your own custom serializer if required; see [Serialization](https://nightlies.apache.org/flink/flink-docs-stable/dev/types_serialization.html#serialization-of-pojo-types) for further information. 
Flink analyzes the structure of POJO types, i.e., it learns about the fields of a POJO. As a result POJO types are easier to use than general types. Moreover, Flink can process POJOs more efficiently than general types. diff --git a/docs/content.zh/docs/libs/state_processor_api.md b/docs/content.zh/docs/libs/state_processor_api.md index 6997ec12b9f1e..8dc3e0797d491 100644 --- a/docs/content.zh/docs/libs/state_processor_api.md +++ b/docs/content.zh/docs/libs/state_processor_api.md @@ -27,7 +27,7 @@ under the License. # State Processor API Apache Flink's State Processor API provides powerful functionality to reading, writing, and modifying savepoints and checkpoints using Flink’s batch DataSet API. -Due to the [interoperability of DataSet and Table API](https://ci.apache.org/projects/flink/flink-docs-master/dev/table/common.html#integration-with-datastream-and-dataset-api), you can even use relational Table API or SQL queries to analyze and process state data. +Due to the [interoperability of DataSet and Table API](https://nightlies.apache.org/flink/flink-docs-master/dev/table/common.html#integration-with-datastream-and-dataset-api), you can even use relational Table API or SQL queries to analyze and process state data. For example, you can take a savepoint of a running stream processing application and analyze it with a DataSet batch program to verify that the application behaves correctly. Or you can read a batch of data from any store, preprocess it, and write the result to a savepoint that you use to bootstrap the state of a streaming application. diff --git a/docs/content.zh/release-notes/flink-1.11.md b/docs/content.zh/release-notes/flink-1.11.md index 2807dd4b1fc8b..d4109c2d37915 100644 --- a/docs/content.zh/release-notes/flink-1.11.md +++ b/docs/content.zh/release-notes/flink-1.11.md @@ -32,7 +32,7 @@ these notes carefully if you are planning to upgrade your Flink version to 1.11. 
The user can now submit applications and choose to execute their `main()` method on the cluster rather than the client. This allows for more light-weight application submission. For more details, -see the [Application Mode documentation](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/#application-mode). +see the [Application Mode documentation](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/#application-mode). #### Web Submission behaves the same as detached mode. @@ -80,22 +80,22 @@ The examples of `Dockerfiles` and docker image `build.sh` scripts have been remo - `flink-container/docker` - `flink-container/kubernetes` -Check the updated user documentation for [Flink Docker integration](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html) instead. It now describes in detail how to [use](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-a-flink-image) and [customize](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#customize-flink-image) [the Flink official docker image](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#docker-hub-flink-images): configuration options, logging, plugins, adding more dependencies and installing software. 
The documentation also includes examples for Session and Job cluster deployments with: -- [docker run](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-flink-image) -- [docker compose](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-compose) -- [docker swarm](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-swarm) -- [standalone Kubernetes](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/kubernetes.html) +Check the updated user documentation for [Flink Docker integration](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html) instead. It now describes in detail how to [use](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-a-flink-image) and [customize](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#customize-flink-image) [the Flink official docker image](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#docker-hub-flink-images): configuration options, logging, plugins, adding more dependencies and installing software. 
The documentation also includes examples for Session and Job cluster deployments with: +- [docker run](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-flink-image) +- [docker compose](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-compose) +- [docker swarm](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-swarm) +- [standalone Kubernetes](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/kubernetes.html) ### Memory Management #### New JobManager Memory Model ##### Overview With [FLIP-116](https://cwiki.apache.org/confluence/display/FLINK/FLIP-116%3A+Unified+Memory+Configuration+for+Job+Managers), a new memory model has been introduced for the JobManager. New configuration options have been introduced to control the memory consumption of the JobManager process. This affects all types of deployments: standalone, YARN, Mesos, and the new active Kubernetes integration. -Please, check the user documentation for [more details](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html). +Please, check the user documentation for [more details](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html). If you try to reuse your previous Flink configuration without any adjustments, the new memory model can result in differently computed memory parameters for the JVM and, thus, performance changes or even failures. 
-In order to start the JobManager process, you have to specify at least one of the following options [`jobmanager.memory.flink.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-flink-size), [`jobmanager.memory.process.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size) or [`jobmanager.memory.heap.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size). -See also [the migration guide](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_migration.html#migrate-job-manager-memory-configuration) for more information. +In order to start the JobManager process, you have to specify at least one of the following options [`jobmanager.memory.flink.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-flink-size), [`jobmanager.memory.process.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size) or [`jobmanager.memory.heap.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size). +See also [the migration guide](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_migration.html#migrate-job-manager-memory-configuration) for more information. 
##### Deprecation and breaking changes The following options are deprecated: @@ -103,24 +103,24 @@ The following options are deprecated: * `jobmanager.heap.mb` If these deprecated options are still used, they will be interpreted as one of the following new options in order to maintain backwards compatibility: - * [JVM Heap](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-jvm-heap) ([`jobmanager.memory.heap.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size)) for standalone and Mesos deployments - * [Total Process Memory](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-total-memory) ([`jobmanager.memory.process.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size)) for containerized deployments (Kubernetes and Yarn) + * [JVM Heap](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-jvm-heap) ([`jobmanager.memory.heap.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size)) for standalone and Mesos deployments + * [Total Process Memory](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-total-memory) ([`jobmanager.memory.process.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size)) for containerized deployments (Kubernetes and Yarn) The following options have been removed and have no effect anymore: * `containerized.heap-cutoff-ratio` * `containerized.heap-cutoff-min` -There is [no container cut-off](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_migration.html#container-cut-off-memory) anymore. 
+There is [no container cut-off](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_migration.html#container-cut-off-memory) anymore. ##### JVM arguments The `direct` and `metaspace` memory of the JobManager's JVM process are now limited by configurable values: - * [`jobmanager.memory.off-heap.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-off-heap-size) - * [`jobmanager.memory.jvm-metaspace.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-jvm-metaspace-size) + * [`jobmanager.memory.off-heap.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-off-heap-size) + * [`jobmanager.memory.jvm-metaspace.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-jvm-metaspace-size) -See also [JVM Parameters](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup.html#jvm-parameters). +See also [JVM Parameters](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup.html#jvm-parameters). {{< hint warning >}} -These new limits can produce the respective `OutOfMemoryError` exceptions if they are not configured properly or there is a respective memory leak. See also [the troubleshooting guide](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_trouble.html#outofmemoryerror-direct-buffer-memory). +These new limits can produce the respective `OutOfMemoryError` exceptions if they are not configured properly or there is a respective memory leak. See also [the troubleshooting guide](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_trouble.html#outofmemoryerror-direct-buffer-memory). 
{{< /hint >}} #### Removal of deprecated mesos.resourcemanager.tasks.mem diff --git a/docs/content.zh/release-notes/flink-1.13.md b/docs/content.zh/release-notes/flink-1.13.md index dceda51f24cd5..7644e08f3beba 100644 --- a/docs/content.zh/release-notes/flink-1.13.md +++ b/docs/content.zh/release-notes/flink-1.13.md @@ -50,7 +50,7 @@ In 1.13, checkpointing configurations have been extracted into their own interfa This change does not affect the runtime behavior and simply provides a better mental model to users. Pipelines can be updated to use the new the new abstractions without losing state, consistency, or change in semantics. -Please follow the [migration guide](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/ops/state/state_backends/#migrating-from-legacy-backends) or the JavaDoc on the deprecated state backend classes - `MemoryStateBackend`, `FsStateBackend` and `RocksDBStateBackend` for migration details. +Please follow the [migration guide](https://nightlies.apache.org/flink/flink-docs-release-1.13/docs/ops/state/state_backends/#migrating-from-legacy-backends) or the JavaDoc on the deprecated state backend classes - `MemoryStateBackend`, `FsStateBackend` and `RocksDBStateBackend` for migration details. #### Unify binary format for Keyed State savepoints diff --git a/docs/content.zh/release-notes/flink-1.7.md b/docs/content.zh/release-notes/flink-1.7.md index 09700dc799ac8..5630556bed2d5 100644 --- a/docs/content.zh/release-notes/flink-1.7.md +++ b/docs/content.zh/release-notes/flink-1.7.md @@ -65,7 +65,7 @@ Before Flink 1.7, serializer snapshots were implemented as a `TypeSerializerConf Moreover, the responsibility of serializer schema compatibility checks lived within the `TypeSerializer`, implemented in the `TypeSerializer#ensureCompatibility(TypeSerializerConfigSnapshot)` method. To be future-proof and to have flexibility to migrate your state serializers and schema, it is highly recommended to migrate from the old abstractions. 
-Details and migration guides can be found [here](https://ci.apache.org/projects/flink/flink-docs-master/dev/stream/state/custom_serialization.html). +Details and migration guides can be found [here](https://nightlies.apache.org/flink/flink-docs-master/dev/stream/state/custom_serialization.html). ### Removal of the legacy mode diff --git a/docs/content.zh/release-notes/flink-1.8.md b/docs/content.zh/release-notes/flink-1.8.md index 3d74d3993ff48..86e09bc784bde 100644 --- a/docs/content.zh/release-notes/flink-1.8.md +++ b/docs/content.zh/release-notes/flink-1.8.md @@ -201,7 +201,7 @@ The `CompositeSerializerSnapshot` utility class has been removed. You should now use `CompositeTypeSerializerSnapshot` instead, for snapshots of composite serializers that delegate serialization to multiple nested serializers. Please see -[here](http://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/stream/state/custom_serialization.html#implementing-a-compositetypeserializersnapshot) +[here](http://nightlies.apache.org/flink/flink-docs-release-1.8/dev/stream/state/custom_serialization.html#implementing-a-compositetypeserializersnapshot) for instructions on using `CompositeTypeSerializerSnapshot`. ### Memory management diff --git a/docs/content.zh/release-notes/flink-1.9.md b/docs/content.zh/release-notes/flink-1.9.md index be14cd326691b..0ce8038faa2a3 100644 --- a/docs/content.zh/release-notes/flink-1.9.md +++ b/docs/content.zh/release-notes/flink-1.9.md @@ -169,7 +169,7 @@ memory segments. The default timeout is 30 seconds, and is configurable via `tas It is possible that for some previously working deployments this default timeout value is too low and might have to be increased. -Please also notice that several network I/O metrics have had their scope changed. See the [1.9 metrics documentation](https://ci.apache.org/projects/flink/flink-docs-master/ops/metrics.html) +Please also notice that several network I/O metrics have had their scope changed. 
See the [1.9 metrics documentation](https://nightlies.apache.org/flink/flink-docs-master/ops/metrics.html) for which metrics are affected. In 1.9.0, these metrics will still be available under their previous scopes, but this may no longer be the case in future versions. @@ -183,7 +183,7 @@ Related issues: Due to a bug in the `AsyncWaitOperator`, in 1.9.0 the default chaining behaviour of the operator is now changed so that it is never chained after another operator. This should not be problematic for migrating from older version snapshots as long as an uid was assigned to the operator. If an uid was not assigned to the operator, please see -the instructions [here](https://ci.apache.org/projects/flink/flink-docs-release-1.9/ops/upgrading.html#matching-operator-state) +the instructions [here](https://nightlies.apache.org/flink/flink-docs-release-1.9/ops/upgrading.html#matching-operator-state) for a possible workaround. Related issues: diff --git a/docs/content/_index.md b/docs/content/_index.md index cd8dc07f57680..a610e74de9e97 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -63,7 +63,7 @@ The reference documentation covers all the details. Some starting points: {{< columns >}} * [DataStream API]({{< ref "docs/dev/datastream/overview" >}}) * [Table API & SQL]({{< ref "docs/dev/table/overview" >}}) -* [Stateful Functions](https://ci.apache.org/projects/flink/flink-statefun-docs-stable/) +* [Stateful Functions](https://nightlies.apache.org/flink/flink-statefun-docs-stable/) <---> diff --git a/docs/content/docs/connectors/datastream/kafka.md b/docs/content/docs/connectors/datastream/kafka.md index 22bfa057161a7..58e03425818ec 100644 --- a/docs/content/docs/connectors/datastream/kafka.md +++ b/docs/content/docs/connectors/datastream/kafka.md @@ -343,7 +343,7 @@ when the record is emitted downstream. `FlinkKafkaConsumer` is deprecated and will be removed with Flink 1.15, please use `KafkaSource` instead. 
{{< /hint >}} -For older references you can look at the Flink 1.13 documentation. +For older references you can look at the Flink 1.13 documentation. ## Kafka Sink @@ -452,7 +452,7 @@ Kafka sink exposes the following metrics in the respective [scope]({{< ref "docs `FlinkKafkaProducer` is deprecated and will be removed with Flink 1.15, please use `KafkaSink` instead. {{< /hint >}} -For older references you can look at the Flink 1.13 documentation. +For older references you can look at the Flink 1.13 documentation. ## Kafka Connector Metrics diff --git a/docs/content/docs/deployment/filesystems/s3.md b/docs/content/docs/deployment/filesystems/s3.md index df81160d8990a..c523e439b74dc 100644 --- a/docs/content/docs/deployment/filesystems/s3.md +++ b/docs/content/docs/deployment/filesystems/s3.md @@ -134,7 +134,7 @@ a technique to improve the scalability of AWS S3 buckets through adding some ran If entropy injection is activated, a configured substring in the path is replaced with random characters. For example, path `s3://my-bucket/checkpoints/_entropy_/dashboard-job/` would be replaced by something like `s3://my-bucket/checkpoints/gf36ikvg/dashboard-job/`. **This only happens when the file creation passes the option to inject entropy!** -Otherwise, the file path removes the entropy key substring entirely. See [FileSystem.create(Path, WriteOption)](https://ci.apache.org/projects/flink/flink-docs-release-1.6/api/java/org/apache/flink/core/fs/FileSystem.html#create-org.apache.flink.core.fs.Path-org.apache.flink.core.fs.FileSystem.WriteOptions-) +Otherwise, the file path removes the entropy key substring entirely. See [FileSystem.create(Path, WriteOption)](https://nightlies.apache.org/flink/flink-docs-release-1.6/api/java/org/apache/flink/core/fs/FileSystem.html#create-org.apache.flink.core.fs.Path-org.apache.flink.core.fs.FileSystem.WriteOptions-) for details. 
{{< hint info >}} diff --git a/docs/content/docs/deployment/memory/mem_migration.md b/docs/content/docs/deployment/memory/mem_migration.md index 15a8d8d66cb38..9c279eec36f47 100644 --- a/docs/content/docs/deployment/memory/mem_migration.md +++ b/docs/content/docs/deployment/memory/mem_migration.md @@ -30,7 +30,7 @@ under the License. The memory setup has changed a lot with the *1.10* release for [TaskManagers]({{< ref "docs/deployment/memory/mem_setup_tm" >}}) and with the *1.11* release for [JobManagers]({{< ref "docs/deployment/memory/mem_setup_jobmanager" >}}). Many configuration options were removed or their semantics changed. This guide will help you to migrate the TaskManager memory configuration from Flink -[<= *1.9*](https://ci.apache.org/projects/flink/flink-docs-release-1.9/ops/mem_setup.html) to >= *1.10* and +[<= *1.9*](https://nightlies.apache.org/flink/flink-docs-release-1.9/ops/mem_setup.html) to >= *1.10* and the JobManager memory configuration from Flink <= *1.10* to >= *1.11*. {{< hint warning >}} diff --git a/docs/content/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md b/docs/content/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md index a490dc05aefb9..61115370fdf19 100644 --- a/docs/content/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md +++ b/docs/content/docs/dev/datastream/fault-tolerance/serialization/types_serialization.md @@ -112,7 +112,7 @@ Java and Scala classes are treated by Flink as a special POJO data type if they POJOs are generally represented with a `PojoTypeInfo` and serialized with the `PojoSerializer` (using [Kryo](https://github.com/EsotericSoftware/kryo) as configurable fallback). The exception is when the POJOs are actually Avro types (Avro Specific Records) or produced as "Avro Reflect Types". In that case the POJO's are represented by an `AvroTypeInfo` and serialized with the `AvroSerializer`. 
-You can also register your own custom serializer if required; see [Serialization](https://ci.apache.org/projects/flink/flink-docs-stable/dev/types_serialization.html#serialization-of-pojo-types) for further information. +You can also register your own custom serializer if required; see [Serialization](https://nightlies.apache.org/flink/flink-docs-stable/dev/types_serialization.html#serialization-of-pojo-types) for further information. Flink analyzes the structure of POJO types, i.e., it learns about the fields of a POJO. As a result POJO types are easier to use than general types. Moreover, Flink can process POJOs more efficiently than general types. diff --git a/docs/content/docs/libs/state_processor_api.md b/docs/content/docs/libs/state_processor_api.md index 31e1a87ef06d2..0ee7faa2be7e2 100644 --- a/docs/content/docs/libs/state_processor_api.md +++ b/docs/content/docs/libs/state_processor_api.md @@ -27,7 +27,7 @@ under the License. # State Processor API Apache Flink's State Processor API provides powerful functionality to reading, writing, and modifying savepoints and checkpoints using Flink’s batch DataSet API. -Due to the [interoperability of DataSet and Table API](https://ci.apache.org/projects/flink/flink-docs-master/dev/table/common.html#integration-with-datastream-and-dataset-api), you can even use relational Table API or SQL queries to analyze and process state data. +Due to the [interoperability of DataSet and Table API](https://nightlies.apache.org/flink/flink-docs-master/dev/table/common.html#integration-with-datastream-and-dataset-api), you can even use relational Table API or SQL queries to analyze and process state data. For example, you can take a savepoint of a running stream processing application and analyze it with a DataSet batch program to verify that the application behaves correctly. 
Or you can read a batch of data from any store, preprocess it, and write the result to a savepoint that you use to bootstrap the state of a streaming application. diff --git a/docs/content/release-notes/flink-1.11.md b/docs/content/release-notes/flink-1.11.md index 2807dd4b1fc8b..d4109c2d37915 100644 --- a/docs/content/release-notes/flink-1.11.md +++ b/docs/content/release-notes/flink-1.11.md @@ -32,7 +32,7 @@ these notes carefully if you are planning to upgrade your Flink version to 1.11. The user can now submit applications and choose to execute their `main()` method on the cluster rather than the client. This allows for more light-weight application submission. For more details, -see the [Application Mode documentation](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/#application-mode). +see the [Application Mode documentation](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/#application-mode). #### Web Submission behaves the same as detached mode. @@ -80,22 +80,22 @@ The examples of `Dockerfiles` and docker image `build.sh` scripts have been remo - `flink-container/docker` - `flink-container/kubernetes` -Check the updated user documentation for [Flink Docker integration](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html) instead. It now describes in detail how to [use](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-a-flink-image) and [customize](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#customize-flink-image) [the Flink official docker image](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#docker-hub-flink-images): configuration options, logging, plugins, adding more dependencies and installing software. 
The documentation also includes examples for Session and Job cluster deployments with: -- [docker run](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-flink-image) -- [docker compose](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-compose) -- [docker swarm](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-swarm) -- [standalone Kubernetes](https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/standalone/kubernetes.html) +Check the updated user documentation for [Flink Docker integration](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html) instead. It now describes in detail how to [use](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-a-flink-image) and [customize](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#customize-flink-image) [the Flink official docker image](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#docker-hub-flink-images): configuration options, logging, plugins, adding more dependencies and installing software. 
The documentation also includes examples for Session and Job cluster deployments with: +- [docker run](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#how-to-run-flink-image) +- [docker compose](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-compose) +- [docker swarm](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/docker.html#flink-with-docker-swarm) +- [standalone Kubernetes](https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/standalone/kubernetes.html) ### Memory Management #### New JobManager Memory Model ##### Overview With [FLIP-116](https://cwiki.apache.org/confluence/display/FLINK/FLIP-116%3A+Unified+Memory+Configuration+for+Job+Managers), a new memory model has been introduced for the JobManager. New configuration options have been introduced to control the memory consumption of the JobManager process. This affects all types of deployments: standalone, YARN, Mesos, and the new active Kubernetes integration. -Please, check the user documentation for [more details](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html). +Please, check the user documentation for [more details](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html). If you try to reuse your previous Flink configuration without any adjustments, the new memory model can result in differently computed memory parameters for the JVM and, thus, performance changes or even failures. 
-In order to start the JobManager process, you have to specify at least one of the following options [`jobmanager.memory.flink.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-flink-size), [`jobmanager.memory.process.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size) or [`jobmanager.memory.heap.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size). -See also [the migration guide](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_migration.html#migrate-job-manager-memory-configuration) for more information. +In order to start the JobManager process, you have to specify at least one of the following options [`jobmanager.memory.flink.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-flink-size), [`jobmanager.memory.process.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size) or [`jobmanager.memory.heap.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size). +See also [the migration guide](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_migration.html#migrate-job-manager-memory-configuration) for more information. 
##### Deprecation and breaking changes The following options are deprecated: @@ -103,24 +103,24 @@ The following options are deprecated: * `jobmanager.heap.mb` If these deprecated options are still used, they will be interpreted as one of the following new options in order to maintain backwards compatibility: - * [JVM Heap](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-jvm-heap) ([`jobmanager.memory.heap.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size)) for standalone and Mesos deployments - * [Total Process Memory](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-total-memory) ([`jobmanager.memory.process.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size)) for containerized deployments (Kubernetes and Yarn) + * [JVM Heap](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-jvm-heap) ([`jobmanager.memory.heap.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-heap-size)) for standalone and Mesos deployments + * [Total Process Memory](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup_jobmanager.html#configure-total-memory) ([`jobmanager.memory.process.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-process-size)) for containerized deployments (Kubernetes and Yarn) The following options have been removed and have no effect anymore: * `containerized.heap-cutoff-ratio` * `containerized.heap-cutoff-min` -There is [no container cut-off](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_migration.html#container-cut-off-memory) anymore. 
+There is [no container cut-off](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_migration.html#container-cut-off-memory) anymore. ##### JVM arguments The `direct` and `metaspace` memory of the JobManager's JVM process are now limited by configurable values: - * [`jobmanager.memory.off-heap.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-off-heap-size) - * [`jobmanager.memory.jvm-metaspace.size`](https://ci.apache.org/projects/flink/flink-docs-master/deployment/config.html#jobmanager-memory-jvm-metaspace-size) + * [`jobmanager.memory.off-heap.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-off-heap-size) + * [`jobmanager.memory.jvm-metaspace.size`](https://nightlies.apache.org/flink/flink-docs-master/deployment/config.html#jobmanager-memory-jvm-metaspace-size) -See also [JVM Parameters](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_setup.html#jvm-parameters). +See also [JVM Parameters](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_setup.html#jvm-parameters). {{< hint warning >}} -These new limits can produce the respective `OutOfMemoryError` exceptions if they are not configured properly or there is a respective memory leak. See also [the troubleshooting guide](https://ci.apache.org/projects/flink/flink-docs-master/deployment/memory/mem_trouble.html#outofmemoryerror-direct-buffer-memory). +These new limits can produce the respective `OutOfMemoryError` exceptions if they are not configured properly or there is a respective memory leak. See also [the troubleshooting guide](https://nightlies.apache.org/flink/flink-docs-master/deployment/memory/mem_trouble.html#outofmemoryerror-direct-buffer-memory). 
{{< /hint >}} #### Removal of deprecated mesos.resourcemanager.tasks.mem diff --git a/docs/content/release-notes/flink-1.13.md b/docs/content/release-notes/flink-1.13.md index 1798564e0da1f..3b093c9e9d7cb 100644 --- a/docs/content/release-notes/flink-1.13.md +++ b/docs/content/release-notes/flink-1.13.md @@ -52,7 +52,7 @@ In 1.13, checkpointing configurations have been extracted into their own interfa This change does not affect the runtime behavior and simply provides a better mental model to users. Pipelines can be updated to use the new the new abstractions without losing state, consistency, or change in semantics. -Please follow the [migration guide](https://ci.apache.org/projects/flink/flink-docs-release-1.13/docs/ops/state/state_backends/#migrating-from-legacy-backends) or the JavaDoc on the deprecated state backend classes - `MemoryStateBackend`, `FsStateBackend` and `RocksDBStateBackend` for migration details. +Please follow the [migration guide](https://nightlies.apache.org/flink/flink-docs-release-1.13/docs/ops/state/state_backends/#migrating-from-legacy-backends) or the JavaDoc on the deprecated state backend classes - `MemoryStateBackend`, `FsStateBackend` and `RocksDBStateBackend` for migration details. #### Unify binary format for Keyed State savepoints diff --git a/docs/content/release-notes/flink-1.7.md b/docs/content/release-notes/flink-1.7.md index 09700dc799ac8..5630556bed2d5 100644 --- a/docs/content/release-notes/flink-1.7.md +++ b/docs/content/release-notes/flink-1.7.md @@ -65,7 +65,7 @@ Before Flink 1.7, serializer snapshots were implemented as a `TypeSerializerConf Moreover, the responsibility of serializer schema compatibility checks lived within the `TypeSerializer`, implemented in the `TypeSerializer#ensureCompatibility(TypeSerializerConfigSnapshot)` method. To be future-proof and to have flexibility to migrate your state serializers and schema, it is highly recommended to migrate from the old abstractions. 
-Details and migration guides can be found [here](https://ci.apache.org/projects/flink/flink-docs-master/dev/stream/state/custom_serialization.html). +Details and migration guides can be found [here](https://nightlies.apache.org/flink/flink-docs-master/dev/stream/state/custom_serialization.html). ### Removal of the legacy mode diff --git a/docs/content/release-notes/flink-1.8.md b/docs/content/release-notes/flink-1.8.md index 3d74d3993ff48..86e09bc784bde 100644 --- a/docs/content/release-notes/flink-1.8.md +++ b/docs/content/release-notes/flink-1.8.md @@ -201,7 +201,7 @@ The `CompositeSerializerSnapshot` utility class has been removed. You should now use `CompositeTypeSerializerSnapshot` instead, for snapshots of composite serializers that delegate serialization to multiple nested serializers. Please see -[here](http://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/stream/state/custom_serialization.html#implementing-a-compositetypeserializersnapshot) +[here](https://nightlies.apache.org/flink/flink-docs-release-1.8/dev/stream/state/custom_serialization.html#implementing-a-compositetypeserializersnapshot) for instructions on using `CompositeTypeSerializerSnapshot`. ### Memory management diff --git a/docs/content/release-notes/flink-1.9.md b/docs/content/release-notes/flink-1.9.md index be14cd326691b..0ce8038faa2a3 100644 --- a/docs/content/release-notes/flink-1.9.md +++ b/docs/content/release-notes/flink-1.9.md @@ -169,7 +169,7 @@ memory segments. The default timeout is 30 seconds, and is configurable via `tas It is possible that for some previously working deployments this default timeout value is too low and might have to be increased. -Please also notice that several network I/O metrics have had their scope changed. See the [1.9 metrics documentation](https://ci.apache.org/projects/flink/flink-docs-master/ops/metrics.html) +Please also notice that several network I/O metrics have had their scope changed.
See the [1.9 metrics documentation](https://nightlies.apache.org/flink/flink-docs-master/ops/metrics.html) for which metrics are affected. In 1.9.0, these metrics will still be available under their previous scopes, but this may no longer be the case in future versions. @@ -183,7 +183,7 @@ Related issues: Due to a bug in the `AsyncWaitOperator`, in 1.9.0 the default chaining behaviour of the operator is now changed so that it is never chained after another operator. This should not be problematic for migrating from older version snapshots as long as an uid was assigned to the operator. If an uid was not assigned to the operator, please see -the instructions [here](https://ci.apache.org/projects/flink/flink-docs-release-1.9/ops/upgrading.html#matching-operator-state) +the instructions [here](https://nightlies.apache.org/flink/flink-docs-release-1.9/ops/upgrading.html#matching-operator-state) for a possible workaround. Related issues: diff --git a/docs/layouts/partials/docs/inject/content-before.html b/docs/layouts/partials/docs/inject/content-before.html index 35b65bdc7922d..bfbea6ce7aba4 100644 --- a/docs/layouts/partials/docs/inject/content-before.html +++ b/docs/layouts/partials/docs/inject/content-before.html @@ -22,14 +22,14 @@ {{ if $.Site.Params.ShowOutDatedWarning }}
- {{ markdownify "This documentation is for an out-of-date version of Apache Flink. We recommend you use the latest [stable version](https://ci.apache.org/projects/flink/flink-docs-stable/)."}} + {{ markdownify "This documentation is for an out-of-date version of Apache Flink. We recommend you use the latest [stable version](https://nightlies.apache.org/flink/flink-docs-stable/)."}}
{{ end }} {{ if (not $.Site.Params.IsStable) }}
- {{ markdownify "This documentation is for an unreleased version of Apache Flink. We recommend you use the latest [stable version](https://ci.apache.org/projects/flink/flink-docs-stable/)."}} + {{ markdownify "This documentation is for an unreleased version of Apache Flink. We recommend you use the latest [stable version](https://nightlies.apache.org/flink/flink-docs-stable/)."}}
{{ end }} diff --git a/docs/layouts/shortcodes/generated/kubernetes_config_configuration.html b/docs/layouts/shortcodes/generated/kubernetes_config_configuration.html index 2235ac23eea3d..7705535c11c8d 100644 --- a/docs/layouts/shortcodes/generated/kubernetes_config_configuration.html +++ b/docs/layouts/shortcodes/generated/kubernetes_config_configuration.html @@ -114,7 +114,7 @@
kubernetes.jobmanager.owner.reference
(none) List<Map> - The user-specified Owner References to be set to the JobManager Deployment. When all the owner resources are deleted, the JobManager Deployment will be deleted automatically, which also deletes all the resources created by this Flink cluster. The value should be formatted as a semicolon-separated list of owner references, where each owner reference is a comma-separated list of `key:value` pairs. E.g., apiVersion:v1,blockOwnerDeletion:true,controller:true,kind:FlinkApplication,name:flink-app-name,uid:flink-app-uid;apiVersion:v1,kind:Deployment,name:deploy-name,uid:deploy-uid + The user-specified Owner References to be set to the JobManager Deployment. When all the owner resources are deleted, the JobManager Deployment will be deleted automatically, which also deletes all the resources created by this Flink cluster. The value should be formatted as a semicolon-separated list of owner references, where each owner reference is a comma-separated list of `key:value` pairs. E.g., apiVersion:v1,blockOwnerDeletion:true,controller:true,kind:FlinkApplication,name:flink-app-name,uid:flink-app-uid;apiVersion:v1,kind:Deployment,name:deploy-name,uid:deploy-uid
kubernetes.jobmanager.replicas
diff --git a/flink-clients/src/main/java/org/apache/flink/client/cli/DynamicPropertiesUtil.java b/flink-clients/src/main/java/org/apache/flink/client/cli/DynamicPropertiesUtil.java index 83ef10df29c3a..d8535b9a08aca 100644 --- a/flink-clients/src/main/java/org/apache/flink/client/cli/DynamicPropertiesUtil.java +++ b/flink-clients/src/main/java/org/apache/flink/client/cli/DynamicPropertiesUtil.java @@ -42,7 +42,7 @@ class DynamicPropertiesUtil { .valueSeparator('=') .desc( "Allows specifying multiple generic configuration options. The available " - + "options can be found at https://ci.apache.org/projects/flink/flink-docs-stable/ops/config.html") + + "options can be found at https://nightlies.apache.org/flink/flink-docs-stable/ops/config.html") .build(); /** diff --git a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java index b77c2f5671ef1..5556ab29e421b 100644 --- a/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java +++ b/flink-core/src/main/java/org/apache/flink/core/fs/FileSystem.java @@ -519,7 +519,7 @@ public static FileSystem getUnguardedFileSystem(final URI fsUri) throws IOExcept + ".org/projects/flink/flink-docs-stable/ops/plugins.html for more information. If you want to " + "use a Hadoop file system for that scheme, please add the scheme to the configuration fs" + ".allowed-fallback-filesystems. For a full list of supported file systems, " - + "please see https://ci.apache.org/projects/flink/flink-docs-stable/ops/filesystems/.", + + "please see https://nightlies.apache.org/flink/flink-docs-stable/ops/filesystems/.", uri.getScheme(), plugins.size() == 1 ? "" : "s", String.join(", ", plugins))); @@ -532,7 +532,7 @@ public static FileSystem getUnguardedFileSystem(final URI fsUri) throws IOExcept + uri.getScheme() + "'. The scheme is not directly supported by Flink and no Hadoop file system to " + "support this scheme could be loaded. 
For a full list of supported file systems, " - + "please see https://ci.apache.org/projects/flink/flink-docs-stable/ops/filesystems/.", + + "please see https://nightlies.apache.org/flink/flink-docs-stable/ops/filesystems/.", e); } } diff --git a/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java b/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java index 32d258bfb6c18..1f4c13813c04e 100644 --- a/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java +++ b/flink-examples/flink-examples-batch/src/main/java/org/apache/flink/examples/java/relational/EmptyFieldsCountAccumulator.java @@ -236,7 +236,7 @@ public String toString() { /** * It is recommended to use POJOs (Plain old Java objects) instead of TupleX for data types with * many fields. Also, POJOs can be used to give large Tuple-types a name. Source + * href="https://nightlies.apache.org/flink/flink-docs-master/apis/best_practices.html#naming-large-tuplex-types">Source * (docs) */ public static class StringTriple extends Tuple3 { diff --git a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java b/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java index 03e1eca544985..4ad46fb862a1f 100644 --- a/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java +++ b/flink-java/src/main/java/org/apache/flink/api/java/DataSet.java @@ -1557,7 +1557,7 @@ public SortPartitionOperator sortPartition(KeySelector keyExtractor * ... * dataset.writeAsText("file:///path1"); } *
  • A directory is always created when fs.output.always-create-directory + * href="https://nightlies.apache.org/flink/flink-docs-master/setup/config.html#file-systems">fs.output.always-create-directory * is set to true in flink-conf.yaml file, even when parallelism is set to 1. *
    {@code .
          * └── path1/
    diff --git a/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java b/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
    index be0315ae604a1..b905c8c399c5b 100644
    --- a/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
    +++ b/flink-kubernetes/src/main/java/org/apache/flink/kubernetes/configuration/KubernetesConfigOptions.java
    @@ -132,7 +132,7 @@ public class KubernetesConfigOptions {
                                                         + "apiVersion:v1,blockOwnerDeletion:true,controller:true,kind:FlinkApplication,name:flink-app-name,uid:flink-app-uid;"
                                                         + "apiVersion:v1,kind:Deployment,name:deploy-name,uid:deploy-uid",
                                                 link(
    -                                                    "https://ci.apache.org/projects/flink/flink-docs-master/deployment/resource-providers/native_kubernetes.html#manual-resource-cleanup",
    +                                                    "https://nightlies.apache.org/flink/flink-docs-master/deployment/resource-providers/native_kubernetes.html#manual-resource-cleanup",
                                                         "Owner References"))
                                         .build());
         public static final ConfigOption JOB_MANAGER_CPU =
    diff --git a/flink-python/README.md b/flink-python/README.md
    index e5fbf9518dff2..42df1c818bf50 100644
    --- a/flink-python/README.md
    +++ b/flink-python/README.md
    @@ -8,11 +8,11 @@ Learn more about Flink at [https://flink.apache.org/](https://flink.apache.org/)
     
     This packaging allows you to write Flink programs in Python, but it is currently a very initial version and will change in future versions.
     
    -In this initial version only Table API is supported, you can find the documentation at [https://ci.apache.org/projects/flink/flink-docs-stable/dev/table/tableApi.html](https://ci.apache.org/projects/flink/flink-docs-stable/dev/table/tableApi.html)
    +In this initial version only Table API is supported, you can find the documentation at [https://nightlies.apache.org/flink/flink-docs-stable/dev/table/tableApi.html](https://nightlies.apache.org/flink/flink-docs-stable/dev/table/tableApi.html)
     
    -The tutorial can be found at [https://ci.apache.org/projects/flink/flink-docs-stable/tutorials/python_table_api.html](https://ci.apache.org/projects/flink/flink-docs-stable/tutorials/python_table_api.html)
    +The tutorial can be found at [https://nightlies.apache.org/flink/flink-docs-stable/tutorials/python_table_api.html](https://nightlies.apache.org/flink/flink-docs-stable/tutorials/python_table_api.html)
     
    -The auto-generated Python docs can be found at [https://ci.apache.org/projects/flink/flink-docs-stable/api/python/](https://ci.apache.org/projects/flink/flink-docs-stable/api/python/)
    +The auto-generated Python docs can be found at [https://nightlies.apache.org/flink/flink-docs-stable/api/python/](https://nightlies.apache.org/flink/flink-docs-stable/api/python/)
     
     ## Python Requirements
     
    diff --git a/flink-python/pyflink/examples/table/mixing_use_of_datastream_and_table.py b/flink-python/pyflink/examples/table/mixing_use_of_datastream_and_table.py
    index f388dfb965565..dc518f4e3d2c7 100644
    --- a/flink-python/pyflink/examples/table/mixing_use_of_datastream_and_table.py
    +++ b/flink-python/pyflink/examples/table/mixing_use_of_datastream_and_table.py
    @@ -70,7 +70,7 @@ def length(data):
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/multi_sink.py b/flink-python/pyflink/examples/table/multi_sink.py
    index 858122bb18cef..966cb5fe70f31 100644
    --- a/flink-python/pyflink/examples/table/multi_sink.py
    +++ b/flink-python/pyflink/examples/table/multi_sink.py
    @@ -64,7 +64,7 @@ def contains_flink(data):
     
         # execute the statement set
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
         statement_set.execute().wait()
     
    diff --git a/flink-python/pyflink/examples/table/pandas/pandas_udaf.py b/flink-python/pyflink/examples/table/pandas/pandas_udaf.py
    index e45bd02cb15e8..e4e8b9ea5222b 100644
    --- a/flink-python/pyflink/examples/table/pandas/pandas_udaf.py
    +++ b/flink-python/pyflink/examples/table/pandas/pandas_udaf.py
    @@ -82,7 +82,7 @@ def mean_udaf(v):
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/process_json_data.py b/flink-python/pyflink/examples/table/process_json_data.py
    index b88339a4a81cb..eeb6f7c88cd1d 100644
    --- a/flink-python/pyflink/examples/table/process_json_data.py
    +++ b/flink-python/pyflink/examples/table/process_json_data.py
    @@ -51,7 +51,7 @@ def process_json_data():
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/process_json_data_with_udf.py b/flink-python/pyflink/examples/table/process_json_data_with_udf.py
    index 0401c291dd9b6..31969e2f67592 100644
    --- a/flink-python/pyflink/examples/table/process_json_data_with_udf.py
    +++ b/flink-python/pyflink/examples/table/process_json_data_with_udf.py
    @@ -60,7 +60,7 @@ def update_tel(data):
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/windowing/over_window.py b/flink-python/pyflink/examples/table/windowing/over_window.py
    index 4e7fe6f4cc26b..982d6b860d468 100644
    --- a/flink-python/pyflink/examples/table/windowing/over_window.py
    +++ b/flink-python/pyflink/examples/table/windowing/over_window.py
    @@ -79,7 +79,7 @@ def tumble_window_demo():
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/windowing/session_window.py b/flink-python/pyflink/examples/table/windowing/session_window.py
    index 421d05408f9a2..5b40a7b9af8bd 100644
    --- a/flink-python/pyflink/examples/table/windowing/session_window.py
    +++ b/flink-python/pyflink/examples/table/windowing/session_window.py
    @@ -75,7 +75,7 @@ def session_window_demo():
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/windowing/sliding_window.py b/flink-python/pyflink/examples/table/windowing/sliding_window.py
    index 11c1079f7e341..1b8bb150fd215 100644
    --- a/flink-python/pyflink/examples/table/windowing/sliding_window.py
    +++ b/flink-python/pyflink/examples/table/windowing/sliding_window.py
    @@ -77,7 +77,7 @@ def sliding_window_demo():
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/windowing/tumble_window.py b/flink-python/pyflink/examples/table/windowing/tumble_window.py
    index c6b609088cc57..dd3ba2ea408b9 100644
    --- a/flink-python/pyflink/examples/table/windowing/tumble_window.py
    +++ b/flink-python/pyflink/examples/table/windowing/tumble_window.py
    @@ -77,7 +77,7 @@ def tumble_window_demo():
         table.execute_insert('sink') \
              .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/examples/table/word_count.py b/flink-python/pyflink/examples/table/word_count.py
    index fef2169ac1109..986a17d6f5d08 100644
    --- a/flink-python/pyflink/examples/table/word_count.py
    +++ b/flink-python/pyflink/examples/table/word_count.py
    @@ -121,7 +121,7 @@ def split(line: Row):
            .execute_insert('sink') \
            .wait()
         # remove .wait if submitting to a remote cluster, refer to
    -    # https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
    +    # https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
         # for more details
     
     
    diff --git a/flink-python/pyflink/table/descriptors.py b/flink-python/pyflink/table/descriptors.py
    index 0aeac26d3d778..1d1ea823330dc 100644
    --- a/flink-python/pyflink/table/descriptors.py
    +++ b/flink-python/pyflink/table/descriptors.py
    @@ -210,7 +210,7 @@ def field(self, field_name: str, field_type: Union[DataType, str]) -> 'Schema':
             Adds a field with the field name and the data type or type string. Required.
             This method can be called multiple times. The call order of this method defines
             also the order of the fields in a row. Here is a document that introduces the type strings:
    -        https://ci.apache.org/projects/flink/flink-docs-stable/dev/table/connect.html#type-strings
    +        https://nightlies.apache.org/flink/flink-docs-stable/dev/table/connect.html#type-strings
     
             :param field_name: The field name.
             :param field_type: The data type or type string of the field.
    diff --git a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/AbstractBroadcastStateTransformation.java b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/AbstractBroadcastStateTransformation.java
    index ece2730caf947..82a50804e1785 100644
    --- a/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/AbstractBroadcastStateTransformation.java
    +++ b/flink-streaming-java/src/main/java/org/apache/flink/streaming/api/transformations/AbstractBroadcastStateTransformation.java
    @@ -35,7 +35,7 @@
      * function on the resulting connected stream.
      *
      * 

    For more information see the + * href="https://nightlies.apache.org/flink/flink-docs-stable/dev/stream/state/broadcast_state.html"> * Broadcast State Pattern documentation page. * * @param The type of the elements in the non-broadcasted input. diff --git a/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliStrings.java b/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliStrings.java index 98a8e77ed33fe..b102870a749c9 100644 --- a/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliStrings.java +++ b/flink-table/flink-sql-client/src/main/java/org/apache/flink/table/client/cli/CliStrings.java @@ -142,7 +142,7 @@ public AttributedString build() { // About Documentation Link. .style(AttributedStyle.DEFAULT) .append( - "\nYou can also type any Flink SQL statement, please visit https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/table/sql/overview/ for more details.") + "\nYou can also type any Flink SQL statement, please visit https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/table/sql/overview/ for more details.") .toAttributedString(); public static final String MESSAGE_WELCOME; diff --git a/flink-table/flink-sql-client/src/test/resources/sql-client-help-command.out b/flink-table/flink-sql-client/src/test/resources/sql-client-help-command.out index 3ea565a3c5da4..50a84ea1435e4 100644 --- a/flink-table/flink-sql-client/src/test/resources/sql-client-help-command.out +++ b/flink-table/flink-sql-client/src/test/resources/sql-client-help-command.out @@ -16,4 +16,4 @@ REMOVE JAR Removes the specified jar file from the submitted jobs' cla SHOW JARS Shows the list of user-specified jar dependencies. This list is impacted by the --jar and --library startup options as well as the ADD/REMOVE JAR commands. Hint: Make sure that a statement ends with ";" for finalizing (multi-line) statements. 
-You can also type any Flink SQL statement, please visit https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/table/sql/overview/ for more details. +You can also type any Flink SQL statement, please visit https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/table/sql/overview/ for more details. diff --git a/flink-table/flink-sql-client/src/test/resources/sql/misc.q b/flink-table/flink-sql-client/src/test/resources/sql/misc.q index 393bea90aa8c6..340adca08eb02 100644 --- a/flink-table/flink-sql-client/src/test/resources/sql/misc.q +++ b/flink-table/flink-sql-client/src/test/resources/sql/misc.q @@ -35,5 +35,5 @@ REMOVE JAR Removes the specified jar file from the submitted jobs' cla SHOW JARS Shows the list of user-specified jar dependencies. This list is impacted by the --jar and --library startup options as well as the ADD/REMOVE JAR commands. Hint: Make sure that a statement ends with ";" for finalizing (multi-line) statements. -You can also type any Flink SQL statement, please visit https://ci.apache.org/projects/flink/flink-docs-stable/docs/dev/table/sql/overview/ for more details. +You can also type any Flink SQL statement, please visit https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/table/sql/overview/ for more details. 
!ok diff --git a/tools/releasing/create_snapshot_branch.sh b/tools/releasing/create_snapshot_branch.sh index 1ac269a98b394..b5fcfa61d442b 100755 --- a/tools/releasing/create_snapshot_branch.sh +++ b/tools/releasing/create_snapshot_branch.sh @@ -50,7 +50,7 @@ perl -pi -e "s#^ VersionTitle = .*# VersionTitle = \"${SHORT_RELEASE_VERSION}\ perl -pi -e "s#^ Branch = .*# Branch = \"release-${SHORT_RELEASE_VERSION}\"#" ${config_file} -url_base="//ci.apache.org/projects/flink/flink-docs-release-" +url_base="//nightlies.apache.org/flink/flink-docs-release-" perl -pi -e "s#^baseURL = .*#baseURL = \'${url_base}${SHORT_RELEASE_VERSION}\'#" ${config_file} perl -pi -e "s#^ JavaDocs = .*# JavaDocs = \"${url_base}${SHORT_RELEASE_VERSION}/api/java/\"#" ${config_file} perl -pi -e "s#^ \[\"JavaDocs\", .*# \[\"JavaDocs\", \"${url_base}${SHORT_RELEASE_VERSION}/api/java/\"\],#" ${config_file}