diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 50e403f78..bcd50d878 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -60,7 +60,7 @@ Once your changes and tests are ready to submit for review: Ensure that all tests pass by running `make check-all`. This runs sequentially lint checks, unit tests and integration tests. These can be executed in isolation using `make lint`, `make test` and `make it` respectively, in case you need to iterate over a subset of tests. - Note: Integration tests are much slower than unit tests and require `docker-compose`. + Note: Integration tests are much slower than unit tests and require `docker compose`. 3. Sign the Contributor License Agreement diff --git a/docker/docker-compose-tests.yml b/docker/docker-compose-tests.yml index 8728b3a42..fce3645a5 100644 --- a/docker/docker-compose-tests.yml +++ b/docker/docker-compose-tests.yml @@ -12,10 +12,17 @@ services: es01: condition: service_healthy es01: - image: docker.elastic.co/elasticsearch/elasticsearch:7.17.0 + image: docker.elastic.co/elasticsearch/elasticsearch:9.2.4 container_name: es01 environment: + - node.name=es01 + - cluster.name=docker-cluster - discovery.type=single-node + - bootstrap.memory_lock=true + # Disable security for tests + - xpack.security.enabled=false + - xpack.security.enrollment.enabled=false + - xpack.security.http.ssl.enabled=false - "ES_JAVA_OPTS=-Xms1g -Xmx1g" volumes: - esdata1:/usr/share/elasticsearch/data @@ -24,10 +31,11 @@ services: networks: - esnet healthcheck: - test: curl -f http://localhost:9200 - interval: 5s - timeout: 2s - retries: 10 + test: ["CMD-SHELL", "curl -s http://localhost:9200/_cluster/health | grep -vq '\"status\":\"red\"'"] + interval: 10s + timeout: 10s + retries: 120 + networks: esnet: name: rally-tests diff --git a/docs/adding_tracks.rst b/docs/adding_tracks.rst index 4eae501be..ad0752b8c 100644 --- a/docs/adding_tracks.rst +++ b/docs/adding_tracks.rst @@ -295,11 +295,11 @@ You can also show details about your track
with ``esrally info --track-path=~/ra 5. force-merge 6. query-match-all (8 clients) -Congratulations, you have created your first track! You can test it with ``esrally race --distribution-version=7.14.1 --track-path=~/rally-tracks/tutorial``. +Congratulations, you have created your first track! You can test it with ``esrally race --distribution-version=9.2.4 --track-path=~/rally-tracks/tutorial``. .. note:: - To test the track with Elasticsearch prior to 7.0.0 you need to update ``index.json`` and ``track.json`` as specified in notes above and then execute ``esrally race --distribution-version=6.5.3 --track-path=~/rally-tracks/tutorial``. + To test the track with an older Elasticsearch version you need to update ``index.json`` and ``track.json`` as specified in notes above and then execute ``esrally race --track-path=~/rally-tracks/tutorial`` with the matching ``--distribution-version``. .. _add_track_test_mode: diff --git a/docs/car.rst b/docs/car.rst index 4bbb0d393..47e4cc38a 100644 --- a/docs/car.rst +++ b/docs/car.rst @@ -162,7 +162,7 @@ You can now verify that everything works by listing all teams in this team repos This shows all teams that are available on the ``master`` branch of this repository. Suppose you only created tracks on the branch ``2`` because you're interested in the performance of Elasticsearch 2.x, then you can specify also the distribution version:: - esrally list teams --team-repository=private --distribution-version=7.0.0 + esrally list teams --team-repository=private --distribution-version=9.2.4 Rally will follow the same branch fallback logic as described above.
diff --git a/docs/cluster_management.rst b/docs/cluster_management.rst index 50eb2beb8..57ae2e1ba 100644 --- a/docs/cluster_management.rst +++ b/docs/cluster_management.rst @@ -25,7 +25,7 @@ In this section we will setup a single Elasticsearch node locally, run a benchma First we need to install Elasticearch:: - esrally install --quiet --distribution-version=7.4.2 --node-name="rally-node-0" --network-host="127.0.0.1" --http-port=39200 --master-nodes="rally-node-0" --seed-hosts="127.0.0.1:39300" + esrally install --quiet --distribution-version=9.2.4 --node-name="rally-node-0" --network-host="127.0.0.1" --http-port=39200 --master-nodes="rally-node-0" --seed-hosts="127.0.0.1:39300" The parameter ``--network-host`` defines the network interface this node will bind to and ``--http-port`` defines which port will be exposed for HTTP traffic. Rally will automatically choose the transport port range as 100 above (39300). The parameters ``--master-nodes`` and ``--seed-hosts`` are necessary for the discovery process. Please see the respective Elasticsearch documentation on `discovery `_ for more details. @@ -70,11 +70,11 @@ Levelling Up: Benchmarking a Cluster This approach of being able to manage individual cluster nodes shows its power when we want to setup a cluster consisting of multiple nodes. At the moment Rally only supports a uniform cluster architecture but with this approach we can also setup arbitrarily complex clusters. The following examples shows how to setup a uniform three node cluster on three machines with the IPs ``192.168.14.77``, ``192.168.14.78`` and ``192.168.14.79``. 
On each machine we will issue the following command (pick the right one per machine):: # on 192.168.14.77 - export INSTALLATION_ID=$(esrally install --quiet --distribution-version=7.4.2 --node-name="rally-node-0" --network-host="192.168.14.77" --http-port=39200 --master-nodes="rally-node-0,rally-node-1,rally-node-2" --seed-hosts="192.168.14.77:39300,192.168.14.78:39300,192.168.14.79:39300" | jq --raw-output '.["installation-id"]') + export INSTALLATION_ID=$(esrally install --quiet --distribution-version=9.2.4 --node-name="rally-node-0" --network-host="192.168.14.77" --http-port=39200 --master-nodes="rally-node-0,rally-node-1,rally-node-2" --seed-hosts="192.168.14.77:39300,192.168.14.78:39300,192.168.14.79:39300" | jq --raw-output '.["installation-id"]') # on 192.168.14.78 - export INSTALLATION_ID=$(esrally install --quiet --distribution-version=7.4.2 --node-name="rally-node-1" --network-host="192.168.14.78" --http-port=39200 --master-nodes="rally-node-0,rally-node-1,rally-node-2" --seed-hosts="192.168.14.77:39300,192.168.14.78:39300,192.168.14.79:39300" | jq --raw-output '.["installation-id"]') + export INSTALLATION_ID=$(esrally install --quiet --distribution-version=9.2.4 --node-name="rally-node-1" --network-host="192.168.14.78" --http-port=39200 --master-nodes="rally-node-0,rally-node-1,rally-node-2" --seed-hosts="192.168.14.77:39300,192.168.14.78:39300,192.168.14.79:39300" | jq --raw-output '.["installation-id"]') # on 192.168.14.79 - export INSTALLATION_ID=$(esrally install --quiet --distribution-version=7.4.2 --node-name="rally-node-2" --network-host="192.168.14.79" --http-port=39200 --master-nodes="rally-node-0,rally-node-1,rally-node-2" --seed-hosts="192.168.14.77:39300,192.168.14.78:39300,192.168.14.79:39300" | jq --raw-output '.["installation-id"]') + export INSTALLATION_ID=$(esrally install --quiet --distribution-version=9.2.4 --node-name="rally-node-2" --network-host="192.168.14.79" --http-port=39200 
--master-nodes="rally-node-0,rally-node-1,rally-node-2" --seed-hosts="192.168.14.77:39300,192.168.14.78:39300,192.168.14.79:39300" | jq --raw-output '.["installation-id"]') Then we pick a random race id, e.g. ``fb38013d-5d06-4b81-b81a-b61c8c10f6e5`` and set it on each machine (including the machine where will generate load):: diff --git a/docs/command_line_reference.rst b/docs/command_line_reference.rst index c2b111f33..56958518f 100644 --- a/docs/command_line_reference.rst +++ b/docs/command_line_reference.rst @@ -109,22 +109,22 @@ Because ``--quiet`` is specified, Rally will suppress all non-essential output ( This subcommand can be used to download Elasticsearch distributions. Example:: - esrally download --distribution-version=6.8.0 --quiet + esrally download --distribution-version=9.2.4 --quiet -This will download the OSS distribution of Elasticsearch 6.8.0. Because ``--quiet`` is specified, Rally will suppress all non-essential output (banners, progress messages etc.) and only return the location of the binary on the local machine after it has downloaded it:: +This will download the OSS distribution of Elasticsearch 9.2.4. Because ``--quiet`` is specified, Rally will suppress all non-essential output (banners, progress messages etc.) 
and only return the location of the binary on the local machine after it has downloaded it:: { - "elasticsearch": "/Users/dm/.rally/benchmarks/distributions/elasticsearch-oss-6.8.0.tar.gz" + "elasticsearch": "/Users/dm/.rally/benchmarks/distributions/elasticsearch-oss-9.2.4.tar.gz" } To download the default distribution you need to specify a license (via ``--car``):: - esrally download --distribution-version=6.8.0 --car=basic-license --quiet + esrally download --distribution-version=9.2.4 --car=basic-license --quiet This will show the path to the default distribution:: { - "elasticsearch": "/Users/dm/.rally/benchmarks/distributions/elasticsearch-6.8.0.tar.gz" + "elasticsearch": "/Users/dm/.rally/benchmarks/distributions/elasticsearch-9.2.4.tar.gz" } ``delete`` @@ -143,7 +143,7 @@ The ``delete`` subcommand is used to delete records for different configuration This subcommand can be used to install a single Elasticsearch node. Example:: - esrally install --quiet --distribution-version=7.4.2 --node-name="rally-node-0" --network-host="127.0.0.1" --http-port=39200 --master-nodes="rally-node-0" --seed-hosts="127.0.0.1:39300" + esrally install --quiet --distribution-version=9.2.4 --node-name="rally-node-0" --network-host="127.0.0.1" --http-port=39200 --master-nodes="rally-node-0" --seed-hosts="127.0.0.1:39300" This will output the id of this installation:: @@ -310,13 +310,13 @@ Used to specify the current node's name in the cluster when it is setup via the This parameter is useful in benchmarks involved multiple Elasticsearch clusters. It's used to configure the cluster name of the current Elasticsearch node when it is setup via the ``install`` or ``race`` subcommand. 
The following example sets up two Elasticsearch clusters: ``cluster-1`` and ``cluster-2``, and each has two nodes:: # install node-1 in cluster-1 - esrally install --quiet --distribution-version=8.2.2 --node-name="node-1" --cluster-name=cluster-1 --network-host="192.168.1.1" --http-port=39200 --master-nodes="node-1" --seed-hosts="192.168.1.1:39300,192.168.1.2:39300" + esrally install --quiet --distribution-version=9.2.4 --node-name="node-1" --cluster-name=cluster-1 --network-host="192.168.1.1" --http-port=39200 --master-nodes="node-1" --seed-hosts="192.168.1.1:39300,192.168.1.2:39300" # install node-2 in cluster-1 - esrally install --quiet --distribution-version=8.2.2 --node-name="node-2" --cluster-name=cluster-1 --network-host="192.168.1.2" --http-port=39200 --master-nodes="node-1" --seed-hosts="192.168.1.1:39300,192.168.1.2:39300" + esrally install --quiet --distribution-version=9.2.4 --node-name="node-2" --cluster-name=cluster-1 --network-host="192.168.1.2" --http-port=39200 --master-nodes="node-1" --seed-hosts="192.168.1.1:39300,192.168.1.2:39300" # install node-3 in cluster-2 - esrally install --quiet --distribution-version=8.2.2 --node-name="node-3" --cluster-name=cluster-2 --network-host="192.168.1.3" --http-port=39200 --master-nodes="node-3" --seed-hosts="192.168.1.3:39300,192.168.1.4:39300" + esrally install --quiet --distribution-version=9.2.4 --node-name="node-3" --cluster-name=cluster-2 --network-host="192.168.1.3" --http-port=39200 --master-nodes="node-3" --seed-hosts="192.168.1.3:39300,192.168.1.4:39300" # install node-4 in cluster-2 - esrally install --quiet --distribution-version=8.2.2 --node-name="node-4" --cluster-name=cluster-2 --network-host="192.168.1.4" --http-port=39200 --master-nodes="node-3" --seed-hosts="192.168.1.3:39300,192.168.1.4:39300" + esrally install --quiet --distribution-version=9.2.4 --node-name="node-4" --cluster-name=cluster-2 --network-host="192.168.1.4" --http-port=39200 --master-nodes="node-3" 
--seed-hosts="192.168.1.3:39300,192.168.1.4:39300" If the ``cluster-name`` parameter is not specified, Rally will use ``rally-benchmark`` as the default cluster name. @@ -425,14 +425,14 @@ Example:: Specifies the name of the target operating system for which an artifact should be downloaded. By default this value is automatically derived based on the operating system Rally is run. This command line flag is only applicable to the ``download`` subcommand and allows to download an artifact for a different operating system. Example:: - esrally download --distribution-version=7.5.1 --target-os=linux + esrally download --distribution-version=9.2.4 --target-os=linux ``target-arch`` ~~~~~~~~~~~~~~~ Specifies the name of the target CPU architecture for which an artifact should be downloaded. By default this value is automatically derived based on the CPU architecture Rally is run. This command line flag is only applicable to the ``download`` subcommand and allows to download an artifact for a different CPU architecture. Example:: - esrally download --distribution-version=7.5.1 --target-arch=x86_64 + esrally download --distribution-version=9.2.4 --target-arch=x86_64 ``car`` @@ -481,7 +481,7 @@ Allows to override variables of Elasticsearch plugins. It accepts a list of comm Example:: - esrally race --track=geonames --distribution-version=6.1.1. --elasticsearch-plugins="x-pack:monitoring-http" --plugin-params="monitoring_type:'http',monitoring_host:'some_remote_host',monitoring_port:10200,monitoring_user:'rally',monitoring_password:'m0n1t0r1ng'" + esrally race --track=geonames --distribution-version=9.2.4 --elasticsearch-plugins="x-pack:monitoring-http" --plugin-params="monitoring_type:'http',monitoring_host:'some_remote_host',monitoring_port:10200,monitoring_user:'rally',monitoring_password:'m0n1t0r1ng'" This enables the HTTP exporter of `X-Pack Monitoring `_ and exports the data to the configured monitoring host.
@@ -594,9 +594,9 @@ This command line parameter sets the major version of the JDK that Rally should Example:: # Run a benchmark with defaults - esrally race --track=geonames --distribution-version=7.0.0 + esrally race --track=geonames --distribution-version=9.2.4 # Force to run with JDK 11 - esrally race --track=geonames --distribution-version=7.0.0 --runtime-jdk=11 + esrally race --track=geonames --distribution-version=9.2.4 --runtime-jdk=11 It is also possible to specify the JDK that is bundled with Elasticsearch with the special value ``bundled``. The `JDK is bundled from Elasticsearch 7.0.0 onwards `_. @@ -640,10 +640,10 @@ If you want Rally to launch and benchmark a cluster using a binary distribution, :: - esrally race --track=geonames --distribution-version=7.0.0 + esrally race --track=geonames --distribution-version=9.2.4 -Rally will then benchmark the official Elasticsearch 7.0.0 distribution. Please check our :doc:`version support page ` to see which Elasticsearch versions are currently supported by Rally. +Rally will then benchmark the official Elasticsearch 9.2.4 distribution. Please check our :doc:`version support page ` to see which Elasticsearch versions are currently supported by Rally. ``distribution-repository`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/developing.rst b/docs/developing.rst index 0b41102d7..025dddcf7 100644 --- a/docs/developing.rst +++ b/docs/developing.rst @@ -8,7 +8,7 @@ Install the following software packages: * `uv `_ * JDK version required to build Elasticsearch. Please refer to the `build setup requirements `_. -* `Docker `_ and on Linux additionally `docker-compose `_. +* `Docker `_ and on Linux additionally `docker compose `_. 
* `jq `_ * git diff --git a/docs/elasticsearch_plugins.rst b/docs/elasticsearch_plugins.rst index 50ac424ed..4662a80da 100644 --- a/docs/elasticsearch_plugins.rst +++ b/docs/elasticsearch_plugins.rst @@ -44,7 +44,7 @@ In order to tell Rally to install a plugin, use the ``--elasticsearch-plugins`` Example:: - esrally race --track=geonames --distribution-version=7.12.0 --elasticsearch-plugins="analysis-icu,analysis-phonetic" + esrally race --track=geonames --distribution-version=9.2.4 --elasticsearch-plugins="analysis-icu,analysis-phonetic" This will install the plugins ``analysis-icu`` and ``analysis-phonetic`` (in that order). In order to use the features that these plugins provide, you need to write a :doc:`custom track `. diff --git a/docs/migrate.rst b/docs/migrate.rst index 3009bf923..ec2342a68 100644 --- a/docs/migrate.rst +++ b/docs/migrate.rst @@ -213,7 +213,7 @@ Previously a subcommand was optional when running a benchmark. With Rally 2.1.0 Invoke Rally with the ``race`` subcommand instead:: - esrally race --distribution-version=7.10.0 + esrally race --distribution-version=9.2.4 Running without a track is deprecated @@ -225,7 +225,7 @@ Previously Rally has implicitly chosen the geonames track as default when ``--tr Invoke Rally with ``--track=geonames`` instead:: - esrally race --distribution-version=7.10.0 --track=geonames + esrally race --distribution-version=9.2.4 --track=geonames Migrating to Rally 2.0.4 @@ -240,7 +240,7 @@ Rally 2.0.4 will warn when invoked without subcommand. 
So instead of invoking:: Invoke Rally with the ``race`` subcommand instead:: - esrally race --distribution-version=7.10.0 + esrally race --distribution-version=9.2.4 When Rally is invoked without a subcommand it will issue the following warning on the command line and in the log file:: diff --git a/docs/pipelines.rst b/docs/pipelines.rst index 00b88329f..2f2f5244d 100644 --- a/docs/pipelines.rst +++ b/docs/pipelines.rst @@ -30,7 +30,7 @@ from-distribution This pipeline allows to benchmark an official Elasticsearch distribution which will be automatically downloaded by Rally. An example invocation:: - esrally race --track=geonames --pipeline=from-distribution --distribution-version=7.0.0 + esrally race --track=geonames --pipeline=from-distribution --distribution-version=9.2.4 The version numbers have to match the name in the download URL path. diff --git a/docs/quickstart.rst b/docs/quickstart.rst index bdcaab3f5..1b8f2492b 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -18,9 +18,9 @@ Run your first race Now we're ready to run our first :doc:`race `:: - esrally race --distribution-version=6.5.3 --track=geonames + esrally race --distribution-version=9.2.4 --track=geonames -This will download Elasticsearch 6.5.3 and run the `geonames `_ :doc:`track ` against it. After the race, a :doc:`summary report ` is written to the command line::: +This will download Elasticsearch 9.2.4 and run the `geonames `_ :doc:`track ` against it. After the race, a :doc:`summary report ` is written to the command line::: ------------------------------------------------------ diff --git a/docs/race.rst b/docs/race.rst index 7c1b825fd..99db79f1b 100644 --- a/docs/race.rst +++ b/docs/race.rst @@ -48,11 +48,11 @@ Starting a Race To start a race you have to define the track and challenge to run. 
For example:: - esrally race --distribution-version=6.0.0 --track=geopoint --challenge=append-fast-with-conflicts + esrally race --distribution-version=9.2.4 --track=geopoint --challenge=append-fast-with-conflicts Rally will then start racing on this track. If you have never started Rally before, it should look similar to the following output:: - $ esrally race --distribution-version=6.0.0 --track=geopoint --challenge=append-fast-with-conflicts + $ esrally race --distribution-version=9.2.4 --track=geopoint --challenge=append-fast-with-conflicts ____ ____ / __ \____ _/ / /_ __ @@ -61,8 +61,8 @@ Rally will then start racing on this track. If you have never started Rally befo /_/ |_|\__,_/_/_/\__, / /____/ - [INFO] Racing on track [geopoint], challenge [append-fast-with-conflicts] and car ['defaults'] with version [6.0.0]. - [INFO] Downloading Elasticsearch 6.0.0 ... [OK] + [INFO] Racing on track [geopoint], challenge [append-fast-with-conflicts] and car ['defaults'] with version [9.2.4]. + [INFO] Downloading Elasticsearch 9.2.4 ... [OK] [INFO] Rally will delete the benchmark candidate after the benchmark [INFO] Downloading data from [http://benchmarks.elasticsearch.org.s3.amazonaws.com/corpora/geopoint/documents.json.bz2] (482 MB) to [/Users/dm/.rally/benchmarks/data/geopoint/documents.json.bz2] ... [OK] [INFO] Decompressing track data from [/Users/dm/.rally/benchmarks/data/geopoint/documents.json.bz2] to [/Users/dm/.rally/benchmarks/data/geopoint/documents.json] (resulting size: 2.28 GB) ... [OK] diff --git a/docs/recipes.rst b/docs/recipes.rst index 1586bbfa0..f2cbc5a43 100644 --- a/docs/recipes.rst +++ b/docs/recipes.rst @@ -84,7 +84,7 @@ To run a benchmark for this scenario follow these steps: 1. :doc:`Install ` and :doc:`configure ` Rally on all machines. Be sure that the same version is installed on all of them and fully :doc:`configured `. 2. Start the :doc:`Rally daemon ` on each machine. The Rally daemon allows Rally to communicate with all remote machines. 
On the benchmark coordinator run ``esrallyd start --node-ip=10.5.5.5 --coordinator-ip=10.5.5.5`` and on the benchmark candidate machines run ``esrallyd start --node-ip=10.5.5.10 --coordinator-ip=10.5.5.5`` and ``esrallyd start --node-ip=10.5.5.11 --coordinator-ip=10.5.5.5`` respectively. The ``--node-ip`` parameter tells Rally the IP of the machine on which it is running. As some machines have more than one network interface, Rally will not attempt to auto-detect the machine IP. The ``--coordinator-ip`` parameter tells Rally the IP of the benchmark coordinator node. -3. Start the benchmark by invoking Rally as usual on the benchmark coordinator, for example: ``esrally race --track=pmc --distribution-version=7.0.0 --target-hosts=10.5.5.10:39200,10.5.5.11:39200``. Rally will derive from the ``--target-hosts`` parameter that it should provision the nodes ``10.5.5.10`` and ``10.5.5.11``. +3. Start the benchmark by invoking Rally as usual on the benchmark coordinator, for example: ``esrally race --track=pmc --distribution-version=9.2.4 --target-hosts=10.5.5.10:39200,10.5.5.11:39200``. Rally will derive from the ``--target-hosts`` parameter that it should provision the nodes ``10.5.5.10`` and ``10.5.5.11``. 4. After the benchmark has finished you can stop the Rally daemon again. On the benchmark coordinator and on the benchmark candidates run ``esrallyd stop``. .. note:: diff --git a/docs/versions.rst b/docs/versions.rst index 4261aaaf3..7b6a25f02 100644 --- a/docs/versions.rst +++ b/docs/versions.rst @@ -10,7 +10,7 @@ However, Rally does not support Elasticsearch clusters using the OSS license. End-of-life Policy ================== -The latest version of Rally allows to benchmark all currently supported versions of Elasticsearch. Once an `Elasticsearch version reaches end-of-life `_, Rally will support benchmarking its last minor version until a new major version comes out. 
For example, after the release of Elasticsearch 8.0.0, Rally dropped support for Elasticsearch < 6.8.0. +The latest version of Rally allows benchmarking all currently supported versions of Elasticsearch. Once an `Elasticsearch version reaches end-of-life `_, Rally may drop support for it at any time. Metrics store ============= diff --git a/esrally/client/asynchronous.py b/esrally/client/asynchronous.py index 9718c5040..378d4eaa4 100644 --- a/esrally/client/asynchronous.py +++ b/esrally/client/asynchronous.py @@ -34,17 +34,23 @@ HeadApiResponse, ListApiResponse, ObjectApiResponse, + OpenTelemetrySpan, TextApiResponse, ) from elastic_transport.client_utils import DEFAULT from elasticsearch import AsyncElasticsearch from elasticsearch._async.client import IlmClient from elasticsearch.compat import warn_stacklevel -from elasticsearch.exceptions import HTTP_EXCEPTIONS, ApiError, ElasticsearchWarning +from elasticsearch.exceptions import ( + HTTP_EXCEPTIONS, + ApiError, + ElasticsearchWarning, + UnsupportedProductError, +) from multidict import CIMultiDict, CIMultiDictProxy from yarl import URL -from esrally.client.common import _WARNING_RE, _mimetype_header_to_compat, _quote_query +from esrally.client.common import _WARNING_RE, _quote_query, mimetype_headers_to_compat from esrally.utils import io, versions @@ -330,10 +336,8 @@ async def put_lifecycle(self, *args, **kwargs): class RallyAsyncElasticsearch(AsyncElasticsearch, RequestContextHolder): - def __init__(self, *args, **kwargs): - distribution_version = kwargs.pop("distribution_version", None) - distribution_flavor = kwargs.pop("distribution_flavor", None) - super().__init__(*args, **kwargs) + def __init__(self, hosts: Any = None, *, distribution_version: str | None = None, distribution_flavor: str | None = None, **kwargs): + super().__init__(hosts, **kwargs) # skip verification at this point; we've already verified this earlier with the synchronous
client. # The async client is used in the hot code path and we use customized overrides (such as that we don't # parse response bodies in some cases for performance reasons, e.g. when using the bulk API). @@ -356,7 +360,7 @@ def options(self, *args, **kwargs): new_self.distribution_flavor = self.distribution_flavor return new_self - async def perform_request( + async def _perform_request( self, method: str, path: str, @@ -364,33 +368,27 @@ async def perform_request( params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, + otel_span: OpenTelemetrySpan, ) -> ApiResponse[Any]: - # We need to ensure that we provide content-type and accept headers - if body is not None: - if headers is None: - headers = {"content-type": "application/json", "accept": "application/json"} - else: - if headers.get("content-type") is None: - headers["content-type"] = "application/json" - if headers.get("accept") is None: - headers["accept"] = "application/json" - if headers: request_headers = self._headers.copy() request_headers.update(headers) else: request_headers = self._headers - # Converts all parts of a Accept/Content-Type headers - # from application/X -> application/vnd.elasticsearch+X - # see https://github.com/elastic/elasticsearch/issues/51816 - # Not applicable to serverless + if body is not None: + # It ensures content-type and accept headers are set. 
+ mimetype = "application/json" + if path.endswith("/_bulk"): + mimetype = "application/x-ndjson" + for header in ("content-type", "accept"): + request_headers.setdefault(header, mimetype) + if not self.is_serverless: - if versions.is_version_identifier(self.distribution_version) and ( - versions.Version.from_string(self.distribution_version) >= versions.Version.from_string("8.0.0") - ): - _mimetype_header_to_compat("Accept", request_headers) - _mimetype_header_to_compat("Content-Type", request_headers) + # Converts all parts of a Accept/Content-Type headers + # from application/X -> application/vnd.elasticsearch+X + # see https://github.com/elastic/elasticsearch/issues/51816 + mimetype_headers_to_compat(request_headers, self.distribution_version) if params: target = f"{path}?{_quote_query(params)}" @@ -407,6 +405,7 @@ async def perform_request( retry_on_status=self._retry_on_status, retry_on_timeout=self._retry_on_timeout, client_meta=self._client_meta, + otel_span=otel_span, ) # HEAD with a 404 is returned as a normal response @@ -430,6 +429,19 @@ async def perform_request( raise HTTP_EXCEPTIONS.get(meta.status, ApiError)(message=message, meta=meta, body=resp_body) + # 'X-Elastic-Product: Elasticsearch' should be on every 2XX response. + if not self._verified_elasticsearch: + # If the header is set we mark the server as verified. + if meta.headers.get("x-elastic-product", "") == "Elasticsearch": + self._verified_elasticsearch = True + # Otherwise we only raise an error on 2XX responses. 
+ elif meta.status >= 200 and meta.status < 300: + raise UnsupportedProductError( + message="The client noticed that the server is not Elasticsearch " "and we do not support this unknown product", + meta=meta, + body=resp_body, + ) + # 'Warning' headers should be reraised as 'ElasticsearchWarning' if "warning" in meta.headers: warning_header = (meta.headers.get("warning") or "").strip() diff --git a/esrally/client/common.py b/esrally/client/common.py index e0ca051c9..328eb0adf 100644 --- a/esrally/client/common.py +++ b/esrally/client/common.py @@ -3,26 +3,33 @@ from datetime import date, datetime from typing import Any +import elastic_transport from elastic_transport.client_utils import percent_encode -from elasticsearch import VERSION - - -def _client_major_version_to_str(version: tuple) -> str: - return str(version[0]) +from esrally.utils import versions _WARNING_RE = re.compile(r"\"([^\"]*)\"") -_COMPAT_MIMETYPE_TEMPLATE = "application/vnd.elasticsearch+%s; compatible-with=" + _client_major_version_to_str(VERSION) _COMPAT_MIMETYPE_RE = re.compile(r"application/(json|x-ndjson|vnd\.mapbox-vector-tile)") -_COMPAT_MIMETYPE_SUB = _COMPAT_MIMETYPE_TEMPLATE % (r"\g<1>",) -def _mimetype_header_to_compat(header, request_headers): - # Converts all parts of a Accept/Content-Type headers - # from application/X -> application/vnd.elasticsearch+X - mimetype = request_headers.get(header, None) if request_headers else None - if mimetype: - request_headers[header] = _COMPAT_MIMETYPE_RE.sub(_COMPAT_MIMETYPE_SUB, mimetype) +def mimetype_headers_to_compat( + headers: elastic_transport.HttpHeaders, + distribution_version: str | None = None, +) -> None: + if not headers: + return + + major_version = 8 + if versions.is_version_identifier(distribution_version): + major_version = versions.Version.from_string(distribution_version).major + + for header in ("accept", "content-type"): + mimetype = headers.get(header) + if not mimetype: + continue + headers[header] = 
_COMPAT_MIMETYPE_RE.sub( + "application/vnd.elasticsearch+%s; compatible-with=%s" % (r"\g<1>", major_version), mimetype + ) def _escape(value: Any) -> str: diff --git a/esrally/client/factory.py b/esrally/client/factory.py index 9fe0186dd..29e4198af 100644 --- a/esrally/client/factory.py +++ b/esrally/client/factory.py @@ -260,7 +260,7 @@ async def on_request_end(session, trace_config_ctx, params): hosts=self.hosts, transport_class=RallyAsyncTransport, ssl_context=self.ssl_context, - maxsize=self.max_connections, + connections_per_node=self.max_connections, **self.client_options, ) diff --git a/esrally/client/synchronous.py b/esrally/client/synchronous.py index 29d6babfc..692e98403 100644 --- a/esrally/client/synchronous.py +++ b/esrally/client/synchronous.py @@ -25,6 +25,7 @@ HeadApiResponse, ListApiResponse, ObjectApiResponse, + OpenTelemetrySpan, TextApiResponse, ) from elastic_transport.client_utils import DEFAULT @@ -37,7 +38,7 @@ UnsupportedProductError, ) -from esrally.client.common import _WARNING_RE, _mimetype_header_to_compat, _quote_query +from esrally.client.common import _WARNING_RE, _quote_query, mimetype_headers_to_compat from esrally.utils import versions @@ -122,11 +123,8 @@ def check_product(cls, headers, response): class RallySyncElasticsearch(Elasticsearch): - def __init__(self, *args, **kwargs): - distribution_version = kwargs.pop("distribution_version", None) - distribution_flavor = kwargs.pop("distribution_flavor", None) - super().__init__(*args, **kwargs) - self._verified_elasticsearch = None + def __init__(self, hosts: Any = None, *, distribution_version: str | None = None, distribution_flavor: str | None = None, **kwargs): + super().__init__(hosts, **kwargs) self.distribution_version = distribution_version self.distribution_flavor = distribution_flavor @@ -140,7 +138,7 @@ def options(self, *args, **kwargs): new_self.distribution_flavor = self.distribution_flavor return new_self - def perform_request( + def _perform_request( self, method: 
str, path: str, @@ -148,45 +146,27 @@ def perform_request( params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, + otel_span: OpenTelemetrySpan, ) -> ApiResponse[Any]: - # We need to ensure that we provide content-type and accept headers - if body is not None: - if headers is None: - headers = {"content-type": "application/json", "accept": "application/json"} - else: - if headers.get("content-type") is None: - headers["content-type"] = "application/json" - if headers.get("accept") is None: - headers["accept"] = "application/json" - if headers: request_headers = self._headers.copy() request_headers.update(headers) else: request_headers = self._headers - if self._verified_elasticsearch is None: - info = self.transport.perform_request(method="GET", target="/", headers=request_headers) - info_meta = info.meta - info_body = info.body - - if not 200 <= info_meta.status < 299: - raise HTTP_EXCEPTIONS.get(info_meta.status, ApiError)(message=str(info_body), meta=info_meta, body=info_body) - - self._verified_elasticsearch = _ProductChecker.check_product(info_meta.headers, info_body) - - if self._verified_elasticsearch is not True: - _ProductChecker.raise_error(self._verified_elasticsearch, info_meta, info_body) + if body is not None: + # It ensures content-type and accept headers are set. 
+ mimetype = "application/json" + if path.endswith("/_bulk"): + mimetype = "application/x-ndjson" + for header in ("content-type", "accept"): + request_headers.setdefault(header, mimetype) - # Converts all parts of a Accept/Content-Type headers - # from application/X -> application/vnd.elasticsearch+X - # see https://github.com/elastic/elasticsearch/issues/51816 if not self.is_serverless: - if versions.is_version_identifier(self.distribution_version) and ( - versions.Version.from_string(self.distribution_version) >= versions.Version.from_string("8.0.0") - ): - _mimetype_header_to_compat("Accept", headers) - _mimetype_header_to_compat("Content-Type", headers) + # Converts all parts of an Accept/Content-Type headers + # from application/X -> application/vnd.elasticsearch+X + # see https://github.com/elastic/elasticsearch/issues/51816 + mimetype_headers_to_compat(request_headers, self.distribution_version) if params: target = f"{path}?{_quote_query(params)}" @@ -203,6 +183,7 @@ def perform_request( retry_on_status=self._retry_on_status, retry_on_timeout=self._retry_on_timeout, client_meta=self._client_meta, + otel_span=otel_span, ) # HEAD with a 404 is returned as a normal response @@ -226,6 +207,19 @@ def perform_request( raise HTTP_EXCEPTIONS.get(meta.status, ApiError)(message=message, meta=meta, body=resp_body) + # 'X-Elastic-Product: Elasticsearch' should be on every 2XX response. + if not self._verified_elasticsearch: + # If the header is set we mark the server as verified. + if meta.headers.get("x-elastic-product", "") == "Elasticsearch": + self._verified_elasticsearch = True + # Otherwise we only raise an error on 2XX responses. 
+ elif meta.status >= 200 and meta.status < 300: + raise UnsupportedProductError( + message=("The client noticed that the server is not Elasticsearch " "and we do not support this unknown product"), + meta=meta, + body=resp_body, + ) + # 'Warning' headers should be reraised as 'ElasticsearchWarning' if "warning" in meta.headers: warning_header = (meta.headers.get("warning") or "").strip() diff --git a/esrally/mechanic/launcher.py b/esrally/mechanic/launcher.py index 41ca46ab7..3c263318c 100644 --- a/esrally/mechanic/launcher.py +++ b/esrally/mechanic/launcher.py @@ -65,7 +65,7 @@ def _start_process(self, binary_path): self._wait_for_healthy_running_container(container_id, DockerLauncher.PROCESS_WAIT_TIMEOUT_SECONDS) def _docker_compose(self, compose_config, cmd): - return "docker-compose -f {} {}".format(os.path.join(compose_config, "docker-compose.yml"), cmd) + return "docker compose -f {} {}".format(os.path.join(compose_config, "docker-compose.yml"), cmd) def _get_container_id(self, compose_config): compose_ps_cmd = self._docker_compose(compose_config, "ps -q") diff --git a/esrally/min-es-version.txt b/esrally/min-es-version.txt index e029aa99b..ae9a76b92 100644 --- a/esrally/min-es-version.txt +++ b/esrally/min-es-version.txt @@ -1 +1 @@ -6.8.0 +8.0.0 diff --git a/esrally/track/loader.py b/esrally/track/loader.py index e256970a6..f778e6e82 100644 --- a/esrally/track/loader.py +++ b/esrally/track/loader.py @@ -235,6 +235,7 @@ def _install_dependencies(dependencies): if dependencies: log_path = os.path.join(paths.logs(), "dependency.log") console.info(f"Installing track dependencies [{', '.join(dependencies)}]") + os.makedirs(os.path.dirname(log_path), exist_ok=True) try: with open(log_path, "ab") as install_log: subprocess.check_call( diff --git a/esrally/utils/versions.py b/esrally/utils/versions.py index ceaf7a5f6..d7644de93 100644 --- a/esrally/utils/versions.py +++ b/esrally/utils/versions.py @@ -29,11 +29,11 @@ def _versions_pattern(strict): return VERSIONS 
if strict else VERSIONS_OPTIONAL -def is_version_identifier(text, strict=True): +def is_version_identifier(text: str | None, strict: bool = True) -> bool: return text is not None and _versions_pattern(strict).match(text) is not None -def is_serverless(text): +def is_serverless(text) -> bool: return text == "serverless" diff --git a/it/__init__.py b/it/__init__.py index 8863307c0..6a6c238c2 100644 --- a/it/__init__.py +++ b/it/__init__.py @@ -31,10 +31,7 @@ from esrally.utils import process CONFIG_NAMES = ["in-memory-it", "es-it"] -DISTRIBUTIONS = ["8.4.0"] -# There are no ARM distribution artefacts for 6.8.0, which can't be tested on Apple Silicon -if platform.machine() != "arm64": - DISTRIBUTIONS.insert(0, "6.8.0") +DISTRIBUTIONS = ["8.4.0", "9.2.4"] TRACKS = ["geonames", "nyc_taxis", "http_logs", "nested"] ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) diff --git a/it/conftest.py b/it/conftest.py index df5f6476e..ae0109b77 100644 --- a/it/conftest.py +++ b/it/conftest.py @@ -30,7 +30,7 @@ def check_prerequisites(): print("Checking prerequisites...") if process.run_subprocess_with_logging("docker ps") != 0: raise AssertionError("Docker must be installed and the daemon must be up and running to run integration tests.") - if process.run_subprocess_with_logging("docker-compose --help") != 0: + if process.run_subprocess_with_logging("docker compose --help") != 0: raise AssertionError("Docker Compose is required to run integration tests.") diff --git a/it/docker_dev_image_test.py b/it/docker_dev_image_test.py index adcefff89..2b8b83b74 100644 --- a/it/docker_dev_image_test.py +++ b/it/docker_dev_image_test.py @@ -16,59 +16,42 @@ # under the License. 
import os +import subprocess + +import pytest import it from esrally import version -from esrally.utils import process - - -def test_docker_geonames(): - test_command = ( - "race --pipeline=benchmark-only --test-mode --track=geonames --challenge=append-no-conflicts-index-only --target-hosts=es01:9200" - ) - run_docker_compose_test(test_command) - - -def test_docker_list_tracks(): - test_command = "list tracks" - run_docker_compose_test(test_command) - - -def test_docker_help(): - test_command = "--help" - run_docker_compose_test(test_command) - +from esrally.utils import cases + + +@cases.cases( + arg_name="command", + help="--help", + race_geonames=( + "race --pipeline=benchmark-only --test-mode --track=geonames --challenge=append-no-conflicts-index-only " "--target-hosts=es01:9200" + ), + list_tracks="list tracks", +) +def test_docker_compose(command: str): + env = os.environ.copy() + env["TEST_COMMAND"] = command + env["RALLY_DOCKER_IMAGE"] = "elastic/rally" + env["RALLY_VERSION"] = version.__version__ + env["RALLY_VERSION_TAG"] = version.__version__ -def test_docker_override_cmd(): - test_command = ( - "esrally race --pipeline=benchmark-only --test-mode --track=geonames " - "--challenge=append-no-conflicts-index-only --target-hosts=es01:9200" - ) - run_docker_compose_test(test_command) - - -def run_docker_compose_test(test_command): try: - if run_docker_compose_up(test_command) != 0: - raise AssertionError(f"The docker-compose test failed with test command: {test_command}") + return subprocess.run( + f"docker compose -f {it.ROOT_DIR}/docker/docker-compose-tests.yml up --abort-on-container-exit", + env=env, + capture_output=False, # We'll define streams manually + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + check=True, + shell=True, + ) + except subprocess.CalledProcessError as err: + pytest.fail(f"Docker compose test failed:\n{err.stdout}") finally: - # Always ensure proper cleanup regardless of results - run_docker_compose_down() - - 
-def run_docker_compose_up(test_command): - env_variables = os.environ.copy() - env_variables["TEST_COMMAND"] = test_command - env_variables["RALLY_DOCKER_IMAGE"] = "elastic/rally" - env_variables["RALLY_VERSION"] = version.__version__ - env_variables["RALLY_VERSION_TAG"] = version.__version__ - - return process.run_subprocess_with_logging( - f"docker-compose -f {it.ROOT_DIR}/docker/docker-compose-tests.yml up --abort-on-container-exit", - env=env_variables, - ) - - -def run_docker_compose_down(): - if process.run_subprocess_with_logging(f"docker-compose -f {it.ROOT_DIR}/docker/docker-compose-tests.yml down -v") != 0: - raise AssertionError("Failed to stop running containers from docker-compose-tests.yml") + subprocess.run(f"docker compose -f {it.ROOT_DIR}/docker/docker-compose-tests.yml down -v", shell=True, check=False) diff --git a/pyproject.toml b/pyproject.toml index fbbef91cb..f982354fd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,16 +42,14 @@ classifiers = [ ################################################################################################ dependencies = [ # License: Apache 2.0 - "elasticsearch[async]==8.6.1", - "elastic-transport==8.4.1", + "elasticsearch[async]==9.2.1", + "elastic-transport==9.2.1", # License: MIT - "urllib3==1.26.19", + "urllib3==2.6.3", # License: Apache 2.0 "aiohttp==3.13.3", "aiosignal==1.4.0", - "docker==6.0.0", - # avoid specific requests version to fix bug in docker-py - "requests<2.32.0", + "requests==2.32.5", # License: BSD "psutil==5.9.4", # License: MIT @@ -92,6 +90,8 @@ dependencies = [ "hatch==1.3.1", "hatchling==1.6.0", "wheel==0.45.1", + "pip==25.2", + "docker>=7.1.0", ] [project.optional-dependencies] diff --git a/recipes/ccr/start.sh b/recipes/ccr/start.sh index c36d4c5d8..19a33f0ea 100755 --- a/recipes/ccr/start.sh +++ b/recipes/ccr/start.sh @@ -4,10 +4,10 @@ set -e source .elastic-version # Start metrics store -docker-compose -f ./metricstore-docker-compose.yml up -d +docker compose -f 
./metricstore-docker-compose.yml up -d # Start Elasticsearch -docker-compose up -d +docker compose up -d printf "Waiting for clusters to get ready " diff --git a/recipes/ccr/stop.sh b/recipes/ccr/stop.sh index c985d3cf4..45862daf6 100755 --- a/recipes/ccr/stop.sh +++ b/recipes/ccr/stop.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash source .elastic-version -docker-compose down -v -docker-compose -f metricstore-docker-compose.yml down -v +docker compose down -v +docker compose -f metricstore-docker-compose.yml down -v diff --git a/release-docker-test.sh b/release-docker-test.sh index b9d590318..9c92ea2a3 100755 --- a/release-docker-test.sh +++ b/release-docker-test.sh @@ -32,7 +32,7 @@ function check_prerequisites { exit 1 fi - if ! type docker-compose > /dev/null; then + if ! type docker compose > /dev/null; then echo "docker compose is necessary to run the integration tests" exit 1 fi @@ -84,11 +84,11 @@ function exit_if_docker_not_running { function docker_compose { if [[ "$1" == "up" ]]; then - docker-compose -f docker/docker-compose-tests.yml up --abort-on-container-exit + docker compose -f docker/docker-compose-tests.yml up --abort-on-container-exit elif [[ "$1" == "down" ]]; then - docker-compose -f docker/docker-compose-tests.yml down -v + docker compose -f docker/docker-compose-tests.yml down -v else - error "Unknown argument [$1] for docker-compose, exiting." + error "Unknown argument [$1] for docker compose, exiting." fi } diff --git a/tests/client/common_test.py b/tests/client/common_test.py deleted file mode 100644 index dac449c60..000000000 --- a/tests/client/common_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. 
licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -from esrally.client import common - - -# pylint: disable=protected-access -def test_client_major_version_to_str(): - version = (8, 2, 0) - assert common._client_major_version_to_str(version) == "8" diff --git a/tests/client/factory_test.py b/tests/client/factory_test.py index 9abf37fc0..9fc28b97a 100644 --- a/tests/client/factory_test.py +++ b/tests/client/factory_test.py @@ -372,7 +372,7 @@ def test_create_async_client_with_api_key_auth_override(self, es): hosts=["https://localhost:9200"], transport_class=RallyAsyncTransport, ssl_context=f.ssl_context, - maxsize=f.max_connections, + connections_per_node=f.max_connections, verify_certs=True, serializer=f.client_options["serializer"], api_key=api_key, diff --git a/tests/driver/runner_test.py b/tests/driver/runner_test.py index ae2875dd9..38673e191 100644 --- a/tests/driver/runner_test.py +++ b/tests/driver/runner_test.py @@ -22,6 +22,7 @@ import json import math import random +import typing from unittest import mock import elastic_transport @@ -5968,7 +5969,9 @@ async def test_create_ilm_policy_without_request_params(self, es): @mock.patch("esrally.client.asynchronous.IlmClient") @pytest.mark.asyncio + @typing.no_type_check async def test_RallyIlmClient_rewrites_kwargs(self, es_ilm): + es = RallyAsyncElasticsearch(hosts=["http://localhost:9200"]) es_ilm.put_lifecycle = mock.AsyncMock(return_value={}) diff --git a/tests/mechanic/launcher_test.py 
b/tests/mechanic/launcher_test.py index 9bdb84c6c..a22129c35 100644 --- a/tests/mechanic/launcher_test.py +++ b/tests/mechanic/launcher_test.py @@ -347,7 +347,7 @@ class TestDockerLauncher: @mock.patch("esrally.utils.process.run_subprocess_with_output") def test_starts_container_successfully(self, run_subprocess_with_output, run_subprocess_with_logging): run_subprocess_with_logging.return_value = 0 - # Docker container id (from docker-compose ps), Docker container id (from docker ps --filter ...) + # Docker container id (from docker compose ps), Docker container id (from docker ps --filter ...) run_subprocess_with_output.side_effect = [["de604d0d"], ["de604d0d"]] cfg = config.Config() docker = launcher.DockerLauncher(cfg) @@ -373,10 +373,10 @@ def test_starts_container_successfully(self, run_subprocess_with_output, run_sub assert node.node_name == "testnode" assert node.telemetry is not None - run_subprocess_with_logging.assert_called_once_with("docker-compose -f /bin/docker-compose.yml up -d") + run_subprocess_with_logging.assert_called_once_with("docker compose -f /bin/docker-compose.yml up -d") run_subprocess_with_output.assert_has_calls( [ - mock.call("docker-compose -f /bin/docker-compose.yml ps -q"), + mock.call("docker compose -f /bin/docker-compose.yml ps -q"), mock.call('docker ps -a --filter "id=de604d0d" --filter "status=running" --filter "health=healthy" -q'), ] ) @@ -386,7 +386,7 @@ def test_starts_container_successfully(self, run_subprocess_with_output, run_sub @mock.patch("esrally.utils.process.run_subprocess_with_output") def test_container_not_started(self, run_subprocess_with_output, run_subprocess_with_logging, sleep): run_subprocess_with_logging.return_value = 0 - # Docker container id (from docker-compose ps), but NO Docker container id (from docker ps --filter...) twice + # Docker container id (from docker compose ps), but NO Docker container id (from docker ps --filter...) 
twice run_subprocess_with_output.side_effect = [["de604d0d"], [], []] cfg = config.Config() # ensure we only check the status two times @@ -422,7 +422,7 @@ def test_stops_container_successfully_with_metrics_store(self, run_subprocess_wi add_metadata_for_node.assert_called_once_with(metrics_store, "testnode", "127.0.0.1") - run_subprocess_with_logging.assert_called_once_with("docker-compose -f /bin/docker-compose.yml down") + run_subprocess_with_logging.assert_called_once_with("docker compose -f /bin/docker-compose.yml down") @mock.patch("esrally.telemetry.add_metadata_for_node") @mock.patch("esrally.utils.process.run_subprocess_with_logging") @@ -437,4 +437,4 @@ def test_stops_container_when_no_metrics_store_is_provided(self, run_subprocess_ assert add_metadata_for_node.call_count == 0 - run_subprocess_with_logging.assert_called_once_with("docker-compose -f /bin/docker-compose.yml down") + run_subprocess_with_logging.assert_called_once_with("docker compose -f /bin/docker-compose.yml down") diff --git a/tests/track/loader_test.py b/tests/track/loader_test.py index e60271528..5d2bd181d 100644 --- a/tests/track/loader_test.py +++ b/tests/track/loader_test.py @@ -16,18 +16,22 @@ # under the License. 
import copy +import dataclasses import os import random import re +import subprocess +import sys import textwrap import urllib.error from unittest import mock import pytest -from esrally import config, exceptions +from esrally import config, exceptions, paths from esrally.track import loader, track -from esrally.utils import io +from esrally.utils import console, io +from esrally.utils.cases import cases def strip_ws(s): @@ -4323,3 +4327,40 @@ def test_allow_to_specify_default_preparator(self): ] actual_processors = [proc.__class__ for proc in tpr.processors] assert len(expected_processors) == len(actual_processors) + + +@dataclasses.dataclass +class InstallDependenciesCase: + requirements: list[str] + + +@cases( + empty=InstallDependenciesCase(requirements=[]), + simple=InstallDependenciesCase(requirements=["pyyaml"]), +) +def test_install_dependencies(case: InstallDependenciesCase, monkeypatch: pytest.MonkeyPatch, tmpdir) -> None: + # pylint: disable=protected-access + monkeypatch.chdir(str(tmpdir)) + monkeypatch.setattr(paths, "logs", lambda: "./logs") + monkeypatch.setattr(paths, "libs", lambda: "./libs") + monkeypatch.setattr(console, "info", mock.create_autospec(console.info)) + monkeypatch.setattr(subprocess, "check_call", mock.create_autospec(subprocess.check_call)) + loader._install_dependencies(case.requirements) + + if not case.requirements: + subprocess.check_call.assert_not_called() + assert not os.path.isdir("./logs") + return + + subprocess.check_call.assert_called_once() + assert subprocess.check_call.call_args[0][0] == [ + sys.executable, + "-m", + "pip", + "install", + *case.requirements, + "--upgrade", + "--target", + "./libs", + ] + assert os.path.isfile("./logs/dependency.log") diff --git a/uv.lock b/uv.lock index 5e0c506ec..1afd40411 100644 --- a/uv.lock +++ b/uv.lock @@ -530,18 +530,16 @@ wheels = [ [[package]] name = "docker" -version = "6.0.0" +version = "7.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = 
"packaging" }, { name = "pywin32", marker = "sys_platform == 'win32'" }, { name = "requests" }, { name = "urllib3" }, - { name = "websocket-client" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1a/d1/c41d51a0b5192533885545e031ee1b98ee6dc93ceb0c1deb4ecfe212a9a8/docker-6.0.0.tar.gz", hash = "sha256:19e330470af40167d293b0352578c1fa22d74b34d3edf5d4ff90ebc203bbb2f1", size = 257587, upload-time = "2022-08-18T19:58:51.128Z" } +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/57/16/71275ff97da8d2b3b1895655182eb18692d234860bfb42366aaf511389af/docker-6.0.0-py3-none-any.whl", hash = "sha256:6e06ee8eca46cd88733df09b6b80c24a1a556bc5cb1e1ae54b2c239886d245cf", size = 147235, upload-time = "2022-08-18T19:58:48.937Z" }, + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, ] [[package]] @@ -573,27 +571,32 @@ wheels = [ [[package]] name = "elastic-transport" -version = "8.4.1" +version = "9.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, + { name = "sniffio" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/6b/db/d934d605258d38bd470c83d535c3a73c3d01e4ad357ecb4336300fbb8e88/elastic-transport-8.4.1.tar.gz", hash = "sha256:e5548997113c5d9566c9a1a51ed67bce50a4871bc0e44b692166461279e4167e", size = 44847, upload-time = "2023-09-25T20:06:26.619Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/23/0a/a92140b666afdcb9862a16e4d80873b3c887c1b7e3f17e945fc3460edf1b/elastic_transport-9.2.1.tar.gz", hash = "sha256:97d9abd638ba8aa90faa4ca1bf1a18bde0fe2088fbc8757f2eb7b299f205773d", size = 77403, upload-time = "2025-12-23T11:54:12.849Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7e/1c/13bb1826382a1275e9191e9ab5cac3c59247f49c4b4dd96b131ec123d9ff/elastic_transport-8.4.1-py3-none-any.whl", hash = "sha256:c718ce40e8217b6045604961463c10da69a152dda07af4e25b3feae8d7965fc0", size = 59545, upload-time = "2023-09-25T20:06:24.688Z" }, + { url = "https://files.pythonhosted.org/packages/2c/e6/a42b600ae8b808371f740381f6c32050cad93f870d36cc697b8b7006bf7c/elastic_transport-9.2.1-py3-none-any.whl", hash = "sha256:39e1a25e486af34ce7aa1bc9005d1c736f1b6fb04c9b64ea0604ded5a61fc1d4", size = 65327, upload-time = "2025-12-23T11:54:11.681Z" }, ] [[package]] name = "elasticsearch" -version = "8.6.1" +version = "9.2.1" source = { registry = "https://pypi.org/simple" } dependencies = [ + { name = "anyio" }, { name = "elastic-transport" }, + { name = "python-dateutil" }, + { name = "sniffio" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/43/b8/105646badde05925d73d0b86db890a100c93e4258be1af8c6a370888fb0e/elasticsearch-8.6.1.tar.gz", hash = "sha256:5c9217c45d36c9872b97681320b20e7fb6eb10867a88ad81345bca13ef92aedf", size = 306022, upload-time = "2023-01-27T23:26:12.897Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/6c/67bb17ca0035b0cac4cfbbe64e18d120203fef22da66dd4c636563a0ea63/elasticsearch-9.2.1.tar.gz", hash = "sha256:97f473418e8976611349757287ac982acf12f4e305182863d985d5a031c36830", size = 878062, upload-time = "2025-12-23T14:37:31.694Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d4/1e/3c6b72cdeac41c83efdadf8a6385ca921b84cb9c37d125ec9cd2940773bc/elasticsearch-8.6.1-py3-none-any.whl", hash = 
"sha256:7c340008bf01f81fe633af7f473daed42c30481837aa828646663eb7a426acb8", size = 385396, upload-time = "2023-01-27T23:26:07.685Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d5/84264c29ec67f2f8129676ce11f05defb52f44e97e5f411db9a220f2aa43/elasticsearch-9.2.1-py3-none-any.whl", hash = "sha256:8665f5a0b4d29a7c2772851c05ea8a09279abb7928b7d727524613bd61d75958", size = 963593, upload-time = "2025-12-23T14:37:28.047Z" }, ] [package.optional-dependencies] @@ -621,6 +624,7 @@ dependencies = [ { name = "jinja2" }, { name = "jsonschema" }, { name = "markupsafe" }, + { name = "pip" }, { name = "psutil" }, { name = "py-cpuinfo" }, { name = "python-json-logger" }, @@ -677,10 +681,10 @@ requires-dist = [ { name = "boto3", marker = "extra == 's3'", specifier = "==1.34.68" }, { name = "boto3-stubs", marker = "extra == 'develop'", specifier = "==1.26.125" }, { name = "certifi" }, - { name = "docker", specifier = "==6.0.0" }, + { name = "docker", specifier = ">=7.1.0" }, { name = "ecs-logging", specifier = "==2.2.0" }, - { name = "elastic-transport", specifier = "==8.4.1" }, - { name = "elasticsearch", extras = ["async"], specifier = "==8.6.1" }, + { name = "elastic-transport", specifier = "==9.2.1" }, + { name = "elasticsearch", extras = ["async"], specifier = "==9.2.1" }, { name = "furo", marker = "extra == 'develop'", specifier = "==2022.6.21" }, { name = "github3-py", marker = "extra == 'develop'", specifier = "==3.2.0" }, { name = "gitpython", marker = "extra == 'develop'", specifier = "==3.1.30" }, @@ -694,6 +698,7 @@ requires-dist = [ { name = "jsonschema", specifier = "==3.1.1" }, { name = "markupsafe", specifier = "==2.0.1" }, { name = "mypy", marker = "extra == 'develop'", specifier = "==1.15.0" }, + { name = "pip", specifier = "==25.2" }, { name = "pre-commit", marker = "extra == 'develop'", specifier = "==2.20.0" }, { name = "psutil", specifier = "==5.9.4" }, { name = "py-cpuinfo", specifier = "==7.0.0" }, @@ -703,7 +708,7 @@ requires-dist = [ { name = 
"pytest-benchmark", marker = "extra == 'develop'", specifier = "==5.2.2" }, { name = "pytest-httpserver", marker = "extra == 'develop'", specifier = "==1.1.3" }, { name = "python-json-logger", specifier = "==2.0.7" }, - { name = "requests", specifier = "<2.32.0" }, + { name = "requests", specifier = "==2.32.5" }, { name = "sphinx", marker = "extra == 'develop'", specifier = "==5.1.1" }, { name = "standard-imghdr", marker = "extra == 'develop'", specifier = "==3.13.0" }, { name = "tabulate", specifier = "==0.8.9" }, @@ -716,7 +721,7 @@ requires-dist = [ { name = "types-urllib3", marker = "extra == 'develop'", specifier = "==1.26.19" }, { name = "typing-extensions", specifier = "==4.12.2" }, { name = "ujson", marker = "extra == 'develop'" }, - { name = "urllib3", specifier = "==1.26.19" }, + { name = "urllib3", specifier = "==2.6.3" }, { name = "wheel", specifier = "==0.45.1" }, { name = "yappi", specifier = "==1.6.10" }, { name = "zstandard", specifier = "==0.21.0" }, @@ -1515,6 +1520,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, ] +[[package]] +name = "pip" +version = "25.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/16/650289cd3f43d5a2fadfd98c68bd1e1e7f2550a1a5326768cddfbcedb2c5/pip-25.2.tar.gz", hash = "sha256:578283f006390f85bb6282dffb876454593d637f5d1be494b5202ce4877e71f2", size = 1840021, upload-time = "2025-07-30T21:50:15.401Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/3f/945ef7ab14dc4f9d7f40288d2df998d1837ee0888ec3659c813487572faa/pip-25.2-py3-none-any.whl", hash = "sha256:6d67a2b4e7f14d8b31b8b52648866fa717f45a1eb70e83002f4331d07e953717", size = 1752557, upload-time = "2025-07-30T21:50:13.323Z" }, +] + 
[[package]] name = "platformdirs" version = "4.3.8" @@ -1925,7 +1939,7 @@ wheels = [ [[package]] name = "requests" -version = "2.31.0" +version = "2.32.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, @@ -1933,9 +1947,9 @@ dependencies = [ { name = "idna" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9d/be/10918a2eac4ae9f02f6cfe6414b7a155ccd8f7f9d4380d62fd5b955065c3/requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1", size = 110794, upload-time = "2023-05-22T15:12:44.175Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/8e/0e2d847013cb52cd35b38c009bb167a1a26b2ce6cd6965bf26b47bc0bf44/requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f", size = 62574, upload-time = "2023-05-22T15:12:42.313Z" }, + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, ] [[package]] @@ -2388,11 +2402,11 @@ wheels = [ [[package]] name = "urllib3" -version = "1.26.19" +version = "2.6.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/c8/93/65e479b023bbc46dab3e092bda6b0005424ea3217d711964ccdede3f9b1b/urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429", size = 306068, upload-time = "2024-06-17T14:53:34.424Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/6a/99eaaeae8becaa17a29aeb334a18e5d582d873b6f084c11f02581b8d7f7f/urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3", size = 143933, upload-time = "2024-06-17T14:53:31.589Z" }, + { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] [[package]] @@ -2421,15 +2435,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f3/40/b1c265d4b2b62b58576588510fc4d1fe60a86319c8de99fd8e9fec617d2c/virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11", size = 6057982, upload-time = "2025-05-08T17:58:21.15Z" }, ] -[[package]] -name = "websocket-client" -version = "1.8.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e6/30/fba0d96b4b5fbf5948ed3f4681f7da2f9f64512e1d303f94b4cc174c24a5/websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da", size = 54648, upload-time = "2024-04-23T22:16:16.976Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/5a/84/44687a29792a70e111c5c477230a72c4b957d88d16141199bf9acb7537a3/websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526", size = 58826, upload-time = "2024-04-23T22:16:14.422Z" }, -] - [[package]] name = "werkzeug" version = "2.1.2"