Add micro benchmark tests for metric instrument operations #3267

Merged 12 commits on Aug 17, 2023
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -19,6 +19,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
([#3223](https://github.com/open-telemetry/opentelemetry-python/pull/3223))
- Add speced out environment variables and arguments for BatchLogRecordProcessor
([#3237](https://github.com/open-telemetry/opentelemetry-python/pull/3237))
- Add benchmark tests for metrics
([#3267](https://github.com/open-telemetry/opentelemetry-python/pull/3267))

## Version 1.17.0/0.38b0 (2023-03-22)

@@ -0,0 +1,77 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest

from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export import (
AggregationTemporality,
InMemoryMetricReader,
)

reader = InMemoryMetricReader()
reader_delta = InMemoryMetricReader(
preferred_temporality={
Counter: AggregationTemporality.DELTA,
},
)
provider = MeterProvider(
metric_readers=[reader],
)
provider2 = MeterProvider(metric_readers=[reader_delta])
meter = provider.get_meter("sdk_meter_provider")
meter2 = provider2.get_meter("sdk_meter_provider_delta")
counter = meter.create_counter("test_counter")
counter2 = meter2.create_counter("test_counter2")
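# `counter` aggregates with cumulative temporality (the SDK default for Counter),
# while `counter2` reports deltas because reader_delta overrides preferred_temporality.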
udcounter = meter.create_up_down_counter("test_udcounter")


@pytest.mark.parametrize(
("num_labels", "temporality"),
[
(0, "delta"),
(1, "delta"),
(3, "delta"),
(5, "delta"),
(10, "delta"),
(0, "cumulative"),
(1, "cumulative"),
(3, "cumulative"),
(5, "cumulative"),
(10, "cumulative"),
],
)
def test_counter_add(benchmark, num_labels, temporality):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_counter_add():
if temporality == "cumulative":
counter.add(1, labels)
else:
counter2.add(1, labels)

benchmark(benchmark_counter_add)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 10])
def test_up_down_counter_add(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_up_down_counter_add():
udcounter.add(1, labels)

benchmark(benchmark_up_down_counter_add)
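These tests use the `benchmark` fixture from the pytest-benchmark plugin, which times the wrapped callable over many iterations. As a rough sanity check of the cumulative/delta split configured above, a sketch along these lines (the meter, instrument, and attribute names are illustrative, not taken from this PR) reads back what an InMemoryMetricReader has aggregated:

from opentelemetry.sdk.metrics import Counter, MeterProvider
from opentelemetry.sdk.metrics.export import (
    AggregationTemporality,
    InMemoryMetricReader,
)

# The in-memory reader buffers collected metrics, so the aggregation
# temporality of a counter can be inspected directly after an add().
delta_reader = InMemoryMetricReader(
    preferred_temporality={Counter: AggregationTemporality.DELTA}
)
delta_provider = MeterProvider(metric_readers=[delta_reader])
demo_counter = delta_provider.get_meter("demo").create_counter("demo_counter")
demo_counter.add(1, {"Key0": "Value0"})

sum_data = (
    delta_reader.get_metrics_data()
    .resource_metrics[0]
    .scope_metrics[0]
    .metrics[0]
    .data
)
assert sum_data.aggregation_temporality == AggregationTemporality.DELTA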
@@ -0,0 +1,126 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random

import pytest

from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import (
ExplicitBucketHistogramAggregation,
View,
)

MAX_BOUND_VALUE = 10000


def _generate_bounds(bound_count):
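    # bound_count evenly spaced bucket boundaries covering [0, MAX_BOUND_VALUE).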
bounds = []
for i in range(bound_count):
bounds.append(i * MAX_BOUND_VALUE / bound_count)
return bounds


hist_view_10 = View(
instrument_name="test_histogram_10_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(10)),
)
hist_view_49 = View(
instrument_name="test_histogram_49_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(49)),
)
hist_view_50 = View(
instrument_name="test_histogram_50_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(50)),
)
hist_view_1000 = View(
instrument_name="test_histogram_1000_bound",
aggregation=ExplicitBucketHistogramAggregation(_generate_bounds(1000)),
)
reader = InMemoryMetricReader()
provider = MeterProvider(
metric_readers=[reader],
views=[
hist_view_10,
hist_view_49,
hist_view_50,
hist_view_1000,
],
)
meter = provider.get_meter("sdk_meter_provider")
hist = meter.create_histogram("test_histogram_default")
hist10 = meter.create_histogram("test_histogram_10_bound")
hist49 = meter.create_histogram("test_histogram_49_bound")
hist50 = meter.create_histogram("test_histogram_50_bound")
hist1000 = meter.create_histogram("test_histogram_1000_bound")


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_histogram_record():
        hist.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_10(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_histogram_record_10():
        hist10.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_10)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_49(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_histogram_record_49():
        hist49.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_49)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_50(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_histogram_record_50():
        hist50.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_50)


@pytest.mark.parametrize("num_labels", [0, 1, 3, 5, 7])
def test_histogram_record_1000(benchmark, num_labels):
labels = {}
for i in range(num_labels):
labels["Key{}".format(i)] = "Value{}".format(i)

def benchmark_histogram_record_1000():
        hist1000.record(random.random() * MAX_BOUND_VALUE, labels)

benchmark(benchmark_histogram_record_1000)
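
For context on what the View boundaries do at record time, here is a minimal sketch (the names are illustrative, not from this PR) showing a value landing in one explicit bucket; the HistogramDataPoint exposes the per-bucket counts that the benchmarks above exercise:

from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader
from opentelemetry.sdk.metrics.view import (
    ExplicitBucketHistogramAggregation,
    View,
)

demo_reader = InMemoryMetricReader()
demo_provider = MeterProvider(
    metric_readers=[demo_reader],
    views=[
        View(
            instrument_name="demo_histogram",
            aggregation=ExplicitBucketHistogramAggregation([0.0, 5.0, 10.0]),
        )
    ],
)
demo_hist = demo_provider.get_meter("demo").create_histogram("demo_histogram")
demo_hist.record(7.5)  # lands in the (5.0, 10.0] bucket

point = (
    demo_reader.get_metrics_data()
    .resource_metrics[0]
    .scope_metrics[0]
    .metrics[0]
    .data.data_points[0]
)
print(point.explicit_bounds)  # (0.0, 5.0, 10.0)
print(point.bucket_counts)    # e.g. (0, 0, 1, 0): one count in (5.0, 10.0]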