Skip to content

Commit

Permalink
feat(insights): Update backend performance score function to handle missing vitals (#82750)
Browse files Browse the repository at this point in the history

Currently, if a web vital is missing, we treat the individual score as 0.
This can be misleading because it negatively affects the overall
performance score of an organization/project.

Instead, we want to remove any missing vital factors from the overall
performance score calculation, which is what this change aims to do by
updating the `performance_score` function.
  • Loading branch information
edwardgou-sentry authored Jan 2, 2025
1 parent 10abfbd commit 54ff4ec
Show file tree
Hide file tree
Showing 2 changed files with 135 additions and 6 deletions.
93 changes: 87 additions & 6 deletions src/sentry/search/events/datasets/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
from django.utils.functional import cached_property
from snuba_sdk import Column, Condition, Function, Op, OrderBy

from sentry import features
from sentry.api.event_search import SearchFilter
from sentry.exceptions import IncompatibleMetricsQuery, InvalidSearchQuery
from sentry.search.events import constants, fields
Expand Down Expand Up @@ -1531,6 +1532,12 @@ def _resolve_web_vital_score_function(
args: Mapping[str, str | Column | SelectType | int | float],
alias: str | None,
) -> SelectType:
"""Returns the normalized score (0.0-1.0) for a given web vital.
This function exists because we don't store a metric for the normalized score.
The normalized score is calculated by dividing the sum of measurements.score.* by the sum of measurements.score.weight.*
To calculate the total performance score, see _resolve_total_performance_score_function.
"""
column = args["column"]
metric_id = args["metric_id"]

Expand Down Expand Up @@ -1835,6 +1842,14 @@ def _resolve_total_performance_score_function(
_: Mapping[str, str | Column | SelectType | int | float],
alias: str | None,
) -> SelectType:
"""Returns the total performance score based on a page/site's web vitals.
This function is calculated by:
the summation of (normalized_vital_score * weight) for each vital, divided by the sum of all weights
- normalized_vital_score is the 0.0-1.0 score for each individual vital
- weight is the 0.0-1.0 weight for each individual vital (this is a constant value stored in constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS)
- if all webvitals have data, then the sum of all weights is 1
- normalized_vital_score is obtained through _resolve_web_vital_score_function (see docstring on that function for more details)
"""
vitals = ["lcp", "fcp", "cls", "ttfb", "inp"]
scores = {
vital: Function(
Expand All @@ -1853,9 +1868,38 @@ def _resolve_total_performance_score_function(
for vital in vitals
}

weights = {
vital: Function(
"if",
[
Function(
"isZeroOrNull",
[
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(f"measurements.score.{vital}"),
],
),
],
),
],
),
0,
constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital],
],
)
for vital in vitals
}

# TODO: Is there a way to sum more than 2 values at once?
return Function(
"plus",
"divide",
[
Function(
"plus",
Expand All @@ -1866,17 +1910,54 @@ def _resolve_total_performance_score_function(
Function(
"plus",
[
scores["lcp"],
scores["fcp"],
Function(
"plus",
[
scores["lcp"],
scores["fcp"],
],
),
scores["cls"],
],
),
scores["cls"],
scores["ttfb"],
],
),
scores["ttfb"],
scores["inp"],
],
),
scores["inp"],
(
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
weights["lcp"],
weights["fcp"],
],
),
weights["cls"],
],
),
weights["ttfb"],
],
),
weights["inp"],
],
)
if features.has(
"organizations:performance-vitals-handle-missing-webvitals",
self.builder.params.organization,
)
else 1
),
],
alias,
)
Expand Down
48 changes: 48 additions & 0 deletions tests/snuba/api/endpoints/test_organization_events_mep.py
Original file line number Diff line number Diff line change
Expand Up @@ -3100,6 +3100,50 @@ def test_total_performance_score(self):
assert data[0]["performance_score(measurements.score.total)"] == 0.48
assert meta["isMetricsData"]

def test_total_performance_score_with_missing_vitals(self):
    """Overall performance score should ignore vitals with no recorded data.

    Only lcp and fcp scores/weights are stored; with the
    ``performance-vitals-handle-missing-webvitals`` feature enabled the
    missing vitals' weights drop out of the denominator, so the expected
    score is (0.03 + 0.15) / (0.30 + 0.15) = 0.4 instead of the lower
    value produced by counting the missing vitals as zero.
    """
    stored_metrics = (
        ("measurements.score.lcp", 0.03),
        ("measurements.score.weight.lcp", 0.30),
        ("measurements.score.fcp", 0.15),
        ("measurements.score.weight.fcp", 0.15),
    )
    for metric_name, metric_value in stored_metrics:
        self.store_transaction_metric(
            metric_value,
            metric=metric_name,
            tags={"transaction": "foo_transaction", "transaction.op": "pageload"},
            timestamp=self.min_ago,
        )
    feature_name = "organizations:performance-vitals-handle-missing-webvitals"
    with self.feature({feature_name: True}):
        response = self.do_request(
            {
                "field": [
                    "transaction",
                    "performance_score(measurements.score.total)",
                ],
                "query": "",
                "dataset": "metrics",
                "per_page": 50,
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["performance_score(measurements.score.total)"] == 0.4
        assert meta["isMetricsData"]

def test_count_scores(self):
self.store_transaction_metric(
0.1,
Expand Down Expand Up @@ -4030,6 +4074,10 @@ def test_performance_score_boundaries(self):
# Re-declared in this subclass (metrics-layer variant) so the decorator in the
# surrounding (hidden) context applies to it; delegates to the parent test.
def test_total_performance_score(self):
    super().test_total_performance_score()

# Metrics-layer variant: the missing-vitals handling is not implemented for
# this backend yet, so the inherited test is expected to fail.
@pytest.mark.xfail(reason="Not implemented")
def test_total_performance_score_with_missing_vitals(self):
    super().test_total_performance_score_with_missing_vitals()

@pytest.mark.xfail(reason="Not implemented")
def test_invalid_performance_score_column(self):
super().test_invalid_performance_score_column()
Expand Down

0 comments on commit 54ff4ec

Please sign in to comment.