Skip to content

feat(insights): update total opportunity score function to handle missing vitals #82923

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
146 changes: 78 additions & 68 deletions src/sentry/search/events/datasets/metrics.py
Original file line number Diff line number Diff line change
@@ -1754,29 +1754,36 @@ def _resolve_total_web_vital_opportunity_score_with_fixed_weights_function(
)
for vital in vitals
}
# TODO: Divide by the total weights to factor out any missing web vitals
return Function(
"plus",
"divide",
[
adjusted_opportunity_scores["lcp"],
Function(
"plus",
[
adjusted_opportunity_scores["fcp"],
adjusted_opportunity_scores["lcp"],
Function(
"plus",
[
adjusted_opportunity_scores["cls"],
adjusted_opportunity_scores["fcp"],
Function(
"plus",
[
adjusted_opportunity_scores["ttfb"],
adjusted_opportunity_scores["inp"],
adjusted_opportunity_scores["cls"],
Function(
"plus",
[
adjusted_opportunity_scores["ttfb"],
adjusted_opportunity_scores["inp"],
],
),
],
),
],
),
],
),
self._resolve_total_weights_function(),
],
alias,
)
@@ -1837,6 +1844,68 @@ def _resolve_count_scores_function(
alias,
)

def _resolve_total_weights_function(self) -> SelectType:
    """Build the sum of fixed web-vital weights, skipping absent vitals.

    For each vital, the contribution is the fixed weight from
    ``constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS`` when any
    ``measurements.score.<vital>`` rows were recorded, and 0 otherwise,
    so callers can divide by this total to factor missing vitals out.
    Gated on the
    ``organizations:performance-vitals-handle-missing-webvitals`` feature
    flag; returns the constant 1 when the flag is off, making the
    division a no-op.
    """
    vitals = ["lcp", "fcp", "cls", "ttfb", "inp"]

    def _weight(vital: str) -> Function:
        # countIf(value, metric_id == score.<vital>) counts recorded score
        # rows for this vital; isZeroOrNull flags the vital as absent.
        vital_is_missing = Function(
            "isZeroOrNull",
            [
                Function(
                    "countIf",
                    [
                        Column("value"),
                        Function(
                            "equals",
                            [
                                Column("metric_id"),
                                self.resolve_metric(f"measurements.score.{vital}"),
                            ],
                        ),
                    ],
                ),
            ],
        )
        return Function(
            "if",
            [
                vital_is_missing,
                0,
                constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital],
            ],
        )

    # Build all weight expressions up front so resolve_metric is invoked
    # for every vital regardless of the feature-flag outcome.
    weight_exprs = [_weight(vital) for vital in vitals]

    if not features.has(
        "organizations:performance-vitals-handle-missing-webvitals",
        self.builder.params.organization,
    ):
        return 1

    # Left-fold into nested plus() calls:
    # plus(plus(plus(plus(lcp, fcp), cls), ttfb), inp).
    total = weight_exprs[0]
    for expr in weight_exprs[1:]:
        total = Function("plus", [total, expr])
    return total

def _resolve_total_performance_score_function(
self,
_: Mapping[str, str | Column | SelectType | int | float],
@@ -1868,39 +1937,11 @@ def _resolve_total_performance_score_function(
for vital in vitals
}

weights = {
vital: Function(
"if",
[
Function(
"isZeroOrNull",
[
Function(
"countIf",
[
Column("value"),
Function(
"equals",
[
Column("metric_id"),
self.resolve_metric(f"measurements.score.{vital}"),
],
),
],
),
],
),
0,
constants.WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS[vital],
],
)
for vital in vitals
}

# TODO: Is there a way to sum more than 2 values at once?
# TODO: Divide by the total weights to factor out any missing web vitals
return Function(
"divide",
[
# TODO: Is there a way to sum more than 2 values at once?
Function(
"plus",
[
@@ -1926,38 +1967,7 @@ def _resolve_total_performance_score_function(
scores["inp"],
],
),
(
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
Function(
"plus",
[
weights["lcp"],
weights["fcp"],
],
),
weights["cls"],
],
),
weights["ttfb"],
],
),
weights["inp"],
],
)
if features.has(
"organizations:performance-vitals-handle-missing-webvitals",
self.builder.params.organization,
)
else 1
),
self._resolve_total_weights_function(),
],
alias,
)
88 changes: 88 additions & 0 deletions tests/snuba/api/endpoints/test_organization_events_mep.py
Original file line number Diff line number Diff line change
@@ -3021,6 +3021,90 @@ def test_opportunity_score_with_fixed_weights(self):
assert data[1]["total_opportunity_score()"] == 0.36
assert meta["isMetricsData"]

def test_opportunity_score_with_fixed_weights_and_missing_vitals(self):
    """total_opportunity_score() should normalize out vitals with no data.

    foo_transaction records inp and lcp scores only; bar_transaction
    records inp only. Stored rows are (value, metric, transaction).
    """
    metric_rows = [
        (0.5, "measurements.score.inp", "foo_transaction"),
        (1.0, "measurements.score.weight.inp", "foo_transaction"),
        (0.2, "measurements.score.inp", "foo_transaction"),
        (1.0, "measurements.score.weight.inp", "foo_transaction"),
        (0.2, "measurements.score.inp", "foo_transaction"),
        (0.5, "measurements.score.weight.inp", "foo_transaction"),
        (0.1, "measurements.score.lcp", "foo_transaction"),
        (0.3, "measurements.score.weight.lcp", "foo_transaction"),
        (0.2, "measurements.score.inp", "bar_transaction"),
        (0.5, "measurements.score.weight.inp", "bar_transaction"),
    ]
    for value, metric, transaction in metric_rows:
        self.store_transaction_metric(
            value,
            metric=metric,
            tags={"transaction": transaction},
            timestamp=self.min_ago,
        )

    with self.feature({"organizations:performance-vitals-handle-missing-webvitals": True}):
        response = self.do_request(
            {
                "field": [
                    "transaction",
                    "total_opportunity_score()",
                ],
                "query": "event.type:transaction",
                "orderby": "transaction",
                "dataset": "metrics",
                "per_page": 50,
            }
        )
    assert response.status_code == 200, response.content
    assert len(response.data["data"]) == 2
    data = response.data["data"]
    meta = response.data["meta"]

    # Rows are ordered by transaction: bar_transaction first, then foo.
    assert data[0]["total_opportunity_score()"] == 0.09999999999999999
    assert data[1]["total_opportunity_score()"] == 0.6
    assert meta["isMetricsData"]

def test_total_performance_score(self):
self.store_transaction_metric(
0.03,
@@ -4090,6 +4174,10 @@ def test_opportunity_score(self):
def test_opportunity_score_with_fixed_weights(self):
    # Delegates to the base-class test unchanged; this subclass exists
    # to re-run the suite against a different backend configuration.
    super().test_opportunity_score_with_fixed_weights()

@pytest.mark.xfail(reason="Not implemented")
def test_opportunity_score_with_fixed_weights_and_missing_vitals(self):
    # Overrides the parent test only to mark it xfail for this backend;
    # the delegation keeps it running so the marker flips once supported.
    super().test_opportunity_score_with_fixed_weights_and_missing_vitals()

@pytest.mark.xfail(reason="Not implemented")
def test_count_scores(self):
    # Overrides the parent test only to mark it xfail for this backend;
    # the delegation keeps it running so the marker flips once supported.
    super().test_count_scores()