3 changes: 3 additions & 0 deletions util/opentelemetry-util-genai/CHANGELOG.md
@@ -7,6 +7,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## Unreleased

+- Add more Semconv attributes to LLMInvocation spans.
+  ([#3862](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3862))
+
 ## Version 0.2b0 (2025-10-14)

 - Add jsonlines support to fsspec uploader
4 changes: 2 additions & 2 deletions util/opentelemetry-util-genai/pyproject.toml
@@ -25,8 +25,8 @@ classifiers = [
     "Programming Language :: Python :: 3.13",
 ]
 dependencies = [
-    "opentelemetry-instrumentation ~= 0.57b0",
-    "opentelemetry-semantic-conventions ~= 0.57b0",
+    "opentelemetry-instrumentation ~= 0.58b0",
+    "opentelemetry-semantic-conventions ~= 0.58b0",
     "opentelemetry-api>=1.31.0",
 ]
@@ -93,7 +93,7 @@ def __init__(self, tracer_provider: TracerProvider | None = None):
             __name__,
             __version__,
             tracer_provider,
-            schema_url=Schemas.V1_36_0.value,
+            schema_url=Schemas.V1_37_0.value,
         )

     def start_llm(
@@ -132,6 +132,7 @@ def fail_llm( # pylint: disable=no-self-use
             # TODO: Provide feedback that this invocation was not started
             return invocation

+        _apply_finish_attributes(invocation.span, invocation)
         _apply_error_attributes(invocation.span, error)
         # Detach context and end span
         otel_context.detach(invocation.context_token)
@@ -13,7 +13,7 @@
 # limitations under the License.

 from dataclasses import asdict
-from typing import List
+from typing import Any, Dict, List, Optional

 from opentelemetry.semconv._incubating.attributes import (
     gen_ai_attributes as GenAI,
@@ -60,26 +60,7 @@ def _apply_common_span_attributes(
         # TODO: clean provider name to match GenAiProviderNameValues?
         span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, invocation.provider)

-    if invocation.output_messages:
-        span.set_attribute(
-            GenAI.GEN_AI_RESPONSE_FINISH_REASONS,
-            [gen.finish_reason for gen in invocation.output_messages],
-        )
-
-    if invocation.response_model_name is not None:
-        span.set_attribute(
-            GenAI.GEN_AI_RESPONSE_MODEL, invocation.response_model_name
-        )
-    if invocation.response_id is not None:
-        span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, invocation.response_id)
-    if invocation.input_tokens is not None:
-        span.set_attribute(
-            GenAI.GEN_AI_USAGE_INPUT_TOKENS, invocation.input_tokens
-        )
-    if invocation.output_tokens is not None:
-        span.set_attribute(
-            GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, invocation.output_tokens
-        )
+    _apply_response_attributes(span, invocation)


 def _maybe_set_span_messages(
@@ -112,6 +93,8 @@ def _apply_finish_attributes(span: Span, invocation: LLMInvocation) -> None:
     _maybe_set_span_messages(
         span, invocation.input_messages, invocation.output_messages
     )
+    _apply_request_attributes(span, invocation)
> **Review comment (Member):** Not blocking for this PR, but we should eventually allow setting attributes at start time. I noticed we don't properly mark which attributes should be provided at start time; fixing that in open-telemetry/semantic-conventions#2994. (A sketch of the idea follows.)
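A minimal sketch of what start-time attribute application could look like. This is an editor's illustration, not code from this PR: the `start_chat_span` helper and its parameters are assumptions, while the attribute constants are the same ones the PR uses.

```python
from opentelemetry import trace
from opentelemetry.semconv._incubating.attributes import (
    gen_ai_attributes as GenAI,
)

tracer = trace.get_tracer(__name__)


def start_chat_span(request_model: str, temperature: float | None = None):
    # Request parameters are known before the model call, so they can be
    # attached when the span starts instead of in _apply_finish_attributes.
    span = tracer.start_span(f"chat {request_model}")
    span.set_attribute(GenAI.GEN_AI_REQUEST_MODEL, request_model)
    if temperature is not None:
        span.set_attribute(GenAI.GEN_AI_REQUEST_TEMPERATURE, temperature)
    return span
```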

+    _apply_response_attributes(span, invocation)
     span.set_attributes(invocation.attributes)


@@ -122,7 +105,68 @@ def _apply_error_attributes(span: Span, error: Error) -> None:
     span.set_attribute(ErrorAttributes.ERROR_TYPE, error.type.__qualname__)


+def _apply_request_attributes(span: Span, invocation: LLMInvocation) -> None:
+    """Attach GenAI request semantic convention attributes to the span."""
+    attributes: Dict[str, Any] = {}
+    if invocation.temperature is not None:
+        attributes[GenAI.GEN_AI_REQUEST_TEMPERATURE] = invocation.temperature
+    if invocation.top_p is not None:
+        attributes[GenAI.GEN_AI_REQUEST_TOP_P] = invocation.top_p
+    if invocation.frequency_penalty is not None:
+        attributes[GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY] = (
+            invocation.frequency_penalty
+        )
+    if invocation.presence_penalty is not None:
+        attributes[GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY] = (
+            invocation.presence_penalty
+        )
+    if invocation.max_tokens is not None:
+        attributes[GenAI.GEN_AI_REQUEST_MAX_TOKENS] = invocation.max_tokens
+    if invocation.stop_sequences is not None:
+        attributes[GenAI.GEN_AI_REQUEST_STOP_SEQUENCES] = (
+            invocation.stop_sequences
+        )
+    if invocation.seed is not None:
+        attributes[GenAI.GEN_AI_REQUEST_SEED] = invocation.seed
+    if attributes:
+        span.set_attributes(attributes)
+
+
+def _apply_response_attributes(span: Span, invocation: LLMInvocation) -> None:
+    """Attach GenAI response semantic convention attributes to the span."""
+    attributes: Dict[str, Any] = {}
+
+    finish_reasons: Optional[List[str]]
+    if invocation.finish_reasons is not None:
+        finish_reasons = invocation.finish_reasons
+    elif invocation.output_messages:
+        finish_reasons = [
> **Review comment (Contributor):** Wonder if this should be a sorted set instead of a list? Or at least converted to a set and then back to a sorted list, to get rid of duplicates.
>
> **Reply (Contributor, author):** Finish reasons are sorted and unique now; some unit tests for this were added as well. (A sketch of the approach follows.)
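An editor's sketch of the de-duplication described above, assuming the follow-up commit goes through a set; the exact code there may differ.

```python
# De-duplicate finish reasons and sort them for a stable attribute value.
finish_reasons = ["stop", "length", "stop"]
unique_sorted = sorted(set(finish_reasons))
print(unique_sorted)  # ['length', 'stop']
```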

+            message.finish_reason for message in invocation.output_messages
+        ]
+    else:
+        finish_reasons = None
+
+    if finish_reasons:
> **Review comment (Member):** I believe if finish_reasons is not available, we should set error; see open-telemetry/semantic-conventions#2919 (comment).
>
> **Reply (Contributor):** Hmm, what would you set error to in that case? I feel like instrumentations should set error explicitly (or call into a util that sets it explicitly) rather than try to infer it like that.
>
> **Reply (Member):** I think Liudmila means literally setting the finish reason to error (finish_reason = FinishReasons.ERROR), not the error attribute itself. (A sketch follows.)
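An editor's sketch of the explicit approach the thread favors; `FinishReasons.ERROR` is the commenter's shorthand, so a plain string stands in for it here, and `call_model` is a hypothetical wrapper.

```python
def call_model(invocation, client_call):
    # The instrumentation records the "error" finish reason explicitly on
    # failure, rather than the util inferring it from missing data.
    try:
        return client_call()
    except Exception:
        invocation.finish_reasons = ["error"]
        raise
```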

+        attributes[GenAI.GEN_AI_RESPONSE_FINISH_REASONS] = finish_reasons
+
+    if invocation.response_model_name is not None:
+        attributes[GenAI.GEN_AI_RESPONSE_MODEL] = (
+            invocation.response_model_name
+        )
+    if invocation.response_id is not None:
+        attributes[GenAI.GEN_AI_RESPONSE_ID] = invocation.response_id
+    if invocation.input_tokens is not None:
+        attributes[GenAI.GEN_AI_USAGE_INPUT_TOKENS] = invocation.input_tokens
+    if invocation.output_tokens is not None:
+        attributes[GenAI.GEN_AI_USAGE_OUTPUT_TOKENS] = invocation.output_tokens
+
+    if attributes:
+        span.set_attributes(attributes)


 __all__ = [
     "_apply_finish_attributes",
     "_apply_error_attributes",
+    "_apply_request_attributes",
+    "_apply_response_attributes",
 ]
@@ -111,9 +111,17 @@ class LLMInvocation:
     provider: Optional[str] = None
     response_model_name: Optional[str] = None
     response_id: Optional[str] = None
+    finish_reasons: Optional[List[str]] = None
     input_tokens: Optional[int] = None
     output_tokens: Optional[int] = None
     attributes: Dict[str, Any] = field(default_factory=_new_str_any_dict)
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    frequency_penalty: Optional[float] = None
+    presence_penalty: Optional[float] = None
+    max_tokens: Optional[int] = None
+    stop_sequences: Optional[List[str]] = None
+    seed: Optional[int] = None


 @dataclass
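To round out the diff, a hedged usage sketch (editor's illustration) of populating the new request-parameter fields. The `request_model` field and the import path are assumptions based on the part of the class and package not shown in this hunk.

```python
from opentelemetry.util.genai.types import LLMInvocation

# Populate the request-parameter fields added in this PR; the new
# _apply_request_attributes helper maps each one to its GenAI attribute.
invocation = LLMInvocation(
    request_model="gpt-4",
    temperature=0.2,
    top_p=0.9,
    frequency_penalty=0.1,
    presence_penalty=0.0,
    max_tokens=256,
    stop_sequences=["END"],
    seed=42,
)
```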