- 
                Notifications
    You must be signed in to change notification settings 
- Fork 803
GenAI Utils | Add more SemConv Attributes #3862
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 8 commits
aca88c9
              2620cbb
              2205dd2
              cfee342
              7bf83af
              3a16294
              26fcfff
              309d348
              627376d
              cb10d59
              7c804f8
              1b5644a
              e845b12
              File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | 
|---|---|---|
|  | @@ -13,7 +13,7 @@ | |
| # limitations under the License. | ||
|  | ||
| from dataclasses import asdict | ||
| from typing import List | ||
| from typing import Any, Dict, List, Optional | ||
|         
                  aabmass marked this conversation as resolved.
              Outdated
          
            Show resolved
            Hide resolved | ||
|  | ||
| from opentelemetry.semconv._incubating.attributes import ( | ||
| gen_ai_attributes as GenAI, | ||
|  | @@ -60,26 +60,7 @@ def _apply_common_span_attributes( | |
| # TODO: clean provider name to match GenAiProviderNameValues? | ||
| span.set_attribute(GenAI.GEN_AI_PROVIDER_NAME, invocation.provider) | ||
|  | ||
| if invocation.output_messages: | ||
| span.set_attribute( | ||
| GenAI.GEN_AI_RESPONSE_FINISH_REASONS, | ||
| [gen.finish_reason for gen in invocation.output_messages], | ||
| ) | ||
|  | ||
| if invocation.response_model_name is not None: | ||
| span.set_attribute( | ||
| GenAI.GEN_AI_RESPONSE_MODEL, invocation.response_model_name | ||
| ) | ||
| if invocation.response_id is not None: | ||
| span.set_attribute(GenAI.GEN_AI_RESPONSE_ID, invocation.response_id) | ||
| if invocation.input_tokens is not None: | ||
| span.set_attribute( | ||
| GenAI.GEN_AI_USAGE_INPUT_TOKENS, invocation.input_tokens | ||
| ) | ||
| if invocation.output_tokens is not None: | ||
| span.set_attribute( | ||
| GenAI.GEN_AI_USAGE_OUTPUT_TOKENS, invocation.output_tokens | ||
| ) | ||
| _apply_response_attributes(span, invocation) | ||
|  | ||
|  | ||
| def _maybe_set_span_messages( | ||
|  | @@ -112,6 +93,8 @@ def _apply_finish_attributes(span: Span, invocation: LLMInvocation) -> None: | |
| _maybe_set_span_messages( | ||
| span, invocation.input_messages, invocation.output_messages | ||
| ) | ||
| _apply_request_attributes(span, invocation) | ||
| There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. not blocking for this PR, but we should eventually allow to set attributes at start time. I noticed we don't properly mark which attributes should be provided at the start time, fixing it in the open-telemetry/semantic-conventions#2994 | ||
| _apply_response_attributes(span, invocation) | ||
| span.set_attributes(invocation.attributes) | ||
|  | ||
|  | ||
|  | @@ -122,7 +105,68 @@ def _apply_error_attributes(span: Span, error: Error) -> None: | |
| span.set_attribute(ErrorAttributes.ERROR_TYPE, error.type.__qualname__) | ||
|  | ||
|  | ||
def _apply_request_attributes(span: Span, invocation: LLMInvocation) -> None:
    """Attach GenAI request semantic convention attributes to the span.

    Only attributes whose corresponding invocation field is set (not
    ``None``) are emitted; nothing is written when no field is set.
    """
    # Map each optional invocation field to its semconv attribute key and
    # collect only the ones that were actually provided.
    request_attrs: Dict[str, Any] = {}
    for attr_key, value in (
        (GenAI.GEN_AI_REQUEST_TEMPERATURE, invocation.temperature),
        (GenAI.GEN_AI_REQUEST_TOP_P, invocation.top_p),
        (GenAI.GEN_AI_REQUEST_FREQUENCY_PENALTY, invocation.frequency_penalty),
        (GenAI.GEN_AI_REQUEST_PRESENCE_PENALTY, invocation.presence_penalty),
        (GenAI.GEN_AI_REQUEST_MAX_TOKENS, invocation.max_tokens),
        (GenAI.GEN_AI_REQUEST_STOP_SEQUENCES, invocation.stop_sequences),
        (GenAI.GEN_AI_REQUEST_SEED, invocation.seed),
    ):
        if value is not None:
            request_attrs[attr_key] = value

    if request_attrs:
        span.set_attributes(request_attrs)
|  | ||
|  | ||
def _apply_response_attributes(span: Span, invocation: LLMInvocation) -> None:
    """Attach GenAI response semantic convention attributes to the span.

    Finish reasons are de-duplicated and sorted before being emitted so that
    ``gen_ai.response.finish_reasons`` is deterministic regardless of the
    order (or repetition) of output messages.
    """
    attributes: Dict[str, Any] = {}

    # Prefer explicitly provided finish reasons; otherwise derive them from
    # the output messages.
    finish_reasons: Optional[List[str]]
    if invocation.response_finish_reasons is not None:
        finish_reasons = invocation.response_finish_reasons
    elif invocation.output_messages:
        finish_reasons = [
            message.finish_reason for message in invocation.output_messages
        ]
    else:
        finish_reasons = None

    if finish_reasons:
        # Sorted + unique, per review agreement: avoids duplicate entries
        # when multiple choices share a finish reason and keeps the emitted
        # array stable across runs.
        attributes[GenAI.GEN_AI_RESPONSE_FINISH_REASONS] = sorted(
            set(finish_reasons)
        )

    if invocation.response_model_name is not None:
        attributes[GenAI.GEN_AI_RESPONSE_MODEL] = (
            invocation.response_model_name
        )
    if invocation.response_id is not None:
        attributes[GenAI.GEN_AI_RESPONSE_ID] = invocation.response_id
    if invocation.input_tokens is not None:
        attributes[GenAI.GEN_AI_USAGE_INPUT_TOKENS] = invocation.input_tokens
    if invocation.output_tokens is not None:
        attributes[GenAI.GEN_AI_USAGE_OUTPUT_TOKENS] = invocation.output_tokens

    if attributes:
        span.set_attributes(attributes)
|  | ||
|  | ||
# Intra-package public surface of this helper module; names keep their
# leading underscore because they are intended only for sibling modules,
# not for end users of the package.
__all__ = [
    "_apply_finish_attributes",
    "_apply_error_attributes",
    "_apply_request_attributes",
    "_apply_response_attributes",
]
Uh oh!
There was an error while loading. Please reload this page.