-
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Added some improvements & new feature to ai package (#15)
ai: enhance Anthropic integration with streaming support and refactored design This PR adds streaming capability to the Anthropic LLM provider and improves the overall design through better abstraction and testability. Key changes: - Add streaming response support via GetStreamingResponse - Extract AnthropicClient interface to improve testability - Create RealAnthropicClient implementation wrapping the official SDK - Reduce code duplication by extracting common message handling logic - Add comprehensive tests with mock implementations - Update documentation with streaming examples - Improve general code organization and maintainability - Fix import ordering and godoc formatting The changes maintain backward compatibility while adding new streaming capabilities in line with other providers. The refactoring improves the codebase's testability and reduces duplication through better abstraction.
- Loading branch information
1 parent
d4af759
commit bd0f478
Showing
9 changed files
with
926 additions
and
127 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,21 +1,90 @@ | ||
// Package ai provides a flexible interface for interacting with various Language Learning Models (LLMs). | ||
package ai | ||
|
||
import "context" | ||
|
||
// LLMRequest handles the configuration and execution of LLM requests. | ||
// It provides a consistent interface for interacting with different LLM providers. | ||
type LLMRequest struct { | ||
requestConfig LLMRequestConfig | ||
provider LLMProvider | ||
} | ||
|
||
// NewLLMRequest creates a new LLMRequest with the specified configuration. | ||
func NewLLMRequest(requestConfig LLMRequestConfig) *LLMRequest { | ||
// NewLLMRequest creates a new LLMRequest with the specified configuration and provider. | ||
// The provider parameter allows injecting different LLM implementations (OpenAI, Anthropic, etc.). | ||
// | ||
// Example usage: | ||
// | ||
// // Create provider | ||
// provider := ai.NewOpenAILLMProvider(ai.OpenAIProviderConfig{ | ||
// APIKey: "your-api-key", | ||
// Model: "gpt-3.5-turbo", | ||
// }) | ||
// | ||
// // Configure request options | ||
// config := ai.NewRequestConfig( | ||
// ai.WithMaxToken(2000), | ||
// ai.WithTemperature(0.7), | ||
// ) | ||
// | ||
// // Create LLM request client | ||
// llm := ai.NewLLMRequest(config, provider) | ||
func NewLLMRequest(config LLMRequestConfig, provider LLMProvider) *LLMRequest { | ||
return &LLMRequest{ | ||
requestConfig: requestConfig, | ||
requestConfig: config, | ||
provider: provider, | ||
} | ||
} | ||
|
||
// Generate sends a prompt to the specified LLM provider and returns the response. | ||
// Returns LLMResponse containing the generated text and metadata, or an error if the operation fails. | ||
func (r *LLMRequest) Generate(messages []LLMMessage, llmProvider LLMProvider) (LLMResponse, error) { | ||
return llmProvider.GetResponse(messages, r.requestConfig) | ||
// Generate sends messages to the configured LLM provider and returns the response. | ||
// It uses the provider and configuration specified during initialization. | ||
// | ||
// Example usage: | ||
// | ||
// messages := []ai.LLMMessage{ | ||
// {Role: ai.SystemRole, Text: "You are a helpful assistant"}, | ||
// {Role: ai.UserRole, Text: "What is the capital of France?"}, | ||
// } | ||
// | ||
// response, err := llm.Generate(messages) | ||
// if err != nil { | ||
// log.Fatal(err) | ||
// } | ||
// fmt.Printf("Response: %s\n", response.Text) | ||
// fmt.Printf("Tokens used: %d\n", response.TotalOutputToken) | ||
// | ||
// The method returns LLMResponse containing: | ||
// - Generated text | ||
// - Token usage statistics | ||
// - Completion time | ||
// - Other provider-specific metadata | ||
func (r *LLMRequest) Generate(messages []LLMMessage) (LLMResponse, error) { | ||
return r.provider.GetResponse(messages, r.requestConfig) | ||
} | ||
|
||
// GenerateStream creates a streaming response channel for the given messages. | ||
// It returns a channel that receives StreamingLLMResponse chunks and an error if initialization fails. | ||
// | ||
// Example usage: | ||
// | ||
// request := NewLLMRequest(config) | ||
// stream, err := request.GenerateStream(context.Background(), []LLMMessage{ | ||
// {Role: UserRole, Text: "Tell me a story"}, | ||
// }) | ||
// if err != nil { | ||
// log.Fatal(err) | ||
// } | ||
// | ||
// for response := range stream { | ||
// if response.Error != nil { | ||
// log.Printf("Error: %v", response.Error) | ||
// break | ||
// } | ||
// if response.Done { | ||
// break | ||
// } | ||
// fmt.Print(response.Text) | ||
// } | ||
func (r *LLMRequest) GenerateStream(ctx context.Context, messages []LLMMessage) (<-chan StreamingLLMResponse, error) { | ||
return r.provider.GetStreamingResponse(ctx, messages, r.requestConfig) | ||
} |
Oops, something went wrong.