package openai

import (
	"context"
	"errors"
	"io"
	"testing"
)

// This file demonstrates how to build mock clients for go-openai streaming.
// The pattern is useful when testing code that depends on go-openai streaming
// and you need full control over the responses it receives.

// MockOpenAIStreamClient demonstrates how to create a full mock client for go-openai.
type MockOpenAIStreamClient struct {
	// Configure canned responses.
	ChatCompletionResponse  ChatCompletionResponse
	ChatCompletionStreamErr error

	// Allow function overrides for more complex scenarios.
	CreateChatCompletionStreamFn func(
		ctx context.Context, req ChatCompletionRequest) (*ChatCompletionStream, error)
}
|  | 24 | + | 
|  | 25 | +func (m *MockOpenAIStreamClient) CreateChatCompletionStream( | 
|  | 26 | +	ctx context.Context, | 
|  | 27 | +	req ChatCompletionRequest, | 
|  | 28 | +) (*ChatCompletionStream, error) { | 
|  | 29 | +	if m.CreateChatCompletionStreamFn != nil { | 
|  | 30 | +		return m.CreateChatCompletionStreamFn(ctx, req) | 
|  | 31 | +	} | 
|  | 32 | +	return nil, m.ChatCompletionStreamErr | 
|  | 33 | +} | 
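
// The mock pays off when production code depends on a small interface rather
// than on *Client directly, so the real client and the mock are interchangeable.
// A minimal sketch (the ChatStreamer name is illustrative, not part of go-openai):
//
//	type ChatStreamer interface {
//		CreateChatCompletionStream(
//			ctx context.Context, req ChatCompletionRequest,
//		) (*ChatCompletionStream, error)
//	}
//
// The compile-time assertion below pins the mock to that method set.
var _ interface {
	CreateChatCompletionStream(ctx context.Context, req ChatCompletionRequest) (*ChatCompletionStream, error)
} = (*MockOpenAIStreamClient)(nil)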

// mockStreamReader returns a fixed sequence of responses for testing.
type mockStreamReader struct {
	responses []ChatCompletionStreamResponse
	index     int
}

// Recv returns the next canned response, or io.EOF once they are exhausted.
func (m *mockStreamReader) Recv() (ChatCompletionStreamResponse, error) {
	if m.index >= len(m.responses) {
		return ChatCompletionStreamResponse{}, io.EOF
	}
	resp := m.responses[m.index]
	m.index++
	return resp, nil
}

func (m *mockStreamReader) Close() error {
	return nil
}
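
// A quick sketch of the reader's contract (values are hypothetical):
//
//	r := &mockStreamReader{responses: []ChatCompletionStreamResponse{{ID: "a"}}}
//	first, _ := r.Recv() // first.ID == "a"
//	_, err := r.Recv()   // err == io.EOF, and every later Recv stays io.EOF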

func TestMockOpenAIStreamClient_Demo(t *testing.T) {
	// Create the responses that our mock stream will return.
	expectedResponses := []ChatCompletionStreamResponse{
		{
			ID:     "test-1",
			Object: "chat.completion.chunk",
			Model:  "gpt-3.5-turbo",
			Choices: []ChatCompletionStreamChoice{
				{
					Index: 0,
					Delta: ChatCompletionStreamChoiceDelta{
						Role:    "assistant",
						Content: "Hello",
					},
				},
			},
		},
		{
			ID:     "test-2",
			Object: "chat.completion.chunk",
			Model:  "gpt-3.5-turbo",
			Choices: []ChatCompletionStreamChoice{
				{
					Index: 0,
					Delta: ChatCompletionStreamChoiceDelta{
						Content: " World",
					},
				},
			},
		},
		{
			ID:     "test-3",
			Object: "chat.completion.chunk",
			Model:  "gpt-3.5-turbo",
			Choices: []ChatCompletionStreamChoice{
				{
					Index:        0,
					Delta:        ChatCompletionStreamChoiceDelta{},
					FinishReason: FinishReasonStop,
				},
			},
		},
	}

	// Create a mock client with a custom stream function.
	mockClient := &MockOpenAIStreamClient{
		CreateChatCompletionStreamFn: func(
			ctx context.Context, req ChatCompletionRequest,
		) (*ChatCompletionStream, error) {
			// Build a mock stream reader preloaded with the expected responses.
			reader := &mockStreamReader{
				responses: expectedResponses,
				index:     0,
			}
			// Return a new ChatCompletionStream backed by the mock reader.
			return NewChatCompletionStream(reader), nil
		},
	}

	// Exercise the mock client.
	stream, err := mockClient.CreateChatCompletionStream(
		context.Background(),
		ChatCompletionRequest{
			Model: GPT3Dot5Turbo,
			Messages: []ChatCompletionMessage{
				{
					Role:    ChatMessageRoleUser,
					Content: "Hello!",
				},
			},
		},
	)
	if err != nil {
		t.Fatalf("CreateChatCompletionStream returned error: %v", err)
	}
	defer stream.Close()

	// Verify we get back exactly the responses we configured.
	fullResponse := ""
	for i, expectedResponse := range expectedResponses {
		receivedResponse, streamErr := stream.Recv()
		if streamErr != nil {
			t.Fatalf("stream.Recv() failed at index %d: %v", i, streamErr)
		}

		// Spot-check the fields we configured on each chunk.
		if receivedResponse.ID != expectedResponse.ID {
			t.Errorf("Response %d ID mismatch. Expected: %s, Got: %s",
				i, expectedResponse.ID, receivedResponse.ID)
		}
		if len(receivedResponse.Choices) > 0 && len(expectedResponse.Choices) > 0 {
			expectedContent := expectedResponse.Choices[0].Delta.Content
			receivedContent := receivedResponse.Choices[0].Delta.Content
			if receivedContent != expectedContent {
				t.Errorf("Response %d content mismatch. Expected: %s, Got: %s",
					i, expectedContent, receivedContent)
			}
			fullResponse += receivedContent
		}
	}

	// Verify EOF at the end of the stream.
	_, streamErr := stream.Recv()
	if !errors.Is(streamErr, io.EOF) {
		t.Errorf("Expected EOF at end of stream, got: %v", streamErr)
	}

	// Verify the full assembled response.
	expectedFullResponse := "Hello World"
	if fullResponse != expectedFullResponse {
		t.Errorf("Full response mismatch. Expected: %s, Got: %s", expectedFullResponse, fullResponse)
	}

	t.Log("✅ Successfully demonstrated mock OpenAI client with streaming responses!")
	t.Logf("   Full response assembled: %q", fullResponse)
}
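
// collectStream is a hypothetical consumer of the kind this mock is meant to
// exercise; it is not part of go-openai. It drains a stream and concatenates
// the content deltas, mirroring the loop in the test above.
func collectStream(stream *ChatCompletionStream) (string, error) {
	defer stream.Close()
	var out string
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// End of stream: return everything assembled so far.
			return out, nil
		}
		if err != nil {
			return out, err
		}
		if len(resp.Choices) > 0 {
			out += resp.Choices[0].Delta.Content
		}
	}
}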

// TestMockOpenAIStreamClient_ErrorHandling demonstrates error handling.
func TestMockOpenAIStreamClient_ErrorHandling(t *testing.T) {
	expectedError := errors.New("mock stream error")

	mockClient := &MockOpenAIStreamClient{
		ChatCompletionStreamErr: expectedError,
	}

	_, err := mockClient.CreateChatCompletionStream(
		context.Background(),
		ChatCompletionRequest{
			Model: GPT3Dot5Turbo,
			Messages: []ChatCompletionMessage{
				{
					Role:    ChatMessageRoleUser,
					Content: "Hello!",
				},
			},
		},
	)

	if !errors.Is(err, expectedError) {
		t.Errorf("Expected error %v, got %v", expectedError, err)
	}

	t.Log("✅ Successfully demonstrated mock OpenAI client error handling!")
}
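
// A mid-stream failure can be simulated the same way. One hedged sketch, not
// part of the types above: embed mockStreamReader and return a configured
// error instead of io.EOF once the canned responses run out.
//
//	type failingStreamReader struct {
//		mockStreamReader
//		err error // returned after the canned responses are exhausted
//	}
//
//	func (f *failingStreamReader) Recv() (ChatCompletionStreamResponse, error) {
//		if f.index >= len(f.responses) {
//			return ChatCompletionStreamResponse{}, f.err
//		}
//		return f.mockStreamReader.Recv()
//	}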