Commit f8cffc2

Merge pull request #158 from Cloud-Code-AI/156-update-how-we-pass-model-configuration-with-extra-params
fix: flake8 issues fixed
2 parents 3157103 + b56cc4f commit f8cffc2

2 files changed: +4 -5 lines changed

kaizen/llms/provider.py

Lines changed: 1 addition & 3 deletions
@@ -31,9 +31,7 @@ def chat_completion(self, prompt, user: str = None):
             {"role": "user", "content": prompt},
         ]
 
-        response = litellm.completion(
-            messages=messages, user=user, **self.model_config
-        )
+        response = litellm.completion(messages=messages, user=user, **self.model_config)
         return response["choices"][0]["message"]["content"], response["usage"]
 
     def is_inside_token_limit(self, PROMPT, percentage=0.7):
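
The change keeps the behavior targeted by PR #156: self.model_config (which can carry extra provider params) is forwarded to litellm.completion as keyword arguments, now on a single flake8-friendly line. A minimal sketch of that pattern, assuming a hypothetical config dict (the model name and extra params below are illustrative, not taken from this repo):

```python
# Sketch only: forwarding a model configuration, including extra
# provider-specific params, to litellm.completion via **kwargs.
import litellm

# Hypothetical config; every key beyond "model" is passed through unchanged.
model_config = {
    "model": "gpt-3.5-turbo",  # illustrative model name
    "temperature": 0.2,        # extra param, forwarded as-is
    "max_tokens": 512,         # extra param, forwarded as-is
}

messages = [{"role": "user", "content": "Hello"}]

# Single-line call, matching the flake8-friendly form in the diff above.
response = litellm.completion(messages=messages, user="example-user", **model_config)
print(response["choices"][0]["message"]["content"])
```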

tests/llms/test_provider.py

Lines changed: 3 additions & 2 deletions
@@ -1,5 +1,5 @@
 import pytest
-from unittest.mock import patch, Mock
+from unittest.mock import patch
 from kaizen.llms.provider import LLMProvider
 
 
@@ -33,7 +33,8 @@ def test_chat_completion(mock_completion, llm_provider):
         "usage": {"prompt_tokens": 10, "completion_tokens": 10},
     }
     response, usage = llm_provider.chat_completion("test prompt")
-    assert response != None
+    assert response is not None
+    assert usage is not None
 
 
 @patch("kaizen.llms.provider.litellm.token_counter")
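
The test now asserts on both values returned by chat_completion. For readers reconstructing the surrounding test, a self-contained sketch in the same style is below; the llm_provider fixture body and the mocked "choices" payload are assumptions, not copied from the repo's actual test file:

```python
# Sketch only: patch the LLM call and check that chat_completion
# returns both a response string and a usage dict.
from unittest.mock import patch

import pytest

from kaizen.llms.provider import LLMProvider


@pytest.fixture
def llm_provider():
    # Assumption: a no-arg constructor works; adjust if configuration is required.
    return LLMProvider()


@patch("kaizen.llms.provider.litellm.completion")
def test_chat_completion(mock_completion, llm_provider):
    # Mocked payload mirroring the shape indexed by chat_completion.
    mock_completion.return_value = {
        "choices": [{"message": {"content": "test response"}}],
        "usage": {"prompt_tokens": 10, "completion_tokens": 10},
    }
    response, usage = llm_provider.chat_completion("test prompt")
    assert response is not None
    assert usage is not None
```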
