
Commit 47c91a3

Merge pull request #169 from Cloud-Code-AI/167-updating-the-readme-for-the-launch
feat: Added custom model selection
2 parents 61f182e + f23a47c commit 47c91a3

File tree: 5 files changed, +36 / -11 lines


README.md

Lines changed: 1 addition & 1 deletion

@@ -74,7 +74,7 @@ PYTHONPATH=. poetry run python examples/basic/execute.py
 Or using the default pytest module:
 
 ```
-pytest -v .kaizen/tests/
+pytest -v .kaizen/ui-tests/
 ```
 
 Kaizen will execute the generated tests and provide detailed reports.

config.json

Lines changed: 18 additions & 4 deletions

@@ -2,10 +2,24 @@
     "language_model": {
         "provider": "litellm",
         "enable_observability_logging": true,
-        "model": {
-            "name": "azure/gpt4-o",
-            "input_token_cost": 0.000005,
-            "output_token_cost": 0.000015
+        "model_config": {
+            "model": "gpt-3.5-turbo-16k",
+            "input_cost_per_token": 0.0000005,
+            "output_cost_per_token": 0.0000015
+        },
+        "models": {
+            "best": {
+                "model": "gpt-4o",
+                "input_cost_per_token": 0.000005,
+                "output_cost_per_token": 0.000015,
+                "type": "best"
+            },
+            "default": {
+                "model": "gpt-3.5-turbo-16k",
+                "input_cost_per_token": 0.0000005,
+                "output_cost_per_token": 0.0000015,
+                "type": "default"
+            }
         }
     },
     "github_app": {

kaizen/generator/ui.py

Lines changed: 8 additions & 3 deletions

@@ -14,6 +14,11 @@ class UITestGenerator:
     def __init__(self):
         self.logger = logging.getLogger(__name__)
         self.provider = LLMProvider(system_prompt=UI_TESTS_SYSTEM_PROMPT)
+        self.custom_model = None
+        if self.provider.models and "best" in self.provider.models:
+            self.custom_model = self.provider.models["best"]
+            if "type" in self.custom_model:
+                del self.custom_model["type"]
 
     def generate_ui_tests(
         self,
@@ -45,7 +50,7 @@ def identify_modules(self, web_content: str, user: Optional[str] = None):
         This method identifies the different UI modules from a webpage.
         """
         prompt = UI_MODULES_PROMPT.format(WEB_CONTENT=web_content)
-        resp, usage = self.provider.chat_completion(prompt, user=user)
+        resp, usage = self.provider.chat_completion(prompt, user=user, custom_model=self.custom_model)
         modules = parser.extract_multi_json(resp)
         return {"modules": modules, "usage": usage}
 
@@ -63,7 +68,7 @@ def generate_playwright_code(
             WEB_CONTENT=web_content, TEST_DESCRIPTION=test_description, URL=web_url
         )
 
-        resp, usage = self.provider.chat_completion(prompt, user=user)
+        resp, usage = self.provider.chat_completion(prompt, user=user, custom_model=self.custom_model)
 
         return {"code": resp, "usage": usage}
 
@@ -92,7 +97,7 @@ def store_tests_files(self, json_tests: list, folder_path: str = ""):
         if not folder_path:
             folder_path = output.get_parent_folder()
 
-        folder_path = os.path.join(folder_path, ".kaizen/tests")
+        folder_path = os.path.join(folder_path, ".kaizen/ui-tests")
         output.create_folder(folder_path)
         output.create_test_files(json_tests, folder_path)
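`UITestGenerator` now caches the "best" tier at construction time and deletes its `type` key, since the dict is later expanded into `litellm.completion()` as keyword arguments. A hedged usage sketch (class and method names come from this diff; the HTML snippet is a placeholder, and a valid `config.json` plus API credentials are assumed):

```python
from kaizen.generator.ui import UITestGenerator

generator = UITestGenerator()

# identify_modules() now routes through the "best" model when one is
# configured; without a "models" section it falls back to the provider default.
result = generator.identify_modules(web_content="<html><body>...</body></html>")
print(result["modules"])
print(result["usage"])
```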

kaizen/llms/provider.py

Lines changed: 8 additions & 2 deletions

@@ -17,6 +17,10 @@ def __init__(
         self.model_config = model_config
         if "default_model_config" in self.config.get("language_model", {}):
             self.model_config = self.config["language_model"]["default_model_config"]
+
+        if "models" in self.config.get("language_model"):
+            self.models = self.config["language_model"]["models"]
+
         self.model = self.model_config["model"]
         if self.config.get("language_model", {}).get(
             "enable_observability_logging", False
@@ -25,13 +29,15 @@
         litellm.success_callback = ["supabase"]
         litellm.failure_callback = ["supabase"]
 
-    def chat_completion(self, prompt, user: str = None):
+    def chat_completion(self, prompt, user: str = None, custom_model=None):
         messages = [
             {"role": "system", "content": self.system_prompt},
             {"role": "user", "content": prompt},
         ]
+        if not custom_model:
+            custom_model = self.model_config
 
-        response = litellm.completion(messages=messages, user=user, **self.model_config)
+        response = litellm.completion(messages=messages, user=user, **custom_model)
         return response["choices"][0]["message"]["content"], response["usage"]
 
     def is_inside_token_limit(self, PROMPT, percentage=0.7):
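`chat_completion()` stays backward compatible: with no `custom_model` it behaves exactly as before, otherwise the override dict is expanded into `litellm.completion()`. A sketch of a per-call override, assuming API credentials are configured; the override mirrors a `models` entry from `config.json`, minus the `type` key, which is config metadata rather than a completion parameter (the generator above strips it for the same reason):

```python
from kaizen.llms.provider import LLMProvider

provider = LLMProvider(system_prompt="You are a helpful assistant.")

# Per-call override: every key here is passed to litellm.completion() as a
# keyword argument, so only litellm-compatible keys belong in the dict.
best_model = {
    "model": "gpt-4o",
    "input_cost_per_token": 0.000005,
    "output_cost_per_token": 0.000015,
}
resp, usage = provider.chat_completion("Summarize this diff.", custom_model=best_model)
print(resp)
```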

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "kaizen-cloudcode"
-version = "0.1.17"
+version = "0.2.0"
 description = "An intelligent coding companion that accelerates your development workflow by providing efficient assistance, enabling you to craft high-quality code more rapidly."
 authors = ["Saurav Panda <[email protected]>"]
 license = "Apache2.0"
