Commit 94dfe04

Merge branch 'main' into 562-updata-work-summary-to-output-in-a-templated-format
2 parents 1781312 + 27f4c90 commit 94dfe04

File tree: 10 files changed (+55, -22 lines)

.pre-commit-config.yaml

Lines changed: 0 additions & 6 deletions
@@ -4,9 +4,3 @@ repos:
     hooks:
       - id: flake8
         args: [--config=.flake8]
-
-  - repo: https://github.com/psf/black
-    rev: 23.3.0
-    hooks:
-      - id: black
-        args: [--line-length=88]

cli/kaizen_cli/cli.py

Lines changed: 2 additions & 1 deletion
@@ -2,7 +2,7 @@
 from .config.manager import load_config
 from .commands.config_commands import config
 from .commands.unit_test_commands import unit_test
-from .commands.reviewer_commands import reviewer
+from .commands.reviewer_commands import reviewer, generate_commit_msg
 from .hooks.setup import hooks
 from kaizen.generator.e2e_tests import E2ETestGenerator
 
@@ -25,6 +25,7 @@ def ui_tests(url):
 cli.add_command(unit_test)
 cli.add_command(reviewer)
 cli.add_command(hooks)
+cli.add_command(generate_commit_msg)
 
 if __name__ == "__main__":
     cli()

cli/kaizen_cli/commands/reviewer_commands.py

Lines changed: 20 additions & 0 deletions
@@ -1,4 +1,7 @@
 import click
+from kaizen.generator.pr_description import PRDescriptionGenerator
+from kaizen.llms.provider import LLMProvider
+from ..config.manager import load_config
 
 
 @click.group()
@@ -14,3 +17,20 @@ def work(github_url, branch):
     """Run reviewer work"""
     click.echo(f"Reviewing {github_url} on branch {branch}")
     # Implement the reviewer work logic here
+
+
+@click.command()
+@click.argument("diff", type=str, required=True)
+def generate_commit_msg(diff):
+    """Generate a commit message based on the provided diff"""
+    model_config = load_config()["language_model"]["models"][0]["litellm_params"]
+    generator = PRDescriptionGenerator(LLMProvider(model_config=model_config))
+    desc = generator.generate_pull_request_desc(
+        diff_text=diff,
+        pull_request_title="",
+        pull_request_desc="",
+        pull_request_files=[],
+        user="",
+    )
+    msg, _, _ = generator.generate_pr_commit_message(desc)
+    click.echo(f'{msg["subject"]}\n\n{msg["body"]}')
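The new command can be exercised without going through git at all, for example with Click's test runner. This is only a sketch: the import path below assumes the package layout implied by cli/kaizen_cli/, and it still needs load_config() to resolve to a litellm model the environment can actually reach.

# Hypothetical smoke test for the generate-commit-msg command.
from click.testing import CliRunner

from kaizen_cli.commands.reviewer_commands import generate_commit_msg

runner = CliRunner()
sample_diff = "diff --git a/app.py b/app.py\n+print('hello')\n"
result = runner.invoke(generate_commit_msg, [sample_diff])
print(result.output)  # the subject line, a blank line, then the body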

cli/kaizen_cli/config/manager.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def load_config():
 
     # Override with environment variables
     for key, value in os.environ.items():
-        if key.startswith("MYAPP_"):
+        if key.startswith("KAIZEN_"):
             config_key = key[6:].lower().split("__")
             try:
                 parsed_value = json.loads(value)
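For context, the override loop above scans every environment variable with the KAIZEN_ prefix, attempts to JSON-decode its value, and splits the remaining key on "__", presumably to address nested configuration entries. A standalone sketch of that behaviour is below; the keys other than language_model are illustrative, and the prefix is stripped by length here rather than by the fixed key[6:] slice used in the module.

# Minimal sketch of a KAIZEN_-prefixed environment override (not the actual manager module).
import json
import os

config = {"language_model": {"models": []}}

for key, value in os.environ.items():
    if key.startswith("KAIZEN_"):
        config_key = key[len("KAIZEN_"):].lower().split("__")
        try:
            parsed_value = json.loads(value)   # numbers, booleans and objects parse as JSON
        except json.JSONDecodeError:
            parsed_value = value               # anything else stays a plain string
        node = config
        for part in config_key[:-1]:
            node = node.setdefault(part, {})
        node[config_key[-1]] = parsed_value

# e.g. KAIZEN_LANGUAGE_MODEL__DEFAULT_TEMPERATURE=0.2 would set
# config["language_model"]["default_temperature"] = 0.2 under this sketch.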
cli/kaizen_cli/hooks/prepare-commit-msg

Lines changed: 9 additions & 3 deletions
@@ -1,8 +1,14 @@
 #!/bin/sh
 # hooks/prepare-commit-msg
 
+# Change to the repository root directory
+cd "$(git rev-parse --show-toplevel)" || exit 1
+
+# Get the staged changes
+staged_diff=$(git diff --cached)
+
 # Run your CLI command and capture the output
-commit_msg=$(kaizen-cli generate-commit-msg)
+commit_info=$(kaizen-cli generate-commit-msg "$staged_diff")
 
-# Overwrite the commit message file with the generated message
-echo "$commit_msg" > "$1"
+# Write the commit info to the commit message file
+echo "$commit_info" > "$1"

cli/kaizen_cli/hooks/setup.py

Lines changed: 4 additions & 1 deletion
@@ -15,14 +15,17 @@ def hooks():
 @click.argument("hook_type", type=click.Choice(HOOK_TYPES))
 def install(hook_type):
     """Install a specific git hook"""
-    source = os.path.join(os.path.dirname(__file__), "hooks", hook_type)
+    source = os.path.join(os.path.dirname(__file__), hook_type)
+    print(source)
     destination = os.path.join(".git", "hooks", hook_type)
 
     if not os.path.exists(source):
         click.echo(f"Error: Hook script for {hook_type} not found.")
         return
 
     try:
+        # Create the destination directory if it doesn't exist
+        os.makedirs(os.path.dirname(destination), exist_ok=True)
         shutil.copy(source, destination)
         os.chmod(destination, 0o755)
         click.echo(f"{hook_type} hook installed successfully")

examples/code_review/main.py

Lines changed: 4 additions & 3 deletions
@@ -12,8 +12,8 @@
 
 logging.basicConfig(level="DEBUG")
 
-pr_diff = "https://github.com/Cloud-Code-AI/kaizen/pull/335.patch"
-pr_files = "https://api.github.com/repos/Cloud-Code-AI/kaizen/pulls/335/files"
+pr_diff = "https://github.com/Cloud-Code-AI/kaizen/pull/559.patch"
+pr_files = "https://api.github.com/repos/Cloud-Code-AI/kaizen/pulls/559/files"
 pr_title = "feat: updated the prompt to provide solution"
 
 diff_text = get_diff_text(pr_diff, "")
@@ -31,7 +31,7 @@
     reeval_response=False,
 )
 
-topics = clean_keys(review_data.topics, "important")
+topics = clean_keys(review_data.topics, "high")
 review_desc = create_pr_review_text(
     review_data.issues, code_quality=review_data.code_quality
 )
@@ -54,3 +54,4 @@
 print(desc_data)
 
 comit_message = pr_desc.generate_pr_commit_message(desc_data.desc)
+print(comit_message)

github_app/github_helper/pull_requests.py

Lines changed: 2 additions & 2 deletions
@@ -19,8 +19,8 @@
 
 confidence_mapping = {
     "critical": 5,
-    "important": 4,
-    "moderate": 3,
+    "high": 4,
+    "medium": 3,
     "low": 2,
     "trivial": 1,
 }
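The renamed levels keep their numeric weights, so threshold comparisons behave exactly as before. A hypothetical use of the updated mapping, with made-up issues purely for illustration:

# Keep only issues rated "high" confidence or above (illustrative data).
confidence_mapping = {"critical": 5, "high": 4, "medium": 3, "low": 2, "trivial": 1}

issues = [
    {"description": "possible SQL injection", "confidence": "critical"},
    {"description": "variable could use a clearer name", "confidence": "trivial"},
]

threshold = confidence_mapping["high"]
actionable = [i for i in issues if confidence_mapping.get(i["confidence"], 0) >= threshold]
print(actionable)  # only the "critical" issue survives the filter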

kaizen/generator/pr_description.py

Lines changed: 1 addition & 1 deletion
@@ -181,4 +181,4 @@ def generate_pr_commit_message(
             DESC=desc,
         )
         resp, usage = self.provider.chat_completion_with_json(prompt, user=user)
-        return resp, usage
+        return resp, usage, self.provider.model
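Since generate_pr_commit_message now returns a 3-tuple, callers that unpacked two values need a third target; the generate-commit-msg CLI command above already discards the extra elements. A sketch of an updated call site, assuming the default model config is usable in the local environment:

# Assumes an API key for the default model is configured; names mirror the CLI command above.
from kaizen.generator.pr_description import PRDescriptionGenerator
from kaizen.llms.provider import LLMProvider

generator = PRDescriptionGenerator(LLMProvider(model_config=LLMProvider.DEFAULT_MODEL_CONFIG))
desc = "Adds a generate-commit-msg CLI command and hardens token-limit handling."
msg, usage, model = generator.generate_pr_commit_message(desc)
print(f'[{model}] {msg["subject"]}\n\n{msg["body"]}')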

kaizen/llms/provider.py

Lines changed: 12 additions & 4 deletions
@@ -9,7 +9,7 @@
 import logging
 from collections import defaultdict
 
-DEFAULT_MAX_TOKENS = 8000
+DEFAULT_MAX_TOKENS = 4000
 
 
 def set_all_loggers_to_ERROR():
@@ -36,7 +36,7 @@ def set_all_loggers_to_ERROR():
 
 
 class LLMProvider:
-    DEFAULT_MODEL = "gpt-3.5-turbo-1106"
+    DEFAULT_MODEL = "gpt-4o-mini"
     DEFAULT_MAX_TOKENS = 4000
     DEFAULT_TEMPERATURE = 0
     DEFAULT_MODEL_CONFIG = {"model": DEFAULT_MODEL}
@@ -233,7 +233,12 @@ def is_inside_token_limit(self, PROMPT: str, percentage: float = 0.8) -> bool:
             {"role": "user", "content": PROMPT},
         ]
         token_count = litellm.token_counter(model=self.model, messages=messages)
-        max_tokens = litellm.get_max_tokens(self.model)
+        if token_count is None:
+            token_count = litellm.token_counter(model=self.DEFAULT_MODEL, text=PROMPT)
+        try:
+            max_tokens = litellm.get_max_tokens(self.model)
+        except Exception:
+            max_tokens = DEFAULT_MAX_TOKENS
         if not max_tokens:
             max_tokens = DEFAULT_MAX_TOKENS
         return token_count <= max_tokens * percentage
@@ -243,7 +248,10 @@ def available_tokens(
     ) -> int:
         if not model:
             model = self.model
-        max_tokens = litellm.get_max_tokens(model)
+        try:
+            max_tokens = litellm.get_max_tokens(model)
+        except Exception:
+            max_tokens = DEFAULT_MAX_TOKENS
         used_tokens = litellm.token_counter(model=model, text=message)
         if max_tokens:
            return int(max_tokens * percentage) - used_tokens
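Both call sites now tolerate model names that litellm's model map does not know about: get_max_tokens raises for unrecognised models, and the except branch pins the budget to DEFAULT_MAX_TOKENS instead of propagating the error. The same pattern in isolation (the second model name is deliberately made up):

# Fallback lookup of a model's token limit; unknown models get a conservative default.
import litellm

DEFAULT_MAX_TOKENS = 4000

def safe_max_tokens(model: str) -> int:
    try:
        max_tokens = litellm.get_max_tokens(model)
    except Exception:
        max_tokens = DEFAULT_MAX_TOKENS
    return max_tokens or DEFAULT_MAX_TOKENS

print(safe_max_tokens("gpt-4o-mini"))        # resolved from litellm's registry
print(safe_max_tokens("my-internal-model"))  # not in the registry -> 4000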
