Skip to content

Commit f60feb8

Browse files
authored
Merge pull request #149 from Cloud-Code-AI/147-if-item-in-section-are-removed-remove-the-title-of-topic
147 if item in section are removed remove the title of topic
2 parents be8c765 + 253fdc4 commit f60feb8

File tree

2 files changed

+37
-20
lines changed

2 files changed

+37
-20
lines changed

kaizen/llms/provider.py

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ def __init__(
1717
max_tokens=DEFAULT_MAX_TOKENS,
1818
temperature=DEFAULT_TEMPERATURE,
1919
input_token_cost=DEFAULT_INPUT_TOKEN_COST,
20-
output_token_cost=DEFAULT_OUTPUT_TOKEN_COST
20+
output_token_cost=DEFAULT_OUTPUT_TOKEN_COST,
2121
):
2222
self.config = ConfigData().get_config_data()
2323
self.system_prompt = system_prompt
@@ -40,8 +40,12 @@ def chat_completion(self, prompt, user: str = None):
4040
]
4141
if "model" in self.config.get("language_model", {}):
4242
self.model = self.config["language_model"]["model"]["name"]
43-
self.input_token_cost = self.config["language_model"]["model"]["input_token_cost"]
44-
self.output_token_cost = self.config["language_model"]["model"]["output_token_cost"]
43+
self.input_token_cost = self.config["language_model"]["model"][
44+
"input_token_cost"
45+
]
46+
self.output_token_cost = self.config["language_model"]["model"][
47+
"output_token_cost"
48+
]
4549

4650
response = litellm.completion(
4751
model=self.model,
@@ -50,7 +54,7 @@ def chat_completion(self, prompt, user: str = None):
5054
temperature=self.temperature,
5155
user=user,
5256
input_cost_per_token=self.input_token_cost,
53-
output_cost_per_token=self.output_token_cost
57+
output_cost_per_token=self.output_token_cost,
5458
)
5559
return response["choices"][0]["message"]["content"], response["usage"]
5660

kaizen/reviewer/code_review.py

Lines changed: 29 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -145,20 +145,33 @@ def merge_topics(self, reviews):
145145
return topics
146146

147147
def create_pr_review_text(self, topics):
    """Render grouped review feedback as a single markdown PR comment.

    Args:
        topics: Mapping of topic name -> list of review dicts. Each review
            dict may carry "comment", "reasoning", "solution", "confidence",
            "start_line", "end_line" and "file_name" keys; any missing key
            is rendered as "NA".

    Returns:
        A markdown string beginning with a "## Code Review" title, followed
        by an attention/approval banner and one "### <topic>" section per
        topic. Topics whose review list is empty are omitted entirely, so
        no bare section headings are emitted.
    """
    markdown_title = "## Code Review\n\n"
    markdown_output = ""
    high_ranked_issues = 0
    for topic, reviews in topics.items():
        # Guard clause: skip topics whose items were all removed, so the
        # topic heading is not rendered without content.
        if not reviews:
            continue
        markdown_output += f"### {topic}\n\n"
        for review in reviews:
            # Count critical findings to pick the summary banner below.
            if review.get("confidence", "") == "critical":
                high_ranked_issues += 1
            ct = output.PR_COLLAPSIBLE_TEMPLATE.format(
                comment=review.get("comment", "NA"),
                reasoning=review.get("reasoning", "NA"),
                solution=review.get("solution", "NA"),
                confidence=review.get("confidence", "NA"),
                start_line=review.get("start_line", "NA"),
                end_line=review.get("end_line", "NA"),
                file_name=review.get("file_name", "NA"),
            )
            markdown_output += ct + "\n"
    # Single banner selection replaces the duplicated concatenation branches;
    # the banner text itself is unchanged.
    banner = (
        "❗ This review needs attention. 🚨\n\nHere are some feedback:\n\n"
        if high_ranked_issues > 0
        else "✅ This is a good review! 👍\n\nHere are some feedback:\n\n"
    )
    return markdown_title + banner + markdown_output

0 commit comments

Comments
 (0)