testing_return.py
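"""Baseline evaluation script: run the three-stage agent Collaboration workflow
(assistant -> language analyst -> optimizer) on a single input and return the
final chosen statement and reasoning."""
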
import sys
import os
import json

import openai
from dotenv import load_dotenv

# Make the project root importable so agent_workflow can be resolved.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))
sys.path.append(project_root)

from agent_workflow import Collaboration

load_dotenv()

def fixed_openai_call(func, *args, **kwargs):
    """Invoke an agent call, printing and swallowing any exception (returns None on failure)."""
    try:
        return func(*args, **kwargs)
    except Exception as e:
        print(f"Error in OpenAI call: {e}")
        return None

def run_baseline(agent_workflow, user_input):
    # Stage 1: the assistant proposes an initial statement and its reasoning.
    initial_response = fixed_openai_call(agent_workflow.assistant, user_input)
    if initial_response is None:
        raise RuntimeError("Assistant call failed; no response to parse.")
    parsed = json.loads(initial_response)
    choose_statement = parsed['choose_statement']
    reasoning = parsed['reasoning']
    print(choose_statement)
    print(reasoning)

    # Stage 2: the language analyst agent refines the statement and reasoning.
    mid_response = fixed_openai_call(agent_workflow.language_analysist_agent, user_input, choose_statement, reasoning)
    if mid_response is None:
        raise RuntimeError("Language analyst call failed; no response to parse.")
    parsed = json.loads(mid_response)
    choose_statement = parsed['choose_statement']
    reasoning = parsed['reasoning']
    print(choose_statement)
    print(reasoning)

    # Stage 3: the optimizer agent produces the final statement and reasoning.
    late_response = fixed_openai_call(agent_workflow.optimizer_agent, user_input, choose_statement, reasoning)
    if late_response is None:
        raise RuntimeError("Optimizer call failed; no response to parse.")
    parsed = json.loads(late_response)
    choose_statement = parsed['choose_statement']
    reasoning = parsed['reasoning']
    return choose_statement, reasoning

def evaluate_performance(user_input):
    # Build the collaboration workflow around the openai module as the client
    # and return the baseline (choose_statement, reasoning) pair.
    client = openai
    agent_workflow = Collaboration(client=client)
    results = run_baseline(agent_workflow, user_input)
    return results
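
# Hypothetical usage sketch (not part of the original script). It assumes the
# .env file loaded above supplies OPENAI_API_KEY and that the Collaboration
# agents return JSON strings containing 'choose_statement' and 'reasoning',
# as run_baseline expects. The sample input string is a placeholder.
if __name__ == "__main__":
    sample_input = "Compare the two candidate statements and pick the stronger one."
    statement, reasoning = evaluate_performance(sample_input)
    print("Final statement:", statement)
    print("Final reasoning:", reasoning)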