@@ -19,7 +19,6 @@ def __init__(self,**kwargs) -> None:
         super().__init__()
         self.MAX_CHAT_HISTORY = eval(
             os.environ["MAX_CHAT_HISTORY"]) if "MAX_CHAT_HISTORY" in os.environ else 10
-
         self.model = kwargs["model"] if "model" in kwargs else "gpt-3.5-turbo-16k-0613"
         self.temperature = kwargs["temperature"] if "temperature" in kwargs else 0.3
         self.log_path = kwargs["log_path"].replace("/", os.sep) if "log_path" in kwargs else "logs"
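Every field set here is optional: MAX_CHAT_HISTORY falls back to 10 unless the environment overrides it (note it is read via eval(), so the variable must hold a Python literal), while model, temperature, and log_path fall back to hardcoded defaults. A minimal usage sketch, assuming this __init__ belongs to the OpenAILLM class referenced later in the diff:

import os

# Hypothetical instantiations relying on the kwargs-with-fallback pattern above.
os.environ["MAX_CHAT_HISTORY"] = "20"   # parsed with eval(), so it must be a Python literal

llm_default = OpenAILLM()               # gpt-3.5-turbo-16k-0613, temperature 0.3, "logs"
llm_custom = OpenAILLM(model="gpt-4", temperature=0.0, log_path="logs/run1")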
@@ -205,16 +204,15 @@ def get_response(self,
                     messages=messages,
                     functions=functions,
                     function_call=function_call,
-                    temperature=temperature,
-                    custom_llm_provider="openai"
+                    temperature=temperature
                 )
             else:
                 response = litellm.completion(
                     model=model,
                     messages=messages,
                     temperature=temperature,
-                    stream=stream,
-                    custom_llm_provider="openai")
+                    stream=stream
+                )
             break
         except Exception as e:
             print(e)
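Dropping custom_llm_provider="openai" lets litellm route the request from the model string alone, which is what makes non-OpenAI models (such as the Replicate model below) usable through the same code path. Both calls sit inside a retry loop whose tail (break / except) is visible above; a minimal sketch of that pattern, with the hypothetical helper name completion_with_retry standing in for the surrounding method:

import litellm

def completion_with_retry(model, messages, temperature=0.3, stream=False):
    # Mirrors the loop structure around the calls above: retry on any
    # exception, break out once litellm returns a response.
    while True:
        try:
            response = litellm.completion(
                model=model,
                messages=messages,
                temperature=temperature,
                stream=stream,  # no custom_llm_provider: litellm infers it from `model`
            )
            break
        except Exception as e:
            print(e)
    return response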
@@ -248,9 +246,9 @@ def init_LLM(default_log_path,**kwargs):
         )
     if LLM_type == "Replicate":
         LLM = (
-            OpenAILLM(**kwargs["LLM"])
+            ReplicateLLM(**kwargs["LLM"])
             if "LLM" in kwargs
-            else OpenAILLM(model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", temperature=0.3, log_path=log_path)
+            else ReplicateLLM(model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", temperature=0.3, log_path=log_path)
         )
-        return LLM
+        return LLM
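With the Replicate branch now constructing ReplicateLLM instead of OpenAILLM, init_LLM dispatches purely on the LLM_type key of its config. A hedged usage sketch; the config dict below is illustrative (its shape is inferred from the key checks in init_LLM), and a valid Replicate API key must be present in the environment for calls to succeed:

# Hypothetical config, shaped after the "LLM_type" / "LLM" checks in init_LLM.
config = {
    "LLM_type": "Replicate",
    "LLM": {
        "model": "replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
        "temperature": 0.3,
        "log_path": "logs/replicate",
    },
}

llm = init_LLM("logs", **config)  # falls back to the hardcoded llama-2 model if "LLM" is absent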