@@ -335,7 +335,7 @@ def __init__(
         }
         self.function_calls_counter = {}
         self.function_call_max_loop = 3
-        self.encode_web_text_list = []
+        # self.encode_web_text_list = []

         if self.get_token_count("default") > self.max_tokens:
             raise t.ActionRefuseError("System prompt is too long")
@@ -621,11 +621,16 @@ def ask_stream(
                 break
             tiktoken.get_encoding("cl100k_base")
             encoding = tiktoken.encoding_for_model(config.GPT_ENGINE)
-            if self.encode_web_text_list == []:
-                self.encode_web_text_list = encoding.encode(" ".join(get_url_text_list(prompt)))
-            print("search len", len(self.encode_web_text_list))
-            function_response = encoding.decode(self.encode_web_text_list[:function_call_max_tokens])
-            self.encode_web_text_list = self.encode_web_text_list[function_call_max_tokens:]
+
+            encode_web_text_list = encoding.encode(" ".join(get_url_text_list(prompt)))
+            print("search len", len(encode_web_text_list))
+            function_response = encoding.decode(encode_web_text_list[:function_call_max_tokens])
+            # if self.encode_web_text_list == []:
+            #     self.encode_web_text_list = encoding.encode(" ".join(get_url_text_list(prompt)))
+            # print("search len", len(self.encode_web_text_list))
+            # function_response = encoding.decode(self.encode_web_text_list[:function_call_max_tokens])
+            # self.encode_web_text_list = self.encode_web_text_list[function_call_max_tokens:]
+
             # function_response = eval(function_call_name)(prompt, function_call_max_tokens)
             function_response = (
                 "Here is the Search results, inside <Search_results></Search_results> XML tags:"
@@ -654,7 +659,7 @@ def ask_stream(
             self.add_to_conversation(full_response, response_role, convo_id=convo_id)
             self.function_calls_counter = {}
             # self.clear_function_call(convo_id=convo_id)
-            self.encode_web_text_list = []
+            # self.encode_web_text_list = []
             # total_tokens = self.get_token_count(convo_id)

     async def ask_stream_async(
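
The change drops the stateful self.encode_web_text_list buffer, which cached the encoded web text and consumed it in function_call_max_tokens-sized slices across successive function-call loops, and instead re-encodes the joined page text on every search call and keeps only the leading slice. A minimal standalone sketch of that truncation pattern, assuming the page texts are already fetched (the pages list and budget value below are hypothetical stand-ins for get_url_text_list(prompt) and function_call_max_tokens):

import tiktoken

def truncate_to_token_budget(texts, model, max_tokens):
    # Join the scraped page texts, tokenize them with the model's encoding,
    # and keep only the first max_tokens tokens, as the patched code does.
    encoding = tiktoken.encoding_for_model(model)  # raises KeyError for model names tiktoken does not know
    tokens = encoding.encode(" ".join(texts))
    print("search len", len(tokens))               # same debug output as in the diff
    return encoding.decode(tokens[:max_tokens])

# Hypothetical usage; in the real code the texts come from get_url_text_list(prompt).
pages = ["first scraped page ...", "second scraped page ..."]
budget = 1024
snippet = truncate_to_token_budget(pages, "gpt-3.5-turbo", budget)

The trade-off is simplicity over continuity: the old buffer let later loop iterations walk through the remaining tokens, while the new version always returns the first function_call_max_tokens tokens of the search text.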