1
+ # openmind.py
2
+ # openmind (c) 2024 Gregory L. Magnusson MIT licence
3
+ # internal reasoning loop for continuous AGI reasoning without user interaction
4
+ # openmind internal reasoning asynchronous task ensuring non-blocking execution and efficient concurrency
5
+ # modular integration of automind reasoning with memory
6
+ # ollama model handling from ollama_handler.py for input response
7
+ # API handling from api.py and chatter.py for openai, together.ai, groq.com, ai71.ai
8
+ # log internal reasoning conclusion ./memory/logs/thoughts.json
9
+ # log not premise ./memory/logs/notpremise.json
10
+ # log short term memory input response ./memory/stm/{timestamp}memory.json
11
+
12
+
1
13
import os
2
14
import time
3
15
from datetime import datetime
4
16
from nicegui import ui # importing ui for easyAGI
5
17
from memory .memory import create_memory_folders , store_in_stm , save_conversation_memory , save_internal_reasoning , DialogEntry , save_valid_truth
6
18
from webmind .ollama_handler import OllamaHandler # Import OllamaHandler for modular Ollama interactions
7
19
from automind .automind import FundamentalAGI
8
- from webmind .chatter import GPT4o , GroqModel , TogetherModel
20
+ from webmind .chatter import GPT4o , GroqModel , TogetherModel , AI71Model
9
21
from webmind .api import APIManager
10
22
import ujson as json
11
23
import asyncio
@@ -134,13 +146,24 @@ def log_and_notify(message, level='info', message_type='info'):
134
146
else :
135
147
log_and_notify ('Together AI API key not found. Please add the key first.' , 'warning' , 'negative' )
136
148
149
+ if model_name == 'ai71' and not model_initialized :
150
+ ai71_key = self .api_manager .get_api_key ('ai71' )
151
+ if ai71_key :
152
+ chatter = AI71Model (ai71_key ) # ai71
153
+ self .agi_instance = FundamentalAGI (chatter )
154
+ log_and_notify ('Using AI71 for AGI' )
155
+ model_initialized = True
156
+ else :
157
+ log_and_notify ('AI71 API key not found. Please add the key first.' , 'warning' , 'negative' )
158
+
137
159
if not model_initialized :
138
160
log_and_notify (f'Failed to initialize AGI with { model_name } ' , 'warning' , 'negative' )
139
161
140
162
async def initialize_agi (self ):
141
163
openai_key = self .api_manager .get_api_key ('openai' )
142
164
groq_key = self .api_manager .get_api_key ('groq' )
143
165
together_key = self .api_manager .get_api_key ('together' )
166
+ ai71_key = self .api_manager .get_api_key ('ai71' )
144
167
llama_running = self .check_llama_running ()
145
168
146
169
if openai_key :
@@ -164,6 +187,13 @@ async def initialize_agi(self):
164
187
with self .message_container :
165
188
ui .notify ('Using Together AI for ezAGI' )
166
189
logging .debug ("AGI initialized with Together AI" )
190
+ elif ai71_key :
191
+ chatter = AI71Model (ai71_key )
192
+ self .agi_instance = FundamentalAGI (chatter )
193
+ if self .message_container .client .connected :
194
+ with self .message_container :
195
+ ui .notify ('Using AI71 for ezAGI' )
196
+ logging .debug ("AGI initialized with AI71" )
167
197
elif llama_running :
168
198
# Call ollama_handler to list models when LLaMA is found running
169
199
models = self .ollama_handler .list_models ()
@@ -224,8 +254,9 @@ async def reasoning_loop(self):
224
254
openai_key = self .api_manager .get_api_key ('openai' )
225
255
groq_key = self .api_manager .get_api_key ('groq' )
226
256
together_key = self .api_manager .get_api_key ('together' )
257
+ ai71_key = self .api_manager .get_api_key ('ai71' )
227
258
llama_running = self .check_llama_running ()
228
- if openai_key or groq_key or together_key or llama_running :
259
+ if openai_key or groq_key or together_key or ai71_key or llama_running :
229
260
await self .initialize_agi ()
230
261
else :
231
262
if not self .initialization_warning_shown :
@@ -384,4 +415,3 @@ def handle_javascript_response(self, msg):
384
415
385
416
# Log the entire message for debugging purposes
386
417
logging .debug (f"Received JavaScript response: { msg } " )
387
-
0 commit comments