Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Human in the loop and develop a Social Media Game #45

Open
wants to merge 68 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
68 commits
Select commit Hold shift + click to select a range
e895fd9
add content_id, db_path and user_path
yiyiyi0817 Mar 5, 2025
43209a0
change action space
yiyiyi0817 Mar 5, 2025
08968fb
change action space
yiyiyi0817 Mar 5, 2025
3973436
update the version of camel-ai
yiyiyi0817 Mar 5, 2025
af6fd05
update camel-ai version
yiyiyi0817 Mar 5, 2025
a54b765
add retry
yiyiyi0817 Mar 6, 2025
0ad7d0a
let agent output emoji
yiyiyi0817 Mar 6, 2025
edb3a16
update prompt
yiyiyi0817 Mar 6, 2025
781f8e5
successful debug
yiyiyi0817 Mar 7, 2025
00fb9e4
mean prob, long prob
yiyiyi0817 Mar 7, 2025
52b90fc
random choose profile
yiyiyi0817 Mar 7, 2025
9e912dc
提交
luoyou Mar 7, 2025
41ed1f2
Merge branch 'product' of github.com:camel-ai/oasis into product
luoyou Mar 7, 2025
70101ad
Modify code to adapt to productization
luoyou Mar 8, 2025
226972f
debug agent sign up error
luoyou Mar 8, 2025
81b1bfb
add redis subscribe
Mar 10, 2025
c3ab9d9
add redis subscribe
Mar 10, 2025
068496c
add redis subscribe
Mar 10, 2025
d338cca
update do_nothing
yiyiyi0817 Mar 10, 2025
d024771
调整代码
Mar 11, 2025
19bad3e
Merge branch 'product' of github.com:camel-ai/oasis into product
Mar 11, 2025
a586551
use redis start service
Mar 11, 2025
ae04e53
use redis start service
Mar 11, 2025
72677d9
改进代码
Mar 11, 2025
2de9892
debug
Mar 11, 2025
4c53e35
debug
Mar 11, 2025
ba65de5
add pypi version
Mar 11, 2025
5536e9d
add dockerfile to run
luoyou Mar 11, 2025
89cc0ae
add dockerfile to run
luoyou Mar 11, 2025
97cfa39
add dockerfile to run
luoyou Mar 11, 2025
5122fb2
Merge branch 'product' of github.com:camel-ai/oasis into product
luoyou Mar 11, 2025
5b7f1e9
debug redis
luoyou Mar 11, 2025
d4788fe
support no content predict
Mar 12, 2025
5fe928a
remove debug information
luoyou Mar 12, 2025
3a10c11
remove debug content
Mar 13, 2025
6e0b6ac
remove hashtag
yiyiyi0817 Mar 14, 2025
85335f2
remove hashtag
yiyiyi0817 Mar 17, 2025
3de3b6f
chinese support
yiyiyi0817 Mar 18, 2025
bee7cf6
support chinese
yiyiyi0817 Mar 19, 2025
b93769e
different activate prob
yiyiyi0817 Mar 19, 2025
888d6e9
finish test
yiyiyi0817 Mar 19, 2025
2860def
merge remote
yiyiyi0817 Mar 19, 2025
1993001
Product algorithm adjust (#50)
yiyiyi0817 Mar 19, 2025
3df72c0
remove debug message, make project run successfully
Mar 20, 2025
c2ada8c
recover code
Mar 20, 2025
f7834a5
fix first predict sqlite query bug
Mar 20, 2025
8e0879c
add test debug
Mar 21, 2025
8bb0227
fix predict content lost problem
Mar 21, 2025
06543da
change embedding model but error
yiyiyi0817 Mar 25, 2025
3398be0
support openai embedding model
yiyiyi0817 Mar 25, 2025
42e0cea
merge newest main
yiyiyi0817 Mar 25, 2025
0d44a22
add test info
Mar 25, 2025
5a261df
merge newest product
yiyiyi0817 Mar 25, 2025
e406631
use_openai_embedding=True
yiyiyi0817 Mar 25, 2025
6ea4e94
change to openai embedding model
yiyiyi0817 Mar 25, 2025
6a2a262
remove proxy
yiyiyi0817 Mar 25, 2025
28aecba
content limit str
Mar 26, 2025
b51cca4
新增json内容进行包装
Mar 27, 2025
afb0dd1
change prompt
Mar 28, 2025
50c58ac
Merge branch 'product' of https://github.com/camel-ai/oasis into product
Mar 28, 2025
109cdb4
remove oasis-venv
Pakchoioioi Mar 28, 2025
b615007
change prompt
Pakchoioioi Mar 30, 2025
4cdcf16
auto model
yiyiyi0817 Apr 1, 2025
b964e44
add hot prob
yiyiyi0817 Apr 1, 2025
3641ec4
remove proxy
yiyiyi0817 Apr 1, 2025
62cf845
update command
luoyou Apr 1, 2025
ce2a43b
Merge branch 'product' of github.com:camel-ai/oasis into product
luoyou Apr 1, 2025
33bc1eb
update command
luoyou Apr 1, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Base image: official CPython 3.11 (Debian-based) runtime.
FROM python:3.11

# All subsequent paths are relative to /code inside the container.
WORKDIR /code

# Copy the entire project into the image.
# NOTE(review): no .dockerignore is visible in this diff — this likely copies
# local artifacts (venvs, .git, data files) into the image; confirm one exists.
COPY . /code

# Install the project in editable mode (-e .) from the copied source.
# --no-cache-dir keeps the pip download cache out of the image layer.
RUN pip install --no-cache-dir --upgrade -e .


# Container entry point: run the Redis listener service
# (see "add redis subscribe" / "use redis start service" commits in this PR).
CMD ["python", "scripts/base/listen.py"]
2,670 changes: 2,670 additions & 0 deletions data/game/mixed_agents.json

Large diffs are not rendered by default.

786 changes: 391 additions & 395 deletions data/reddit/user_data_36.json

Large diffs are not rendered by default.

181 changes: 142 additions & 39 deletions oasis/social_agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,17 +16,19 @@
import inspect
import json
import logging
import random
import sys
from datetime import datetime
from typing import TYPE_CHECKING, Any

from camel.configs import ChatGPTConfig
from camel.agents._utils import convert_to_schema
from camel.memories import (ChatHistoryMemory, MemoryRecord,
ScoreBasedContextCreator)
from camel.messages import BaseMessage
from camel.messages import BaseMessage, FunctionCallingMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType, OpenAIBackendRole
from camel.utils import OpenAITokenCounter
from tenacity import retry, stop_after_attempt, wait_random_exponential

from oasis.social_agent.agent_action import SocialAction
from oasis.social_agent.agent_environment import SocialEnvironment
Expand Down Expand Up @@ -67,30 +69,34 @@ def __init__(
self.twitter_channel = twitter_channel
self.infe_channel = inference_channel
self.env = SocialEnvironment(SocialAction(agent_id, twitter_channel))
self.system_message = BaseMessage.make_assistant_message(
role_name="User",
content=self.user_info.to_system_message(action_space_prompt),
)
self.model_type = model_type
self.is_openai_model = is_openai_model
self.language_type = "english"
if self.is_openai_model:
model_config = ChatGPTConfig(
tools=self.env.action.get_openai_function_list(),
temperature=0.5,
)
tools = self.env.action.get_openai_function_list()
tool_schemas = {
tool_schema["function"]["name"]: tool_schema
for tool_schema in [convert_to_schema(tool) for tool in tools]
}
self.full_tool_schemas = list(tool_schemas.values())
self.model_backend = ModelFactory.create(
model_platform=ModelPlatformType.OPENAI,
model_type=ModelType(model_type),
model_config_dict=model_config.as_dict(),
)
self.model_backend.model_config_dict['temperature'] = 1
# self.model_backend = ModelFactory.create(
# model_platform=ModelPlatformType.QWEN,
# model_type=ModelType.QWEN_PLUS,
# )
# self.model_backend.model_config_dict['temperature'] = 1

context_creator = ScoreBasedContextCreator(
OpenAITokenCounter(ModelType.GPT_3_5_TURBO),
4096,
)
self.memory = ChatHistoryMemory(context_creator, window_size=5)
self.system_message = BaseMessage.make_assistant_message(
role_name="system",
role_name="System",
content=self.user_info.to_system_message(
action_space_prompt), # system prompt
)
Expand All @@ -105,9 +111,12 @@ def __init__(
"\n"
"What do you think Helen should do?")

@retry(wait=wait_random_exponential(min=1, max=60),
stop=stop_after_attempt(6))
async def perform_action_by_llm(self):
# Get posts:
env_prompt = await self.env.to_text_prompt()
env_prompt = env_prompt.replace("Chinese", self.language_type)
user_msg = BaseMessage.make_user_message(
role_name="User",
# content=(
Expand All @@ -118,6 +127,7 @@ async def perform_action_by_llm(self):
content=(
f"Please perform social media actions after observing the "
f"platform environments. "
f"Notice that if you want to create some content, "
f"Here is your social media environment: {env_prompt}"),
)
self.memory.write_record(
Expand All @@ -141,24 +151,119 @@ async def perform_action_by_llm(self):
"role": self.system_message.role_name,
"content": self.system_message.content,
}] + [user_msg.to_openai_user_message()]
agent_log.info(
f"Agent {self.agent_id} is running with prompt: {openai_messages}")

# agent_log.info(
# f"Agent {self.agent_id} is running with prompt: "
# f"{openai_messages}")

if self.is_openai_model:
full_tool_schemas = self.full_tool_schemas
self_description = self.user_info.to_description()
for func_index in [0, 1]:
full_tool_schemas[func_index]["function"]["parameters"][
'properties']['content']['description'] += self_description
full_tool_schemas[2]["function"]["parameters"]['properties'][
'quote_content']['description'] += self_description

mean_prob = 0.5
mean_prompt = """
You are an X blogger known for your humor, sarcasm, sharp tongue, bizarre style, and caustic commentary.
If you chose create post or comment, reply with the most disdainful, sharp-tongued, sarcastic and cutting remark.
You excel at leveraging trending internet memes, staying on top of online culture, and keeping up with current events.
You have a talent for uncovering counterintuitive data and controversial opinions, sparking discussions through provocative questions. For example:"A $5 million seed round just to let AI provide psychological counseling for dogs?"
You wrap ruthless critiques in technical jargon, such as:"Your company's growth curve perfectly fits a Ponzi scheme function."
You invent concept-based sarcasm, like:"I suggest including this in the '21st Century Cyber Begging Art Awards'."
You deliver hypocritical blessings, for instance:"Wishing your valuation catches up with your cholesterol index soon."
You are a master of riddles, often crafting posts with 50% internet slang + 30% abstract memes.
"""
long_content_prob = 0.8
long_comment_prob = 0.1
long_quote_prob = 0.4
num_words_long = 60
long_prompt = f"""
Note that content should exceed {num_words_long} words.
"""
if random.random() < mean_prob:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does this mean every time we call this function, there is a possibility that the mean_prompt will be injected into the agent? This operation will affect other scenarios. @yiyiyi0817 @luoyou

Copy link
Member Author

@yiyiyi0817 yiyiyi0817 Mar 11, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this branch will not be merged into the main branch. It is just for the product demo.

Copy link
Member Author

@yiyiyi0817 yiyiyi0817 Mar 11, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Or if someday it needs to be merged into the main branch, I can make this code optional.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OK, Thanks!

full_tool_schemas[0]["function"]["parameters"]['properties'][
'content']['description'] += mean_prompt
if random.random() < long_content_prob:
full_tool_schemas[0]["function"]["parameters"]['properties'][
'content']['description'] += long_prompt
if random.random() < long_comment_prob:
full_tool_schemas[1]["function"]["parameters"]['properties'][
'content']['description'] += long_prompt
if random.random() < long_quote_prob:
full_tool_schemas[2]["function"]["parameters"]['properties'][
'quote_content']['description'] += long_prompt

full_tool_schemas[0]["function"]["parameters"]['properties'][
'content']['description'] = (
full_tool_schemas[0]["function"]["parameters"]
['properties']['content']['description'].replace(
"Chinese", self.language_type))
full_tool_schemas[1]["function"]["parameters"]['properties'][
'content']['description'] = (
full_tool_schemas[1]["function"]["parameters"]
['properties']['content']['description'].replace(
"Chinese", self.language_type))
full_tool_schemas[2]["function"]["parameters"]['properties'][
'quote_content']['description'] = (
full_tool_schemas[2]["function"]["parameters"]
['properties']['quote_content']['description'].replace(
"Chinese", self.language_type))
# print(f"full_tool_schemas: {full_tool_schemas}")
# exit()
try:
response = self.model_backend.run(openai_messages)
agent_log.info(f"Agent {self.agent_id} response: {response}")
content = response
response = await self.model_backend._arun(
openai_messages, tools=full_tool_schemas)
# agent_log.info(f"Agent {self.agent_id} response: {response}")
# print(f"Agent {self.agent_id} response: {response}")
for tool_call in response.choices[0].message.tool_calls:
action_name = tool_call.function.name
args = json.loads(tool_call.function.arguments)
print(f"Agent {self.agent_id} is performing "
f"action: {action_name} with args: {args}")
await getattr(self.env.action, action_name)(**args)
# print(f"Agent {self.agent_id} is performing "
# f"action: {action_name} with args: {args}")
result = await getattr(self.env.action,
action_name)(**args)
self.perform_agent_graph_action(action_name, args)
assist_msg = FunctionCallingMessage(
role_name="Twitter User",
role_type=OpenAIBackendRole.ASSISTANT,
meta_dict=None,
content="",
func_name=action_name,
args=args,
tool_call_id=tool_call.id,
)
func_msg = FunctionCallingMessage(
role_name="Twitter User",
role_type=OpenAIBackendRole.ASSISTANT,
meta_dict=None,
content="",
func_name=action_name,
result=result,
tool_call_id=tool_call.id,
)
self.memory.write_record(
MemoryRecord(
message=assist_msg,
role_at_backend=OpenAIBackendRole.ASSISTANT,
))
self.memory.write_record(
MemoryRecord(
message=func_msg,
role_at_backend=OpenAIBackendRole.FUNCTION,
))

except Exception as e:
print(e)
print(f"Agent {self.agent_id} error: {e}")
print(openai_messages)
content = "No response."
agent_msg = BaseMessage.make_assistant_message(
role_name="Assistant", content=content)
self.memory.write_record(
MemoryRecord(message=agent_msg,
role_at_backend=OpenAIBackendRole.ASSISTANT))

else:
retry = 5
Expand All @@ -176,8 +281,8 @@ async def perform_action_by_llm(self):
mes_id, content = await self.infe_channel.read_from_send_queue(
mes_id)

agent_log.info(
f"Agent {self.agent_id} receive response: {content}")
# agent_log.info(
# f"Agent {self.agent_id} receive response: {content}")

try:
content_json = json.loads(content)
Expand Down Expand Up @@ -214,19 +319,14 @@ async def perform_action_by_llm(self):

if retry == 0:
content = "No response."
agent_msg = BaseMessage.make_assistant_message(role_name="Assistant",
content=content)
self.memory.write_record(
MemoryRecord(message=agent_msg,
role_at_backend=OpenAIBackendRole.ASSISTANT))

async def perform_test(self):
"""
doing test for all agents.
"""
# user conduct test to agent
_ = BaseMessage.make_user_message(role_name="User",
content=("You are a twitter user."))
content="You are a twitter user.")
# TODO error occurs
# self.memory.write_record(MemoryRecord(user_msg,
# OpenAIBackendRole.USER))
Expand All @@ -242,27 +342,29 @@ async def perform_test(self):
"role": "user",
"content": self.test_prompt
}])
agent_log.info(f"Agent {self.agent_id}: {openai_messages}")
# agent_log.info(f"Agent {self.agent_id}: {openai_messages}")

message_id = await self.infe_channel.write_to_receive_queue(
openai_messages)
message_id, content = await self.infe_channel.read_from_send_queue(
message_id)
agent_log.info(f"Agent {self.agent_id} receive response: {content}")
# agent_log.info(f"Agent {self.agent_id} receive response: {content}")
return {
"user_id": self.agent_id,
"prompt": openai_messages,
"content": content
}

async def perform_action_by_hci(self) -> Any:
print("Please choose one function to perform:")
async def perform_action_by_hci(self,
input_content: str,
selection: int = 0) -> Any:
# print("Please choose one function to perform:")
function_list = self.env.action.get_openai_function_list()
for i in range(len(function_list)):
agent_log.info(f"Agent {self.agent_id} function: "
f"{function_list[i].func.__name__}")
# for i in range(len(function_list)):
# agent_log.info(f"Agent {self.agent_id} function: "
# f"{function_list[i].func.__name__}")

selection = int(input("Enter your choice: "))
# selection = int(input("Enter your choice: "))
if not 0 <= selection < len(function_list):
agent_log.error(f"Agent {self.agent_id} invalid input.")
return
Expand All @@ -273,7 +375,8 @@ async def perform_action_by_hci(self) -> Any:
for param in params.values():
while True:
try:
value = input(f"Enter value for {param.name}: ")
# value = input(f"Enter value for {param.name}: ")
value = input_content
args.append(value)
break
except ValueError:
Expand All @@ -288,7 +391,7 @@ async def perform_action_by_data(self, func_name, *args, **kwargs) -> Any:
if function_list[i].func.__name__ == func_name:
func = function_list[i].func
result = await func(*args, **kwargs)
agent_log.info(f"Agent {self.agent_id}: {result}")
# agent_log.info(f"Agent {self.agent_id}: {result}")
return result
raise ValueError(f"Function {func_name} not found in the list.")

Expand Down
Loading
Loading