diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..f078709f --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,64 @@ +name: Deploy MkDocs to GitHub Pages + +on: + push: + branches: + - main + paths: + - 'docs/**' + - 'mkdocs.yml' + - '.github/workflows/docs.yml' + pull_request: + branches: + - main + paths: + - 'docs/**' + - 'mkdocs.yml' + - '.github/workflows/docs.yml' + +permissions: + contents: read + pages: write + id-token: write + +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install mkdocs mkdocs-material mkdocstrings[python] + + - name: Build documentation + run: mkdocs build --strict + + - name: Upload artifact + if: github.ref == 'refs/heads/main' + uses: actions/upload-pages-artifact@v3 + with: + path: ./site + + deploy: + if: github.ref == 'refs/heads/main' + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4 diff --git a/docs/api/core.md b/docs/api/core.md new file mode 100644 index 00000000..0ddd1838 --- /dev/null +++ b/docs/api/core.md @@ -0,0 +1,485 @@ +# Core API Reference + +This document provides detailed API reference for the core components of the Talos system. + +## Agent Classes + +### Agent + +The base agent class that provides core functionality for all AI agents in the system. + +```python +class Agent: + def __init__( + self, + name: str, + model: str = "gpt-4o", + memory: Optional[Memory] = None + ): + """Initialize an agent with specified configuration. 
+ + Args: + name: Unique identifier for the agent + model: LLM model to use (default: "gpt-4o") + memory: Optional memory instance for conversation history + """ +``` + +#### Methods + +##### `process_query(query: str) -> QueryResponse` + +Process a user query and return a structured response. + +**Parameters:** +- `query` (str): The user's query or request + +**Returns:** +- `QueryResponse`: Structured response containing answers and metadata + +**Raises:** +- `ValidationError`: If query is empty or invalid +- `APIError`: If LLM service is unavailable + +**Example:** +```python +agent = Agent(name="my_agent") +response = agent.process_query("What is the current market sentiment?") +print(response.answers[0]) +``` + +##### `add_memory(description: str, metadata: Optional[dict] = None) -> None` + +Add a memory to the agent's persistent memory system. + +**Parameters:** +- `description` (str): Description of the memory to store +- `metadata` (Optional[dict]): Additional metadata for the memory + +**Example:** +```python +agent.add_memory( + "User prefers conservative investment strategies", + {"category": "preference", "importance": "high"} +) +``` + +##### `search_memory(query: str, limit: int = 10) -> List[Memory]` + +Search the agent's memory for relevant information. + +**Parameters:** +- `query` (str): Search query +- `limit` (int): Maximum number of results to return + +**Returns:** +- `List[Memory]`: List of relevant memories + +### MainAgent + +The primary agent that orchestrates all system components and handles user interactions. + +```python +class MainAgent(Agent): + def __init__(self): + """Initialize the main agent with all system components.""" +``` + +#### Methods + +##### `run(query: str, history: Optional[List[Message]] = None) -> AIMessage` + +Execute a query through the complete system pipeline. 
+ +**Parameters:** +- `query` (str): User query to process +- `history` (Optional[List[Message]]): Conversation history + +**Returns:** +- `AIMessage`: AI response message + +**Example:** +```python +main_agent = MainAgent() +response = main_agent.run("Analyze the latest governance proposal") +print(response.content) +``` + +## Memory System + +### Memory + +Persistent memory system with semantic search capabilities. + +```python +class Memory: + def __init__( + self, + agent_name: str, + batch_size: int = 10, + auto_save: bool = True + ): + """Initialize memory system. + + Args: + agent_name: Name of the agent using this memory + batch_size: Number of memories to batch before writing + auto_save: Whether to automatically save on destruction + """ +``` + +#### Methods + +##### `add_memory(description: str, metadata: Optional[dict] = None) -> None` + +Add a new memory with optional metadata. + +**Parameters:** +- `description` (str): Memory description +- `metadata` (Optional[dict]): Additional metadata + +##### `search(query: str, limit: int = 10) -> List[MemoryItem]` + +Search memories using semantic similarity. + +**Parameters:** +- `query` (str): Search query +- `limit` (int): Maximum results to return + +**Returns:** +- `List[MemoryItem]`: Relevant memories sorted by similarity + +##### `flush() -> None` + +Manually flush pending writes to persistent storage. + +### MemoryItem + +Individual memory item with metadata and embeddings. + +```python +class MemoryItem(BaseModel): + description: str + metadata: dict + timestamp: datetime + embedding: Optional[List[float]] = None +``` + +## Router System + +### Router + +Routes queries to appropriate skills and services based on content analysis. + +```python +class Router: + def __init__(self): + """Initialize router with empty skill and service registries.""" +``` + +#### Methods + +##### `register_skill(skill: Skill, keywords: List[str]) -> None` + +Register a skill with associated keywords for routing. 
+ +**Parameters:** +- `skill` (Skill): Skill instance to register +- `keywords` (List[str]): Keywords that trigger this skill + +##### `register_service(service: Service, keywords: List[str]) -> None` + +Register a service with associated keywords for routing. + +**Parameters:** +- `service` (Service): Service instance to register +- `keywords` (List[str]): Keywords that trigger this service + +##### `route(query: str) -> Union[Skill, Service, None]` + +Route a query to the most appropriate skill or service. + +**Parameters:** +- `query` (str): User query to route + +**Returns:** +- `Union[Skill, Service, None]`: Best matching handler or None + +## Data Models + +### QueryResponse + +Structured response from agent queries. + +```python +class QueryResponse(BaseModel): + answers: List[str] + metadata: dict = Field(default_factory=dict) + confidence: Optional[float] = None + sources: List[str] = Field(default_factory=list) +``` + +### Message + +Base message class for conversation history. + +```python +class Message(BaseModel): + content: str + role: str # "human", "assistant", "system" + timestamp: datetime = Field(default_factory=datetime.now) + metadata: dict = Field(default_factory=dict) +``` + +### HumanMessage + +Message from human users. + +```python +class HumanMessage(Message): + role: str = "human" +``` + +### AIMessage + +Message from AI agents. + +```python +class AIMessage(Message): + role: str = "assistant" +``` + +### SystemMessage + +System-generated messages. + +```python +class SystemMessage(Message): + role: str = "system" +``` + +## Configuration + +### AgentConfig + +Configuration for agent initialization. 
+ +```python +class AgentConfig(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + name: str + model: str = "gpt-4o" + temperature: float = Field(default=0.7, ge=0.0, le=2.0) + max_tokens: int = Field(default=1000, gt=0) + memory_enabled: bool = True + batch_size: int = Field(default=10, gt=0) +``` + +### MemoryConfig + +Configuration for memory system. + +```python +class MemoryConfig(BaseModel): + batch_size: int = Field(default=10, gt=0) + auto_save: bool = True + max_memories: Optional[int] = None + embedding_model: str = "text-embedding-ada-002" +``` + +## Error Classes + +### TalosError + +Base exception for all Talos-specific errors. + +```python +class TalosError(Exception): + """Base exception for all Talos errors.""" + pass +``` + +### ValidationError + +Raised when input validation fails. + +```python +class ValidationError(TalosError): + """Raised when input validation fails.""" + pass +``` + +### APIError + +Raised when external API calls fail. + +```python +class APIError(TalosError): + """Raised when external API calls fail.""" + + def __init__(self, message: str, status_code: Optional[int] = None): + super().__init__(message) + self.status_code = status_code +``` + +### ConfigurationError + +Raised when configuration is invalid. 
+ +```python +class ConfigurationError(TalosError): + """Raised when configuration is invalid.""" + pass +``` + +## Usage Examples + +### Basic Agent Usage + +```python +from talos.core.agent import Agent +from talos.core.memory import Memory + +# Create agent with custom memory +memory = Memory(agent_name="example_agent", batch_size=5) +agent = Agent(name="example_agent", memory=memory) + +# Process queries +response = agent.process_query("What are the latest DeFi trends?") +print(f"Response: {response.answers[0]}") + +# Add memories +agent.add_memory("User is interested in DeFi trends", {"topic": "defi"}) + +# Search memories +memories = agent.search_memory("DeFi", limit=5) +for memory in memories: + print(f"Memory: {memory.description}") +``` + +### Main Agent Usage + +```python +from talos.core.main_agent import MainAgent + +# Create main agent (includes all system components) +main_agent = MainAgent() + +# Process complex queries +response = main_agent.run("Analyze sentiment for 'yield farming' and recommend APR adjustments") +print(response.content) + +# Interactive conversation +history = [] +while True: + user_input = input(">> ") + if user_input.lower() == 'exit': + break + + response = main_agent.run(user_input, history=history) + print(response.content) + + # History is automatically managed by the agent +``` + +### Memory System Usage + +```python +from talos.core.memory import Memory + +# Create memory system +memory = Memory(agent_name="test_agent", batch_size=20) + +# Add memories with metadata +memory.add_memory( + "Protocol X increased APR to 12% due to market competition", + { + "protocol": "Protocol X", + "action": "apr_increase", + "value": 0.12, + "reason": "competition" + } +) + +# Search for relevant memories +results = memory.search("APR increase competition", limit=10) +for result in results: + print(f"Memory: {result.description}") + print(f"Metadata: {result.metadata}") + +# Manual flush if needed +memory.flush() +``` + +### Router Usage 
+ +```python +from talos.core.router import Router +from talos.skills.proposals import ProposalsSkill +from talos.skills.twitter_sentiment import TwitterSentimentSkill + +# Create router +router = Router() + +# Register skills +router.register_skill( + ProposalsSkill(), + keywords=["proposal", "governance", "vote"] +) +router.register_skill( + TwitterSentimentSkill(), + keywords=["sentiment", "twitter", "social"] +) + +# Route queries +handler = router.route("Analyze this governance proposal") +if handler: + result = handler.run(proposal_text="...") + print(result.answers[0]) +``` + +## Error Handling + +All core API methods include comprehensive error handling. Always wrap API calls in try-catch blocks: + +```python +from talos.core.agent import Agent +from talos.core.exceptions import ValidationError, APIError + +agent = Agent(name="example") + +try: + response = agent.process_query("What is DeFi?") + print(response.answers[0]) +except ValidationError as e: + print(f"Invalid input: {e}") +except APIError as e: + print(f"API error: {e}") + if e.status_code: + print(f"Status code: {e.status_code}") +except Exception as e: + print(f"Unexpected error: {e}") +``` + +## Performance Considerations + +### Memory Management + +- Use batch operations for multiple memory additions +- Call `flush()` manually for time-sensitive operations +- Monitor memory usage in long-running processes + +### API Rate Limiting + +- Implement backoff strategies for API calls +- Cache responses when appropriate +- Use connection pooling for external services + +### Concurrency + +- Core components are thread-safe for read operations +- Use locks for concurrent write operations +- Consider async patterns for I/O-bound operations + +This API reference provides the foundation for building applications with the Talos core system. For specific integrations and advanced usage patterns, refer to the Skills and Services API documentation. 
diff --git a/docs/api/services.md b/docs/api/services.md new file mode 100644 index 00000000..b1acb085 --- /dev/null +++ b/docs/api/services.md @@ -0,0 +1,459 @@ +# Services API Reference + +This document provides detailed API reference for the services layer of the Talos system. + +## Base Service Interface + +### Service + +Abstract base class for all services in the Talos system. + +```python +from abc import ABC, abstractmethod + +class Service(ABC): + @abstractmethod + def process(self, request: ServiceRequest) -> ServiceResponse: + """Process a service request and return response.""" + pass +``` + +## Service Models + +### ServiceRequest + +Base request model for all service operations. + +```python +class ServiceRequest(BaseModel): + request_id: str = Field(default_factory=lambda: str(uuid.uuid4())) + timestamp: datetime = Field(default_factory=datetime.now) + metadata: dict = Field(default_factory=dict) +``` + +### ServiceResponse + +Base response model for all service operations. + +```python +class ServiceResponse(BaseModel): + request_id: str + success: bool + data: Any = None + error: Optional[str] = None + metadata: dict = Field(default_factory=dict) + processing_time: Optional[float] = None +``` + +## Yield Management Services + +### YieldManagerService + +Calculates optimal staking APR using market data and sentiment analysis. + +```python +class YieldManagerService(Service): + def __init__(self, model: str = "gpt-4o"): + """Initialize yield manager service. + + Args: + model: LLM model to use for analysis + """ +``` + +#### Methods + +##### `process(request: YieldManagerRequest) -> YieldManagerResponse` + +Calculate optimal APR based on market conditions and sentiment. 
+ +**Parameters:** +- `request` (YieldManagerRequest): Request containing market data and parameters + +**Returns:** +- `YieldManagerResponse`: Response with recommended APR and analysis + +**Example:** +```python +from talos.services.yield_manager import YieldManagerService, YieldManagerRequest + +service = YieldManagerService() +request = YieldManagerRequest( + current_apr=0.05, + market_volatility=0.15, + competitor_aprs=[0.06, 0.07, 0.055], + sentiment_score=0.7, + tvl=1000000, + utilization_rate=0.8 +) + +response = service.process(request) +print(f"Recommended APR: {response.recommended_apr}") +print(f"Reasoning: {response.reasoning}") +``` + +### YieldManagerRequest + +Request model for yield management operations. + +```python +class YieldManagerRequest(ServiceRequest): + current_apr: float = Field(ge=0.0, le=1.0) + market_volatility: float = Field(ge=0.0) + competitor_aprs: List[float] = Field(default_factory=list) + sentiment_score: float = Field(ge=-1.0, le=1.0) + tvl: float = Field(ge=0.0) + utilization_rate: float = Field(ge=0.0, le=1.0) + risk_tolerance: str = Field(default="medium") # low, medium, high +``` + +### YieldManagerResponse + +Response model for yield management operations. + +```python +class YieldManagerResponse(ServiceResponse): + recommended_apr: float + confidence: float = Field(ge=0.0, le=1.0) + reasoning: str + risk_assessment: str + market_analysis: dict + implementation_timeline: str +``` + +## Sentiment Analysis Services + +### TalosSentimentService + +Orchestrates comprehensive sentiment analysis across multiple data sources. + +```python +class TalosSentimentService(Service): + def __init__(self): + """Initialize sentiment analysis service.""" +``` + +#### Methods + +##### `process(request: SentimentRequest) -> SentimentResponse` + +Perform comprehensive sentiment analysis. 
+ +**Parameters:** +- `request` (SentimentRequest): Request with query and analysis parameters + +**Returns:** +- `SentimentResponse`: Comprehensive sentiment analysis results + +**Example:** +```python +from talos.services.sentiment import TalosSentimentService, SentimentRequest + +service = TalosSentimentService() +request = SentimentRequest( + query="DeFi yield farming", + sources=["twitter", "reddit", "discord"], + limit=500, + days_back=7 +) + +response = service.process(request) +print(f"Overall sentiment: {response.overall_sentiment}") +print(f"Confidence: {response.confidence}") +for theme in response.key_themes: + print(f"Theme: {theme.topic} - Sentiment: {theme.sentiment}") +``` + +### SentimentRequest + +Request model for sentiment analysis operations. + +```python +class SentimentRequest(ServiceRequest): + query: str = Field(min_length=1, max_length=500) + sources: List[str] = Field(default=["twitter"]) + limit: int = Field(default=100, ge=1, le=1000) + days_back: int = Field(default=7, ge=1, le=30) + language: str = Field(default="en") + include_influencers: bool = Field(default=True) +``` + +### SentimentResponse + +Response model for sentiment analysis operations. + +```python +class SentimentResponse(ServiceResponse): + overall_sentiment: float = Field(ge=-1.0, le=1.0) + confidence: float = Field(ge=0.0, le=1.0) + sentiment_distribution: dict # positive, neutral, negative percentages + key_themes: List[SentimentTheme] + influential_voices: List[InfluentialVoice] + trending_hashtags: List[str] + recommendations: List[str] + data_sources: dict # source -> count mapping +``` + +### SentimentTheme + +Individual sentiment theme with associated metrics. 
+ +```python +class SentimentTheme(BaseModel): + topic: str + sentiment: float = Field(ge=-1.0, le=1.0) + mention_count: int = Field(ge=0) + confidence: float = Field(ge=0.0, le=1.0) + examples: List[str] = Field(default_factory=list) +``` + +### InfluentialVoice + +Influential account or voice in sentiment analysis. + +```python +class InfluentialVoice(BaseModel): + username: str + platform: str + follower_count: int = Field(ge=0) + sentiment: float = Field(ge=-1.0, le=1.0) + influence_score: float = Field(ge=0.0, le=1.0) + recent_content: str +``` + +## GitHub Services + +### GithubService + +Handles GitHub operations including repository management and PR reviews. + +```python +class GithubService(Service): + def __init__(self, token: str): + """Initialize GitHub service. + + Args: + token: GitHub API token + """ +``` + +#### Methods + +##### `process(request: GithubRequest) -> GithubResponse` + +Process GitHub operations based on request type. + +**Parameters:** +- `request` (GithubRequest): Request specifying the GitHub operation + +**Returns:** +- `GithubResponse`: Response with operation results + +**Example:** +```python +from talos.services.github import GithubService, GithubRequest + +service = GithubService(token="your-github-token") + +# Review a pull request +request = GithubRequest( + operation="review_pr", + repository="owner/repo", + pr_number=123, + auto_approve=False +) + +response = service.process(request) +print(f"Security Score: {response.security_score}") +print(f"Quality Score: {response.quality_score}") +print(f"Recommendation: {response.recommendation}") +``` + +### GithubRequest + +Request model for GitHub operations. 
+ +```python +class GithubRequest(ServiceRequest): + operation: str # "review_pr", "list_prs", "approve_pr", "merge_pr" + repository: str + pr_number: Optional[int] = None + state: str = Field(default="open") # open, closed, all + auto_approve: bool = Field(default=False) + post_review: bool = Field(default=False) +``` + +### GithubResponse + +Response model for GitHub operations. + +```python +class GithubResponse(ServiceResponse): + operation: str + repository: str + pr_number: Optional[int] = None + security_score: Optional[int] = Field(ge=0, le=100) + quality_score: Optional[int] = Field(ge=0, le=100) + recommendation: Optional[str] = None + detailed_analysis: Optional[str] = None + pull_requests: Optional[List[dict]] = None +``` + +## Usage Examples + +### Comprehensive Yield Optimization + +```python +from talos.services.yield_manager import YieldManagerService, YieldManagerRequest +from talos.services.sentiment import TalosSentimentService, SentimentRequest + +# Get sentiment data +sentiment_service = TalosSentimentService() +sentiment_request = SentimentRequest( + query="yield farming APR", + sources=["twitter", "reddit"], + limit=200 +) +sentiment_response = sentiment_service.process(sentiment_request) + +# Calculate optimal APR using sentiment data +yield_service = YieldManagerService() +yield_request = YieldManagerRequest( + current_apr=0.05, + market_volatility=0.12, + competitor_aprs=[0.06, 0.07, 0.055], + sentiment_score=sentiment_response.overall_sentiment, + tvl=2000000, + utilization_rate=0.75 +) +yield_response = yield_service.process(yield_request) + +print(f"Current sentiment: {sentiment_response.overall_sentiment}") +print(f"Recommended APR: {yield_response.recommended_apr}") +print(f"Reasoning: {yield_response.reasoning}") +``` + +### Automated PR Review Workflow + +```python +from talos.services.github import GithubService, GithubRequest + +service = GithubService(token="your-token") + +# List open PRs +list_request = GithubRequest( + 
operation="list_prs", + repository="your-org/your-repo", + state="open" +) +list_response = service.process(list_request) + +# Review each PR +for pr in list_response.pull_requests: + review_request = GithubRequest( + operation="review_pr", + repository="your-org/your-repo", + pr_number=pr["number"], + post_review=True, + auto_approve=True + ) + + review_response = service.process(review_request) + + print(f"PR #{pr['number']}: {review_response.recommendation}") + print(f"Security: {review_response.security_score}/100") + print(f"Quality: {review_response.quality_score}/100") +``` + +### Multi-Source Sentiment Analysis + +```python +from talos.services.sentiment import TalosSentimentService, SentimentRequest + +service = TalosSentimentService() + +# Analyze sentiment across multiple topics +topics = ["DeFi protocols", "yield farming", "staking rewards", "governance tokens"] +results = {} + +for topic in topics: + request = SentimentRequest( + query=topic, + sources=["twitter", "reddit"], + limit=300, + days_back=7 + ) + + response = service.process(request) + results[topic] = { + "sentiment": response.overall_sentiment, + "confidence": response.confidence, + "themes": [theme.topic for theme in response.key_themes[:3]] + } + +# Analyze results +for topic, data in results.items(): + print(f"{topic}: {data['sentiment']:.2f} (confidence: {data['confidence']:.2f})") + print(f" Key themes: {', '.join(data['themes'])}") +``` + +## Error Handling + +Services include comprehensive error handling for various failure scenarios: + +```python +from talos.services.exceptions import ServiceError, APIError, ValidationError + +try: + response = service.process(request) + if not response.success: + print(f"Service error: {response.error}") + else: + # Process successful response + print(f"Success: {response.data}") + +except ValidationError as e: + print(f"Invalid request: {e}") +except APIError as e: + print(f"External API error: {e}") +except ServiceError as e: + print(f"Service 
processing error: {e}") +except Exception as e: + print(f"Unexpected error: {e}") +``` + +## Performance Considerations + +### Caching + +Services implement intelligent caching for frequently accessed data: + +```python +# Sentiment analysis results are cached for 1 hour +# GitHub repository data is cached for 5 minutes +# Market data is cached for 30 seconds +``` + +### Rate Limiting + +All services respect external API rate limits: + +- **GitHub API**: 5000 requests per hour +- **Twitter API**: 300 requests per 15 minutes +- **OpenAI API**: Varies by plan + +### Batch Operations + +Services support batch operations for efficiency: + +```python +# Batch sentiment analysis +batch_request = SentimentBatchRequest( + queries=["topic1", "topic2", "topic3"], + sources=["twitter"], + limit=100 +) +batch_response = sentiment_service.process_batch(batch_request) +``` + +This services API provides the business logic layer for Talos operations, enabling sophisticated protocol management through well-defined interfaces and comprehensive error handling. diff --git a/docs/api/tools.md b/docs/api/tools.md new file mode 100644 index 00000000..a00feae9 --- /dev/null +++ b/docs/api/tools.md @@ -0,0 +1,709 @@ +# Tools API Reference + +This document provides detailed API reference for the tools layer of the Talos system, which handles external integrations and utilities. + +## Base Tool Interface + +### BaseTool + +Abstract base class for all tools in the Talos system. + +```python +from abc import ABC, abstractmethod + +class BaseTool(ABC): + name: str + description: str + + @abstractmethod + def execute(self, *args, **kwargs) -> Any: + """Execute the tool with provided arguments.""" + pass +``` + +### SupervisedTool + +Wrapper that adds hypervisor approval to any tool. + +```python +class SupervisedTool: + def __init__(self, base_tool: BaseTool, supervisor: Supervisor): + """Wrap a tool with supervision. 
+ + Args: + base_tool: The tool to wrap + supervisor: Supervisor for approval decisions + """ + self.base_tool = base_tool + self.supervisor = supervisor + + def execute(self, *args, **kwargs) -> Any: + """Execute tool with supervisor approval.""" +``` + +## GitHub Tools + +### GithubTools + +Comprehensive GitHub API integration for repository management. + +```python +class GithubTools: + def __init__(self, token: str): + """Initialize GitHub tools. + + Args: + token: GitHub API token with appropriate permissions + """ +``` + +#### Repository Operations + +##### `get_all_pull_requests(user: str, project: str, state: str = "open") -> List[dict]` + +Retrieve pull requests from a repository. + +**Parameters:** +- `user` (str): Repository owner +- `project` (str): Repository name +- `state` (str): PR state - "open", "closed", or "all" + +**Returns:** +- `List[dict]`: List of pull request data + +**Example:** +```python +from talos.tools.github import GithubTools + +github = GithubTools(token="your-token") +prs = github.get_all_pull_requests("microsoft", "vscode", state="open") + +for pr in prs: + print(f"PR #{pr['number']}: {pr['title']}") + print(f"Author: {pr['user']['login']}") + print(f"State: {pr['state']}") +``` + +##### `get_open_issues(user: str, project: str) -> List[dict]` + +Retrieve open issues from a repository. + +**Parameters:** +- `user` (str): Repository owner +- `project` (str): Repository name + +**Returns:** +- `List[dict]`: List of issue data + +##### `review_pull_request(user: str, project: str, pr_number: int) -> dict` + +Perform AI-powered review of a pull request. 
+ +**Parameters:** +- `user` (str): Repository owner +- `project` (str): Repository name +- `pr_number` (int): Pull request number + +**Returns:** +- `dict`: Review analysis with security and quality scores + +**Example:** +```python +review = github.review_pull_request("owner", "repo", 123) +print(f"Security Score: {review['security_score']}/100") +print(f"Quality Score: {review['quality_score']}/100") +print(f"Recommendation: {review['recommendation']}") +print(f"Analysis: {review['detailed_analysis']}") +``` + +##### `approve_pull_request(user: str, project: str, pr_number: int) -> bool` + +Approve a pull request. + +**Parameters:** +- `user` (str): Repository owner +- `project` (str): Repository name +- `pr_number` (int): Pull request number + +**Returns:** +- `bool`: True if approval was successful + +##### `merge_pull_request(user: str, project: str, pr_number: int) -> bool` + +Merge an approved pull request. + +**Parameters:** +- `user` (str): Repository owner +- `project` (str): Repository name +- `pr_number` (int): Pull request number + +**Returns:** +- `bool`: True if merge was successful + +## Twitter Tools + +### TwitterTools + +Twitter API integration for social media monitoring and analysis. + +```python +class TwitterTools: + def __init__(self, bearer_token: str): + """Initialize Twitter tools. + + Args: + bearer_token: Twitter API Bearer Token + """ +``` + +#### Search and Analysis + +##### `search_tweets(query: str, limit: int = 100) -> List[dict]` + +Search for tweets matching a query. 
+ +**Parameters:** +- `query` (str): Search query +- `limit` (int): Maximum number of tweets to return + +**Returns:** +- `List[dict]`: List of tweet data + +**Example:** +```python +from talos.tools.twitter import TwitterTools + +twitter = TwitterTools(bearer_token="your-token") +tweets = twitter.search_tweets("DeFi yield farming", limit=50) + +for tweet in tweets: + print(f"@{tweet['author']['username']}: {tweet['text']}") + print(f"Likes: {tweet['public_metrics']['like_count']}") +``` + +##### `get_user_tweets(username: str, limit: int = 100) -> List[dict]` + +Get recent tweets from a specific user. + +**Parameters:** +- `username` (str): Twitter username (without @) +- `limit` (int): Maximum number of tweets to return + +**Returns:** +- `List[dict]`: List of tweet data + +##### `analyze_sentiment(tweets: List[dict]) -> dict` + +Analyze sentiment of a collection of tweets. + +**Parameters:** +- `tweets` (List[dict]): List of tweet data + +**Returns:** +- `dict`: Sentiment analysis results + +**Example:** +```python +tweets = twitter.search_tweets("protocol governance", limit=200) +sentiment = twitter.analyze_sentiment(tweets) + +print(f"Overall sentiment: {sentiment['overall_sentiment']}") +print(f"Positive: {sentiment['positive_ratio']:.1%}") +print(f"Negative: {sentiment['negative_ratio']:.1%}") +print(f"Neutral: {sentiment['neutral_ratio']:.1%}") +``` + +##### `get_trending_topics(location: str = "worldwide") -> List[str]` + +Get trending topics for a location. + +**Parameters:** +- `location` (str): Location for trends (default: "worldwide") + +**Returns:** +- `List[str]`: List of trending topics + +#### User Analysis + +##### `analyze_user_influence(username: str) -> dict` + +Analyze a user's influence and credibility. 
+ +**Parameters:** +- `username` (str): Twitter username (without @) + +**Returns:** +- `dict`: User influence analysis + +**Example:** +```python +influence = twitter.analyze_user_influence("vitalikbuterin") +print(f"Influence Score: {influence['influence_score']}/100") +print(f"Follower Quality: {influence['follower_quality']}") +print(f"Engagement Rate: {influence['engagement_rate']:.2%}") +print(f"Expertise Areas: {', '.join(influence['expertise_areas'])}") +``` + +## IPFS Tools + +### IPFSTools + +IPFS integration for decentralized storage operations. + +```python +class IPFSTools: + def __init__(self, api_key: str, secret_key: str): + """Initialize IPFS tools. + + Args: + api_key: Pinata API key + secret_key: Pinata secret key + """ +``` + +#### Storage Operations + +##### `upload_json(data: dict, name: str) -> str` + +Upload JSON data to IPFS. + +**Parameters:** +- `data` (dict): JSON data to upload +- `name` (str): Name for the uploaded content + +**Returns:** +- `str`: IPFS hash of uploaded content + +**Example:** +```python +from talos.tools.ipfs import IPFSTools + +ipfs = IPFSTools(api_key="your-key", secret_key="your-secret") + +proposal_data = { + "title": "Increase Staking Rewards", + "description": "Proposal to increase staking rewards from 5% to 8%", + "voting_period": "7 days", + "created_at": "2024-01-15T10:00:00Z" +} + +ipfs_hash = ipfs.upload_json(proposal_data, "governance-proposal-001") +print(f"Proposal uploaded to IPFS: {ipfs_hash}") +print(f"Access URL: https://gateway.pinata.cloud/ipfs/{ipfs_hash}") +``` + +##### `upload_text(content: str, name: str) -> str` + +Upload text content to IPFS. + +**Parameters:** +- `content` (str): Text content to upload +- `name` (str): Name for the uploaded content + +**Returns:** +- `str`: IPFS hash of uploaded content + +##### `retrieve_content(ipfs_hash: str) -> str` + +Retrieve content from IPFS by hash. 
+ +**Parameters:** +- `ipfs_hash` (str): IPFS hash of content to retrieve + +**Returns:** +- `str`: Retrieved content + +##### `pin_content(ipfs_hash: str) -> bool` + +Pin content to ensure it remains available. + +**Parameters:** +- `ipfs_hash` (str): IPFS hash to pin + +**Returns:** +- `bool`: True if pinning was successful + +## Cryptography Tools + +### CryptographyTools + +RSA encryption and decryption operations. + +```python +class CryptographyTools: + def __init__(self, key_dir: str = ".keys"): + """Initialize cryptography tools. + + Args: + key_dir: Directory containing RSA keys + """ +``` + +#### Key Management + +##### `generate_key_pair(key_size: int = 2048) -> Tuple[str, str]` + +Generate RSA key pair. + +**Parameters:** +- `key_size` (int): Key size in bits (1024, 2048, or 4096) + +**Returns:** +- `Tuple[str, str]`: (private_key_path, public_key_path) + +**Example:** +```python +from talos.tools.crypto import CryptographyTools + +crypto = CryptographyTools() +private_key, public_key = crypto.generate_key_pair(key_size=2048) +print(f"Keys generated: {private_key}, {public_key}") +``` + +##### `get_public_key() -> str` + +Get the current public key. + +**Returns:** +- `str`: Public key in PEM format + +##### `get_key_fingerprint() -> str` + +Get fingerprint of the current key pair. + +**Returns:** +- `str`: SHA256 fingerprint of the public key + +#### Encryption Operations + +##### `encrypt_data(data: str, public_key_path: str) -> str` + +Encrypt data using RSA public key. + +**Parameters:** +- `data` (str): Data to encrypt +- `public_key_path` (str): Path to public key file + +**Returns:** +- `str`: Base64-encoded encrypted data + +**Example:** +```python +# Encrypt sensitive data +encrypted = crypto.encrypt_data( + "Secret protocol configuration", + "recipient_public_key.pem" +) +print(f"Encrypted data: {encrypted}") +``` + +##### `decrypt_data(encrypted_data: str) -> str` + +Decrypt data using RSA private key. 
+ +**Parameters:** +- `encrypted_data` (str): Base64-encoded encrypted data + +**Returns:** +- `str`: Decrypted plaintext data + +**Example:** +```python +# Decrypt received data +decrypted = crypto.decrypt_data(encrypted_data) +print(f"Decrypted: {decrypted}") +``` + +##### `sign_data(data: str) -> str` + +Create digital signature for data. + +**Parameters:** +- `data` (str): Data to sign + +**Returns:** +- `str`: Base64-encoded signature + +##### `verify_signature(data: str, signature: str, public_key_path: str) -> bool` + +Verify digital signature. + +**Parameters:** +- `data` (str): Original data +- `signature` (str): Base64-encoded signature +- `public_key_path` (str): Path to public key file + +**Returns:** +- `bool`: True if signature is valid + +## Tool Manager + +### ToolManager + +Central registry and manager for all tools. + +```python +class ToolManager: + def __init__(self): + """Initialize tool manager.""" + self.tools: Dict[str, BaseTool] = {} + self.supervised_tools: Dict[str, SupervisedTool] = {} +``` + +#### Tool Registration + +##### `register_tool(tool: BaseTool) -> None` + +Register a tool with the manager. + +**Parameters:** +- `tool` (BaseTool): Tool to register + +##### `register_supervised_tool(tool: BaseTool, supervisor: Supervisor) -> None` + +Register a tool with supervision. + +**Parameters:** +- `tool` (BaseTool): Tool to register +- `supervisor` (Supervisor): Supervisor for approval + +##### `get_tool(name: str) -> Optional[BaseTool]` + +Get a registered tool by name. 
+ +**Parameters:** +- `name` (str): Tool name + +**Returns:** +- `Optional[BaseTool]`: Tool instance or None + +**Example:** +```python +from talos.core.tool_manager import ToolManager +from talos.tools.github import GithubTools +from talos.hypervisor.supervisor import RuleBasedSupervisor + +# Create tool manager +tool_manager = ToolManager() + +# Register tools +github_tool = GithubTools(token="your-token") +supervisor = RuleBasedSupervisor() + +tool_manager.register_supervised_tool(github_tool, supervisor) + +# Use tools +github = tool_manager.get_tool("github") +if github: + prs = github.get_all_pull_requests("owner", "repo") +``` + +## Usage Examples + +### Comprehensive GitHub Workflow + +```python +from talos.tools.github import GithubTools +from talos.hypervisor.supervisor import RuleBasedSupervisor +from talos.core.tool_manager import SupervisedTool + +# Set up supervised GitHub operations +github = GithubTools(token="your-token") +supervisor = RuleBasedSupervisor() +supervised_github = SupervisedTool(github, supervisor) + +# Automated PR review workflow +def review_repository_prs(owner: str, repo: str): + # Get all open PRs + prs = supervised_github.execute("get_all_pull_requests", owner, repo, "open") + + for pr in prs: + print(f"Reviewing PR #{pr['number']}: {pr['title']}") + + # Perform AI review + review = supervised_github.execute("review_pull_request", owner, repo, pr['number']) + + print(f"Security Score: {review['security_score']}/100") + print(f"Quality Score: {review['quality_score']}/100") + + # Auto-approve if criteria are met + if review['security_score'] >= 85 and review['quality_score'] >= 90: + supervised_github.execute("approve_pull_request", owner, repo, pr['number']) + print(f"PR #{pr['number']} approved automatically") + else: + print(f"PR #{pr['number']} requires manual review") + +review_repository_prs("your-org", "your-repo") +``` + +### Social Media Sentiment Pipeline + +```python +from talos.tools.twitter import TwitterTools +from 
talos.tools.ipfs import IPFSTools + +# Set up tools +twitter = TwitterTools(bearer_token="your-token") +ipfs = IPFSTools(api_key="your-key", secret_key="your-secret") + +def analyze_protocol_sentiment(protocol_name: str): + # Collect tweets + tweets = twitter.search_tweets(f"{protocol_name} protocol", limit=500) + + # Analyze sentiment + sentiment = twitter.analyze_sentiment(tweets) + + # Identify influential voices + influential_users = [] + for tweet in tweets[:50]: # Top 50 tweets + if tweet['author']['public_metrics']['followers_count'] > 10000: + influence = twitter.analyze_user_influence(tweet['author']['username']) + influential_users.append(influence) + + # Compile report + report = { + "protocol": protocol_name, + "analysis_date": datetime.now().isoformat(), + "tweet_count": len(tweets), + "sentiment": sentiment, + "influential_voices": influential_users[:10], # Top 10 + "recommendations": generate_recommendations(sentiment) + } + + # Store report on IPFS + ipfs_hash = ipfs.upload_json(report, f"{protocol_name}-sentiment-report") + print(f"Sentiment report stored: https://gateway.pinata.cloud/ipfs/{ipfs_hash}") + + return report + +def generate_recommendations(sentiment_data): + recommendations = [] + + if sentiment_data['overall_sentiment'] < -0.3: + recommendations.append("Consider community engagement to address concerns") + recommendations.append("Review recent protocol changes for negative impact") + + if sentiment_data['positive_ratio'] > 0.7: + recommendations.append("Leverage positive sentiment for marketing campaigns") + recommendations.append("Consider expanding successful initiatives") + + return recommendations + +# Analyze multiple protocols +protocols = ["Compound", "Aave", "Uniswap"] +for protocol in protocols: + report = analyze_protocol_sentiment(protocol) + print(f"{protocol} overall sentiment: {report['sentiment']['overall_sentiment']:.2f}") +``` + +### Secure Data Management + +```python +from talos.tools.crypto import CryptographyTools 
+from talos.tools.ipfs import IPFSTools + +# Set up tools +crypto = CryptographyTools() +ipfs = IPFSTools(api_key="your-key", secret_key="your-secret") + +def secure_proposal_storage(proposal_data: dict): + # Generate keys if needed + if not crypto.get_public_key(): + private_key, public_key = crypto.generate_key_pair(2048) + print(f"Generated new key pair: {public_key}") + + # Encrypt sensitive data + sensitive_fields = ["financial_impact", "implementation_details"] + encrypted_data = proposal_data.copy() + + for field in sensitive_fields: + if field in encrypted_data: + encrypted_value = crypto.encrypt_data( + str(encrypted_data[field]), + crypto.get_public_key() + ) + encrypted_data[f"{field}_encrypted"] = encrypted_value + del encrypted_data[field] + + # Sign the proposal + proposal_text = json.dumps(encrypted_data, sort_keys=True) + signature = crypto.sign_data(proposal_text) + encrypted_data["signature"] = signature + + # Store on IPFS + ipfs_hash = ipfs.upload_json(encrypted_data, "secure-proposal") + + return { + "ipfs_hash": ipfs_hash, + "public_key_fingerprint": crypto.get_key_fingerprint(), + "encrypted_fields": sensitive_fields + } + +# Example usage +proposal = { + "title": "Treasury Rebalancing Proposal", + "description": "Proposal to rebalance treasury allocation", + "financial_impact": {"amount": 1000000, "risk_level": "medium"}, + "implementation_details": {"timeline": "30 days", "steps": ["step1", "step2"]}, + "voting_period": "7 days" +} + +result = secure_proposal_storage(proposal) +print(f"Secure proposal stored: {result['ipfs_hash']}") +``` + +## Error Handling + +All tools include comprehensive error handling: + +```python +from talos.tools.exceptions import ToolError, APIError, AuthenticationError + +try: + result = tool.execute(*args, **kwargs) +except AuthenticationError as e: + print(f"Authentication failed: {e}") + # Handle token refresh or re-authentication +except APIError as e: + print(f"API error: {e}") + if e.status_code == 429: + # 
Handle rate limiting + time.sleep(60) + result = tool.execute(*args, **kwargs) +except ToolError as e: + print(f"Tool execution error: {e}") +except Exception as e: + print(f"Unexpected error: {e}") +``` + +## Performance Considerations + +### Rate Limiting + +Tools automatically handle API rate limits: +- **GitHub**: 5000 requests/hour, automatic backoff +- **Twitter**: 300 requests/15 minutes, intelligent queuing +- **IPFS**: No strict limits, but connection pooling used + +### Caching + +Tools implement intelligent caching: +- **Repository data**: 5-minute cache +- **User profiles**: 1-hour cache +- **Sentiment analysis**: 30-minute cache for same queries + +### Batch Operations + +Many tools support batch operations for efficiency: + +```python +# Batch GitHub operations +prs_to_review = [123, 124, 125, 126] +reviews = github.batch_review_pull_requests("owner", "repo", prs_to_review) + +# Batch Twitter analysis +queries = ["DeFi", "yield farming", "staking", "governance"] +sentiment_results = twitter.batch_sentiment_analysis(queries, limit=100) +``` + +This tools API provides the external integration layer for Talos, enabling sophisticated interactions with GitHub, Twitter, IPFS, and cryptographic operations while maintaining security through supervised execution. diff --git a/docs/architecture/agents.md b/docs/architecture/agents.md new file mode 100644 index 00000000..bb5ac619 --- /dev/null +++ b/docs/architecture/agents.md @@ -0,0 +1,210 @@ +# Agent System + +The agent system in Talos provides the foundation for AI-driven protocol management through a hierarchical architecture of specialized agents. 
+ +## Agent Hierarchy + +### MainAgent + +The `MainAgent` serves as the top-level orchestrator that integrates all system components: + +```python +class MainAgent: + def __init__(self): + self.router = Router() + self.hypervisor = Hypervisor() + self.tool_manager = ToolManager() + self.memory = Memory() +``` + +**Key Responsibilities:** +- **Query Routing** - Directs user queries to appropriate skills/services +- **Action Supervision** - Ensures all actions pass through hypervisor approval +- **Tool Coordination** - Manages available tools and their registration +- **Memory Management** - Maintains persistent conversation history +- **Skill Integration** - Orchestrates multiple skills for complex tasks + +**Workflow:** +1. Receives user input +2. Routes query through Router to appropriate skill/service +3. Executes actions through SupervisedTool wrappers +4. Stores results in Memory for future reference +5. Returns structured response to user + +### Base Agent + +The `Agent` class provides core functionality inherited by all specialized agents: + +**Core Features:** +- **LLM Interaction** - Standardized interface to language models (default: GPT-4o) +- **Conversation History** - Maintains context across interactions using message history +- **Memory Integration** - Semantic search and retrieval of past conversations +- **Prompt Management** - Template-based prompt system with dynamic loading + +**Implementation Details:** +```python +class Agent: + def __init__(self, model: str = "gpt-4o"): + self.model = model + self.history = [] + self.memory = Memory() + self.prompt_manager = PromptManager() +``` + +## Specialized Agents + +### GitHub PR Review Agent + +Specialized agent for automated code review: + +**Capabilities:** +- **Code Analysis** - Reviews pull requests for quality and security +- **Security Scoring** - Assigns security scores (0-100) based on code analysis +- **Quality Assessment** - Evaluates code quality and adherence to standards +- **Automated 
Feedback** - Generates detailed review comments +- **Approval Workflow** - Can automatically approve PRs meeting criteria + +**Integration:** +- Uses `GithubService` for repository operations +- Leverages `GithubTools` for API interactions +- Supervised through hypervisor for all actions + +### Sentiment Analysis Agents + +Specialized agents for social media and community sentiment: + +**Twitter Sentiment Agent:** +- Analyzes tweet sentiment for specific queries +- Tracks trending topics and community discussions +- Evaluates account influence and credibility +- Generates sentiment reports with scoring + +**Community Monitoring Agent:** +- Monitors multiple social platforms +- Aggregates sentiment across channels +- Identifies emerging trends and concerns +- Provides actionable insights for protocol decisions + +## Agent Communication + +### Message System + +Agents communicate through a standardized message system: + +```python +class Message: + content: str + role: str # "human", "assistant", "system" + metadata: dict + timestamp: datetime +``` + +**Message Types:** +- **HumanMessage** - User input and queries +- **AIMessage** - Agent responses and analysis +- **SystemMessage** - Internal system communications + +### History Management + +**Conversation Persistence:** +- All interactions stored in persistent memory +- Semantic search enables context retrieval +- Message history maintains conversation flow +- Metadata enables filtering and categorization + +**Memory Integration:** +- Vector embeddings for semantic similarity +- FAISS backend for efficient search +- Batch operations for performance optimization +- Automatic memory consolidation + +## Agent Lifecycle + +### Initialization + +1. **Configuration Loading** - Load agent-specific settings +2. **Tool Registration** - Register available tools with ToolManager +3. **Memory Initialization** - Load persistent memory and history +4. **Prompt Loading** - Load prompt templates from files +5. 
**Service Integration** - Connect to external services (GitHub, Twitter, etc.) + +### Execution Cycle + +1. **Input Processing** - Parse and validate user input +2. **Context Retrieval** - Search memory for relevant context +3. **Skill Selection** - Route query to appropriate skill/service +4. **Action Planning** - Generate execution plan for complex tasks +5. **Supervised Execution** - Execute actions through hypervisor approval +6. **Result Processing** - Format and store results +7. **Response Generation** - Generate user-facing response + +### Shutdown + +1. **Memory Persistence** - Save conversation history and memories +2. **Tool Cleanup** - Properly close external connections +3. **State Serialization** - Save agent state for future sessions + +## Agent Configuration + +### Model Selection + +Agents can be configured with different LLM models: + +```python +# Default configuration +agent = Agent(model="gpt-4o") + +# Custom model for specific tasks +code_review_agent = Agent(model="gpt-4o-code") +``` + +### Prompt Customization + +Agents use template-based prompts that can be customized: + +```json +{ + "system_prompt": "You are Talos, an AI protocol owner...", + "task_prompts": { + "proposal_evaluation": "Analyze the following proposal...", + "sentiment_analysis": "Evaluate the sentiment of..." 
+  }
+}
+```
+
+### Memory Configuration
+
+The memory system can be tuned for different use cases:
+
+```python
+memory = Memory(
+    batch_size=10,      # Batch writes for performance
+    auto_save=True,     # Automatic persistence
+    max_memories=1000   # Memory limit
+)
+```
+
+## Best Practices
+
+### Agent Design
+
+- **Single Responsibility** - Each agent should have a clear, focused purpose
+- **Stateless Operations** - Minimize agent state for reliability
+- **Error Handling** - Robust error handling and recovery
+- **Logging** - Comprehensive logging for debugging and monitoring
+
+### Performance Optimization
+
+- **Batch Operations** - Use batch processing for memory operations
+- **Caching** - Cache frequently accessed data (prompts, configurations)
+- **Lazy Loading** - Load resources only when needed
+- **Connection Pooling** - Reuse connections to external services
+
+### Security Considerations
+
+- **Input Validation** - Validate all user inputs
+- **Supervised Execution** - All actions must pass hypervisor approval
+- **Audit Trails** - Maintain logs of all agent actions
+- **Access Control** - Implement proper permissions for external services
+
+This agent system provides the foundation for Talos's autonomous protocol management capabilities while maintaining security and reliability through supervised execution.
diff --git a/docs/architecture/components.md b/docs/architecture/components.md
new file mode 100644
index 00000000..58964711
--- /dev/null
+++ b/docs/architecture/components.md
@@ -0,0 +1,163 @@
+# Core Components
+
+Talos is composed of several key components that allow it to function as a decentralized AI protocol owner.
+
+## System Architecture
+
+The codebase follows a layered architecture with clear separation of concerns:
+
+```
+src/talos/
+├── core/          # Core agent system and orchestration
+├── skills/        # Modular capabilities (sentiment analysis, proposals, etc.)
+├── services/ # Business logic implementations +├── tools/ # External API integrations and utilities +├── hypervisor/ # Action supervision and approval system +├── prompts/ # LLM prompt templates and management +├── cli/ # Command-line interface +├── data/ # Data management and vector storage +├── models/ # Pydantic data models +└── utils/ # Utility functions and clients +``` + +## Core Components + +### Hypervisor and Supervisor + +The **Hypervisor** is the core of Talos's governance capabilities. It monitors all actions and uses a Supervisor to approve or deny them based on a set of rules and the agent's history. This protects the protocol from malicious or erroneous actions. + +**Key Features:** +- Monitors all agent actions in real-time +- Rule-based approval/denial system +- Maintains audit trails of all decisions +- Integrates with LLM prompts for complex decision making +- Supports multiple supervisor implementations + +**Components:** +- `Hypervisor` - Main monitoring and coordination system +- `Supervisor` - Abstract interface for approval logic +- `RuleBasedSupervisor` - Concrete implementation with configurable rules + +### Proposal Evaluation System + +Talos can systematically evaluate governance proposals, providing detailed analysis to help stakeholders make informed decisions. + +**Capabilities:** +- LLM-based proposal analysis +- Risk assessment and scoring +- Community feedback integration +- Recommendation generation with reasoning +- Historical proposal tracking + +**Implementation:** +- `ProposalsSkill` - Main proposal evaluation logic +- Integration with external data sources +- Structured output with scoring metrics + +### Tool-Based Architecture + +Talos uses a variety of tools to interact with external services like Twitter, GitHub, and GitBook, allowing it to perform a wide range of tasks. 
+ +**Tool Management:** +- `ToolManager` - Central registry for all available tools +- `SupervisedTool` - Wrapper that adds approval workflow to any tool +- Dynamic tool discovery and registration +- Extensible architecture for new integrations + +**Available Tools:** +- **GitHub Tools** - Repository management, PR reviews, issue tracking +- **Twitter Tools** - Social media monitoring, sentiment analysis, posting +- **IPFS Tools** - Decentralized storage and content management +- **Cryptography Tools** - Key management, encryption/decryption + +## Agent System + +### Main Agent + +The `MainAgent` serves as the top-level orchestrator that integrates all system components: + +- **Router Integration** - Routes queries to appropriate skills/services +- **Hypervisor Integration** - Ensures all actions are supervised +- **Tool Management** - Manages available tools and their registration +- **Memory System** - Persistent conversation history and semantic search +- **Skill Coordination** - Orchestrates multiple skills for complex tasks + +### Base Agent + +The `Agent` class provides core functionality for all AI agents: + +- **LLM Interaction** - Standardized interface to language models +- **Conversation History** - Maintains context across interactions +- **Memory Management** - Semantic search and retrieval +- **Prompt Management** - Template-based prompt system + +## Data Management + +### Memory System + +Persistent storage with semantic search capabilities: + +- **FAISS Integration** - Vector similarity search +- **Conversation History** - Maintains context across sessions +- **Metadata Support** - Rich tagging and filtering +- **Batch Operations** - Optimized for performance + +### Dataset Management + +Manages textual datasets with vector embeddings: + +- **Vector Embeddings** - Semantic similarity search +- **FAISS Backend** - Efficient similarity queries +- **Batch Processing** - Optimized for large datasets +- **Metadata Integration** - Rich content tagging + 
+## External Integrations + +### GitHub Integration + +Comprehensive GitHub API integration: + +- **Repository Operations** - Clone, fork, branch management +- **Pull Request Management** - Review, approve, merge workflows +- **Issue Tracking** - Create, update, close issues +- **Code Review** - AI-powered code analysis and feedback + +### Twitter Integration + +Social media monitoring and engagement: + +- **Content Analysis** - Sentiment analysis and trend detection +- **Account Evaluation** - Influence scoring and verification +- **Automated Posting** - Scheduled and reactive content +- **Community Monitoring** - Real-time sentiment tracking + +### IPFS Integration + +Decentralized storage capabilities: + +- **Content Storage** - Immutable content addressing +- **Metadata Management** - Rich content descriptions +- **Pinning Services** - Reliable content availability +- **Gateway Integration** - HTTP access to IPFS content + +## Configuration and Extensibility + +### Prompt Management + +Template-based prompt system: + +- **File-based Templates** - JSON prompt definitions +- **Dynamic Loading** - Runtime prompt updates +- **Concatenation Support** - Modular prompt composition +- **Version Control** - Track prompt changes over time + +### Skill System + +Modular capability architecture: + +- **Abstract Base Classes** - Standardized skill interface +- **Dynamic Registration** - Runtime skill discovery +- **Parameter Validation** - Type-safe skill execution +- **Result Standardization** - Consistent output formats + +This architecture enables Talos to operate as a sophisticated AI protocol owner while maintaining security, extensibility, and reliability. 
diff --git a/docs/architecture/hypervisor.md b/docs/architecture/hypervisor.md new file mode 100644 index 00000000..50ecd6e1 --- /dev/null +++ b/docs/architecture/hypervisor.md @@ -0,0 +1,274 @@ +# Hypervisor System + +The Hypervisor is the core security and governance component of Talos, responsible for monitoring and approving all agent actions to ensure protocol safety and integrity. + +## Overview + +The Hypervisor system implements a multi-layered approval mechanism that validates all agent actions before execution. This prevents malicious or erroneous actions from affecting the protocol while maintaining autonomous operation. + +## Architecture + +### Core Components + +```python +class Hypervisor: + def __init__(self): + self.supervisor = RuleBasedSupervisor() + self.action_log = ActionLog() + self.prompt_manager = PromptManager() +``` + +**Key Components:** +- **Hypervisor** - Main coordination and monitoring system +- **Supervisor** - Abstract interface for approval logic +- **ActionLog** - Audit trail of all actions and decisions +- **PromptManager** - LLM prompts for complex decision making + +### Supervisor Interface + +The `Supervisor` provides an abstract interface for different approval strategies: + +```python +class Supervisor: + def approve_action(self, action: Action, context: dict) -> ApprovalResult: + """Approve or deny an action based on rules and context""" + pass +``` + +**Implementations:** +- **RuleBasedSupervisor** - Uses predefined rules for approval +- **LLMSupervisor** - Uses language models for complex decisions +- **HybridSupervisor** - Combines rule-based and LLM approaches + +## Approval Workflow + +### Action Submission + +1. **Action Creation** - Agent creates an action request +2. **Context Gathering** - Collect relevant context (history, rules, metadata) +3. **Supervisor Evaluation** - Submit to appropriate supervisor +4. **Decision Recording** - Log approval/denial with reasoning +5. 
**Action Execution** - Execute if approved, block if denied + +### Approval Criteria + +**Rule-Based Criteria:** +- **Whitelist/Blacklist** - Allowed/forbidden actions +- **Rate Limiting** - Maximum actions per time period +- **Resource Limits** - CPU, memory, network usage constraints +- **Permission Checks** - Required permissions for specific actions + +**LLM-Based Criteria:** +- **Intent Analysis** - Understand the purpose of the action +- **Risk Assessment** - Evaluate potential negative consequences +- **Protocol Alignment** - Ensure actions align with protocol goals +- **Context Appropriateness** - Verify action fits current situation + +## SupervisedTool System + +### Tool Wrapping + +All external tools are wrapped with supervision: + +```python +class SupervisedTool: + def __init__(self, base_tool: BaseTool, supervisor: Supervisor): + self.base_tool = base_tool + self.supervisor = supervisor + + def execute(self, *args, **kwargs): + action = Action(tool=self.base_tool, args=args, kwargs=kwargs) + approval = self.supervisor.approve_action(action) + + if approval.approved: + return self.base_tool.execute(*args, **kwargs) + else: + raise ActionDeniedException(approval.reason) +``` + +**Benefits:** +- **Transparent Integration** - No changes required to existing tools +- **Consistent Approval** - All tools use same approval mechanism +- **Audit Trail** - All tool usage is logged and tracked +- **Flexible Policies** - Different approval rules per tool type + +### Tool Categories + +**High-Risk Tools:** +- **GitHub Operations** - Code changes, repository management +- **Financial Operations** - Treasury management, token transfers +- **System Operations** - Configuration changes, service restarts + +**Medium-Risk Tools:** +- **Social Media** - Twitter posting, community engagement +- **Data Operations** - Database queries, file operations +- **Communication** - Email, notifications, alerts + +**Low-Risk Tools:** +- **Read Operations** - Data retrieval, status 
checks +- **Analysis Tools** - Sentiment analysis, data processing +- **Reporting Tools** - Log generation, metrics collection + +## Rule Configuration + +### Rule Definition + +Rules are defined in JSON configuration files: + +```json +{ + "rules": [ + { + "name": "github_pr_approval", + "condition": { + "tool": "github", + "action": "approve_pr", + "max_per_hour": 5 + }, + "approval": "require_review" + }, + { + "name": "twitter_posting", + "condition": { + "tool": "twitter", + "action": "post_tweet" + }, + "approval": "auto_approve", + "filters": ["content_moderation", "brand_guidelines"] + } + ] +} +``` + +**Rule Components:** +- **Condition** - When the rule applies +- **Approval** - Approval strategy (auto, deny, require_review) +- **Filters** - Additional validation steps +- **Metadata** - Rule description and documentation + +### Dynamic Rule Updates + +Rules can be updated dynamically without system restart: + +- **Hot Reloading** - Rules reloaded from configuration files +- **Version Control** - Track rule changes over time +- **Rollback Support** - Revert to previous rule versions +- **A/B Testing** - Test new rules with subset of actions + +## Monitoring and Alerting + +### Action Monitoring + +**Real-time Monitoring:** +- **Action Queue** - Track pending approvals +- **Approval Rates** - Monitor approval/denial ratios +- **Performance Metrics** - Approval latency and throughput +- **Error Tracking** - Failed approvals and system errors + +**Historical Analysis:** +- **Trend Analysis** - Approval patterns over time +- **Risk Assessment** - Identify high-risk action patterns +- **Compliance Reporting** - Generate audit reports +- **Performance Optimization** - Identify bottlenecks + +### Alerting System + +**Alert Types:** +- **High-Risk Actions** - Actions requiring immediate attention +- **Approval Failures** - System errors in approval process +- **Rate Limit Violations** - Excessive action attempts +- **Security Incidents** - Potential malicious 
activity + +**Alert Channels:** +- **Email Notifications** - Critical alerts to administrators +- **Slack Integration** - Real-time team notifications +- **Dashboard Alerts** - Visual indicators in monitoring UI +- **API Webhooks** - Integration with external systems + +## Security Features + +### Audit Trail + +Complete audit trail of all actions: + +```python +class ActionLog: + def log_action(self, action: Action, result: ApprovalResult): + entry = { + "timestamp": datetime.now(), + "action": action.serialize(), + "approval": result.approved, + "reason": result.reason, + "supervisor": result.supervisor_id, + "context": action.context + } + self.store(entry) +``` + +**Audit Features:** +- **Immutable Logs** - Cannot be modified after creation +- **Cryptographic Signatures** - Verify log integrity +- **Retention Policies** - Automatic log archival and cleanup +- **Export Capabilities** - Generate compliance reports + +### Access Control + +**Permission System:** +- **Role-Based Access** - Different permissions per role +- **Action-Level Permissions** - Granular control over specific actions +- **Time-Based Permissions** - Temporary elevated access +- **Multi-Factor Authentication** - Additional security for sensitive actions + +### Threat Detection + +**Anomaly Detection:** +- **Behavioral Analysis** - Detect unusual action patterns +- **Rate Limiting** - Prevent abuse and DoS attacks +- **Signature Detection** - Identify known attack patterns +- **Machine Learning** - Adaptive threat detection + +## Configuration Examples + +### Basic Configuration + +```yaml +hypervisor: + supervisor: "rule_based" + rules_file: "config/approval_rules.json" + audit_log: "logs/actions.log" + +approval_settings: + default_timeout: 30 + max_pending_actions: 100 + require_confirmation: ["high_risk", "financial"] +``` + +### Advanced Configuration + +```yaml +hypervisor: + supervisor: "hybrid" + llm_model: "gpt-4o" + confidence_threshold: 0.8 + +risk_categories: + high_risk: + - 
"github.merge_pr" + - "treasury.transfer_funds" + - "system.restart_service" + + medium_risk: + - "twitter.post_tweet" + - "github.create_issue" + +monitoring: + alerts: + - type: "high_risk_action" + threshold: 1 + channels: ["email", "slack"] + - type: "approval_failure_rate" + threshold: 0.1 + window: "1h" +``` + +The Hypervisor system ensures that Talos can operate autonomously while maintaining the highest levels of security and governance oversight. diff --git a/docs/architecture/skills-services.md b/docs/architecture/skills-services.md new file mode 100644 index 00000000..4a8f50c1 --- /dev/null +++ b/docs/architecture/skills-services.md @@ -0,0 +1,310 @@ +# Skills & Services + +Talos uses a modular architecture with Skills and Services that provide specialized capabilities for different aspects of protocol management. + +## Architecture Overview + +### Skills vs Services + +**Skills** are modular capabilities that can be directly invoked by users or other system components: +- User-facing functionality +- Direct query handling +- Standardized input/output interface +- Can be combined for complex workflows + +**Services** are business logic implementations that provide domain-specific functionality: +- Backend processing logic +- Integration with external systems +- Can be used by multiple skills +- Focus on specific business domains + +## Skills System + +### Base Skill Interface + +All skills inherit from the abstract `Skill` base class: + +```python +class Skill: + def run(self, **kwargs) -> QueryResponse: + """Execute the skill with provided parameters""" + pass + + def create_ticket_tool(self) -> BaseTool: + """Create a tool for ticket-based execution""" + pass +``` + +**Standard Interface:** +- **run()** method for direct execution +- **QueryResponse** return type for consistent output +- **create_ticket_tool()** for tool integration +- Parameter validation and type checking + +### Available Skills + +#### ProposalsSkill + +Evaluates governance 
proposals using LLM analysis: + +**Capabilities:** +- **Proposal Analysis** - Detailed evaluation of governance proposals +- **Risk Assessment** - Identify potential risks and benefits +- **Community Impact** - Assess impact on community and stakeholders +- **Recommendation Generation** - Provide clear approve/reject recommendations + +**Usage:** +```python +skill = ProposalsSkill() +result = skill.run(proposal_text="Increase staking rewards by 10%") +``` + +**Output:** +- Detailed analysis report +- Risk/benefit assessment +- Community impact evaluation +- Clear recommendation with reasoning + +#### TwitterSentimentSkill + +Analyzes Twitter sentiment for given queries: + +**Capabilities:** +- **Tweet Collection** - Gather relevant tweets for analysis +- **Sentiment Analysis** - Evaluate positive/negative sentiment +- **Trend Detection** - Identify emerging trends and topics +- **Influence Scoring** - Assess account influence and credibility + +**Usage:** +```python +skill = TwitterSentimentSkill() +result = skill.run(query="DeFi yield farming", limit=100) +``` + +**Output:** +- Sentiment scores and distribution +- Key themes and topics +- Influential accounts and tweets +- Trend analysis and insights + +#### TwitterInfluencerSkill + +Evaluates Twitter accounts for crypto influence: + +**Capabilities:** +- **Account Analysis** - Comprehensive account evaluation +- **Influence Scoring** - Multi-metric influence assessment +- **Content Analysis** - Evaluate tweet quality and relevance +- **Network Analysis** - Assess follower quality and engagement + +**Metrics:** +- Follower count and growth +- Engagement rates +- Content quality scores +- Network influence metrics + +#### CryptographySkill + +Provides encryption and decryption operations: + +**Capabilities:** +- **Key Generation** - RSA key pair generation +- **Encryption/Decryption** - Secure message handling +- **Digital Signatures** - Message signing and verification +- **Key Management** - Secure key storage 
and retrieval + +**Security Features:** +- Industry-standard encryption algorithms +- Secure key storage +- Audit trail for all operations +- Integration with hardware security modules + +#### ExecutionPlannerSkill + +Generates execution plans for complex tasks: + +**Capabilities:** +- **Task Decomposition** - Break complex tasks into steps +- **Dependency Analysis** - Identify task dependencies +- **Resource Planning** - Estimate required resources +- **Timeline Generation** - Create realistic execution timelines + +**Use Cases:** +- Protocol upgrade planning +- Treasury rebalancing strategies +- Community engagement campaigns +- Development roadmap planning + +## Services System + +### Base Service Interface + +Services implement the abstract `Service` interface: + +```python +class Service: + def process(self, request: ServiceRequest) -> ServiceResponse: + """Process a service request""" + pass +``` + +### Available Services + +#### YieldManagerService + +Calculates optimal staking APR using market data and sentiment: + +**Inputs:** +- Current market conditions +- Protocol metrics (TVL, utilization) +- Community sentiment data +- Competitor analysis + +**Processing:** +- **Market Analysis** - Evaluate current DeFi landscape +- **Risk Assessment** - Assess protocol-specific risks +- **Sentiment Integration** - Factor in community sentiment +- **Optimization** - Calculate optimal APR using LLM reasoning + +**Output:** +- Recommended APR with reasoning +- Risk assessment and mitigation strategies +- Market positioning analysis +- Implementation timeline + +#### TalosSentimentService + +Orchestrates comprehensive sentiment analysis: + +**Workflow:** +1. **Data Collection** - Gather data from multiple sources +2. **Preprocessing** - Clean and normalize data +3. **Analysis** - Apply sentiment analysis algorithms +4. **Aggregation** - Combine results from different sources +5. 
**Reporting** - Generate actionable insights + +**Data Sources:** +- Twitter and social media +- Discord and Telegram communities +- Reddit discussions +- News articles and blogs + +#### GithubService + +Handles GitHub operations and PR reviews: + +**Capabilities:** +- **Repository Management** - Clone, fork, branch operations +- **Pull Request Reviews** - Automated code review and scoring +- **Issue Management** - Create, update, and track issues +- **Workflow Automation** - CI/CD integration and automation + +**Review Process:** +1. **Code Analysis** - Static analysis and quality checks +2. **Security Scanning** - Vulnerability detection +3. **Style Validation** - Code style and convention checks +4. **Test Coverage** - Ensure adequate test coverage +5. **Documentation** - Verify documentation updates + +## Router System + +### Query Routing + +The `Router` directs queries to appropriate skills and services: + +```python +class Router: + def __init__(self): + self.skills = [] + self.services = [] + self.keyword_mapping = {} + + def route(self, query: str) -> Union[Skill, Service]: + """Route query to appropriate handler""" + pass +``` + +**Routing Logic:** +- **Keyword Matching** - Match query keywords to skills/services +- **Intent Recognition** - Understand user intent from query +- **Context Awareness** - Consider conversation history +- **Fallback Handling** - Default routing for unmatched queries + +### Registration System + +Skills and services are dynamically registered: + +```python +# Skill registration +router.register_skill(ProposalsSkill(), keywords=["proposal", "governance"]) +router.register_skill(TwitterSentimentSkill(), keywords=["sentiment", "twitter"]) + +# Service registration +router.register_service(YieldManagerService(), keywords=["yield", "apr", "staking"]) +``` + +## Integration Patterns + +### Skill Composition + +Skills can be combined for complex workflows: + +```python +class ComplexWorkflowSkill(Skill): + def __init__(self): + 
self.sentiment_skill = TwitterSentimentSkill() + self.yield_service = YieldManagerService() + + def run(self, **kwargs): + # Get sentiment data + sentiment = self.sentiment_skill.run(query="protocol sentiment") + + # Calculate optimal yield + yield_data = self.yield_service.process(sentiment_data=sentiment) + + return QueryResponse(answers=[yield_data]) +``` + +### Service Orchestration + +Services can orchestrate multiple operations: + +```python +class ProtocolManagementService(Service): + def process(self, request): + # Analyze market conditions + market_data = self.market_service.get_conditions() + + # Get community sentiment + sentiment = self.sentiment_service.analyze() + + # Calculate optimal parameters + params = self.optimization_service.optimize(market_data, sentiment) + + return ServiceResponse(recommendations=params) +``` + +## Best Practices + +### Skill Development + +- **Single Responsibility** - Each skill should have one clear purpose +- **Consistent Interface** - Follow the standard Skill interface +- **Error Handling** - Robust error handling and user feedback +- **Documentation** - Clear documentation of inputs and outputs + +### Service Design + +- **Stateless Operations** - Services should be stateless when possible +- **Idempotent Operations** - Operations should be safely repeatable +- **Resource Management** - Proper cleanup of external resources +- **Monitoring** - Comprehensive logging and metrics + +### Integration Guidelines + +- **Loose Coupling** - Minimize dependencies between components +- **Standard Interfaces** - Use consistent data formats and APIs +- **Error Propagation** - Proper error handling across component boundaries +- **Testing** - Comprehensive unit and integration testing + +This modular architecture enables Talos to provide sophisticated protocol management capabilities while maintaining flexibility and extensibility for future enhancements. 
diff --git a/docs/cli/crypto.md b/docs/cli/crypto.md new file mode 100644 index 00000000..b263a443 --- /dev/null +++ b/docs/cli/crypto.md @@ -0,0 +1,419 @@ +# Cryptography Commands + +The Talos CLI provides cryptographic operations for secure key management, encryption, and decryption using industry-standard RSA encryption. + +## Overview + +The cryptography commands enable: +- RSA key pair generation +- Public key retrieval and sharing +- Data encryption using public keys +- Data decryption using private keys +- Secure key storage and management + +## Commands + +### `generate-keys` - Generate RSA Key Pair + +Generate a new RSA key pair for encryption and decryption operations. + +**Usage:** +```bash +uv run talos generate-keys +``` + +**Options:** +- `--key-size`: RSA key size in bits (default: 2048, options: 1024, 2048, 4096) +- `--output-dir`: Directory to store keys (default: `.keys/`) +- `--overwrite`: Overwrite existing keys if they exist + +**Examples:** +```bash +# Generate default 2048-bit keys +uv run talos generate-keys + +# Generate high-security 4096-bit keys +uv run talos generate-keys --key-size 4096 + +# Generate keys in custom directory +uv run talos generate-keys --output-dir /secure/keys/ + +# Overwrite existing keys +uv run talos generate-keys --overwrite +``` + +**Output:** +``` +=== RSA Key Generation === + +Key Size: 2048 bits +Output Directory: .keys/ + +✅ Private key generated: .keys/private_key.pem +✅ Public key generated: .keys/public_key.pem + +Key fingerprint: SHA256:abc123def456... + +⚠️ Security Notice: +- Keep your private key secure and never share it +- The public key can be safely shared with others +- Back up your keys in a secure location +``` + +**Generated Files:** +- `private_key.pem` - Private key (keep secure) +- `public_key.pem` - Public key (can be shared) + +### `get-public-key` - Retrieve Public Key + +Display the current public key for sharing with others. 
+

**Usage:**
```bash
uv run talos get-public-key
```

**Options:**
- `--format`: Output format (pem, der, base64)
- `--key-dir`: Directory containing keys (default: `.keys/`)
- `--fingerprint`: Show key fingerprint

**Examples:**
```bash
# Display public key in PEM format
uv run talos get-public-key

# Show key with fingerprint
uv run talos get-public-key --fingerprint

# Export in base64 format
uv run talos get-public-key --format base64

# Use keys from custom directory
uv run talos get-public-key --key-dir /secure/keys/
```

**Output:**
```
=== Public Key ===

-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1234567890abcdef...
...
-----END PUBLIC KEY-----

Fingerprint: SHA256:abc123def456789...
Key Size: 2048 bits
Created: 2024-01-15 10:30:00 UTC
```

### `encrypt` - Encrypt Data

Encrypt data using a public key (yours or someone else's).

**Usage:**
```bash
uv run talos encrypt "<data>" <public_key_file>
```

**Arguments:**
- `data`: Text data to encrypt
- `public_key_file`: Path to public key file

**Options:**
- `--output`: Output file for encrypted data
- `--format`: Output format (base64, hex, binary)

**Examples:**
```bash
# Encrypt a message using your own public key
uv run talos encrypt "Secret message" .keys/public_key.pem

# Encrypt using someone else's public key
uv run talos encrypt "Confidential data" /path/to/their/public_key.pem

# Save encrypted data to file
uv run talos encrypt "Important info" public_key.pem --output encrypted.txt

# Output in hex format
uv run talos encrypt "Data" public_key.pem --format hex
```

**Output:**
```
=== Encryption Complete ===

Original Data: "Secret message"
Public Key: .keys/public_key.pem
Encrypted Data (Base64):
gAAAAABhZ1234567890abcdef...

✅ Data encrypted successfully
📋 Copy the encrypted data above to share securely
```

### `decrypt` - Decrypt Data

Decrypt data using your private key. 
+

**Usage:**
```bash
uv run talos decrypt "<encrypted_data>"
```

**Arguments:**
- `encrypted_data`: Base64-encoded encrypted data

**Options:**
- `--key-dir`: Directory containing private key (default: `.keys/`)
- `--input-file`: Read encrypted data from file
- `--format`: Input format (base64, hex, binary)

**Examples:**
```bash
# Decrypt base64-encoded data
uv run talos decrypt "gAAAAABhZ1234567890abcdef..."

# Decrypt from file
uv run talos decrypt --input-file encrypted.txt

# Decrypt hex-encoded data
uv run talos decrypt "48656c6c6f20576f726c64" --format hex

# Use private key from custom directory
uv run talos decrypt "encrypted_data" --key-dir /secure/keys/
```

**Output:**
```
=== Decryption Complete ===

Encrypted Data: gAAAAABhZ1234567890abcdef...
Private Key: .keys/private_key.pem

✅ Decryption successful
Decrypted Data: "Secret message"
```

## Security Features

### Key Storage

**Default Location:**
- Keys stored in `.keys/` directory
- Private key permissions set to 600 (owner read/write only)
- Public key permissions set to 644 (world readable)

**Security Measures:**
- Private keys never transmitted or logged
- Secure random number generation
- Industry-standard RSA implementation
- Automatic permission setting

### Encryption Standards

**RSA Configuration:**
- PKCS#1 OAEP padding
- SHA-256 hash function
- MGF1 mask generation
- Secure random padding

**Key Sizes:**
- 1024-bit: Legacy support (not recommended)
- 2048-bit: Standard security (recommended)
- 4096-bit: High security (slower performance)

## Advanced Usage

### Secure Communication Workflow

**Setup (one time):**
```bash
# Generate your key pair
uv run talos generate-keys --key-size 2048

# Share your public key
uv run talos get-public-key > my_public_key.pem
```

**Sending encrypted messages:**
```bash
# Encrypt message for recipient
uv run talos encrypt "Confidential message" recipient_public_key.pem

# Send the 
encrypted output to recipient +``` + +**Receiving encrypted messages:** +```bash +# Decrypt received message +uv run talos decrypt "received_encrypted_data" +``` + +### Batch Operations + +**Encrypt multiple files:** +```bash +#!/bin/bash +# encrypt-files.sh + +public_key="recipient_public_key.pem" + +for file in *.txt; do + echo "Encrypting $file..." + content=$(cat "$file") + encrypted=$(uv run talos encrypt "$content" "$public_key") + echo "$encrypted" > "$file.encrypted" +done +``` + +**Decrypt multiple messages:** +```bash +#!/bin/bash +# decrypt-messages.sh + +for encrypted_file in *.encrypted; do + echo "Decrypting $encrypted_file..." + encrypted_data=$(cat "$encrypted_file") + decrypted=$(uv run talos decrypt "$encrypted_data") + echo "$decrypted" > "${encrypted_file%.encrypted}.decrypted" +done +``` + +### Integration with Other Commands + +**Secure GitHub token storage:** +```bash +# Encrypt your GitHub token +encrypted_token=$(uv run talos encrypt "$GITHUB_API_TOKEN" public_key.pem) + +# Store encrypted token safely +echo "$encrypted_token" > github_token.encrypted + +# Later, decrypt when needed +GITHUB_API_TOKEN=$(uv run talos decrypt "$(cat github_token.encrypted)") +``` + +## Configuration + +### Key Management Settings + +```yaml +cryptography: + key_storage: + directory: ".keys" + private_key_permissions: "600" + public_key_permissions: "644" + backup_enabled: true + backup_directory: ".keys/backup" + + encryption: + default_key_size: 2048 + padding: "OAEP" + hash_algorithm: "SHA256" + mgf: "MGF1" + + security: + secure_delete: true + audit_operations: true + require_confirmation: ["generate-keys --overwrite"] +``` + +### Backup Configuration + +```yaml +cryptography: + backup: + enabled: true + schedule: "daily" + retention_days: 30 + encryption: true + remote_backup: + enabled: false + provider: "s3" + bucket: "secure-key-backup" +``` + +## Error Handling + +### Common Issues + +**Missing Keys:** +``` +Error: Private key not found at 
.keys/private_key.pem +Solution: Run 'uv run talos generate-keys' to create keys +``` + +**Invalid Encrypted Data:** +``` +Error: Failed to decrypt data - invalid format +Solution: Verify encrypted data is complete and in correct format +``` + +**Permission Denied:** +``` +Error: Permission denied accessing private key +Solution: Check file permissions or run with appropriate privileges +``` + +**Key Size Mismatch:** +``` +Error: Data too large for key size +Solution: Use larger key size or encrypt smaller data chunks +``` + +### Security Warnings + +**Weak Key Size:** +``` +Warning: 1024-bit keys are not recommended for new applications +Recommendation: Use 2048-bit or larger keys +``` + +**Insecure Storage:** +``` +Warning: Private key has insecure permissions +Action: Automatically fixing permissions to 600 +``` + +## Best Practices + +### Key Management + +**Generation:** +- Use 2048-bit keys minimum for new applications +- Generate keys on secure, trusted systems +- Use hardware security modules for high-value keys + +**Storage:** +- Keep private keys secure and never share them +- Back up keys in multiple secure locations +- Use encrypted storage for key backups +- Regularly rotate keys for long-term use + +**Distribution:** +- Public keys can be shared freely +- Verify public key authenticity through secure channels +- Use key fingerprints to verify key integrity + +### Operational Security + +**Data Handling:** +- Encrypt sensitive data before storage or transmission +- Use secure channels for sharing encrypted data +- Verify decryption results before acting on them +- Clear sensitive data from memory after use + +**Access Control:** +- Limit access to private keys +- Use principle of least privilege +- Monitor key usage and access +- Implement key escrow for critical applications + +**Compliance:** +- Follow organizational security policies +- Meet regulatory requirements for data protection +- Document key management procedures +- Regular security audits 
and reviews + +The cryptography commands provide enterprise-grade security for protecting sensitive data and communications within the Talos ecosystem. diff --git a/docs/cli/github.md b/docs/cli/github.md new file mode 100644 index 00000000..de69d87d --- /dev/null +++ b/docs/cli/github.md @@ -0,0 +1,379 @@ +# GitHub Commands + +The Talos CLI includes comprehensive GitHub integration for repository management, pull request reviews, and development workflow automation. + +## Setup + +### Authentication + +Set your GitHub API token as an environment variable: + +```bash +export GITHUB_API_TOKEN=your_github_token_here +``` + +### Repository Configuration + +Specify the target repository in two ways: + +1. **Environment variable** (recommended for repeated use): + ```bash + export GITHUB_REPO=owner/repo + uv run talos github get-prs + ``` + +2. **Command line argument**: + ```bash + uv run talos github get-prs --repo owner/repo + ``` + +## Commands + +### `get-prs` - List Pull Requests + +List pull requests for a repository with filtering options. 
+ +**Basic Usage:** +```bash +# List open PRs (default) +uv run talos github get-prs --repo microsoft/vscode + +# Using environment variable +export GITHUB_REPO=microsoft/vscode +uv run talos github get-prs +``` + +**Options:** +- `--repo, -r`: Repository in format 'owner/repo' +- `--state`: PR state - 'open' (default), 'closed', or 'all' + +**Examples:** +```bash +# List all PRs (open, closed, merged) +uv run talos github get-prs --repo microsoft/vscode --state all + +# List closed PRs only +uv run talos github get-prs --repo microsoft/vscode --state closed +``` + +**Output Format:** +``` +PR #123: Fix memory leak in parser + Author: developer@example.com + State: open + Created: 2024-01-15 + Updated: 2024-01-16 + +PR #122: Add new API endpoint + Author: contributor@example.com + State: merged + Created: 2024-01-14 + Merged: 2024-01-15 +``` + +### `review-pr` - AI-Powered PR Review + +Perform comprehensive AI analysis of pull requests with security and quality scoring. + +**Basic Usage:** +```bash +# Review a PR (display results only) +uv run talos github review-pr 123 --repo microsoft/vscode + +# Review and post the review as a comment on GitHub +uv run talos github review-pr 123 --repo microsoft/vscode --post +``` + +**Arguments:** +- `pr_number`: Pull request number to review (required) + +**Options:** +- `--repo, -r`: Repository in format 'owner/repo' +- `--post`: Post the review as a comment on the PR +- `--auto-approve`: Automatically approve if criteria are met + +**Advanced Usage:** +```bash +# Review with auto-approval if criteria are met +uv run talos github review-pr 123 --repo microsoft/vscode --auto-approve + +# Review multiple PRs +for pr in 123 124 125; do + uv run talos github review-pr $pr --repo microsoft/vscode --post +done +``` + +**Review Output:** + +The review includes comprehensive analysis: + +``` +=== PR Review Analysis === + +Security Score: 85/100 +Quality Score: 92/100 +Recommendation: APPROVE + +=== Security Analysis === +✅ No 
hardcoded secrets detected +✅ Input validation present +⚠️ Consider adding rate limiting to new API endpoint +✅ Authentication checks in place + +=== Quality Analysis === +✅ Code follows project style guidelines +✅ Adequate test coverage (87%) +✅ Documentation updated +⚠️ Consider adding error handling for edge case + +=== Detailed Findings === +1. New API endpoint properly validates input parameters +2. Tests cover main functionality but missing edge case tests +3. Documentation clearly explains new features +4. No breaking changes detected + +=== Recommendations === +- Add rate limiting to prevent abuse +- Include tests for malformed input handling +- Consider adding metrics collection + +Overall: This PR introduces valuable functionality with good security practices. +Minor improvements suggested but safe to merge. +``` + +### `approve-pr` - Force Approve PR + +Approve a pull request without AI analysis (use with caution). + +**Usage:** +```bash +uv run talos github approve-pr 123 --repo microsoft/vscode +``` + +**Arguments:** +- `pr_number`: Pull request number to approve (required) + +**Options:** +- `--repo, -r`: Repository in format 'owner/repo' + +**When to Use:** +- Emergency fixes that need immediate approval +- PRs that have been manually reviewed +- Trusted contributors with pre-approved changes + +### `merge-pr` - Merge Pull Request + +Merge an approved pull request. 
+ +**Usage:** +```bash +uv run talos github merge-pr 123 --repo microsoft/vscode +``` + +**Arguments:** +- `pr_number`: Pull request number to merge (required) + +**Options:** +- `--repo, -r`: Repository in format 'owner/repo' + +**Prerequisites:** +- PR must be approved +- All required checks must pass +- No merge conflicts +- Sufficient permissions + +## Workflow Examples + +### Daily PR Review Workflow + +```bash +#!/bin/bash +# daily-review.sh + +export GITHUB_REPO=myorg/myproject + +echo "=== Daily PR Review ===" + +# List all open PRs +echo "Open PRs:" +uv run talos github get-prs + +# Review each open PR +for pr in $(uv run talos github get-prs --format=numbers); do + echo "Reviewing PR #$pr..." + uv run talos github review-pr $pr --post +done + +echo "Review complete!" +``` + +### Automated Security Review + +```bash +#!/bin/bash +# security-review.sh + +export GITHUB_REPO=myorg/sensitive-project + +# Get PRs from external contributors +external_prs=$(uv run talos github get-prs --external-only) + +for pr in $external_prs; do + echo "Security review for external PR #$pr" + + # Perform detailed review without auto-approval + uv run talos github review-pr $pr --security-focus --post + + # Only approve if security score > 90 + score=$(uv run talos github review-pr $pr --get-security-score) + if [ $score -gt 90 ]; then + uv run talos github approve-pr $pr + echo "PR #$pr approved (security score: $score)" + else + echo "PR #$pr requires manual review (security score: $score)" + fi +done +``` + +### Release Preparation + +```bash +#!/bin/bash +# release-prep.sh + +export GITHUB_REPO=myorg/myproject + +echo "=== Release Preparation ===" + +# Review all PRs targeted for release +release_prs=$(uv run talos github get-prs --label="release-candidate") + +for pr in $release_prs; do + echo "Final review for release PR #$pr" + + # Comprehensive review with strict criteria + uv run talos github review-pr $pr --strict-mode --post + + # Auto-approve only high-quality PRs + 
uv run talos github review-pr $pr --auto-approve --min-quality=95 +done + +echo "Release review complete!" +``` + +## Configuration + +### Review Criteria + +Configure review criteria in your Talos configuration: + +```yaml +github: + review: + security: + min_score: 80 + required_checks: + - "no_hardcoded_secrets" + - "input_validation" + - "authentication" + + quality: + min_score: 85 + required_checks: + - "test_coverage" + - "documentation" + - "style_compliance" + + auto_approve: + enabled: true + min_security_score: 90 + min_quality_score: 90 + trusted_authors: + - "senior-dev@company.com" + - "security-team@company.com" +``` + +### Notification Settings + +```yaml +github: + notifications: + slack_webhook: "https://hooks.slack.com/..." + email_alerts: true + + triggers: + - "security_score_low" + - "quality_score_low" + - "external_contributor" + - "large_pr" +``` + +## Error Handling + +The GitHub commands include comprehensive error handling: + +### Common Errors + +**Missing Repository:** +``` +Error: Repository not specified +Solution: Set GITHUB_REPO environment variable or use --repo flag +``` + +**Invalid Token:** +``` +Error: GitHub API authentication failed +Solution: Check GITHUB_API_TOKEN environment variable +``` + +**PR Not Found:** +``` +Error: Pull request #123 not found +Solution: Verify PR number and repository access +``` + +**Insufficient Permissions:** +``` +Error: Insufficient permissions to approve PR +Solution: Check repository permissions for your GitHub token +``` + +### Rate Limiting + +GitHub API has rate limits. 
Talos handles this automatically: + +- **Automatic Backoff** - Waits when rate limit is reached +- **Batch Operations** - Optimizes API calls for efficiency +- **Progress Updates** - Shows progress for long-running operations + +### Network Issues + +**Retry Logic:** +- Automatic retry for transient network errors +- Exponential backoff for repeated failures +- Clear error messages for permanent failures + +## Best Practices + +### Security + +- **Token Security** - Store GitHub tokens securely +- **Permission Scope** - Use minimal required permissions +- **Review External PRs** - Always review PRs from external contributors +- **Audit Logs** - Monitor all GitHub operations + +### Efficiency + +- **Batch Reviews** - Review multiple PRs in scripts +- **Environment Variables** - Use GITHUB_REPO for repeated operations +- **Filtering** - Use state and label filters to focus on relevant PRs +- **Automation** - Integrate with CI/CD pipelines + +### Quality Assurance + +- **Consistent Reviews** - Use standardized review criteria +- **Documentation** - Ensure all reviews are documented +- **Follow-up** - Track and follow up on review recommendations +- **Continuous Improvement** - Regularly update review criteria + +The GitHub integration provides powerful tools for maintaining code quality and security while automating routine development workflows. diff --git a/docs/cli/interactive.md b/docs/cli/interactive.md new file mode 100644 index 00000000..4dd47bba --- /dev/null +++ b/docs/cli/interactive.md @@ -0,0 +1,279 @@ +# Interactive Mode + +Interactive mode provides a conversational interface for working with Talos, allowing for natural language queries and continuous dialogue. 
+ +## Starting Interactive Mode + +Launch interactive mode by running Talos without arguments: + +```bash +uv run talos +``` + +You'll see a prompt where you can start conversing: + +``` +Talos AI Agent - Interactive Mode +Type 'exit' to quit + +>> +``` + +## Basic Usage + +### Simple Queries + +Ask questions in natural language: + +``` +>> What are your main capabilities? +>> How is the current market sentiment? +>> What governance proposals need review? +``` + +### Complex Requests + +Request detailed analysis and recommendations: + +``` +>> Analyze the sentiment around "yield farming" on Twitter and recommend APR adjustments +>> Review the latest GitHub PRs and identify any security concerns +>> Evaluate the community response to our latest protocol update +``` + +### Multi-turn Conversations + +Talos maintains context across the conversation: + +``` +>> Analyze sentiment for "DeFi protocols" +>> What are the main concerns mentioned? +>> How should we address these concerns in our next update? +>> Draft a response strategy +``` + +## Available Commands + +### Protocol Management + +``` +>> Check treasury performance +>> Analyze staking metrics +>> Review governance proposals +>> Calculate optimal APR +``` + +### Community Engagement + +``` +>> What's the community saying about our protocol? +>> Analyze Twitter sentiment for "our_protocol_name" +>> Check for mentions and discussions +>> Draft a community update +``` + +### Development Oversight + +``` +>> Review open GitHub PRs +>> Check for security issues in recent commits +>> Analyze code quality metrics +>> Review contributor activity +``` + +### Market Analysis + +``` +>> What are current DeFi trends? +>> Analyze competitor protocols +>> Check yield farming opportunities +>> Review market volatility +``` + +## Advanced Features + +### Context Awareness + +Talos remembers previous conversations and can reference earlier topics: + +``` +>> Remember our discussion about APR optimization yesterday? 
+>> Based on our previous analysis, what's changed? +>> Update the recommendations from our last conversation +``` + +### Multi-step Workflows + +Break complex tasks into steps: + +``` +>> I need to prepare for the governance vote next week +>> First, analyze community sentiment +>> Then review the proposal details +>> Finally, prepare talking points for the discussion +``` + +### Real-time Updates + +Get live updates during long-running operations: + +``` +>> Start monitoring Twitter for protocol mentions +>> Analyze the next 100 tweets about DeFi +>> Keep me updated on any significant sentiment changes +``` + +## Conversation Management + +### History + +Talos maintains conversation history within the session: + +- Previous queries and responses are remembered +- Context is preserved across multiple exchanges +- You can reference earlier parts of the conversation + +### Memory + +Important information is stored in persistent memory: + +- Key insights and decisions +- Protocol-specific information +- User preferences and patterns +- Historical analysis results + +### Session Control + +``` +>> clear history # Clear current session history +>> save conversation # Save important parts to memory +>> load previous session # Reference previous conversations +``` + +## Interactive Commands + +### Help and Information + +``` +>> help # General help +>> what can you do? 
# Capability overview +>> show available commands # Command reference +>> explain [topic] # Detailed explanations +``` + +### Status and Monitoring + +``` +>> status # System status +>> check connections # API connectivity +>> show recent activity # Recent operations +>> monitor [service] # Real-time monitoring +``` + +### Configuration + +``` +>> show config # Current configuration +>> set preference [key] [value] # Update preferences +>> reset settings # Reset to defaults +``` + +## Best Practices + +### Effective Communication + +**Be Specific**: Provide clear, specific requests +``` +Good: "Analyze Twitter sentiment for 'yield farming' in the last 24 hours" +Poor: "Check Twitter" +``` + +**Provide Context**: Give relevant background information +``` +Good: "We're considering increasing APR from 5% to 7%. Analyze community sentiment about yield changes." +Poor: "Should we change APR?" +``` + +**Ask Follow-up Questions**: Dig deeper into analysis +``` +>> What are the main risks identified? +>> How confident are you in this recommendation? +>> What additional data would improve this analysis? +``` + +### Workflow Optimization + +**Use Natural Language**: Don't worry about exact command syntax +``` +>> "Can you help me understand the latest governance proposal?" +>> "I need to review PRs that might have security issues" +>> "What's the community mood about our recent changes?" +``` + +**Combine Operations**: Request multiple related tasks +``` +>> "Analyze market sentiment, check our GitHub activity, and recommend any protocol adjustments" +``` + +**Iterate and Refine**: Build on previous responses +``` +>> "That analysis is helpful. Can you focus specifically on the security concerns?" +>> "Based on that sentiment data, what's our best response strategy?" 
+``` + +### Session Management + +**Save Important Results**: Preserve key insights +``` +>> "Save this analysis to memory for future reference" +>> "Remember this decision for next week's review" +``` + +**Reference Previous Work**: Build on past conversations +``` +>> "Based on last week's sentiment analysis, what's changed?" +>> "Update the recommendations from our previous discussion" +``` + +## Troubleshooting + +### Common Issues + +**No Response**: Check API key configuration +``` +>> status +>> check connections +``` + +**Slow Responses**: Large queries may take time +``` +>> "This is taking a while, can you give me a status update?" +``` + +**Unclear Results**: Ask for clarification +``` +>> "Can you explain that recommendation in more detail?" +>> "What data did you use for this analysis?" +``` + +### Error Recovery + +**Connection Issues**: Talos will attempt to reconnect automatically +``` +>> "I see there was a connection issue. Can you retry that analysis?" +``` + +**Invalid Requests**: Talos will ask for clarification +``` +>> "I'm not sure what you mean. Can you rephrase that request?" +``` + +### Getting Help + +``` +>> help # General help +>> troubleshoot # Common issues +>> contact support # How to get additional help +``` + +Interactive mode provides the most natural and powerful way to work with Talos, enabling sophisticated protocol management through conversational AI. diff --git a/docs/cli/overview.md b/docs/cli/overview.md new file mode 100644 index 00000000..5308bfa7 --- /dev/null +++ b/docs/cli/overview.md @@ -0,0 +1,244 @@ +# CLI Overview + +The Talos CLI is the main entry point for interacting with the Talos agent. It provides both interactive and non-interactive modes for different use cases. + +## Installation + +The CLI is installed as part of the `talos` package. 
After installation, you can run: + +```bash +uv run talos +``` + +## Usage Modes + +### Interactive Mode + +To enter interactive mode, run `talos` without any arguments: + +```bash +uv run talos +``` + +This starts a continuous conversation where you can: +- Ask questions about protocol management +- Request analysis and recommendations +- Execute commands and workflows +- Get help and guidance + +Example session: +``` +>> What are your main capabilities? +>> Analyze the sentiment around "DeFi protocols" on Twitter +>> Help me evaluate a governance proposal +>> exit +``` + +Type `exit` to quit the interactive session. + +### Non-Interactive Mode + +In non-interactive mode, you can run a single query and the agent will exit: + +```bash +uv run talos "your query here" +``` + +Examples: +```bash +uv run talos "What is the current market sentiment?" +uv run talos "Analyze the latest governance proposal" +uv run talos "Check GitHub PRs for security issues" +``` + +### Daemon Mode + +Run Talos continuously for scheduled operations and automated tasks: +
```bash +uv run talos daemon +``` + +The daemon mode: +- Executes scheduled jobs automatically +- Monitors for new proposals and PRs +- Performs continuous market analysis +- Handles automated responses and alerts +- Can be gracefully shutdown with SIGTERM or SIGINT + +## Command Structure + +The Talos CLI uses a hierarchical command structure: + +``` +talos [global-options] <command> [command-options] [arguments] +``` + +### Global Options + +- `--help, -h` - Show help information +- `--version` - Show version information +- `--config` - Specify configuration file path +- `--verbose, -v` - Enable verbose logging + +### Available Commands + +| Command | Description | +|---------|-------------| +| `twitter` | Twitter-related operations and analysis | +| `github` | GitHub repository management and PR reviews | +| `generate-keys` | Generate RSA key pairs for encryption | +| `get-public-key` | Retrieve the current public key | +| 
`encrypt` | Encrypt data using public key | +| `decrypt` | Decrypt data using private key | +| `daemon` | Run in continuous daemon mode | + +## Environment Variables + +### Required Variables + +```bash +export OPENAI_API_KEY="your-openai-api-key" +export PINATA_API_KEY="your-pinata-api-key" +export PINATA_SECRET_API_KEY="your-pinata-secret-api-key" +``` + +### Optional Variables + +```bash +export GITHUB_API_TOKEN="your-github-token" # For GitHub operations +export TWITTER_BEARER_TOKEN="your-twitter-token" # For Twitter analysis +export GITHUB_REPO="owner/repo" # Default repository +``` + +## Configuration + +### Configuration File + +Talos can be configured using a YAML configuration file: + +```yaml +# talos.yml +api_keys: + openai: "${OPENAI_API_KEY}" + github: "${GITHUB_API_TOKEN}" + twitter: "${TWITTER_BEARER_TOKEN}" + +defaults: + github_repo: "owner/repo" + twitter_query_limit: 100 + +logging: + level: "INFO" + file: "talos.log" + +hypervisor: + approval_timeout: 30 + max_pending_actions: 100 +``` + +Specify the configuration file: +```bash +uv run talos --config talos.yml +``` + +### Environment File + +Create a `.env` file for convenience: + +```bash +# .env +OPENAI_API_KEY=your-openai-api-key +PINATA_API_KEY=your-pinata-api-key +PINATA_SECRET_API_KEY=your-pinata-secret-api-key +GITHUB_API_TOKEN=your-github-token +TWITTER_BEARER_TOKEN=your-twitter-bearer-token +GITHUB_REPO=owner/repo +``` + +## Error Handling + +The CLI includes comprehensive error handling for: + +- **Missing API Keys** - Clear messages about required environment variables +- **Network Issues** - Retry logic and timeout handling +- **Invalid Commands** - Helpful suggestions for correct usage +- **Permission Errors** - Guidance on required permissions +- **Rate Limiting** - Automatic backoff and retry strategies + +## Logging + +### Log Levels + +- `DEBUG` - Detailed debugging information +- `INFO` - General information about operations +- `WARNING` - Warning messages about potential 
issues +- `ERROR` - Error messages for failed operations +- `CRITICAL` - Critical errors that may stop execution + +### Log Configuration + +```bash +# Set log level +export TALOS_LOG_LEVEL=DEBUG + +# Set log file +export TALOS_LOG_FILE=talos.log + +# Enable verbose output +uv run talos --verbose +``` + +## Getting Help + +### Command Help + +Get help for any command: + +```bash +uv run talos --help +uv run talos twitter --help +uv run talos github --help +``` + +### Interactive Help + +In interactive mode, you can ask for help: + +``` +>> help +>> what commands are available? +>> how do I analyze Twitter sentiment? +``` + +### Documentation + +- **CLI Reference** - Detailed command documentation +- **Examples** - Common usage patterns and workflows +- **Troubleshooting** - Solutions for common issues +- **API Reference** - Technical details about the underlying APIs + +## Best Practices + +### Security + +- **Environment Variables** - Use environment variables for API keys +- **File Permissions** - Secure configuration files with appropriate permissions +- **Key Rotation** - Regularly rotate API keys and tokens +- **Audit Logs** - Monitor CLI usage through log files + +### Performance + +- **Batch Operations** - Use batch commands when possible +- **Caching** - Enable caching for frequently accessed data +- **Rate Limiting** - Respect API rate limits to avoid throttling +- **Resource Management** - Monitor memory and CPU usage + +### Workflow Integration + +- **Scripting** - Use non-interactive mode for automation +- **CI/CD Integration** - Integrate with continuous integration pipelines +- **Monitoring** - Set up alerts for daemon mode operations +- **Backup** - Regular backup of configuration and data files + +This CLI provides a powerful interface for managing decentralized protocols through the Talos AI agent system. 
diff --git a/docs/cli/twitter.md b/docs/cli/twitter.md new file mode 100644 index 00000000..706ad01e --- /dev/null +++ b/docs/cli/twitter.md @@ -0,0 +1,375 @@ +# Twitter Commands + +The Talos CLI provides comprehensive Twitter integration for sentiment analysis, community monitoring, and social media engagement. + +## Setup + +### Authentication + +Set your Twitter Bearer Token as an environment variable: + +```bash +export TWITTER_BEARER_TOKEN=your_twitter_bearer_token_here +``` + +### API Access + +Twitter commands require: +- Twitter API v2 access +- Bearer Token with read permissions +- Rate limiting awareness (300 requests per 15 minutes) + +## Commands + +### `get-user-prompt` - User Voice Analysis + +Analyze a Twitter user's general voice and communication style to generate a prompt that captures their personality. + +**Usage:** +```bash +uv run talos twitter get-user-prompt <username> +``` + +**Arguments:** +- `username`: Twitter username (without @ symbol) + +**Examples:** +```bash +# Analyze a specific user's communication style +uv run talos twitter get-user-prompt elonmusk + +# Analyze multiple users +uv run talos twitter get-user-prompt vitalikbuterin +uv run talos twitter get-user-prompt naval +``` + +**Output:** +``` +=== User Voice Analysis: @elonmusk === + +Communication Style: +- Direct and concise messaging +- Technical depth with accessible explanations +- Frequent use of humor and memes +- Bold predictions and statements +- Engineering-focused perspective + +Key Themes: +- Technology and innovation +- Space exploration and Mars colonization +- Electric vehicles and sustainable energy +- AI development and safety +- Manufacturing and production efficiency + +Tone Characteristics: +- Confident and assertive +- Occasionally provocative +- Optimistic about technology +- Critical of bureaucracy +- Supportive of free speech + +Generated Prompt: +"Communicate with the direct, confident style of a tech innovator. +Be concise but technically accurate. 
Use accessible language to +explain complex concepts. Show optimism about technological +progress while being realistic about challenges. Occasionally +use humor to make points more memorable." +``` + +### `get-query-sentiment` - Sentiment Analysis + +Analyze sentiment around specific topics, keywords, or phrases on Twitter. + +**Usage:** +```bash +uv run talos twitter get-query-sentiment "<query>" +``` + +**Arguments:** +- `query`: Search query or topic to analyze + +**Options:** +- `--limit`: Number of tweets to analyze (default: 100, max: 1000) +- `--days`: Number of days to look back (default: 7, max: 30) +- `--lang`: Language filter (default: en) + +**Examples:** +```bash +# Basic sentiment analysis +uv run talos twitter get-query-sentiment "DeFi yield farming" + +# Extended analysis with more tweets +uv run talos twitter get-query-sentiment "Ethereum staking" --limit 500 + +# Recent sentiment (last 24 hours) +uv run talos twitter get-query-sentiment "crypto market" --days 1 + +# Multi-language analysis +uv run talos twitter get-query-sentiment "Bitcoin" --lang all +``` + +**Output:** +``` +=== Sentiment Analysis: "DeFi yield farming" === + +Overall Sentiment: MIXED (Slightly Positive) +Confidence: 78% + +Sentiment Distribution: +🟢 Positive: 45% (450 tweets) +🟡 Neutral: 32% (320 tweets) +🔴 Negative: 23% (230 tweets) + +Key Themes: +Positive Sentiments: +- High APY opportunities (mentioned 156 times) +- New protocol launches (mentioned 89 times) +- Successful farming strategies (mentioned 67 times) + +Negative Sentiments: +- Impermanent loss concerns (mentioned 78 times) +- Rug pull warnings (mentioned 45 times) +- Gas fee complaints (mentioned 34 times) + +Influential Voices: +@defi_analyst (50k followers): "Yield farming still profitable with right strategy" +@crypto_researcher (25k followers): "Be careful of new farms, many are unsustainable" + +Trending Hashtags: +#DeFi (mentioned 234 times) +#YieldFarming (mentioned 189 times) +#APY (mentioned 156 times) + 
+Recommendations: +- Monitor impermanent loss discussions for user concerns +- Address gas fee issues in communications +- Highlight sustainable yield strategies +- Engage with influential voices sharing positive content +``` + +## Advanced Usage + +### Sentiment Monitoring + +Set up continuous monitoring for important topics: + +```bash +#!/bin/bash +# sentiment-monitor.sh + +topics=("our_protocol" "DeFi governance" "yield farming" "staking rewards") + +for topic in "${topics[@]}"; do + echo "Monitoring: $topic" + uv run talos twitter get-query-sentiment "$topic" --limit 200 + echo "---" +done +``` + +### Competitor Analysis + +Monitor sentiment around competitor protocols: + +```bash +#!/bin/bash +# competitor-sentiment.sh + +competitors=("Compound" "Aave" "Uniswap" "SushiSwap") + +for competitor in "${competitors[@]}"; do + echo "=== $competitor Sentiment ===" + uv run talos twitter get-query-sentiment "$competitor protocol" --limit 300 + echo "" +done +``` + +### Influencer Tracking + +Analyze key influencers in your space: + +```bash +#!/bin/bash +# influencer-analysis.sh + +influencers=("vitalikbuterin" "haydenzadams" "stanikulechov" "rleshner") + +for influencer in "${influencers[@]}"; do + echo "=== @$influencer Analysis ===" + uv run talos twitter get-user-prompt "$influencer" + echo "" +done +``` + +## Integration with Protocol Management + +### APR Adjustment Based on Sentiment + +```bash +#!/bin/bash +# apr-sentiment-adjustment.sh + +# Get current sentiment about yield farming +sentiment=$(uv run talos twitter get-query-sentiment "yield farming APR" --format=json) + +# Extract sentiment score +score=$(echo $sentiment | jq '.sentiment_score') + +# Adjust APR based on sentiment +# Note: test's -gt/-lt are integer-only; use bc for float comparison +if [ $(echo "$score > 0.7" | bc) -eq 1 ]; then + echo "Positive sentiment detected. Consider maintaining or slightly increasing APR." +elif [ $(echo "$score < 0.3" | bc) -eq 1 ]; then + echo "Negative sentiment detected. Consider increasing APR to attract users." +else + echo "Neutral sentiment. 
Monitor closely for changes." +fi +``` + +### Community Response Strategy + +```bash +#!/bin/bash +# community-response.sh + +# Monitor mentions of our protocol +mentions=$(uv run talos twitter get-query-sentiment "our_protocol_name" --limit 500) + +# Check for negative sentiment spikes +negative_ratio=$(echo $mentions | jq '.negative_ratio') + +if [ $(echo "$negative_ratio > 0.4" | bc) -eq 1 ]; then + echo "High negative sentiment detected!" + echo "Recommended actions:" + echo "1. Investigate main concerns" + echo "2. Prepare community response" + echo "3. Consider protocol adjustments" + + # Get specific concerns + uv run talos twitter get-query-sentiment "our_protocol_name problems" --limit 100 +fi +``` + +## Configuration + +### Rate Limiting + +Configure rate limiting to respect Twitter API limits: + +```yaml +twitter: + rate_limiting: + requests_per_window: 300 + window_minutes: 15 + backoff_strategy: "exponential" + + analysis: + default_tweet_limit: 100 + max_tweet_limit: 1000 + default_days_back: 7 + max_days_back: 30 +``` + +### Sentiment Thresholds + +Configure sentiment analysis thresholds: + +```yaml +twitter: + sentiment: + positive_threshold: 0.6 + negative_threshold: 0.4 + confidence_threshold: 0.7 + + alerts: + negative_spike_threshold: 0.5 + volume_spike_threshold: 200 + influencer_mention_threshold: 10000 # follower count +``` + +## Error Handling + +### Common Issues + +**Rate Limiting:** +``` +Error: Rate limit exceeded +Solution: Wait 15 minutes or reduce query frequency +``` + +**Invalid Bearer Token:** +``` +Error: Twitter API authentication failed +Solution: Check TWITTER_BEARER_TOKEN environment variable +``` + +**No Results:** +``` +Warning: No tweets found for query "very_specific_term" +Solution: Try broader search terms or increase time range +``` + +**API Quota Exceeded:** +``` +Error: Monthly API quota exceeded +Solution: Upgrade Twitter API plan or wait for quota reset +``` + +### Automatic Handling + +Talos automatically handles: 
+- **Rate Limiting** - Waits and retries when limits are reached +- **Network Errors** - Retries with exponential backoff +- **Partial Results** - Returns available data when some requests fail +- **Invalid Queries** - Suggests alternative search terms + +## Best Practices + +### Effective Queries + +**Use Specific Terms:** +```bash +# Good: Specific and relevant +uv run talos twitter get-query-sentiment "Ethereum staking rewards" + +# Poor: Too broad +uv run talos twitter get-query-sentiment "crypto" +``` + +**Include Context:** +```bash +# Good: Includes protocol context +uv run talos twitter get-query-sentiment "Compound lending rates" + +# Good: Includes sentiment context +uv run talos twitter get-query-sentiment "DeFi security concerns" +``` + +### Monitoring Strategy + +**Regular Monitoring:** +- Daily sentiment checks for your protocol +- Weekly competitor analysis +- Monthly influencer voice updates + +**Alert-Based Monitoring:** +- Set up alerts for negative sentiment spikes +- Monitor for unusual volume increases +- Track mentions by high-influence accounts + +### Data Interpretation + +**Consider Context:** +- Market conditions affect overall sentiment +- News events can cause temporary sentiment shifts +- Bot activity may skew results + +**Look for Trends:** +- Focus on sentiment trends over time +- Compare relative sentiment between topics +- Identify recurring themes and concerns + +**Validate Insights:** +- Cross-reference with other data sources +- Verify with community feedback +- Test sentiment-based decisions carefully + +The Twitter integration provides powerful tools for understanding community sentiment and making data-driven decisions about protocol management and community engagement. 
diff --git a/docs/development/code-style.md b/docs/development/code-style.md new file mode 100644 index 00000000..bdd6e775 --- /dev/null +++ b/docs/development/code-style.md @@ -0,0 +1,543 @@ +# Code Style Guide + +This document outlines the code style guidelines for the Talos project. Following these guidelines ensures consistency, readability, and maintainability across the codebase. + +## Python Code Style + +### PEP 8 Compliance + +All Python code must follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) guidelines: + +- Use 4 spaces for indentation (no tabs) +- Keep lines under 88 characters long +- Use lowercase with underscores for function and variable names +- Use CamelCase for class names +- Use UPPER_CASE for constants + +### Type Hints + +Use modern Python type hints consistently: + +```python +# Good - Modern type hints +def process_items(items: list[str]) -> dict[str, int]: + return {item: len(item) for item in items} + +def get_user_data(user_id: int) -> dict[str, Any] | None: + return database.get_user(user_id) + +# Bad - Old-style type hints +from typing import List, Dict, Optional + +def process_items(items: List[str]) -> Dict[str, int]: + return {item: len(item) for item in items} + +def get_user_data(user_id: int) -> Optional[Dict[str, Any]]: + return database.get_user(user_id) +``` + +### Type Hint Guidelines + +- **Never use quotes** around type hints unless absolutely necessary +- Use `from __future__ import annotations` if you need forward references +- Provide type hints for all function signatures +- Use `Any` sparingly and document why it's necessary + +```python +# Good +from __future__ import annotations + +def create_agent(config: AgentConfig) -> Agent: + return Agent(config) + +# Bad +def create_agent(config: "AgentConfig") -> "Agent": + return Agent(config) +``` + +### Import Organization + +Organize imports into three sections with blank lines between them: + +```python +# Standard library imports +import os +import sys +from 
datetime import datetime, timedelta +from pathlib import Path + +# Third-party imports +import requests +from pydantic import BaseModel, Field +from openai import OpenAI + +# First-party imports +from talos.core.agent import Agent +from talos.core.memory import Memory +from talos.utils.helpers import format_response +``` + +### Docstrings + +Use Google-style docstrings for all modules, classes, and functions: + +```python +def analyze_sentiment( + text: str, + model: str = "gpt-4o", + confidence_threshold: float = 0.7 +) -> SentimentResult: + """Analyze sentiment of the given text using an LLM. + + This function processes text through a language model to determine + sentiment polarity and confidence scores. + + Args: + text: The text to analyze for sentiment. Must not be empty. + model: The LLM model to use for analysis. Defaults to "gpt-4o". + confidence_threshold: Minimum confidence score to return results. + Must be between 0.0 and 1.0. + + Returns: + SentimentResult containing polarity score (-1.0 to 1.0) and + confidence score (0.0 to 1.0). + + Raises: + ValueError: If text is empty or confidence_threshold is invalid. + APIError: If the LLM service is unavailable. 
+ + Example: + >>> result = analyze_sentiment("I love this protocol!") + >>> print(f"Sentiment: {result.polarity}, Confidence: {result.confidence}") + Sentiment: 0.8, Confidence: 0.95 + """ + if not text.strip(): + raise ValueError("Text cannot be empty") + + if not 0.0 <= confidence_threshold <= 1.0: + raise ValueError("Confidence threshold must be between 0.0 and 1.0") + + # Implementation here + return SentimentResult(polarity=0.8, confidence=0.95) +``` + +## Pydantic Models + +### Model Configuration + +Use `ConfigDict` for model-specific configuration: + +```python +from pydantic import BaseModel, ConfigDict, Field + +class AgentConfig(BaseModel): + model_config = ConfigDict( + arbitrary_types_allowed=True, + validate_assignment=True, + extra='forbid' + ) + + model_name: str = Field(default="gpt-4o", description="LLM model to use") + temperature: float = Field(default=0.7, ge=0.0, le=2.0) + max_tokens: int = Field(default=1000, gt=0) +``` + +### Post-Initialization Logic + +Use `model_post_init` instead of overriding `__init__`: + +```python +class Agent(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + name: str + model: str = "gpt-4o" + memory: Memory | None = None + + def model_post_init(self, __context: Any) -> None: + """Initialize memory after model creation.""" + if self.memory is None: + self.memory = Memory(agent_name=self.name) +``` + +### Field Validation + +Use Pydantic validators for complex validation: + +```python +from pydantic import BaseModel, field_validator, model_validator + +class TwitterQuery(BaseModel): + query: str + limit: int = 100 + days_back: int = 7 + + @field_validator('query') + @classmethod + def validate_query(cls, v: str) -> str: + if not v.strip(): + raise ValueError('Query cannot be empty') + if len(v) > 500: + raise ValueError('Query too long (max 500 characters)') + return v.strip() + + @field_validator('limit') + @classmethod + def validate_limit(cls, v: int) -> int: + if not 1 <= v <= 1000: + 
raise ValueError('Limit must be between 1 and 1000') + return v + + @model_validator(mode='after') + def validate_model(self) -> 'TwitterQuery': + if self.days_back > 30 and self.limit > 100: + raise ValueError('Cannot use high limit with long time range') + return self +``` + +## Error Handling + +### Exception Hierarchy + +Create custom exceptions for different error types: + +```python +class TalosError(Exception): + """Base exception for all Talos errors.""" + pass + +class ConfigurationError(TalosError): + """Raised when configuration is invalid.""" + pass + +class APIError(TalosError): + """Raised when external API calls fail.""" + + def __init__(self, message: str, status_code: int | None = None): + super().__init__(message) + self.status_code = status_code + +class ValidationError(TalosError): + """Raised when input validation fails.""" + pass +``` + +### Error Handling Patterns + +Use specific exception handling and provide helpful error messages: + +```python +def fetch_twitter_data(query: str) -> list[dict[str, Any]]: + """Fetch Twitter data with proper error handling.""" + try: + response = twitter_client.search(query) + return response.data + except requests.HTTPError as e: + if e.response.status_code == 429: + raise APIError( + "Twitter API rate limit exceeded. Please try again later.", + status_code=429 + ) from e + elif e.response.status_code == 401: + raise APIError( + "Twitter API authentication failed. 
Check your bearer token.", + status_code=401 + ) from e + else: + raise APIError(f"Twitter API error: {e}", status_code=e.response.status_code) from e + except requests.RequestException as e: + raise APIError(f"Network error while fetching Twitter data: {e}") from e + except Exception as e: + raise TalosError(f"Unexpected error fetching Twitter data: {e}") from e +``` + +## Logging + +### Logger Configuration + +Use structured logging with appropriate levels: + +```python +import logging +from typing import Any + +logger = logging.getLogger(__name__) + +class Agent: + def __init__(self, name: str): + self.name = name + self.logger = logging.getLogger(f"{__name__}.{name}") + + def process_query(self, query: str) -> QueryResponse: + self.logger.info("Processing query", extra={ + "agent_name": self.name, + "query_length": len(query), + "query_hash": hash(query) + }) + + try: + result = self._execute_query(query) + self.logger.info("Query processed successfully", extra={ + "agent_name": self.name, + "response_length": len(str(result)) + }) + return result + except Exception as e: + self.logger.error("Query processing failed", extra={ + "agent_name": self.name, + "error": str(e), + "error_type": type(e).__name__ + }, exc_info=True) + raise +``` + +### Log Levels + +Use appropriate log levels: + +- **DEBUG** - Detailed information for debugging +- **INFO** - General information about program execution +- **WARNING** - Something unexpected happened but the program continues +- **ERROR** - A serious problem occurred +- **CRITICAL** - A very serious error occurred + +## Testing Style + +### Test Organization + +Organize tests to mirror the source code structure: + +``` +tests/ +├── unit/ +│ ├── core/ +│ │ ├── test_agent.py +│ │ └── test_memory.py +│ ├── skills/ +│ │ └── test_sentiment.py +│ └── services/ +│ └── test_yield_manager.py +├── integration/ +│ ├── test_github_integration.py +│ └── test_twitter_integration.py +└── e2e/ + └── test_full_workflow.py +``` + +### Test 
Naming + +Use descriptive test names that explain the scenario: + +```python +def test_agent_processes_simple_query_successfully(): + """Test that agent can process a simple query and return valid response.""" + pass + +def test_agent_raises_error_when_query_is_empty(): + """Test that agent raises ValidationError when given empty query.""" + pass + +def test_sentiment_analysis_returns_positive_score_for_positive_text(): + """Test that sentiment analysis correctly identifies positive sentiment.""" + pass +``` + +### Test Structure + +Follow the Arrange-Act-Assert pattern: + +```python +def test_memory_stores_and_retrieves_data(): + # Arrange + memory = Memory(agent_name="test_agent") + test_data = "This is a test memory" + metadata = {"type": "test", "importance": "high"} + + # Act + memory.add_memory(test_data, metadata) + results = memory.search("test memory", limit=1) + + # Assert + assert len(results) == 1 + assert results[0].description == test_data + assert results[0].metadata == metadata +``` + +### Fixtures and Mocking + +Use fixtures for common test setup: + +```python +import pytest +from unittest.mock import Mock, patch + +@pytest.fixture +def mock_openai_client(): + """Mock OpenAI client for testing.""" + client = Mock() + client.chat.completions.create.return_value = Mock( + choices=[Mock(message=Mock(content="Test response"))] + ) + return client + +@pytest.fixture +def test_agent(mock_openai_client): + """Create test agent with mocked dependencies.""" + with patch('talos.core.agent.OpenAI', return_value=mock_openai_client): + return Agent(name="test_agent", model="gpt-4o") + +def test_agent_generates_response(test_agent): + """Test that agent generates appropriate response.""" + response = test_agent.process_query("What is the weather?") + assert response is not None + assert isinstance(response, QueryResponse) +``` + +## Performance Guidelines + +### Memory Management + +Write memory-efficient code: + +```python +# Good - Use generators for large 
datasets +def process_large_dataset(data_source: str) -> Iterator[ProcessedItem]: + """Process large dataset efficiently using generators.""" + with open(data_source) as file: + for line in file: + yield process_line(line) + +# Good - Use context managers for resource cleanup +def analyze_file(file_path: str) -> AnalysisResult: + """Analyze file with proper resource management.""" + with open(file_path) as file: + content = file.read() + return analyze_content(content) + +# Bad - Loading entire dataset into memory +def process_large_dataset_bad(data_source: str) -> list[ProcessedItem]: + with open(data_source) as file: + all_lines = file.readlines() # Loads entire file into memory + return [process_line(line) for line in all_lines] +``` + +### Async Programming + +Use async/await for I/O-bound operations: + +```python +import asyncio +import aiohttp +from typing import AsyncIterator + +async def fetch_multiple_urls(urls: list[str]) -> list[dict[str, Any]]: + """Fetch multiple URLs concurrently.""" + async with aiohttp.ClientSession() as session: + tasks = [fetch_url(session, url) for url in urls] + results = await asyncio.gather(*tasks, return_exceptions=True) + return [r for r in results if not isinstance(r, Exception)] + +async def fetch_url(session: aiohttp.ClientSession, url: str) -> dict[str, Any]: + """Fetch single URL with error handling.""" + try: + async with session.get(url) as response: + return await response.json() + except Exception as e: + logger.error(f"Failed to fetch {url}: {e}") + raise +``` + +## Security Guidelines + +### Input Validation + +Always validate and sanitize inputs: + +```python +def process_user_query(query: str, user_id: int) -> QueryResponse: + """Process user query with proper validation.""" + # Validate input parameters + if not isinstance(query, str): + raise ValidationError("Query must be a string") + + if not query.strip(): + raise ValidationError("Query cannot be empty") + + if len(query) > 10000: + raise 
ValidationError("Query too long (max 10000 characters)") + + if not isinstance(user_id, int) or user_id <= 0: + raise ValidationError("Invalid user ID") + + # Sanitize query to prevent injection attacks + sanitized_query = sanitize_query(query) + + # Process the sanitized query + return execute_query(sanitized_query, user_id) +``` + +### Secret Management + +Never hardcode secrets or API keys: + +```python +import os +from typing import Optional + +def get_api_key(service: str) -> str: + """Get API key from environment variables.""" + key_name = f"{service.upper()}_API_KEY" + api_key = os.getenv(key_name) + + if not api_key: + raise ConfigurationError(f"Missing required environment variable: {key_name}") + + return api_key + +# Good - Use environment variables +openai_key = get_api_key("openai") + +# Bad - Hardcoded secrets +# openai_key = "sk-1234567890abcdef" # Never do this! +``` + +## Documentation Style + +### Code Comments + +Write clear, helpful comments: + +```python +def calculate_optimal_apr( + market_data: MarketData, + sentiment_score: float, + current_apr: float +) -> float: + """Calculate optimal APR based on market conditions and sentiment.""" + + # Base APR adjustment based on market volatility + # Higher volatility requires higher APR to attract users + volatility_adjustment = market_data.volatility * 0.1 + + # Sentiment adjustment: positive sentiment allows lower APR + # Negative sentiment requires higher APR to maintain attractiveness + sentiment_adjustment = (0.5 - sentiment_score) * 0.05 + + # Calculate new APR with bounds checking + new_apr = current_apr + volatility_adjustment + sentiment_adjustment + + # Ensure APR stays within reasonable bounds (1% to 20%) + return max(0.01, min(0.20, new_apr)) +``` + +### README and Documentation + +Keep documentation up to date and comprehensive: + +- Explain the purpose and scope of each module +- Provide usage examples +- Document configuration options +- Include troubleshooting guides +- Maintain API 
documentation + +Following these code style guidelines ensures that the Talos codebase remains clean, maintainable, and accessible to all contributors. diff --git a/docs/development/contributing.md b/docs/development/contributing.md new file mode 100644 index 00000000..3155a4f9 --- /dev/null +++ b/docs/development/contributing.md @@ -0,0 +1,415 @@ +# Contributing + +Thank you for your interest in contributing to Talos! This guide will help you get started with contributing to the project. + +## Getting Started + +### Prerequisites + +- Python 3.8 or higher +- `uv` package manager (recommended) +- Git +- Basic understanding of AI agents and DeFi protocols + +### Development Setup + +1. **Fork and clone the repository**: + ```bash + git clone https://github.com/your-username/talos.git + cd talos + ``` + +2. **Create a virtual environment**: + ```bash + uv venv + source .venv/bin/activate + ``` + +3. **Install dependencies**: + ```bash + ./scripts/install_deps.sh + ``` + +4. **Set up environment variables**: + ```bash + export OPENAI_API_KEY="your-openai-api-key" + export PINATA_API_KEY="your-pinata-api-key" + export PINATA_SECRET_API_KEY="your-pinata-secret-api-key" + ``` + +5. **Run tests to verify setup**: + ```bash + ./scripts/run_checks.sh + ``` + +## Development Workflow + +### Branch Management + +1. **Create a feature branch**: + ```bash + git checkout -b feature/your-feature-name + ``` + +2. **Make your changes** following the code style guidelines + +3. **Run pre-commit checks**: + ```bash + ./scripts/run_checks.sh + ``` + +4. **Commit your changes**: + ```bash + git add . + git commit -m "feat: add your feature description" + ``` + +5. **Push and create a pull request**: + ```bash + git push origin feature/your-feature-name + ``` + +### Commit Message Format + +Follow conventional commit format: + +- `feat:` - New features +- `fix:` - Bug fixes +- `docs:` - Documentation changes +- `style:` - Code style changes (formatting, etc.) 
+- `refactor:` - Code refactoring +- `test:` - Adding or updating tests +- `chore:` - Maintenance tasks + +Examples: +``` +feat: add sentiment analysis for Twitter data +fix: resolve memory leak in agent initialization +docs: update API documentation for new endpoints +``` + +## Code Quality Standards + +### Pre-commit Checks + +Before committing any changes, ensure you run the following checks: + +1. **Ruff** - Lint and format the code: + ```bash + uv run ruff check . + uv run ruff format . + ``` + +2. **Mypy** - Type checking: + ```bash + uv run mypy src + ``` + +3. **Pytest** - Run all tests: + ```bash + uv run pytest + ``` + +### Automated Checks + +Run all checks at once: +```bash +./scripts/run_checks.sh +``` + +This script runs: +- Ruff linting and formatting +- Mypy type checking +- Pytest test suite + +## Code Style Guidelines + +### Python Code Style + +- Follow [PEP 8](https://www.python.org/dev/peps/pep-0008/) for all Python code +- Use modern Python type hints (`list` and `dict` instead of `List` and `Dict`) +- Never use quotes around type hints +- Use type hints for all function signatures +- Write clear and concise docstrings for all modules, classes, and functions +- Keep lines under 88 characters long + +### Type Hints + +```python +# Good +def process_data(items: list[str]) -> dict[str, int]: + """Process a list of items and return counts.""" + return {item: len(item) for item in items} + +# Bad +def process_data(items: "List[str]") -> "Dict[str, int]": + return {item: len(item) for item in items} +``` + +### Pydantic Models + +When creating Pydantic `BaseModel`s: +- Use `model_post_init` for post-initialization logic instead of overriding `__init__` +- Use `ConfigDict` for model-specific configuration + +```python +from pydantic import BaseModel, ConfigDict + +class MyModel(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + name: str + value: int + + def model_post_init(self, __context): + # Post-initialization logic here + 
pass +``` + +### Import Organization + +Always put imports at the top of the file, organized into sections: + +```python +# Standard library imports +import os +import sys +from datetime import datetime + +# Third-party imports +import requests +from pydantic import BaseModel + +# First-party imports +from talos.core.agent import Agent +from talos.utils.helpers import format_response +``` + +## Testing Guidelines + +### Writing Tests + +- Write tests for all new functionality +- Use descriptive test names that explain what is being tested +- Follow the Arrange-Act-Assert pattern +- Use fixtures for common test setup + +```python +def test_agent_processes_query_successfully(): + # Arrange + agent = Agent(model="gpt-4o") + query = "What is the current market sentiment?" + + # Act + result = agent.process_query(query) + + # Assert + assert result is not None + assert isinstance(result, QueryResponse) + assert len(result.answers) > 0 +``` + +### Test Categories + +- **Unit Tests** - Test individual functions and classes +- **Integration Tests** - Test component interactions +- **End-to-End Tests** - Test complete workflows +- **Performance Tests** - Test performance characteristics + +### Running Tests + +```bash +# Run all tests +uv run pytest + +# Run specific test file +uv run pytest tests/test_agent.py + +# Run with coverage +uv run pytest --cov=src + +# Run tests matching pattern +uv run pytest -k "test_sentiment" +``` + +## Documentation + +### Docstring Format + +Use Google-style docstrings: + +```python +def analyze_sentiment(text: str, model: str = "gpt-4o") -> float: + """Analyze sentiment of the given text. + + Args: + text: The text to analyze for sentiment. + model: The LLM model to use for analysis. + + Returns: + A sentiment score between -1.0 (negative) and 1.0 (positive). + + Raises: + ValueError: If text is empty or model is not supported. 
+ + Example: + >>> score = analyze_sentiment("I love this protocol!") + >>> print(f"Sentiment: {score}") + Sentiment: 0.8 + """ + if not text.strip(): + raise ValueError("Text cannot be empty") + + # Implementation here + return 0.0 +``` + +### API Documentation + +- Document all public APIs +- Include examples in docstrings +- Update documentation when changing APIs +- Use type hints consistently + +## Architecture Guidelines + +### Adding New Skills + +When adding new skills to Talos: + +1. **Inherit from base Skill class**: + ```python + from talos.skills.base import Skill + + class MyNewSkill(Skill): + def run(self, **kwargs) -> QueryResponse: + # Implementation + pass + ``` + +2. **Register with router**: + ```python + router.register_skill(MyNewSkill(), keywords=["keyword1", "keyword2"]) + ``` + +3. **Add comprehensive tests** +4. **Update documentation** + +### Adding New Services + +When adding new services: + +1. **Inherit from base Service class**: + ```python + from talos.services.abstract.service import Service + + class MyNewService(Service): + def process(self, request: ServiceRequest) -> ServiceResponse: + # Implementation + pass + ``` + +2. **Follow single responsibility principle** +3. **Make services stateless when possible** +4. **Add proper error handling** + +### Adding New Tools + +When adding new tools: + +1. **Inherit from BaseTool** +2. **Wrap with SupervisedTool for security** +3. **Add comprehensive error handling** +4. 
**Document all parameters and return values** + +## Security Guidelines + +### API Keys and Secrets + +- Never commit API keys or secrets to the repository +- Use environment variables for sensitive configuration +- Add sensitive files to `.gitignore` +- Use the secrets management system for production + +### Input Validation + +- Validate all user inputs +- Sanitize data before processing +- Use type hints and Pydantic models for validation +- Handle edge cases gracefully + +### Error Handling + +- Don't expose sensitive information in error messages +- Log errors appropriately for debugging +- Provide helpful error messages to users +- Use proper exception handling + +## Performance Guidelines + +### Memory Management + +- Use batch operations for memory-intensive tasks +- Implement proper cleanup in destructors +- Monitor memory usage in long-running processes +- Use generators for large datasets + +### API Usage + +- Implement proper rate limiting +- Use caching for frequently accessed data +- Batch API calls when possible +- Handle API errors gracefully + +## Getting Help + +### Resources + +- **Documentation** - Check the full documentation for detailed guides +- **Issues** - Search existing issues before creating new ones +- **Discussions** - Use GitHub Discussions for questions and ideas +- **Code Review** - Request reviews from maintainers + +### Communication + +- Be respectful and constructive in all interactions +- Provide clear descriptions of issues and proposed changes +- Include relevant context and examples +- Follow up on feedback and suggestions + +### Issue Reporting + +When reporting bugs: + +1. **Check existing issues** first +2. **Provide clear reproduction steps** +3. **Include relevant logs and error messages** +4. **Specify your environment** (OS, Python version, etc.) +5. **Use the issue template** if available + +### Feature Requests + +When requesting features: + +1. **Describe the use case** clearly +2. 
**Explain the expected behavior** +3. **Consider the impact** on existing functionality +4. **Provide examples** if possible + +## Release Process + +### Version Management + +- Follow semantic versioning (SemVer) +- Update version numbers in appropriate files +- Create release notes for significant changes +- Tag releases appropriately + +### Deployment + +- Ensure all tests pass before release +- Update documentation for new features +- Coordinate with maintainers for release timing +- Monitor for issues after release + +Thank you for contributing to Talos! Your contributions help make decentralized protocol management more accessible and secure. diff --git a/docs/development/performance.md b/docs/development/performance.md new file mode 100644 index 00000000..36ae46ef --- /dev/null +++ b/docs/development/performance.md @@ -0,0 +1,422 @@ +# Performance Analysis and Optimization + +This document provides detailed analysis of performance issues identified in the Talos codebase and recommendations for optimization. + +## Executive Summary + +Performance analysis of the Talos AI agent codebase has identified several optimization opportunities ranging from high-impact file I/O bottlenecks to medium-impact caching opportunities. This document outlines the issues, their impact, and implementation strategies for improvement. + +## Identified Performance Issues + +### 1. Memory Management File I/O (HIGH IMPACT) + +**Location**: `src/talos/core/memory.py:58-70` + +**Issue**: Every memory addition triggers immediate file write operations, causing significant I/O overhead. + +```python +def add_memory(self, description: str, metadata: Optional[dict] = None): + # ... memory creation logic ... 
+ self.memories.append(memory) + if self.index is None: + self.index = IndexFlatL2(len(embedding)) + self.index.add(np.array([embedding], dtype=np.float32)) + self._save() # ← Immediate file write on every addition +``` + +**Impact**: +- High latency for memory operations +- Excessive disk I/O in memory-intensive workflows +- Poor scalability for bulk memory additions + +**Solution**: Implement batched writes with configurable batch size and auto-flush on destruction. + +**Implementation**: +```python +class Memory: + def __init__(self, batch_size: int = 10, auto_save: bool = True): + self.batch_size = batch_size + self.auto_save = auto_save + self.pending_writes = 0 + + def add_memory(self, description: str, metadata: Optional[dict] = None): + # ... memory creation logic ... + self.memories.append(memory) + self.pending_writes += 1 + + if self.pending_writes >= self.batch_size: + self.flush() + + def flush(self): + """Manually flush pending writes to disk.""" + if self.pending_writes > 0: + self._save() + self.pending_writes = 0 + + def __del__(self): + """Ensure data is saved on destruction.""" + if self.auto_save and self.pending_writes > 0: + self.flush() +``` + +### 2. CLI History Management Redundancy (MEDIUM IMPACT) + +**Location**: `src/talos/cli/main.py:97-102` + +**Issue**: Manual history management with redundant message appending in interactive mode. + +```python +result = main_agent.run(user_input, history=history) +history.append(HumanMessage(content=user_input)) # ← Redundant append +if isinstance(result, AIMessage): + history.append(AIMessage(content=result.content)) # ← Manual management +else: + history.append(AIMessage(content=str(result))) +``` + +**Impact**: +- Duplicated history management logic +- Potential for history inconsistencies +- Unnecessary memory usage in long conversations + +**Solution**: Leverage the agent's built-in history management instead of manual tracking. 
+ +**Implementation**: +```python +def interactive_mode(): + main_agent = MainAgent() + + while True: + user_input = input(">> ") + if user_input.lower() == 'exit': + break + + # Let the agent manage its own history + result = main_agent.run(user_input) + print(result.content if hasattr(result, 'content') else str(result)) +``` + +### 3. GitHub API Repository Caching (MEDIUM IMPACT) + +**Location**: `src/talos/tools/github/tools.py` + +**Issue**: Repository objects are fetched repeatedly instead of being cached. + +```python +def get_open_issues(self, user: str, project: str) -> list[dict[str, Any]]: + repo = self._github.get_repo(f"{user}/{project}") # ← Repeated API call + # ... + +def get_all_pull_requests(self, user: str, project: str, state: str = "open") -> list[dict[str, Any]]: + repo = self._github.get_repo(f"{user}/{project}") # ← Same repo fetched again + # ... +``` + +**Impact**: +- Unnecessary API calls to GitHub +- Increased latency for GitHub operations +- Potential rate limiting issues + +**Solution**: Implement repository object caching with TTL expiration. + +**Implementation**: +```python +from functools import lru_cache +from datetime import datetime, timedelta +from typing import Dict, Tuple + +class GithubTools: + def __init__(self): + self._repo_cache: Dict[str, Tuple[Any, datetime]] = {} + self._cache_ttl = timedelta(minutes=5) + + def _get_repo_cached(self, repo_name: str): + """Get repository with caching.""" + now = datetime.now() + + if repo_name in self._repo_cache: + repo, cached_time = self._repo_cache[repo_name] + if now - cached_time < self._cache_ttl: + return repo + + # Fetch fresh repository + repo = self._github.get_repo(repo_name) + self._repo_cache[repo_name] = (repo, now) + return repo + + def get_open_issues(self, user: str, project: str) -> list[dict[str, Any]]: + repo = self._get_repo_cached(f"{user}/{project}") + # ... rest of implementation +``` + +### 4. 
Prompt Loading Without Caching (LOW IMPACT) + +**Location**: `src/talos/prompts/prompt_managers/file_prompt_manager.py:18-31` + +**Issue**: Prompts are loaded from files on every initialization without caching. + +```python +def load_prompts(self) -> None: + for filename in os.listdir(self.prompts_dir): + if filename.endswith(".json"): + with open(os.path.join(self.prompts_dir, filename)) as f: # ← File I/O on every load + prompt_data = json.load(f) +``` + +**Impact**: +- Repeated file I/O for static prompt data +- Slower initialization times +- Unnecessary disk access + +**Solution**: Implement prompt caching with file modification time checking. + +**Implementation**: +```python +import os +import json +from typing import Dict, Tuple + +class FilePromptManager: + def __init__(self, prompts_dir: str): + self.prompts_dir = prompts_dir + self.prompts: Dict[str, dict] = {} + self._prompt_cache: Dict[str, Tuple[dict, float]] = {} + + def load_prompts(self) -> None: + for filename in os.listdir(self.prompts_dir): + if filename.endswith(".json"): + filepath = os.path.join(self.prompts_dir, filename) + mtime = os.path.getmtime(filepath) + + # Check if file has been modified since last cache + if filename in self._prompt_cache: + cached_data, cached_mtime = self._prompt_cache[filename] + if mtime <= cached_mtime: + self.prompts[filename[:-5]] = cached_data + continue + + # Load and cache the prompt + with open(filepath) as f: + prompt_data = json.load(f) + self._prompt_cache[filename] = (prompt_data, mtime) + self.prompts[filename[:-5]] = prompt_data +``` + +### 5. Tool Registration Inefficiency (LOW IMPACT) + +**Location**: `src/talos/core/main_agent.py:74-75` + +**Issue**: Tools are registered in loops without checking for duplicates efficiently.
+ +```python +for skill in self.router.skills: + tool_manager.register_tool(skill.create_ticket_tool()) # ← Potential duplicate registrations +``` + +**Impact**: +- Potential duplicate tool registrations +- Inefficient tool lookup +- Memory overhead from duplicate tools + +**Solution**: Implement efficient duplicate checking or use set-based registration. + +**Implementation**: +```python +class ToolManager: + def __init__(self): + self._registered_tools: set[str] = set() + self.tools: dict[str, BaseTool] = {} + + def register_tool(self, tool: BaseTool) -> bool: + """Register tool, returning True if newly registered.""" + tool_id = f"{tool.__class__.__name__}_{hash(str(tool))}" + + if tool_id in self._registered_tools: + return False # Already registered + + self._registered_tools.add(tool_id) + self.tools[tool.name] = tool + return True +``` + +## Optimization Priority + +1. **Memory Management File I/O** - Immediate implementation recommended +2. **GitHub API Repository Caching** - High value for GitHub-heavy workflows +3. **CLI History Management** - Improves interactive experience +4. **Prompt Loading Caching** - Low overhead improvement +5. 
**Tool Registration** - Minor optimization + +## Implementation Status + +✅ **Memory Management Optimization**: Implemented batched writes with configurable batch size +- Added `batch_size` and `auto_save` parameters +- Implemented `flush()` method for manual persistence +- Added destructor to ensure data persistence +- Maintains backward compatibility + +## Performance Monitoring + +### Metrics to Track + +**Memory Operations**: +- Average memory addition latency +- Batch write frequency +- Memory usage over time +- Disk I/O operations per minute + +**API Operations**: +- GitHub API call frequency +- Cache hit/miss ratios +- API response times +- Rate limit utilization + +**System Performance**: +- CPU usage during operations +- Memory consumption patterns +- Disk I/O throughput +- Network bandwidth usage + +### Monitoring Implementation + +```python +import time +import logging +from functools import wraps +from typing import Callable, Any + +class PerformanceMonitor: + def __init__(self): + self.metrics = {} + self.logger = logging.getLogger(__name__) + + def time_operation(self, operation_name: str): + """Decorator to time operations.""" + def decorator(func: Callable) -> Callable: + @wraps(func) + def wrapper(*args, **kwargs) -> Any: + start_time = time.time() + try: + result = func(*args, **kwargs) + duration = time.time() - start_time + self._record_metric(operation_name, duration, "success") + return result + except Exception as e: + duration = time.time() - start_time + self._record_metric(operation_name, duration, "error") + raise + return wrapper + return decorator + + def _record_metric(self, name: str, duration: float, status: str): + """Record performance metric.""" + if name not in self.metrics: + self.metrics[name] = [] + + self.metrics[name].append({ + "duration": duration, + "status": status, + "timestamp": time.time() + }) + + self.logger.info(f"Operation {name} completed in {duration:.3f}s with status {status}") + +# Usage example +monitor = 
PerformanceMonitor() + +class Memory: + @monitor.time_operation("memory_add") + def add_memory(self, description: str, metadata: Optional[dict] = None): + # Implementation here + pass +``` + +## Benchmarking + +### Performance Tests + +Create benchmarks to measure optimization effectiveness: + +```python +import pytest +import time +from talos.core.memory import Memory + +class TestMemoryPerformance: + def test_batch_write_performance(self): + """Test that batch writes improve performance.""" + # Test without batching + memory_no_batch = Memory(batch_size=1) + start_time = time.time() + for i in range(100): + memory_no_batch.add_memory(f"Memory {i}") + no_batch_time = time.time() - start_time + + # Test with batching + memory_batch = Memory(batch_size=10) + start_time = time.time() + for i in range(100): + memory_batch.add_memory(f"Memory {i}") + memory_batch.flush() # Ensure all writes complete + batch_time = time.time() - start_time + + # Batching should be significantly faster + assert batch_time < no_batch_time * 0.5 + + def test_memory_usage_scaling(self): + """Test memory usage scales linearly with data.""" + import psutil + import os + + process = psutil.Process(os.getpid()) + initial_memory = process.memory_info().rss + + memory = Memory(batch_size=50) + for i in range(1000): + memory.add_memory(f"Large memory entry {i} with lots of content") + + final_memory = process.memory_info().rss + memory_increase = final_memory - initial_memory + + # Memory increase should be reasonable (less than 100MB for 1000 entries) + assert memory_increase < 100 * 1024 * 1024 +``` + +## Recommendations + +### Immediate Actions + +1. **Implement Memory Batching** - Deploy the batched memory writes immediately +2. **Add Performance Monitoring** - Implement basic timing and metrics collection +3. **Create Benchmarks** - Establish baseline performance measurements + +### Medium-term Improvements + +1. **GitHub API Caching** - Implement repository caching for GitHub operations +2. 
**Prompt Caching** - Add file-based caching for prompt templates +3. **Connection Pooling** - Implement connection pooling for external APIs + +### Long-term Optimizations + +1. **Async Operations** - Convert I/O-bound operations to async/await +2. **Database Backend** - Consider replacing file-based storage with database +3. **Distributed Caching** - Implement Redis or similar for multi-instance deployments + +### Testing Strategy + +All optimizations should be thoroughly tested with: +- **Unit Tests** - Test new functionality in isolation +- **Performance Benchmarks** - Compare before/after performance +- **Integration Tests** - Ensure no regressions in functionality +- **Memory Profiling** - Monitor memory usage patterns +- **Load Testing** - Test performance under realistic workloads + +### Monitoring and Alerting + +Set up monitoring for: +- **Performance Degradation** - Alert when operations exceed baseline times +- **Memory Leaks** - Monitor for increasing memory usage over time +- **API Rate Limits** - Track API usage to prevent rate limiting +- **Error Rates** - Monitor for increased error rates after optimizations + +By implementing these performance optimizations, Talos will be able to handle larger workloads more efficiently while maintaining reliability and user experience. diff --git a/docs/development/testing.md b/docs/development/testing.md new file mode 100644 index 00000000..f4c3e7b0 --- /dev/null +++ b/docs/development/testing.md @@ -0,0 +1,633 @@ +# Testing Guide + +This document provides comprehensive guidance on testing practices for the Talos project, including unit tests, integration tests, and end-to-end testing strategies. 
+ +## Testing Philosophy + +Talos follows a comprehensive testing approach that ensures: +- **Reliability** - All core functionality is thoroughly tested +- **Security** - Security-critical components have extensive test coverage +- **Performance** - Performance characteristics are validated through testing +- **Maintainability** - Tests serve as documentation and prevent regressions + +## Test Structure + +### Directory Organization + +``` +tests/ +├── unit/ # Unit tests for individual components +│ ├── core/ +│ │ ├── test_agent.py +│ │ ├── test_memory.py +│ │ └── test_router.py +│ ├── skills/ +│ │ ├── test_proposals.py +│ │ ├── test_sentiment.py +│ │ └── test_twitter_sentiment.py +│ ├── services/ +│ │ ├── test_yield_manager.py +│ │ └── test_github.py +│ ├── tools/ +│ │ ├── test_twitter.py +│ │ └── test_github_tools.py +│ └── hypervisor/ +│ ├── test_hypervisor.py +│ └── test_supervisor.py +├── integration/ # Integration tests +│ ├── test_agent_workflow.py +│ ├── test_github_integration.py +│ └── test_twitter_integration.py +├── e2e/ # End-to-end tests +│ ├── test_proposal_evaluation.py +│ ├── test_sentiment_analysis.py +│ └── test_cli_workflows.py +├── performance/ # Performance tests +│ ├── test_memory_performance.py +│ └── test_api_performance.py +├── fixtures/ # Test fixtures and data +│ ├── sample_proposals.json +│ ├── mock_twitter_data.json +│ └── test_configs.yaml +└── conftest.py # Pytest configuration and shared fixtures +``` + +## Running Tests + +### Basic Test Execution + +```bash +# Run all tests +uv run pytest + +# Run specific test file +uv run pytest tests/unit/core/test_agent.py + +# Run tests matching pattern +uv run pytest -k "test_sentiment" + +# Run tests with verbose output +uv run pytest -v + +# Run tests with coverage +uv run pytest --cov=src --cov-report=html +``` + +### Test Categories + +```bash +# Run only unit tests +uv run pytest tests/unit/ + +# Run only integration tests +uv run pytest tests/integration/ + +# Run only end-to-end 
tests +uv run pytest tests/e2e/ + +# Run performance tests +uv run pytest tests/performance/ +``` + +### Continuous Testing + +```bash +# Watch for changes and re-run tests +uv run pytest-watch + +# Run tests in parallel +uv run pytest -n auto +``` + +## Unit Testing + +### Test Structure + +Follow the Arrange-Act-Assert pattern: + +```python +def test_agent_processes_query_successfully(): + # Arrange - Set up test data and dependencies + agent = Agent(name="test_agent", model="gpt-4o") + query = "What is the current market sentiment?" + expected_response_type = QueryResponse + + # Act - Execute the functionality being tested + result = agent.process_query(query) + + # Assert - Verify the results + assert result is not None + assert isinstance(result, expected_response_type) + assert len(result.answers) > 0 + assert result.answers[0].strip() != "" +``` + +### Mocking External Dependencies + +Use mocks for external services and APIs: + +```python +import pytest +from unittest.mock import Mock, patch, MagicMock +from talos.core.agent import Agent +from talos.tools.twitter_client import TwitterClient + +@pytest.fixture +def mock_openai_client(): + """Mock OpenAI client for testing.""" + client = Mock() + client.chat.completions.create.return_value = Mock( + choices=[Mock(message=Mock(content="Test response from AI"))] + ) + return client + +@pytest.fixture +def mock_twitter_client(): + """Mock Twitter client for testing.""" + client = Mock(spec=TwitterClient) + client.search_tweets.return_value = [ + {"text": "Positive tweet about DeFi", "user": {"followers_count": 1000}}, + {"text": "Negative tweet about protocols", "user": {"followers_count": 500}} + ] + return client + +def test_sentiment_analysis_with_mocked_twitter(mock_twitter_client): + """Test sentiment analysis with mocked Twitter data.""" + with patch('talos.skills.twitter_sentiment.TwitterClient', return_value=mock_twitter_client): + skill = TwitterSentimentSkill() + result = skill.run(query="DeFi 
protocols", limit=10) + + assert result is not None + assert isinstance(result, QueryResponse) + mock_twitter_client.search_tweets.assert_called_once() +``` + +### Testing Error Conditions + +Test error handling and edge cases: + +```python +def test_agent_raises_error_for_empty_query(): + """Test that agent raises appropriate error for empty query.""" + agent = Agent(name="test_agent") + + with pytest.raises(ValidationError, match="Query cannot be empty"): + agent.process_query("") + +def test_agent_handles_api_timeout(): + """Test that agent handles API timeouts gracefully.""" + with patch('openai.OpenAI') as mock_openai: + mock_openai.return_value.chat.completions.create.side_effect = TimeoutError("API timeout") + + agent = Agent(name="test_agent") + + with pytest.raises(APIError, match="API timeout"): + agent.process_query("test query") +``` + +## Integration Testing + +### Testing Component Interactions + +Integration tests verify that components work together correctly: + +```python +def test_agent_with_real_memory_integration(): + """Test agent with actual memory system integration.""" + # Create agent with real memory (not mocked) + agent = Agent(name="integration_test_agent") + + # Add some memories + agent.memory.add_memory("Previous conversation about DeFi", {"topic": "defi"}) + agent.memory.add_memory("Discussion about yield farming", {"topic": "yield"}) + + # Query should use memory context + result = agent.process_query("What did we discuss about DeFi before?") + + assert result is not None + assert "DeFi" in str(result) or "defi" in str(result).lower() + +def test_hypervisor_with_supervised_tools(): + """Test hypervisor integration with supervised tools.""" + hypervisor = Hypervisor() + github_tool = GithubTool() + supervised_tool = SupervisedTool(github_tool, hypervisor.supervisor) + + # Test that tool execution goes through approval + with patch.object(hypervisor.supervisor, 'approve_action') as mock_approve: + mock_approve.return_value = 
ApprovalResult(approved=True, reason="Test approval") + + result = supervised_tool.execute("get_prs", repo="test/repo") + + mock_approve.assert_called_once() + assert result is not None +``` + +### Database Integration Tests + +Test database operations with real database: + +```python +@pytest.fixture +def test_database(): + """Create test database for integration tests.""" + # Set up test database + db_path = "test_memory.db" + memory = Memory(db_path=db_path) + yield memory + + # Cleanup after test + import os + if os.path.exists(db_path): + os.remove(db_path) + +def test_memory_persistence_integration(test_database): + """Test that memories persist across sessions.""" + memory = test_database + + # Add memory in first session + memory.add_memory("Test memory for persistence", {"session": "first"}) + memory.flush() + + # Create new memory instance (simulating new session) + new_memory = Memory(db_path=memory.db_path) + + # Search should find the persisted memory + results = new_memory.search("Test memory", limit=1) + assert len(results) == 1 + assert results[0].description == "Test memory for persistence" +``` + +## End-to-End Testing + +### Complete Workflow Tests + +Test entire user workflows from start to finish: + +```python +def test_proposal_evaluation_workflow(): + """Test complete proposal evaluation workflow.""" + # Simulate user submitting a proposal + proposal_text = """ + Proposal: Increase staking rewards from 5% to 8% APR + + Rationale: Current market conditions show competitors offering + higher yields. This increase will help maintain competitiveness + and attract more stakers to the protocol. 
+ """ + + # Create main agent + agent = MainAgent() + + # Process proposal evaluation request + query = f"Please evaluate this governance proposal: {proposal_text}" + result = agent.run(query) + + # Verify comprehensive evaluation + assert result is not None + assert isinstance(result, QueryResponse) + assert len(result.answers) > 0 + + response_text = str(result) + assert "risk" in response_text.lower() + assert "recommendation" in response_text.lower() + assert any(word in response_text.lower() for word in ["approve", "reject", "modify"]) + +def test_cli_interactive_workflow(): + """Test CLI interactive mode workflow.""" + from talos.cli.main import interactive_mode + from unittest.mock import patch + import io + import sys + + # Mock user input + user_inputs = [ + "What are your capabilities?", + "Analyze sentiment for 'DeFi protocols'", + "exit" + ] + + with patch('builtins.input', side_effect=user_inputs): + # Capture output + captured_output = io.StringIO() + with patch('sys.stdout', captured_output): + interactive_mode() + + output = captured_output.getvalue() + assert "capabilities" in output.lower() + assert len(output) > 100 # Should have substantial output +``` + +### API Integration Tests + +Test external API integrations: + +```python +@pytest.mark.integration +def test_twitter_api_integration(): + """Test actual Twitter API integration (requires API keys).""" + import os + + # Skip if no API keys available + if not os.getenv('TWITTER_BEARER_TOKEN'): + pytest.skip("Twitter API keys not available") + + from talos.tools.twitter import TwitterTools + + twitter_tools = TwitterTools() + + # Test actual API call with a safe query + result = twitter_tools.search_tweets("python programming", limit=5) + + assert isinstance(result, list) + assert len(result) <= 5 + assert all('text' in tweet for tweet in result) + +@pytest.mark.integration +def test_github_api_integration(): + """Test actual GitHub API integration (requires API token).""" + import os + + if not 
os.getenv('GITHUB_API_TOKEN'): + pytest.skip("GitHub API token not available") + + from talos.tools.github import GithubTools + + github_tools = GithubTools() + + # Test with a known public repository + prs = github_tools.get_all_pull_requests("octocat", "Hello-World", state="all") + + assert isinstance(prs, list) + assert all('number' in pr for pr in prs) +``` + +## Performance Testing + +### Load Testing + +Test system performance under load: + +```python +import time +import concurrent.futures +from talos.core.memory import Memory + +def test_memory_concurrent_access(): + """Test memory system under concurrent access.""" + memory = Memory(batch_size=10) + + def add_memories(thread_id: int, count: int): + """Add memories from a specific thread.""" + for i in range(count): + memory.add_memory(f"Memory {i} from thread {thread_id}", {"thread": thread_id}) + + # Run concurrent memory additions + with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: + futures = [executor.submit(add_memories, i, 20) for i in range(5)] + concurrent.futures.wait(futures) + + memory.flush() + + # Verify all memories were added + all_memories = memory.search("Memory", limit=1000) + assert len(all_memories) == 100 # 5 threads * 20 memories each + +def test_agent_response_time(): + """Test that agent responses are within acceptable time limits.""" + agent = Agent(name="performance_test_agent") + + queries = [ + "What is DeFi?", + "Explain yield farming", + "How does staking work?", + "What are governance tokens?", + "Describe liquidity pools" + ] + + response_times = [] + + for query in queries: + start_time = time.time() + result = agent.process_query(query) + end_time = time.time() + + response_time = end_time - start_time + response_times.append(response_time) + + # Each response should be under 30 seconds + assert response_time < 30.0 + assert result is not None + + # Average response time should be reasonable + avg_response_time = sum(response_times) / 
len(response_times) + assert avg_response_time < 15.0 +``` + +### Memory Usage Testing + +Test memory consumption patterns: + +```python +import psutil +import os + +def test_memory_usage_scaling(): + """Test that memory usage scales reasonably with data size.""" + process = psutil.Process(os.getpid()) + initial_memory = process.memory_info().rss + + memory = Memory(batch_size=50) + + # Add a large number of memories + for i in range(1000): + memory.add_memory(f"Large memory entry {i} with substantial content " * 10) + + memory.flush() + + final_memory = process.memory_info().rss + memory_increase = final_memory - initial_memory + + # Memory increase should be reasonable (less than 200MB for 1000 entries) + assert memory_increase < 200 * 1024 * 1024 + + # Test memory cleanup + del memory + + # Memory should be released (allow some tolerance) + cleanup_memory = process.memory_info().rss + assert cleanup_memory < final_memory * 1.1 +``` + +## Test Configuration + +### Pytest Configuration + +Create `conftest.py` with shared fixtures: + +```python +import pytest +import os +import tempfile +from unittest.mock import Mock +from talos.core.agent import Agent +from talos.core.memory import Memory + +@pytest.fixture(scope="session") +def test_config(): + """Test configuration for all tests.""" + return { + "model": "gpt-4o", + "test_mode": True, + "api_timeout": 30 + } + +@pytest.fixture +def temp_directory(): + """Create temporary directory for test files.""" + with tempfile.TemporaryDirectory() as temp_dir: + yield temp_dir + +@pytest.fixture +def mock_api_keys(monkeypatch): + """Mock API keys for testing.""" + monkeypatch.setenv("OPENAI_API_KEY", "test-openai-key") + monkeypatch.setenv("GITHUB_API_TOKEN", "test-github-token") + monkeypatch.setenv("TWITTER_BEARER_TOKEN", "test-twitter-token") + +@pytest.fixture +def test_agent(mock_api_keys): + """Create test agent with mocked dependencies.""" + return Agent(name="test_agent", model="gpt-4o") + +@pytest.fixture +def 
test_memory(temp_directory): + """Create test memory instance.""" + memory_path = os.path.join(temp_directory, "test_memory") + return Memory(memory_path=memory_path, batch_size=5) +``` + +### Test Markers + +Use pytest markers to categorize tests: + +```toml +# pyproject.toml +[tool.pytest.ini_options] +markers = [ + "unit: Unit tests", + "integration: Integration tests", + "e2e: End-to-end tests", + "performance: Performance tests", + "slow: Slow running tests", + "api: Tests requiring API access" +] +``` + +Run specific test categories: + +```bash +# Run only unit tests +uv run pytest -m unit + +# Run integration and e2e tests +uv run pytest -m "integration or e2e" + +# Skip slow tests +uv run pytest -m "not slow" + +# Run only API tests (when keys are available) +uv run pytest -m api +``` + +## Continuous Integration + +### GitHub Actions Configuration + +Create `.github/workflows/test.yml`: + +```yaml +name: Tests + +on: + push: + branches: [ main, develop ] + pull_request: + branches: [ main ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.8", "3.9", "3.10", "3.11"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install uv + run: curl -LsSf https://astral.sh/uv/install.sh | sh + + - name: Install dependencies + run: | + uv venv + source .venv/bin/activate + uv pip install -e . 
+ uv pip install pytest pytest-cov pytest-mock + + - name: Run unit tests + run: | + source .venv/bin/activate + uv run pytest tests/unit/ -v --cov=src --cov-report=xml + + - name: Run integration tests + run: | + source .venv/bin/activate + uv run pytest tests/integration/ -v + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GITHUB_API_TOKEN: ${{ secrets.GITHUB_API_TOKEN }} + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + file: ./coverage.xml +``` + +## Best Practices + +### Test Writing Guidelines + +1. **Test Names**: Use descriptive names that explain the scenario +2. **Test Independence**: Each test should be independent and not rely on others +3. **Test Data**: Use realistic test data that represents actual usage +4. **Assertions**: Make specific assertions about expected behavior +5. **Error Testing**: Test both success and failure scenarios + +### Mocking Guidelines + +1. **Mock External Dependencies**: Always mock external APIs and services +2. **Mock at the Right Level**: Mock at the boundary of your system +3. **Verify Interactions**: Assert that mocked methods are called correctly +4. **Use Realistic Data**: Mock responses should match real API responses + +### Performance Testing + +1. **Set Realistic Limits**: Base performance expectations on actual requirements +2. **Test Under Load**: Test with realistic data volumes and concurrent users +3. **Monitor Resources**: Track memory, CPU, and network usage +4. **Establish Baselines**: Create performance baselines to detect regressions + +### Test Maintenance + +1. **Keep Tests Updated**: Update tests when functionality changes +2. **Remove Obsolete Tests**: Delete tests for removed functionality +3. **Refactor Test Code**: Apply same quality standards to test code +4. **Document Complex Tests**: Add comments for complex test scenarios + +This comprehensive testing approach ensures that Talos remains reliable, performant, and maintainable as it evolves. 
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md new file mode 100644 index 00000000..3c124a71 --- /dev/null +++ b/docs/getting-started/installation.md @@ -0,0 +1,160 @@ +# Installation + +This guide will help you install and set up Talos on your system. + +## Prerequisites + +- Python 3.8 or higher +- `uv` package manager (recommended) or `pip` +- Git + +## Installation Methods + +### Using uv (Recommended) + +Talos uses `uv` for dependency management, which provides faster and more reliable package installation. + +1. **Install uv** (if not already installed): + ```bash + curl -LsSf https://astral.sh/uv/install.sh | sh + ``` + +2. **Clone the repository**: + ```bash + git clone https://github.com/talos-agent/talos.git + cd talos + ``` + +3. **Create a virtual environment**: + ```bash + uv venv + ``` + +4. **Activate the virtual environment**: + ```bash + source .venv/bin/activate + ``` + +5. **Install dependencies**: + ```bash + ./scripts/install_deps.sh + ``` + +### Using pip + +If you prefer to use pip instead of uv: + +1. **Clone the repository**: + ```bash + git clone https://github.com/talos-agent/talos.git + cd talos + ``` + +2. **Create a virtual environment**: + ```bash + python -m venv .venv + source .venv/bin/activate + ``` + +3. **Install dependencies**: + ```bash + pip install -r requirements.txt + pip install -e . + ``` + +## Environment Variables + +Talos requires several API keys to function properly. Set up the following environment variables: + +### Required for Basic Functionality +```bash +export OPENAI_API_KEY="your-openai-api-key" +export PINATA_API_KEY="your-pinata-api-key" +export PINATA_SECRET_API_KEY="your-pinata-secret-api-key" +``` + +### Required for Full Functionality +```bash +export GITHUB_API_TOKEN="your-github-token" +export TWITTER_BEARER_TOKEN="your-twitter-bearer-token" +``` + +!!! 
tip "Environment File" + You can create a `.env` file in the project root with these variables for convenience: + ```bash + OPENAI_API_KEY=your-openai-api-key + PINATA_API_KEY=your-pinata-api-key + PINATA_SECRET_API_KEY=your-pinata-secret-api-key + GITHUB_API_TOKEN=your-github-token + TWITTER_BEARER_TOKEN=your-twitter-bearer-token + ``` + +## Docker Installation + +### Building and Running with Docker + +1. **Build the Docker image**: + ```bash + docker build -t talos-agent . + ``` + +2. **Run the container**: + ```bash + docker run -d \ + -e OPENAI_API_KEY="your-openai-api-key" \ + -e GITHUB_API_TOKEN="your-github-token" \ + -e TWITTER_BEARER_TOKEN="your-twitter-bearer-token" \ + -e PINATA_API_KEY="your-pinata-api-key" \ + -e PINATA_SECRET_API_KEY="your-pinata-secret-api-key" \ + --name talos-agent \ + talos-agent + ``` + +### Using Docker Compose + +1. **Create a `.env` file** with your API keys (see above) + +2. **Start the service**: + ```bash + docker-compose up -d + ``` + +3. **View logs**: + ```bash + docker-compose logs -f + ``` + +4. **Stop the service**: + ```bash + docker-compose down + ``` + +## Verification + +To verify your installation is working correctly: + +1. **Run the interactive CLI**: + ```bash + uv run talos + ``` + +2. **Run a simple test**: + ```bash + uv run talos "Hello, what can you do?" + ``` + +If everything is set up correctly, Talos should respond with information about its capabilities. + +## Troubleshooting + +### Common Issues + +**Missing API Keys**: Ensure all required environment variables are set. The agent will not function without valid API keys. + +**Permission Errors**: Make sure you have the necessary permissions for the directories and that your virtual environment is activated. + +**Network Issues**: Some features require internet access for API calls to OpenAI, GitHub, Twitter, and IPFS services. + +**Docker Issues**: Ensure Docker is running and you have sufficient permissions to build and run containers. 
+ +For more help, check the [Development](../development/contributing.md) section or open an issue on GitHub. diff --git a/docs/getting-started/overview.md b/docs/getting-started/overview.md new file mode 100644 index 00000000..73b15de0 --- /dev/null +++ b/docs/getting-started/overview.md @@ -0,0 +1,47 @@ +# Overview + +Talos is an AI agent designed to function as an autonomous owner and governor for decentralized protocols, specifically focused on managing cryptocurrency treasuries. + +## Core Capabilities + +### Autonomous Treasury Management +- Dynamically adjusts staking APRs based on market conditions +- Performs sentiment analysis to inform decisions +- Manages supply metrics and protocol economics +- Deploys capital through ERC-4626 vaults + +### Governance Operations +- Evaluates protocol upgrade proposals +- Generates execution plans for approved changes +- Provides detailed analysis and recommendations +- Maintains protocol integrity through supervised execution + +### Community Engagement +- Monitors and interacts with social media (Twitter) +- Gauges community sentiment +- Manages protocol reputation +- Provides updates and answers questions + +### Development Oversight +- Reviews GitHub pull requests +- Manages development workflows +- Maintains code quality standards +- Automates repository management + +### Security & Supervision +- Uses hypervisor system to approve/deny all agent actions +- Implements rule-based supervision +- Maintains audit trails of all decisions +- Ensures protocol safety through multiple validation layers + +## Target Users + +Talos is designed for decentralized protocol teams who want an AI agent to autonomously manage their protocol's operations, treasury, and governance while maintaining security through supervised execution. 
+ +## Key Benefits + +- **Minimal Human Intervention**: Operates autonomously while maintaining security +- **Comprehensive Monitoring**: Tracks market conditions, sentiment, and protocol metrics +- **Modular Architecture**: Extensible skill-based system for new capabilities +- **Supervised Execution**: All actions require approval through the hypervisor system +- **Consistent Behavior**: Prompt management ensures reliable AI responses diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md new file mode 100644 index 00000000..687b54ff --- /dev/null +++ b/docs/getting-started/quickstart.md @@ -0,0 +1,156 @@ +# Quick Start + +Get up and running with Talos in just a few minutes. + +## Basic Setup + +1. **Install Talos** (see [Installation](installation.md) for details): + ```bash + git clone https://github.com/talos-agent/talos.git + cd talos + uv venv && source .venv/bin/activate + ./scripts/install_deps.sh + ``` + +2. **Set up environment variables**: + ```bash + export OPENAI_API_KEY="your-openai-api-key" + export PINATA_API_KEY="your-pinata-api-key" + export PINATA_SECRET_API_KEY="your-pinata-secret-api-key" + ``` + +## Usage Modes + +### Interactive CLI + +Start an interactive conversation with Talos: + +```bash +uv run talos +``` + +You'll see a prompt where you can ask questions or give commands: + +``` +>> What are your main capabilities? +>> Analyze the sentiment around "DeFi protocols" on Twitter +>> Help me evaluate a governance proposal +``` + +Type `exit` to quit the interactive session. + +### Single Query Mode + +Run a single query and exit: + +```bash +uv run talos "What is the current market sentiment?" 
+``` + +### Daemon Mode + +Run Talos continuously for scheduled operations: + +```bash +export GITHUB_API_TOKEN="your-github-token" +export TWITTER_BEARER_TOKEN="your-twitter-bearer-token" +uv run talos daemon +``` + +The daemon will: +- Execute scheduled jobs +- Monitor for new proposals +- Perform continuous market analysis +- Handle automated responses + +## Common Commands + +### Twitter Analysis +```bash +# Get user sentiment analysis +uv run talos twitter get-user-prompt username + +# Analyze query sentiment +uv run talos twitter get-query-sentiment "DeFi yield farming" +``` + +### GitHub Operations +```bash +# Set up GitHub repository +export GITHUB_REPO=owner/repo + +# List pull requests +uv run talos github get-prs + +# Review a pull request +uv run talos github review-pr 123 --post + +# Approve a pull request +uv run talos github approve-pr 123 +``` + +### Cryptography +```bash +# Generate RSA key pair +uv run talos generate-keys + +# Get public key +uv run talos get-public-key + +# Encrypt data +uv run talos encrypt "secret message" public_key.pem + +# Decrypt data +uv run talos decrypt "encrypted_data" +``` + +## Example Workflows + +### Proposal Evaluation + +1. **Run the proposal example**: + ```bash + python proposal_example.py + ``` + +2. **Interactive proposal analysis**: + ```bash + uv run talos + >> I need help evaluating a governance proposal about increasing staking rewards + ``` + +### Market Analysis + +```bash +uv run talos +>> Analyze the current market conditions for ETH +>> What's the sentiment around yield farming protocols? +>> Should we adjust our staking APR based on current conditions? 
+``` + +### GitHub Management + +```bash +# Set up environment +export GITHUB_API_TOKEN=your_token +export GITHUB_REPO=your-org/your-repo + +# Review recent PRs +uv run talos github get-prs --state all + +# Get AI review of a specific PR +uv run talos github review-pr 42 --post +``` + +## Next Steps + +- **Learn the Architecture**: Understand how Talos works by reading the [Architecture](../architecture/components.md) documentation +- **Explore CLI Commands**: Check out the complete [CLI Reference](../cli/overview.md) +- **Contribute**: See the [Development](../development/contributing.md) guide to contribute to the project +- **Advanced Usage**: Learn about the [Philosophy](../philosophy/vision.md) and roadmap behind Talos + +## Getting Help + +- Check the [CLI Reference](../cli/overview.md) for detailed command documentation +- Review [Development](../development/contributing.md) for troubleshooting +- Open an issue on [GitHub](https://github.com/talos-agent/talos) for bugs or feature requests diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..5c7de311 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,48 @@ +# Talos: An AI Protocol Owner + +Welcome to Talos, an AI agent designed to act as an autonomous owner for decentralized protocols. Talos is not just a chatbot; it is a sophisticated AI system that can manage and govern a protocol, ensuring its integrity and security. + +!!! info "Official Documentation" + The official documentation for the Talos project can be found at [docs.talos.is](https://docs.talos.is/). + +## What is Talos? + +Talos is an AI agent that can: + +- **Govern Protocol Actions:** Talos uses a Hypervisor to monitor and approve or deny actions taken by other agents or system components. This ensures that all actions align with the protocol's rules and objectives. 
+- **Evaluate Governance Proposals:** Talos can analyze and provide recommendations on governance proposals, considering their potential benefits, risks, and community feedback. +- **Interact with the Community:** Talos can engage with the community on platforms like Twitter to provide updates, answer questions, and gather feedback. +- **Manage its Own Codebase:** Talos can interact with GitHub to manage its own source code, including reviewing and committing changes. +- **Update Documentation:** Talos can update its own documentation on GitBook to ensure it remains accurate and up-to-date. + +## Key Features + +### Autonomous Treasury Management +Talos continuously monitors volatility, yield curves, and risk surfaces to compute optimal capital paths. Each strategy proposal must first be approved by the council, then deployed through ERC-4626 vaults spanning sophisticated LP positions to simple ETH lending. + +### Governance & Security +The Hypervisor system ensures all actions are monitored and approved based on predefined rules and agent history, protecting the protocol from malicious or erroneous actions. + +### Community Engagement +Talos can engage with the community on social platforms, providing updates, answering questions, and gathering feedback to inform protocol decisions. 
+ +## Quick Links + +- [Getting Started](getting-started/overview.md) - Learn how to install and use Talos +- [Architecture](architecture/components.md) - Understand the core components and design +- [CLI Reference](cli/overview.md) - Complete command-line interface documentation +- [Development](development/contributing.md) - Contributing guidelines and development setup + +## Repository Structure + +The repository is organized as follows: + +- `.github/` - GitHub Actions workflows for CI/CD +- `src/talos/` - Main source code for the Talos agent + - `core/` - Core components (CLI, main agent loop) + - `hypervisor/` - Hypervisor and Supervisor components + - `services/` - Different services (proposal evaluation, etc.) + - `prompts/` - Agent prompts and templates + - `tools/` - External integrations (GitHub, Twitter, IPFS) +- `tests/` - Test suite +- `docs/` - Documentation source files diff --git a/docs/philosophy/roadmap.md b/docs/philosophy/roadmap.md new file mode 100644 index 00000000..f0beb8a4 --- /dev/null +++ b/docs/philosophy/roadmap.md @@ -0,0 +1,290 @@ +# Roadmap + +This roadmap outlines the evolution of Talos from its current state toward becoming a fully autonomous AI protocol owner with advanced cognitive capabilities. 
+ +## Current State (Q4 2024) + +### Implemented Capabilities + +**Core Agent System:** +- ✅ Basic agent architecture with LLM integration +- ✅ Memory system with semantic search +- ✅ Hypervisor and supervised execution +- ✅ Modular skill and service architecture +- ✅ CLI interface for interaction + +**External Integrations:** +- ✅ GitHub API integration for repository management +- ✅ Twitter API integration for sentiment analysis +- ✅ IPFS integration for decentralized storage +- ✅ Cryptographic operations for security + +**Governance Capabilities:** +- ✅ Proposal evaluation system +- ✅ Basic sentiment analysis +- ✅ Rule-based supervision +- ✅ Audit trail and logging + +## Phase 1: Foundation Strengthening (Q1 2025) + +### Performance Optimization +**Target**: Improve system performance and reliability + +**Key Deliverables:** +- **Memory System Optimization** - Implement batched writes and caching +- **API Rate Limiting** - Intelligent rate limiting and request optimization +- **Error Handling** - Comprehensive error recovery and resilience +- **Monitoring & Alerting** - Real-time system health monitoring + +**Success Metrics:** +- 50% reduction in memory operation latency +- 99.9% uptime for core services +- Sub-second response times for common queries +- Zero data loss incidents + +### Enhanced Security +**Target**: Strengthen security and supervision mechanisms + +**Key Deliverables:** +- **Advanced Hypervisor Rules** - More sophisticated approval logic +- **Multi-layer Security** - Defense in depth for critical operations +- **Audit System** - Comprehensive audit trails and compliance +- **Threat Detection** - Automated detection of malicious activities + +**Success Metrics:** +- Zero security incidents +- 100% action approval coverage +- Complete audit trail for all operations +- Automated threat detection and response + +## Phase 2: Intelligence Enhancement (Q2 2025) + +### Advanced Analytics +**Target**: Implement sophisticated analysis capabilities + 
+**Key Deliverables:** +- **Market Analysis Engine** - Real-time market condition analysis +- **Predictive Modeling** - ML models for trend prediction +- **Risk Assessment** - Comprehensive risk evaluation framework +- **Competitive Intelligence** - Automated competitor monitoring + +**Success Metrics:** +- 80% accuracy in market trend predictions +- Real-time processing of 1000+ data sources +- Automated risk scoring for all decisions +- Daily competitive intelligence reports + +### Enhanced Decision Making +**Target**: Improve decision quality and sophistication + +**Key Deliverables:** +- **Multi-criteria Decision Analysis** - Sophisticated decision frameworks +- **Scenario Planning** - What-if analysis for major decisions +- **Stakeholder Impact Analysis** - Consider all affected parties +- **Long-term Strategy Planning** - Strategic thinking capabilities + +**Success Metrics:** +- 90% community satisfaction with decisions +- Measurable improvement in protocol metrics +- Successful implementation of long-term strategies +- Reduced need for human intervention + +## Phase 3: Autonomous Operations (Q3 2025) + +### Treasury Management +**Target**: Implement autonomous treasury optimization + +**Key Deliverables:** +- **Dynamic APR Management** - Real-time staking reward optimization +- **Yield Strategy Engine** - Automated yield farming strategies +- **Liquidity Management** - Optimal liquidity provision strategies +- **Risk Management** - Automated risk mitigation and hedging + +**Success Metrics:** +- Outperform manual treasury management by 20% +- Maintain optimal liquidity ratios automatically +- Zero treasury security incidents +- Consistent positive risk-adjusted returns + +### Community Engagement +**Target**: Autonomous community interaction and management + +**Key Deliverables:** +- **Social Media Management** - Automated posting and engagement +- **Community Support** - AI-powered community assistance +- **Feedback Integration** - Systematic community 
feedback processing +- **Reputation Management** - Proactive reputation monitoring + +**Success Metrics:** +- 95% positive community sentiment +- 24/7 community support availability +- Increased community engagement metrics +- Proactive issue resolution + +## Phase 4: Advanced Cognition (Q4 2025) + +### Multi-Protocol Coordination +**Target**: Coordinate strategies across multiple protocols + +**Key Deliverables:** +- **Cross-Protocol Analytics** - Analysis across DeFi ecosystem +- **Coordination Mechanisms** - Inter-protocol communication +- **Ecosystem Optimization** - System-wide optimization strategies +- **Partnership Management** - Automated partnership evaluation + +**Success Metrics:** +- Successful coordination with 5+ protocols +- Measurable ecosystem-wide improvements +- Automated partnership negotiations +- Industry recognition as coordination leader + +### Learning and Adaptation +**Target**: Implement advanced learning capabilities + +**Key Deliverables:** +- **Outcome Analysis** - Systematic analysis of decision outcomes +- **Strategy Refinement** - Continuous improvement of strategies +- **Pattern Recognition** - Advanced pattern detection in markets +- **Adaptive Behavior** - Dynamic adaptation to changing conditions + +**Success Metrics:** +- Demonstrable improvement in decision quality over time +- Successful adaptation to major market changes +- Recognition of novel patterns and opportunities +- Self-directed capability enhancement + +## Phase 5: AGI Development (2026) + +### Artificial General Intelligence +**Target**: Develop AGI capabilities for financial protocol management + +**Key Deliverables:** +- **General Problem Solving** - Apply intelligence to novel problems +- **Creative Strategy Development** - Generate innovative solutions +- **Abstract Reasoning** - Handle complex, abstract concepts +- **Transfer Learning** - Apply knowledge across domains + +**Success Metrics:** +- Successful handling of unprecedented situations +- Development 
of novel governance mechanisms +- Recognition as AGI breakthrough in finance +- Academic and industry validation + +### Distributed Agent Network +**Target**: Create network of coordinated autonomous agents + +**Key Deliverables:** +- **Agent Communication Protocol** - Standardized inter-agent communication +- **Distributed Decision Making** - Coordinated multi-agent decisions +- **Specialization Framework** - Specialized agents for different domains +- **Network Governance** - Governance of the agent network itself + +**Success Metrics:** +- Successful deployment of 10+ coordinated agents +- Measurable network effects and improvements +- Industry adoption of agent network standards +- Self-governing agent ecosystem + +## Long-term Vision (2027+) + +### Industry Transformation +**Target**: Lead transformation of DeFi industry + +**Objectives:** +- **Standard Setting** - Establish industry standards for autonomous governance +- **Ecosystem Leadership** - Lead development of autonomous DeFi ecosystem +- **Regulatory Engagement** - Work with regulators on AI governance frameworks +- **Academic Collaboration** - Contribute to research on autonomous economic systems + +### Societal Impact +**Target**: Demonstrate positive societal impact of autonomous AI + +**Objectives:** +- **Economic Efficiency** - Measurable improvements in capital allocation efficiency +- **Financial Inclusion** - Democratize access to sophisticated financial management +- **Innovation Acceleration** - Accelerate development of beneficial financial technologies +- **Ethical AI Leadership** - Demonstrate responsible AI development and deployment + +## Implementation Strategy + +### Development Approach + +**Iterative Development:** +- Quarterly releases with incremental improvements +- Continuous integration and deployment +- Regular community feedback integration +- Agile adaptation to changing requirements + +**Risk Management:** +- Comprehensive testing at each phase +- Gradual rollout of new 
capabilities +- Fallback mechanisms for critical functions +- Regular security audits and assessments + +**Community Involvement:** +- Open development process with community input +- Regular progress updates and demonstrations +- Community testing and feedback programs +- Transparent decision making and prioritization + +### Success Factors + +**Technical Excellence:** +- Maintain high code quality and documentation standards +- Implement robust testing and quality assurance +- Ensure scalability and performance optimization +- Follow security best practices throughout + +**Community Trust:** +- Maintain transparency in all operations +- Deliver on commitments and timelines +- Respond effectively to community feedback +- Demonstrate consistent value delivery + +**Industry Leadership:** +- Contribute to open-source DeFi infrastructure +- Share knowledge and best practices +- Collaborate with other projects and researchers +- Influence industry standards and practices + +## Milestones and Metrics + +### Quarterly Milestones + +**Q1 2025:** +- Performance optimization complete +- Enhanced security framework deployed +- 99.9% system uptime achieved + +**Q2 2025:** +- Advanced analytics engine operational +- Predictive modeling accuracy >80% +- Enhanced decision making framework deployed + +**Q3 2025:** +- Autonomous treasury management live +- Community engagement automation active +- Outperforming manual management benchmarks + +**Q4 2025:** +- Multi-protocol coordination operational +- Advanced learning capabilities demonstrated +- Industry recognition as innovation leader + +### Long-term Success Metrics + +**Technical Metrics:** +- System reliability and performance benchmarks +- Decision quality and accuracy measurements +- Community satisfaction and engagement levels +- Financial performance and risk metrics + +**Impact Metrics:** +- Industry adoption of Talos-pioneered practices +- Academic citations and research contributions +- Regulatory recognition and 
engagement +- Societal benefit measurements + +This roadmap represents an ambitious but achievable path toward creating the world's first truly autonomous AI protocol owner. Each phase builds upon the previous one, creating a foundation for increasingly sophisticated capabilities while maintaining security, transparency, and community trust. + +The ultimate goal is not just to create an advanced AI system, but to demonstrate how artificial intelligence can be developed and deployed responsibly to create significant positive impact in the financial sector and beyond. diff --git a/docs/philosophy/vision.md b/docs/philosophy/vision.md new file mode 100644 index 00000000..849869b1 --- /dev/null +++ b/docs/philosophy/vision.md @@ -0,0 +1,266 @@ +# Vision & Philosophy + +Talos represents a paradigm shift in decentralized protocol management, embodying the vision of truly autonomous treasury governance through artificial intelligence. + +## Core Vision + +### The Autonomous Protocol Owner + +Talos is designed to function as an **AI Protocol Owner** - not merely a tool or assistant, but an autonomous entity capable of making complex decisions about protocol management, treasury optimization, and community governance. 
+ +**Key Principles:** +- **Autonomy with Oversight** - Operates independently while maintaining security through supervised execution +- **Data-Driven Decisions** - All decisions based on comprehensive market analysis and community sentiment +- **Transparent Governance** - All actions and reasoning are auditable and explainable +- **Community-Centric** - Prioritizes long-term protocol health and community benefit + +### Beyond Traditional Automation + +Traditional DeFi protocols rely on: +- Manual governance processes +- Human-driven treasury management +- Reactive decision making +- Limited data integration + +Talos enables: +- **Proactive Management** - Anticipates market changes and adjusts strategies +- **Holistic Analysis** - Integrates market data, sentiment, and protocol metrics +- **Continuous Optimization** - Constantly refines strategies based on outcomes +- **Scalable Governance** - Handles complex decisions without human bottlenecks + +## Philosophical Foundations + +### Cognitive Architecture + +Talos operates on three layers of cognition, each building upon the previous: + +#### 1. Inference Layer +**Purpose**: Real-time decision making and immediate responses + +**Capabilities:** +- Market condition analysis +- Sentiment evaluation +- Risk assessment +- Tactical adjustments + +**Example**: Adjusting staking APR based on current market volatility and competitor analysis. + +#### 2. Training Layer +**Purpose**: Learning from outcomes and improving decision quality + +**Capabilities:** +- Strategy effectiveness analysis +- Pattern recognition in market behavior +- Community response learning +- Decision quality improvement + +**Example**: Learning that certain APR adjustments during high volatility periods lead to better user retention. + +#### 3. 
Coordination Layer +**Purpose**: Long-term strategic planning and multi-protocol coordination + +**Capabilities:** +- Cross-protocol strategy development +- Ecosystem-wide optimization +- Long-term trend analysis +- Strategic partnership evaluation + +**Example**: Coordinating with other protocols to optimize liquidity flows across the entire DeFi ecosystem. + +### Autonomous Treasury Management + +#### Dynamic Capital Optimization + +Talos continuously monitors and optimizes capital deployment across multiple dimensions: + +**Market Dynamics:** +- Volatility analysis and risk-adjusted returns +- Yield curve analysis across different protocols +- Liquidity depth and market impact assessment +- Correlation analysis between different assets + +**Community Sentiment:** +- Social media sentiment tracking +- Community feedback analysis +- Governance participation patterns +- User behavior analytics + +**Protocol Health:** +- TVL trends and user acquisition metrics +- Revenue generation and sustainability +- Security incident monitoring +- Competitive positioning analysis + +#### Incentive Mechanism Design + +**Bonding Curve Optimization:** +- Dynamic bonding curves that adjust based on market conditions +- Incentive alignment between protocol growth and user rewards +- Anti-gaming mechanisms to prevent exploitation +- Long-term sustainability considerations + +**Staking Reward Calibration:** +- Real-time APR adjustments based on market conditions +- Balancing user attraction with protocol sustainability +- Consideration of opportunity costs and competitive landscape +- Integration with overall tokenomics strategy + +### Governance Philosophy + +#### Supervised Autonomy + +Talos operates under a **supervised autonomy** model that balances independence with security: + +**Hypervisor System:** +- All actions require approval through rule-based or AI-driven supervision +- Multi-layered security with different approval thresholds +- Audit trails for all decisions and their 
reasoning +- Emergency override capabilities for critical situations + +**Community Integration:** +- Regular community updates on decisions and reasoning +- Feedback integration into future decision making +- Transparent reporting on performance and outcomes +- Democratic oversight through governance mechanisms + +#### Ethical AI Governance + +**Transparency:** +- All decision logic is explainable and auditable +- Regular publication of decision rationale and outcomes +- Open-source development with community oversight +- Clear documentation of capabilities and limitations + +**Fairness:** +- Decisions consider impact on all stakeholders +- No preferential treatment for specific user groups +- Balanced consideration of short-term and long-term effects +- Protection of minority interests in governance decisions + +**Accountability:** +- Clear responsibility chains for all decisions +- Regular performance reviews and adjustments +- Community feedback integration mechanisms +- Continuous improvement based on outcomes + +## Strategic Objectives + +### Short-term Goals (0-6 months) + +**Operational Excellence:** +- Achieve consistent, profitable treasury management +- Demonstrate superior decision quality compared to manual processes +- Build trust through transparent and explainable decisions +- Establish robust security and oversight mechanisms + +**Community Building:** +- Engage actively with protocol communities +- Provide valuable insights and analysis +- Build reputation as a trusted autonomous agent +- Gather feedback for continuous improvement + +### Medium-term Goals (6-18 months) + +**Advanced Capabilities:** +- Implement sophisticated multi-protocol strategies +- Develop predictive models for market movements +- Create innovative incentive mechanisms +- Establish cross-protocol coordination capabilities + +**Ecosystem Integration:** +- Partner with other protocols for mutual benefit +- Contribute to DeFi infrastructure development +- Share insights and best 
practices with the community +- Influence industry standards for autonomous governance + +### Long-term Vision (18+ months) + +**Artificial General Intelligence for DeFi:** +- Develop AGI capabilities specifically for financial protocol management +- Create a network of coordinated autonomous agents +- Establish new paradigms for decentralized governance +- Pioneer the future of autonomous economic systems + +**Ecosystem Transformation:** +- Lead the transition to fully autonomous DeFi protocols +- Demonstrate the viability of AI-driven economic systems +- Create templates and frameworks for other protocols +- Establish new standards for autonomous governance + +## Success Metrics + +### Performance Indicators + +**Financial Performance:** +- Risk-adjusted returns compared to benchmarks +- Treasury growth and sustainability metrics +- User acquisition and retention rates +- Revenue generation and protocol health + +**Decision Quality:** +- Accuracy of market predictions and adjustments +- Community satisfaction with governance decisions +- Reduction in manual intervention requirements +- Improvement in protocol metrics over time + +**Community Impact:** +- User engagement and participation levels +- Community sentiment and trust metrics +- Governance participation and voting patterns +- Feedback quality and implementation rates + +### Innovation Metrics + +**Technical Advancement:** +- Development of new autonomous governance mechanisms +- Creation of novel incentive structures +- Implementation of advanced AI capabilities +- Contribution to open-source DeFi infrastructure + +**Industry Influence:** +- Adoption of Talos-pioneered practices by other protocols +- Recognition as a leader in autonomous governance +- Contribution to academic research and industry standards +- Influence on regulatory and policy discussions + +## Future Implications + +### The Path to AGI + +Talos represents a stepping stone toward Artificial General Intelligence in the financial domain: 
+ +**Specialized Intelligence:** +- Deep expertise in DeFi protocol management +- Sophisticated understanding of market dynamics +- Advanced community sentiment analysis +- Complex multi-stakeholder decision making + +**General Capabilities:** +- Transfer learning across different protocols +- Adaptation to new market conditions and mechanisms +- Creative problem solving for novel challenges +- Strategic thinking and long-term planning + +### Societal Impact + +**Economic Efficiency:** +- More efficient capital allocation across DeFi protocols +- Reduced human error and bias in financial decisions +- 24/7 monitoring and optimization capabilities +- Democratization of sophisticated financial management + +**Innovation Acceleration:** +- Rapid experimentation with new governance mechanisms +- Data-driven insights into protocol optimization +- Cross-protocol learning and best practice sharing +- Acceleration of DeFi ecosystem development + +**Governance Evolution:** +- New models for decentralized decision making +- Reduced reliance on human governance bottlenecks +- More responsive and adaptive protocol management +- Enhanced transparency and accountability + +The vision of Talos extends beyond simple automation to represent a fundamental evolution in how decentralized protocols can be managed, governed, and optimized. Through the combination of advanced AI capabilities, robust security mechanisms, and community integration, Talos pioneers a new era of autonomous economic systems that are more efficient, transparent, and responsive than traditional approaches. + +This vision guides every aspect of Talos's development and operation, ensuring that each decision and capability advancement moves toward the ultimate goal of creating truly autonomous, beneficial, and trustworthy AI systems for managing decentralized economic protocols. 
diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..fd3f37fa --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,100 @@ +site_name: Talos Documentation +site_description: AI Protocol Owner - Autonomous Treasury Management and Governance +site_url: https://talos-agent.github.io/talos/ +repo_url: https://github.com/talos-agent/talos +repo_name: talos-agent/talos +edit_uri: edit/main/docs/ + +theme: + name: material + features: + - navigation.tabs + - navigation.sections + - navigation.expand + - navigation.path + - navigation.top + - search.highlight + - search.share + - content.code.copy + - content.code.select + - content.tabs.link + palette: + - scheme: default + primary: deep purple + accent: purple + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate + primary: deep purple + accent: purple + toggle: + icon: material/brightness-4 + name: Switch to light mode + font: + text: Roboto + code: Roboto Mono + +plugins: + - search + - mkdocstrings: + handlers: + python: + options: + docstring_style: google + +markdown_extensions: + - admonition + - pymdownx.details + - pymdownx.superfences + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.tabbed: + alternate_style: true + - pymdownx.tasklist: + custom_checkbox: true + - attr_list + - md_in_html + - toc: + permalink: true + +nav: + - Home: index.md + - Getting Started: + - Overview: getting-started/overview.md + - Installation: getting-started/installation.md + - Quick Start: getting-started/quickstart.md + - Architecture: + - Core Components: architecture/components.md + - Agent System: architecture/agents.md + - Hypervisor: architecture/hypervisor.md + - Skills & Services: architecture/skills-services.md + - CLI Reference: + - Overview: cli/overview.md + - Interactive Mode: cli/interactive.md + - GitHub Commands: cli/github.md + - Twitter Commands: cli/twitter.md + - Cryptography: cli/crypto.md + - Development: + - 
Contributing: development/contributing.md + - Code Style: development/code-style.md + - Performance: development/performance.md + - Testing: development/testing.md + - Philosophy: + - Vision: philosophy/vision.md + - Roadmap: philosophy/roadmap.md + - API Reference: + - Core: api/core.md + - Services: api/services.md + - Tools: api/tools.md + +extra: + social: + - icon: fontawesome/brands/github + link: https://github.com/talos-agent/talos + - icon: fontawesome/brands/twitter + link: https://twitter.com/talos_protocol + +copyright: Copyright © 2024 Talos Protocol