diff --git a/.env.example b/.env.example index 6f1d1da0..c83f1ee9 100644 --- a/.env.example +++ b/.env.example @@ -191,7 +191,7 @@ SURREAL_URL="ws://surrealdb/rpc:8000" SURREAL_USER="root" SURREAL_PASSWORD="root" SURREAL_NAMESPACE="open_notebook" -SURREAL_DATABASE="staging" +SURREAL_DATABASE="open_notebook" # RETRY CONFIGURATION (surreal-commands v1.2.0+) # Global defaults for all background commands unless explicitly overridden at command level diff --git a/.gitignore b/.gitignore index 999338d8..628a4cb7 100644 --- a/.gitignore +++ b/.gitignore @@ -133,4 +133,8 @@ doc_exports/ specs/ .claude -.playwright-mcp/ \ No newline at end of file +.playwright-mcp/ + + + +**/*.local.md \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md index f9df7b2a..11e7b4ba 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -1,3 +1,219 @@ +# Open Notebook - Root CLAUDE.md -We have a good amount of documentation on this project on the ./docs folder. Please read through them when necessary, and always review the docs/index.md file before starting a new feature so you know at least which docs are available. +This file provides architectural guidance for contributors working on Open Notebook at the project level. +## Project Overview + +**Open Notebook** is an open-source, privacy-focused alternative to Google's Notebook LM. It's an AI-powered research assistant enabling users to upload multi-modal content (PDFs, audio, video, web pages), generate intelligent notes, search semantically, chat with AI models, and produce professional podcasts—all with complete control over data and choice of AI providers. + +**Key Values**: Privacy-first, multi-provider AI support, fully self-hosted option, open-source transparency. + +--- + +## Three-Tier Architecture + +``` +┌─────────────────────────────────────────────────────────┐ +│ Frontend (React/Next.js) │ +│ frontend/ @ port 3000 │ +├─────────────────────────────────────────────────────────┤ +│ - Notebooks, sources, notes, chat, podcasts, search UI │ +│ - Zustand state management, TanStack Query (React Query)│ +│ - Shadcn/ui component library with Tailwind CSS │ +└────────────────────────┬────────────────────────────────┘ + │ HTTP REST +┌────────────────────────▼────────────────────────────────┐ +│ API (FastAPI) │ +│ api/ @ port 5055 │ +├─────────────────────────────────────────────────────────┤ +│ - REST endpoints for notebooks, sources, notes, chat │ +│ - LangGraph workflow orchestration │ +│ - Job queue for async operations (podcasts) │ +│ - Multi-provider AI provisioning via Esperanto │ +└────────────────────────┬────────────────────────────────┘ + │ SurrealQL +┌────────────────────────▼────────────────────────────────┐ +│ Database (SurrealDB) │ +│ Graph database @ port 8000 │ +├─────────────────────────────────────────────────────────┤ +│ - Records: Notebook, Source, Note, ChatSession, etc. 
│ +│ - Relationships: source-to-notebook, note-to-source │ +│ - Vector embeddings for semantic search │ +└─────────────────────────────────────────────────────────┘ +``` + +--- + +## Useful sources + +User documentation is at @docs/ + +## Tech Stack + +### Frontend (`frontend/`) +- **Framework**: Next.js 15 (React 19) +- **Language**: TypeScript +- **State Management**: Zustand +- **Data Fetching**: TanStack Query (React Query) +- **Styling**: Tailwind CSS + Shadcn/ui +- **Build Tool**: Webpack (via Next.js) + +### API Backend (`api/` + `open_notebook/`) +- **Framework**: FastAPI 0.104+ +- **Language**: Python 3.11+ +- **Workflows**: LangGraph state machines +- **Database**: SurrealDB async driver +- **AI Providers**: Esperanto library (8+ providers: OpenAI, Anthropic, Google, Groq, Ollama, Mistral, DeepSeek, xAI) +- **Job Queue**: Surreal-Commands for async jobs (podcasts) +- **Logging**: Loguru +- **Validation**: Pydantic v2 +- **Testing**: Pytest + +### Database +- **SurrealDB**: Graph database with built-in embedding storage and vector search +- **Schema Migrations**: Automatic on API startup via AsyncMigrationManager + +### Additional Services +- **Content Processing**: content-core library (file/URL extraction) +- **Prompts**: AI-Prompter with Jinja2 templating +- **Podcast Generation**: podcast-creator library +- **Embeddings**: Multi-provider via Esperanto + +--- + +## Architecture Highlights + +### 1. Async-First Design +- All database queries, graph invocations, and API calls are async (await) +- SurrealDB async driver with connection pooling +- FastAPI handles concurrent requests efficiently + +### 2. LangGraph Workflows +- **source.py**: Content ingestion (extract → embed → save) +- **chat.py**: Conversational agent with message history +- **ask.py**: Search + synthesis (retrieve relevant sources → LLM) +- **transformation.py**: Custom transformations on sources +- All use `provision_langchain_model()` for smart model selection + +### 3. Multi-Provider AI +- **Esperanto library**: Unified interface to 8+ AI providers +- **ModelManager**: Factory pattern with fallback logic +- **Smart selection**: Detects large contexts, prefers long-context models +- **Override support**: Per-request model configuration + +### 4. Database Schema +- **Automatic migrations**: AsyncMigrationManager runs on API startup +- **SurrealDB graph model**: Records with relationships and embeddings +- **Vector search**: Built-in semantic search across all content +- **Transactions**: Repo functions handle ACID operations + +### 5. 
Authentication +- **Current**: Simple password middleware (insecure, dev-only) +- **Production**: Replace with OAuth/JWT (see CONFIGURATION.md) + +--- + +## Important Quirks & Gotchas + +### API Startup +- **Migrations run automatically** on startup; check logs for errors +- **Must start API before UI**: UI depends on API for all data +- **SurrealDB must be running**: API fails without database connection + +### Frontend-Backend Communication +- **Base API URL**: Configured in `.env.local` (default: http://localhost:5055) +- **CORS enabled**: Configured in `api/main.py` (allow all origins in dev) +- **Rate limiting**: Not built-in; add at proxy layer for production + +### LangGraph Workflows +- **Blocking operations**: Chat/podcast workflows may take minutes; no timeout +- **State persistence**: Uses SQLite checkpoint storage in `/data/sqlite-db/` +- **Model fallback**: If primary model fails, falls back to cheaper/smaller model + +### Podcast Generation +- **Async job queue**: `podcast_service.py` submits jobs but doesn't wait +- **Track status**: Use `/commands/{command_id}` endpoint to poll status +- **TTS failures**: Fall back to silent audio if speech synthesis fails + +### Content Processing +- **File extraction**: Uses content-core library; supports 50+ file types +- **URL handling**: Extracts text + metadata from web pages +- **Large files**: Content processing is sync; may block API briefly + +--- + +## Component References + +See dedicated CLAUDE.md files for detailed guidance: + +- **[frontend/CLAUDE.md](frontend/CLAUDE.md)**: React/Next.js architecture, state management, API integration +- **[api/CLAUDE.md](api/CLAUDE.md)**: FastAPI structure, service pattern, endpoint development +- **[open_notebook/CLAUDE.md](open_notebook/CLAUDE.md)**: Backend core, domain models, LangGraph workflows, AI provisioning +- **[open_notebook/domain/CLAUDE.md](open_notebook/domain/CLAUDE.md)**: Data models, repository pattern, search functions +- **[open_notebook/ai/CLAUDE.md](open_notebook/ai/CLAUDE.md)**: ModelManager, AI provider integration, Esperanto usage +- **[open_notebook/graphs/CLAUDE.md](open_notebook/graphs/CLAUDE.md)**: LangGraph workflow design, state machines +- **[open_notebook/database/CLAUDE.md](open_notebook/database/CLAUDE.md)**: SurrealDB operations, migrations, async patterns + +--- + +## Documentation Map + +- **[README.md](README.md)**: Project overview, features, quick start +- **[docs/index.md](docs/index.md)**: Complete user & deployment documentation +- **[CONFIGURATION.md](CONFIGURATION.md)**: Environment variables, model configuration +- **[CONTRIBUTING.md](CONTRIBUTING.md)**: Contribution guidelines +- **[MAINTAINER_GUIDE.md](MAINTAINER_GUIDE.md)**: Release & maintenance procedures + +--- + +## Testing Strategy + +- **Unit tests**: `tests/test_domain.py`, `test_models_api.py` +- **Graph tests**: `tests/test_graphs.py` (workflow integration) +- **Utils tests**: `tests/test_utils.py` +- **Run all**: `uv run pytest tests/` +- **Coverage**: Check with `pytest --cov` + +--- + +## Common Tasks + +### Add a New API Endpoint +1. Create router in `api/routers/feature.py` +2. Create service in `api/feature_service.py` +3. Define schemas in `api/models.py` +4. Register router in `api/main.py` +5. Test via http://localhost:5055/docs + +### Add a New LangGraph Workflow +1. Create `open_notebook/graphs/workflow_name.py` +2. Define StateDict and node functions +3. Build graph with `.add_node()` / `.add_edge()` +4. 
Invoke in service: `graph.ainvoke({"input": ...}, config={"..."})` +5. Test with sample data in `tests/` + +### Add Database Migration +1. Create `migrations/XXX_description.surql` +2. Write SurrealQL schema changes +3. Create `migrations/XXX_description_down.surql` (optional rollback) +4. API auto-detects on startup; migration runs if newer than recorded version + +### Deploy to Production +1. Review [CONFIGURATION.md](CONFIGURATION.md) for security settings +2. Use `make docker-release` for multi-platform image +3. Push to Docker Hub / GitHub Container Registry +4. Deploy `docker compose --profile multi up` +5. Verify migrations via API logs + +--- + +## Support & Community + +- **Documentation**: https://open-notebook.ai +- **Discord**: https://discord.gg/37XJPXfz2w +- **Issues**: https://github.com/lfnovo/open-notebook/issues +- **License**: MIT (see LICENSE) + +--- + +**Last Updated**: January 2026 | **Project Version**: 1.2.4+ diff --git a/CONFIGURATION.md b/CONFIGURATION.md index 3b5b7a66..6763fe44 100644 --- a/CONFIGURATION.md +++ b/CONFIGURATION.md @@ -1,108 +1,36 @@ # Configuration Guide -## API Connection Configuration +**📍 This file has moved!** -Starting from version 1.0.0-alpha, Open Notebook uses a simplified API connection system that automatically configures itself based on your deployment environment. +All configuration documentation has been consolidated into the new documentation structure. -### How It Works +👉 **[Read the Configuration Guide](docs/5-CONFIGURATION/index.md)** -The frontend automatically discovers the API location at runtime by analyzing the incoming HTTP request. This eliminates the need for complex network configurations and works for both Docker deployment modes: -- Multi-container (docker-compose with separate SurrealDB) -- Single-container (all services in one container) +--- -**Auto-detection logic:** -1. If `API_URL` environment variable is set → use it (explicit override) -2. Otherwise, detect from the HTTP request: - - Uses the same hostname you're accessing the frontend from - - Automatically changes port to 5055 (API port) - - Respects `X-Forwarded-Proto` header for reverse proxy setups -3. Falls back to `http://localhost:5055` if detection fails +## Quick Links -**Examples:** -- Access frontend at `http://localhost:8502` → API at `http://localhost:5055` ✅ -- Access frontend at `http://10.20.30.20:8502` → API at `http://10.20.30.20:5055` ✅ -- Access frontend at `http://my-server:8502` → API at `http://my-server:5055` ✅ +- **AI Provider Setup** → [AI Providers](docs/5-CONFIGURATION/ai-providers.md) +- **Environment Variables Reference** → [Environment Reference](docs/5-CONFIGURATION/environment-reference.md) +- **Database Configuration** → [Database Setup](docs/5-CONFIGURATION/database.md) +- **Server Configuration** → [Server Settings](docs/5-CONFIGURATION/server.md) +- **Security Setup** → [Security Configuration](docs/5-CONFIGURATION/security.md) +- **Reverse Proxy** → [Reverse Proxy Setup](docs/5-CONFIGURATION/reverse-proxy.md) +- **Advanced Tuning** → [Advanced Configuration](docs/5-CONFIGURATION/advanced.md) -**No configuration needed** for most deployments! +--- -### Custom Configuration +## What You'll Find -If you need to change the API URL (e.g., running on a different host, port, or domain), you can configure it using the `API_URL` environment variable. 
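For illustration only, the resolution order described above amounts to the following sketch (plain Python pseudocode; the real logic lives in the Next.js frontend, and the function and constant names here are assumptions, not project code):

```python
import os
from urllib.parse import urlsplit

API_PORT = 5055
FALLBACK_URL = "http://localhost:5055"


def resolve_api_url(request_url: str, forwarded_proto: str | None = None) -> str:
    """Illustrative only: mirrors the documented auto-detection order."""
    # 1. An explicit API_URL override always wins.
    override = os.environ.get("API_URL")
    if override:
        return override
    # 2. Otherwise derive from the incoming request: same hostname,
    #    port switched to 5055, honoring X-Forwarded-Proto when
    #    running behind a reverse proxy.
    parts = urlsplit(request_url)
    if parts.hostname:
        scheme = forwarded_proto or parts.scheme or "http"
        return f"{scheme}://{parts.hostname}:{API_PORT}"
    # 3. Fall back to localhost if detection fails.
    return FALLBACK_URL
```

For example, `resolve_api_url("http://my-server:8502")` returns `http://my-server:5055`, matching the examples above.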
+The new configuration documentation includes: -#### Option 1: Using docker-compose (Recommended) +- **Complete environment variable reference** with examples +- **Provider-specific setup guides** for OpenAI, Anthropic, Google, Groq, Ollama, and more +- **Production deployment configurations** with security best practices +- **Reverse proxy examples** for Nginx, Caddy, Traefik +- **Database tuning** for performance optimization +- **Troubleshooting guides** for common configuration issues -Edit your `docker.env` file: +--- -```env -API_URL=http://your-server-ip:5055 -``` - -Or add it to your `docker-compose.yml`: - -```yaml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest - ports: - - "8502:8502" - - "5055:5055" # API port must be exposed - environment: - - API_URL=http://your-server-ip:5055 -``` - -#### Option 2: Using docker run - -```bash -docker run -e API_URL=http://your-server-ip:5055 \ - -p 8502:8502 \ - -p 5055:5055 \ - lfnovo/open_notebook:v1-latest-single -``` - -### Important Notes - -1. **Port 5055 must be exposed**: The browser needs direct access to the API, so port 5055 must be mapped in your Docker configuration. - -2. **Use the externally accessible URL**: The `API_URL` should be the URL that a browser can reach, not internal Docker networking addresses. - -3. **Protocol matters**: Use `http://` for local deployments, `https://` if you've set up SSL. - -### Examples - -#### Running on a different host -```env -API_URL=http://192.168.1.100:5055 -``` - -#### Running on a custom domain with SSL -```env -API_URL=https://notebook.example.com/api -``` - -#### Running on a custom port -```env -API_URL=http://localhost:3055 -``` -(Remember to update the port mapping in docker-compose accordingly) - -### Troubleshooting - -**"Unable to connect to server" error on login:** -1. Verify port 5055 is exposed in your Docker configuration -2. Check that `API_URL` matches the URL your browser can access -3. Try accessing `http://localhost:5055/health` directly in your browser -4. If that fails, the API isn't running or port isn't exposed - -**API works but frontend doesn't connect:** -1. Check browser console for CORS errors -2. Verify `API_URL` is set correctly -3. Make sure you're using the same protocol (http/https) throughout - -### Migration from Previous Versions - -If you were previously exposing port 5055 manually or had custom configurations, you may need to: -1. Update your `docker.env` or environment variables to include `API_URL` -2. Ensure port 5055 is exposed in your docker-compose.yml (it's now required) -3. Remove any custom Next.js configuration or environment variables you may have added - -The default configuration will work for most users without any changes. +For all configuration details, see **[docs/5-CONFIGURATION/](docs/5-CONFIGURATION/index.md)**. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 833912d9..4139d205 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,112 +1,29 @@ # Contributing to Open Notebook -First off, thank you for considering contributing to Open Notebook! What makes open source great is the fact that we can work together and accomplish things we would never do on our own. All suggestions are welcome. +**📍 This file has moved!** -## 🚨 Important: Read Before Contributing Code +All contribution guidelines have been consolidated into the new development documentation structure. 
-**To maintain project coherence and avoid wasted effort, please follow this process:** +👉 **[Read the Contributing Guide](docs/7-DEVELOPMENT/contributing.md)** -1. **Create an issue first** - Before writing any code, create an issue describing the bug or feature -2. **Propose your solution** - Explain how you plan to implement the fix or feature -3. **Wait for assignment** - A maintainer will review and assign the issue to you if approved -4. **Only then start coding** - This ensures your work aligns with the project's vision and architecture +--- -**Why this process?** -- Prevents duplicate work -- Ensures solutions align with our architecture and design principles -- Saves your time by getting feedback before coding -- Helps maintainers manage the project direction +## Quick Links -> ⚠️ **Pull requests without an assigned issue may be closed**, even if the code is good. We want to respect your time by making sure work is aligned before it starts. +- **Want to contribute code?** → [Contributing Guide](docs/7-DEVELOPMENT/contributing.md) +- **Want to understand the architecture?** → [Architecture Overview](docs/7-DEVELOPMENT/architecture.md) +- **Want to understand our design philosophy?** → [Design Principles](docs/7-DEVELOPMENT/design-principles.md) +- **Are you a maintainer?** → [Maintainer Guide](docs/7-DEVELOPMENT/maintainer-guide.md) +- **New developer?** → [Quick Start](docs/7-DEVELOPMENT/quick-start.md) -## Code of Conduct +--- -By participating in this project, you are expected to uphold our Code of Conduct. Be respectful, constructive, and collaborative. +## The Issue-First Workflow -## How Can I Contribute? +**TL;DR**: Create an issue first, get it assigned, THEN code. -### Reporting Bugs +This prevents wasted effort and ensures your work aligns with the project. [See details →](docs/7-DEVELOPMENT/contributing.md) -1. **Search existing issues** - Check if the bug was already reported in [Issues](https://github.com/lfnovo/open-notebook/issues) -2. **Create a bug report** - Use the [Bug Report template](https://github.com/lfnovo/open-notebook/issues/new?template=bug_report.yml) -3. **Provide details** - Include: - - Steps to reproduce - - Expected vs actual behavior - - Logs, screenshots, or error messages - - Your environment (OS, Docker version, Open Notebook version) -4. **Indicate if you want to fix it** - Check the "I would like to work on this" box if you're interested +--- -### Suggesting Features - -1. **Search existing issues** - Check if the feature was already suggested -2. **Create a feature request** - Use the [Feature Request template](https://github.com/lfnovo/open-notebook/issues/new?template=feature_request.yml) -3. **Explain the value** - Describe why this feature would be helpful -4. **Propose implementation** - If you have ideas on how to implement it, share them -5. **Indicate if you want to build it** - Check the "I would like to work on this" box if you're interested - -### Contributing Code (Pull Requests) - -**IMPORTANT: Follow the issue-first workflow above before starting any PR** - -Once your issue is assigned: - -1. **Fork the repo** and create your branch from `main` -2. **Understand our vision and principles** - Read [DESIGN_PRINCIPLES.md](DESIGN_PRINCIPLES.md) to understand what guides our decisions -3. **Follow our architecture** - Read [docs/development/architecture.md](docs/development/architecture.md) to understand the project structure -4. 
**Write quality code**: - - Follow PEP 8 for Python - - Use TypeScript best practices for frontend - - Add type hints and proper error handling - - Write docstrings for functions and classes -4. **Test your changes**: - - Add tests for new features - - Ensure existing tests pass: `uv run pytest` - - Run linter: `make ruff` or `ruff check . --fix` - - Run type checker: `make lint` or `uv run python -m mypy .` -5. **Update documentation** - If you changed functionality, update the relevant docs in `/docs` -6. **Create your PR**: - - Reference the issue number (e.g., "Fixes #123") - - Describe what changed and why - - Include screenshots for UI changes - - Keep PRs focused - one issue per PR - -### What Makes a Good Contribution? - -✅ **We love PRs that:** -- Solve a real problem described in an issue -- Follow our architecture and coding standards -- Include tests and documentation -- Are well-scoped (focused on one thing) -- Have clear commit messages - -❌ **We may close PRs that:** -- Don't have an associated approved issue -- Introduce breaking changes without discussion -- Conflict with our architectural vision -- Lack tests or documentation -- Try to solve multiple unrelated problems - -## Styleguides - -### Git Commit Messages - -- Use the present tense ("Add feature" not "Added feature") -- Use the imperative mood ("Move cursor to..." not "Moves cursor to...") -- Limit the first line to 72 characters or less -- Reference issues and pull requests liberally after the first line - -### Python Styleguide - -- Follow PEP 8 guidelines -- Use type hints where possible -- Write docstrings for all functions, classes, and modules - -### Documentation Styleguide - -- Use Markdown for documentation files -- Reference functions and classes appropriately - -## Additional Notes - - -Thank you for contributing to Open Notebook! \ No newline at end of file +For all contribution details, see **[docs/7-DEVELOPMENT/contributing.md](docs/7-DEVELOPMENT/contributing.md)**. diff --git a/MAINTAINER_GUIDE.md b/MAINTAINER_GUIDE.md index 738ea35f..95f86f7a 100644 --- a/MAINTAINER_GUIDE.md +++ b/MAINTAINER_GUIDE.md @@ -1,408 +1,19 @@ # Maintainer Guide -This guide is for project maintainers to help manage contributions effectively while maintaining project quality and vision. +**📍 This file has moved!** -## Table of Contents +All maintainer guidelines have been consolidated into the new development documentation structure. -- [Issue Management](#issue-management) -- [Pull Request Review](#pull-request-review) -- [Common Scenarios](#common-scenarios) -- [Communication Templates](#communication-templates) +👉 **[Read the Maintainer Guide](docs/7-DEVELOPMENT/maintainer-guide.md)** -## Issue Management - -### When a New Issue is Created - -**1. Initial Triage** (within 24-48 hours) - -- Add appropriate labels: - - `bug`, `enhancement`, `documentation`, etc. - - `good first issue` for beginner-friendly tasks - - `needs-triage` until reviewed - - `help wanted` if you'd welcome community contributions - -- Quick assessment: - - Is it clear and well-described? - - Is it aligned with project vision? (See [DESIGN_PRINCIPLES.md](DESIGN_PRINCIPLES.md)) - - Does it duplicate an existing issue? - -**2. Initial Response** - -```markdown -Thanks for opening this issue! We'll review it and get back to you soon. - -[If it's a bug] In the meantime, have you checked our [troubleshooting guide](docs/troubleshooting/index.md)? 
- -[If it's a feature] You might find our [design principles](DESIGN_PRINCIPLES.md) helpful for understanding what we're building toward. -``` - -**3. Decision Making** - -Ask yourself: -- Does this align with our [design principles](DESIGN_PRINCIPLES.md)? -- Is this something we want in the core project, or better as a plugin/extension? -- Do we have the capacity to support this feature long-term? -- Will this benefit most users, or just a specific use case? - -**4. Issue Assignment** - -If the contributor checked "I am a developer and would like to work on this": - -**For Accepted Issues:** -```markdown -Great idea! This aligns well with our goals, particularly [specific design principle]. - -I see you'd like to work on this. Before you start: - -1. Please share your proposed approach/solution -2. Review our [Contributing Guide](CONTRIBUTING.md) and [Design Principles](DESIGN_PRINCIPLES.md) -3. Once we agree on the approach, I'll assign this to you - -Looking forward to your thoughts! -``` - -**For Issues Needing Clarification:** -```markdown -Thanks for offering to work on this! Before we proceed, we need to clarify a few things: - -1. [Question 1] -2. [Question 2] - -Once we have these details, we can discuss the best approach. -``` - -**For Issues Not Aligned with Vision:** -```markdown -Thank you for the suggestion and for offering to work on this! - -After reviewing against our [design principles](DESIGN_PRINCIPLES.md), we've decided not to pursue this in the core project because [specific reason]. - -However, you might be able to achieve this through [alternative approach, if applicable]. - -We appreciate your interest in contributing! Feel free to check out our [open issues](link) for other ways to contribute. -``` - -### Labels to Use - -**Priority:** -- `priority: critical` - Security issues, data loss bugs -- `priority: high` - Major functionality broken -- `priority: medium` - Annoying bugs, useful features -- `priority: low` - Nice to have, edge cases - -**Status:** -- `needs-triage` - Not yet reviewed by maintainer -- `needs-info` - Waiting for more information from reporter -- `needs-discussion` - Requires community/team discussion -- `ready` - Approved and ready to be worked on -- `in-progress` - Someone is actively working on this -- `blocked` - Cannot proceed due to external dependency - -**Type:** -- `bug` - Something is broken -- `enhancement` - New feature or improvement -- `documentation` - Documentation improvements -- `question` - General questions -- `refactor` - Code cleanup/restructuring - -**Difficulty:** -- `good first issue` - Good for newcomers -- `help wanted` - Community contributions welcome -- `advanced` - Requires deep codebase knowledge - -## Pull Request Review - -### Initial PR Review Checklist - -**Before diving into code:** - -- [ ] Is there an associated approved issue? -- [ ] Does the PR reference the issue number? -- [ ] Is the PR description clear about what changed and why? -- [ ] Did the contributor check the relevant boxes in the PR template? -- [ ] Are there tests? Screenshots (for UI changes)? - -**Red Flags** (may require closing PR): -- No associated issue -- Issue was not assigned to contributor -- PR tries to solve multiple unrelated problems -- Breaking changes without discussion -- Conflicts with project vision - -### Code Review Process - -**1. High-Level Review** - -- Does the approach align with our architecture? -- Is the solution appropriately scoped? -- Are there simpler alternatives? -- Does it follow our design principles? 
- -**2. Code Quality Review** - -Python: -- [ ] Follows PEP 8 -- [ ] Has type hints -- [ ] Has docstrings -- [ ] Proper error handling -- [ ] No security vulnerabilities - -TypeScript/Frontend: -- [ ] Follows TypeScript best practices -- [ ] Proper component structure -- [ ] No console.logs left in production code -- [ ] Accessible UI components - -**3. Testing Review** - -- [ ] Has appropriate test coverage -- [ ] Tests are meaningful (not just for coverage percentage) -- [ ] Tests pass locally and in CI -- [ ] Edge cases are tested - -**4. Documentation Review** - -- [ ] Code is well-commented -- [ ] Complex logic is explained -- [ ] User-facing documentation updated (if applicable) -- [ ] API documentation updated (if API changed) -- [ ] Migration guide provided (if breaking change) - -### Providing Feedback - -**Positive Feedback** (important!): -```markdown -Thanks for this PR! I really like [specific thing they did well]. - -[Feedback on what needs to change] -``` - -**Requesting Changes:** -```markdown -This is a great start! A few things to address: - -1. **[High-level concern]**: [Explanation and suggested approach] -2. **[Code quality issue]**: [Specific example and fix] -3. **[Testing gap]**: [What scenarios need coverage] - -Let me know if you have questions about any of this! -``` - -**Suggesting Alternative Approach:** -```markdown -I appreciate the effort you put into this! However, I'm concerned about [specific issue]. - -Have you considered [alternative approach]? It might be better because [reasons]. - -What do you think? -``` - -## Common Scenarios - -### Scenario 1: Good Code, Wrong Approach - -**Situation**: Contributor wrote quality code, but solved the problem in a way that doesn't fit our architecture. - -**Response:** -```markdown -Thank you for this PR! The code quality is great, and I can see you put thought into this. - -However, I'm concerned that this approach [specific architectural concern]. In our architecture, we [explain the pattern we follow]. - -Would you be open to refactoring this to [suggested approach]? I'm happy to provide guidance on the specifics. - -Alternatively, if you don't have time for a refactor, I can take over and finish this up (with credit to you, of course). - -Let me know what you prefer! -``` - -### Scenario 2: PR Without Assigned Issue - -**Situation**: Contributor submitted PR without going through issue approval process. - -**Response:** -```markdown -Thanks for the PR! I appreciate you taking the time to contribute. - -However, to maintain project coherence, we require all PRs to be linked to an approved issue that was assigned to the contributor. This is explained in our [Contributing Guide](CONTRIBUTING.md). - -This helps us: -- Ensure work aligns with project vision -- Prevent duplicate efforts -- Discuss approach before implementation - -Could you please: -1. Create an issue describing this change -2. Wait for it to be reviewed and assigned to you -3. We can then reopen this PR or you can create a new one - -Sorry for the inconvenience - this process helps us manage the project effectively. -``` - -### Scenario 3: Feature Request Not Aligned with Vision - -**Situation**: Well-intentioned feature that doesn't fit project goals. - -**Response:** -```markdown -Thank you for this suggestion! I can see how this would be useful for [specific use case]. 
- -After reviewing against our [design principles](DESIGN_PRINCIPLES.md), we've decided not to include this in the core project because [specific reason - e.g., "it conflicts with our 'Simplicity Over Features' principle" or "it would require dependencies that conflict with our privacy-first approach"]. - -Some alternatives: -- [If applicable] This could be built as a plugin/extension -- [If applicable] This functionality might be achievable through [existing feature] -- [If applicable] You might be interested in [other tool] which is designed for this use case - -We appreciate your contribution and hope you understand. Feel free to check our [roadmap](link) or [open issues](link) for other ways to contribute! -``` - -### Scenario 4: Contributor Ghosts After Feedback - -**Situation**: You requested changes, but contributor hasn't responded in 2+ weeks. - -**After 2 weeks:** -```markdown -Hey there! Just checking in on this PR. Do you have time to address the feedback, or would you like someone else to take over? - -No pressure either way - just want to make sure this doesn't fall through the cracks. -``` - -**After 1 month with no response:** -```markdown -Thanks again for starting this work! Since we haven't heard back, I'm going to close this PR for now. - -If you want to pick this up again in the future, feel free to reopen it or create a new PR. Alternatively, I'll mark the issue as available for someone else to work on. - -We appreciate your contribution! -``` - -Then: -- Close the PR -- Unassign the issue -- Add `help wanted` label to the issue - -### Scenario 5: Breaking Changes Without Discussion - -**Situation**: PR introduces breaking changes that weren't discussed. - -**Response:** -```markdown -Thanks for this PR! However, I notice this introduces breaking changes that weren't discussed in the original issue. - -Breaking changes require: -1. Prior discussion and approval -2. Migration guide for users -3. Deprecation period (when possible) -4. Clear documentation of the change - -Could we discuss the breaking changes first? Specifically: -- [What breaks and why] -- [Who will be affected] -- [Migration path] - -We may need to adjust the approach to minimize impact on existing users. -``` - -## Communication Templates - -### Closing a PR (Misaligned with Vision) - -```markdown -Thank you for taking the time to contribute! We really appreciate it. - -After careful review, we've decided not to merge this PR because [specific reason related to design principles]. - -This isn't a reflection on your code quality - it's about maintaining focus on our core goals as outlined in [DESIGN_PRINCIPLES.md](DESIGN_PRINCIPLES.md). - -We'd love to have you contribute in other ways! Check out: -- [Good first issues](link) -- [Help wanted issues](link) -- Our [roadmap](link) - -Thanks again for your interest in Open Notebook! -``` - -### Closing a Stale Issue - -```markdown -We're closing this issue due to inactivity. If this is still relevant, feel free to reopen it with updated information. - -Thanks! -``` - -### Asking for More Information - -```markdown -Thanks for reporting this! To help us investigate, could you provide: - -1. [Specific information needed] -2. [Logs, screenshots, etc.] -3. [Steps to reproduce] - -This will help us understand the issue better and find a solution. -``` - -### Thanking a Contributor - -```markdown -Merged! 🎉 - -Thank you so much for this contribution, @username! [Specific thing they did well]. - -This will be included in the next release. 
-``` - -## Best Practices - -### Be Kind and Respectful - -- Thank contributors for their time and effort -- Assume good intentions -- Be patient with newcomers -- Explain *why*, not just *what* - -### Be Clear and Direct - -- Don't leave ambiguity about next steps -- Be specific about what needs to change -- Explain architectural decisions -- Set clear expectations - -### Be Consistent - -- Apply the same standards to all contributors -- Follow the process you've defined -- Document decisions for future reference - -### Be Protective of Project Vision - -- It's okay to say "no" -- Prioritize long-term maintainability -- Don't accept features you can't support -- Keep the project focused - -### Be Responsive - -- Respond to issues within 48 hours (even just to acknowledge) -- Review PRs within a week when possible -- Keep contributors updated on status -- Close stale issues/PRs to keep things tidy - -## When in Doubt +--- -Ask yourself: -1. Does this align with our [design principles](DESIGN_PRINCIPLES.md)? -2. Will we be able to maintain this feature long-term? -3. Does this benefit most users, or just an edge case? -4. Is there a simpler alternative? -5. Would I want to support this in 2 years? +## Quick Links -If you're unsure, it's perfectly fine to: -- Ask for input from other maintainers -- Start a discussion issue -- Sleep on it before making a decision +- **Maintainer Guide** → [docs/7-DEVELOPMENT/maintainer-guide.md](docs/7-DEVELOPMENT/maintainer-guide.md) +- **Contributing Guide** → [docs/7-DEVELOPMENT/contributing.md](docs/7-DEVELOPMENT/contributing.md) +- **Design Principles** → [docs/7-DEVELOPMENT/design-principles.md](docs/7-DEVELOPMENT/design-principles.md) --- -**Remember**: Good maintainership is about balancing openness to contributions with protection of project vision. You're not being mean by saying "no" to things that don't fit - you're being a responsible steward of the project. +For all maintainer details, see **[docs/7-DEVELOPMENT/maintainer-guide.md](docs/7-DEVELOPMENT/maintainer-guide.md)**. diff --git a/MIGRATION.md b/MIGRATION.md deleted file mode 100644 index faafd6f7..00000000 --- a/MIGRATION.md +++ /dev/null @@ -1,397 +0,0 @@ -# Migration Guide: Streamlit to React/Next.js Frontend - -**Version**: 1.0.0 -**Last Updated**: October 2025 - -This guide helps existing Open Notebook users migrate from the legacy Streamlit frontend to the new React/Next.js frontend. - ---- - -## ⚠️ Breaking Changes in v1.0 - -Open Notebook v1.0 introduces breaking changes that require manual migration. Please read this section carefully before upgrading. - -### Docker Tag Changes - -**The "latest" tag is now frozen** at the last Streamlit version. Starting with v1.0, we use versioned tags to prevent unexpected breaking changes: - -- **`latest`** and **`latest-single`** → FROZEN at Streamlit version (will not update) -- **`v1-latest`** and **`v1-latest-single`** → NEW tags for v1.x releases (recommended) -- **`X.Y.Z`** and **`X.Y.Z-single`** → Specific version tags (unchanged) - -**Why this change?** -The v1.0 release brings significant architectural changes (Streamlit → React/Next.js frontend). Freezing the "latest" tag prevents existing deployments from breaking unexpectedly, while the new "v1-latest" tag allows users to explicitly opt into the v1 architecture. - -### Quick Migration for Docker Users - -If you're currently using `latest` or `latest-single`, you need to: - -1. 
**Update your docker-compose.yml or docker run command**: - ```yaml - # Before: - image: lfnovo/open_notebook:latest-single - - # After (recommended): - image: lfnovo/open_notebook:v1-latest-single - ``` - -2. **Expose port 5055** for the API (required in v1): - ```yaml - ports: - - "8502:8502" # Frontend - - "5055:5055" # API (NEW - required) - ``` - -3. **Verify API connectivity** after upgrade: - ```bash - curl http://localhost:5055/api/config - ``` - -### API Connectivity (Port 5055) - -**Important:** v1.0 requires port 5055 to be exposed to your host machine so the frontend can communicate with the API. - -**Auto-Detection:** The Next.js frontend automatically detects the API URL: -- If you access the frontend at `http://localhost:8502`, it uses `http://localhost:5055` -- If you access the frontend at `http://192.168.1.100:8502`, it uses `http://192.168.1.100:5055` -- If you access the frontend at `http://my-server:8502`, it uses `http://my-server:5055` - -**Manual Override:** If auto-detection doesn't work (e.g., reverse proxy, complex networking), set the `API_URL` environment variable: - -```bash -# Docker run example -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -e API_URL=http://my-custom-api:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - lfnovo/open_notebook:v1-latest-single -``` - -```yaml -# docker-compose.yml example -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" - - "5055:5055" - environment: - - API_URL=http://my-custom-api:5055 - volumes: - - ./notebook_data:/app/data - - ./surreal_data:/mydata -``` - -### Health Check - -Verify your API is accessible with: - -```bash -# Local deployment -curl http://localhost:5055/api/config - -# Remote deployment -curl http://your-server-ip:5055/api/config -``` - -Expected response: -```json -{ - "version": "1.0.0", - "latestVersion": "1.0.0", - "hasUpdate": false, - "dbStatus": "online" -} -``` - -Note: The API URL is now auto-detected by the frontend from the hostname you're accessing, so `/api/config` no longer returns `apiUrl`. - -### Troubleshooting - -**Problem:** Frontend shows "Cannot connect to API" error -- **Check:** Is port 5055 exposed? Run `docker ps` and verify port mapping -- **Check:** Can you reach the API? Run `curl http://localhost:5055/api/config` -- **Solution:** If using custom networking, set `API_URL` environment variable - -**Problem:** Auto-detection uses wrong hostname -- **Example:** Frontend at `http://internal-hostname:8502` but API should use `http://public-hostname:5055` -- **Solution:** Set `API_URL=http://public-hostname:5055` environment variable - -**Problem:** Still running the old Streamlit version after `docker pull` -- **Check:** Are you using the "latest" tag? It's frozen at Streamlit version -- **Solution:** Update to `v1-latest` or `v1-latest-single` tag - ---- - -## What Changed - -Open Notebook has migrated from a Streamlit-based frontend to a modern React/Next.js application. This brings significant improvements in performance, user experience, and maintainability. 
- -### Key Changes - -| Aspect | Before (Streamlit) | After (React/Next.js) | -|--------|-------------------|----------------------| -| **Frontend Framework** | Streamlit | Next.js 15 + React 18 | -| **UI Components** | Streamlit widgets | shadcn/ui + Radix UI | -| **Frontend Port** | 8502 | 8502 (unchanged) | -| **API Port** | 5055 | 5055 (unchanged) | -| **Navigation** | Sidebar with emoji icons | Clean sidebar navigation | -| **Performance** | Server-side rendering | Client-side React with API calls | -| **Customization** | Limited | Highly customizable | - -### What Stayed the Same - -- **Core functionality**: All features remain available -- **API backend**: FastAPI backend unchanged -- **Database**: SurrealDB unchanged -- **Data format**: No data migration needed -- **Configuration**: Same environment variables -- **Docker deployment**: Same ports and setup - -## Migration Paths - -### Path 1: Docker Users (Recommended) - -If you're running Open Notebook via Docker, migration is automatic: - -1. **Stop the current version**: - ```bash - docker-compose down - ``` - -2. **Update to the latest image**: - ```bash - # Update docker-compose.yml to use v1-latest - # Change from: - image: lfnovo/open_notebook:latest-single - # To: - image: lfnovo/open_notebook:v1-latest-single - ``` - -3. **Start the new version**: - ```bash - docker-compose pull - docker-compose up -d - ``` - -4. **Access the new frontend**: - - Frontend: http://localhost:8502 (new React UI) - - API Docs: http://localhost:5055/docs - -**Your data is automatically preserved!** All notebooks, sources, and notes carry over seamlessly. - -### Path 2: Source Code Users - -If you're running from source code: - -1. **Pull the latest code**: - ```bash - git pull origin main - ``` - -2. **Install frontend dependencies**: - ```bash - cd frontend - npm install - cd .. - ``` - -3. **Update Python dependencies**: - ```bash - uv sync - ``` - -4. **Start services** (3 terminals): - ```bash - # Terminal 1: Database - make database - - # Terminal 2: API - uv run python api/main.py - - # Terminal 3: Frontend (NEW) - cd frontend && npm run dev - ``` - -5. **Access the application**: - - Frontend: http://localhost:8502 - - API: http://localhost:5055 - -## Breaking Changes - -### Removed Features - -The following Streamlit-specific features are no longer available: - -- **Streamlit cache**: Replaced with React Query caching -- **Streamlit session state**: Replaced with React state management -- **Direct file access via Streamlit**: Use API endpoints instead - -### Changed Navigation - -Navigation paths have been simplified: - -| Old Path | New Path | -|----------|----------| -| Settings → Models | Models | -| Settings → Advanced | Advanced | -| Other paths | (Same but cleaner navigation) | - -### API Changes - -**No breaking API changes!** The REST API remains fully backward compatible. 
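If you'd rather verify this from a script than with `curl`, a minimal standard-library check against the `/api/config` endpoint from the Health Check section might look like this (an illustrative sketch; the endpoint path and response fields are the ones documented above, while the host and port assume a default local deployment):

```python
import json
import urllib.request

# Query the config endpoint documented in the Health Check section.
with urllib.request.urlopen("http://localhost:5055/api/config", timeout=5) as resp:
    config = json.load(resp)

# Field names come from the example response above.
assert config.get("dbStatus") == "online", "database is not reachable"
print(f"API version {config.get('version')} is up (update available: {config.get('hasUpdate')})")
```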
- -## New Features in React Version - -The React frontend brings several improvements: - -### Performance -- **Faster page loads**: Client-side rendering with React -- **Better caching**: React Query for intelligent data caching -- **Optimized builds**: Next.js automatic code splitting - -### User Experience -- **Modern UI**: Clean, professional interface with shadcn/ui -- **Responsive design**: Better mobile and tablet support -- **Keyboard shortcuts**: Improved keyboard navigation -- **Real-time updates**: Better WebSocket support - -### Developer Experience -- **TypeScript**: Full type safety -- **Component library**: Reusable UI components -- **Hot reload**: Instant updates during development -- **Testing**: Better test infrastructure - -## Troubleshooting - -### Issue: Can't access the frontend - -**Solution**: -```bash -# Check if services are running -docker-compose ps - -# Check logs -docker-compose logs open_notebook - -# Restart services -docker-compose restart -``` - -### Issue: API errors in new frontend - -**Solution**: -The new frontend requires the API to be running. Ensure: -```bash -# API should be accessible at -curl http://localhost:5055/health - -# If not, check API logs -docker-compose logs open_notebook | grep api -``` - -### Issue: Missing data after migration - -**Solution**: -Data is preserved automatically. If you don't see your data: - -1. Check database volume is mounted correctly: - ```bash - docker-compose down - # Verify volumes in docker-compose.yml: - # - ./surreal_data:/mydata (for multi-container) - # - ./surreal_single_data:/mydata (for single-container) - docker-compose up -d - ``` - -2. Check SurrealDB is running: - ```bash - docker-compose logs surrealdb - ``` - -### Issue: Port conflicts - -**Solution**: -If ports 8502 or 5055 are already in use: - -```bash -# Find what's using the port -lsof -i :8502 -lsof -i :5055 - -# Stop conflicting service or change Open Notebook ports -# Edit docker-compose.yml: -ports: - - "8503:8502" # Change external port - - "5056:5055" # Change external port -``` - -## Rollback Instructions - -If you need to roll back to the Streamlit version: - -### Docker Users - -```bash -# Stop current version -docker-compose down - -# Edit docker-compose.yml to use old image -# Change to: lfnovo/open_notebook:streamlit-latest - -# Start old version -docker-compose up -d -``` - -### Source Code Users - -```bash -# Checkout the last Streamlit version tag -git checkout tags/streamlit-final - -# Install dependencies -uv sync - -# Start Streamlit -uv run streamlit run app_home.py -``` - -## Getting Help - -If you encounter issues during migration: - -- **Discord**: Join our [Discord community](https://discord.gg/37XJPXfz2w) for real-time help -- **GitHub Issues**: Report bugs at [github.com/lfnovo/open-notebook/issues](https://github.com/lfnovo/open-notebook/issues) -- **Documentation**: Check [full documentation](https://github.com/lfnovo/open-notebook/tree/main/docs) - -## FAQs - -### Will my notebooks and data be lost? -No! All data is preserved. The database and API backend are unchanged. - -### Do I need to update my API integrations? -No! The REST API remains fully backward compatible. - -### Can I use both frontends simultaneously? -Technically yes, but not recommended. Choose one for consistency. - -### What about my custom Streamlit pages? -Custom Streamlit pages won't work with the React frontend. 
Consider: -- Using the REST API to build custom integrations -- Contributing React components to the project -- Requesting features in GitHub issues - -### Is the Streamlit version still supported? -The Streamlit version is no longer actively developed. We recommend migrating to the React version for the best experience and latest features. - -## Timeline - -- **Legacy (Pre-v1.0)**: Streamlit frontend -- **Current (v1.0+)**: React/Next.js frontend -- **Future**: Continued React development with new features - ---- - -**Ready to migrate?** Follow the migration path for your deployment method above. The process is straightforward and your data is safe! diff --git a/Makefile b/Makefile index 69a5a96c..b71b4dae 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ .PHONY: run frontend check ruff database lint api start-all stop-all status clean-cache worker worker-start worker-stop worker-restart .PHONY: docker-buildx-prepare docker-buildx-clean docker-buildx-reset -.PHONY: docker-push docker-push-latest docker-release tag export-docs +.PHONY: docker-push docker-push-latest docker-release docker-build-local tag export-docs # Get version from pyproject.toml VERSION := $(shell grep -m1 version pyproject.toml | cut -d'"' -f2) @@ -45,6 +45,16 @@ docker-buildx-reset: docker-buildx-clean docker-buildx-prepare # === Docker Build Targets === +# Build production image for local platform only (no push) +docker-build-local: + @echo "🔨 Building production image locally ($(shell uname -m))..." + docker build \ + -t $(DOCKERHUB_IMAGE):$(VERSION) \ + -t $(DOCKERHUB_IMAGE):local \ + . + @echo "✅ Built $(DOCKERHUB_IMAGE):$(VERSION) and $(DOCKERHUB_IMAGE):local" + @echo "Run with: docker run -p 5055:5055 -p 3000:3000 $(DOCKERHUB_IMAGE):local" + # Build and push version tags ONLY (no latest) for both regular and single images docker-push: docker-buildx-prepare @echo "📤 Building and pushing version $(VERSION) to both registries..." diff --git a/README.dev.md b/README.dev.md new file mode 100644 index 00000000..1c7d27e5 --- /dev/null +++ b/README.dev.md @@ -0,0 +1,408 @@ +# Developer Guide + +This guide is for developers working on Open Notebook. For end-user documentation, see [README.md](README.md) and [docs/](docs/). + +## Quick Start for Development + +```bash +# 1. Clone and setup +git clone https://github.com/lfnovo/open-notebook.git +cd open-notebook + +# 2. Copy environment files +cp .env.example .env +cp .env.example docker.env + +# 3. Install dependencies +uv sync + +# 4. Start all services (recommended for development) +make start-all +``` + +## Development Workflows + +### When to Use What? + +| Workflow | Use Case | Speed | Production Parity | +|----------|----------|-------|-------------------| +| **Local Services** (`make start-all`) | Day-to-day development, fastest iteration | ⚡⚡⚡ Fast | Medium | +| **Docker Compose** (`make dev`) | Testing containerized setup | ⚡⚡ Medium | High | +| **Local Docker Build** (`make docker-build-local`) | Testing Dockerfile changes | ⚡ Slow | Very High | +| **Multi-platform Build** (`make docker-push`) | Publishing releases | 🐌 Very Slow | Exact | + +--- + +## 1. Local Development (Recommended) + +**Best for:** Daily development, hot reload, debugging + +### Setup + +```bash +# Start database +make database + +# Start all services (DB + API + Worker + Frontend) +make start-all +``` + +### What This Does + +1. Starts SurrealDB in Docker (port 8000) +2. Starts FastAPI backend (port 5055) +3. Starts background worker (surreal-commands) +4. 
Starts Next.js frontend (port 3000) + +### Individual Services + +```bash +# Just the database +make database + +# Just the API +make api + +# Just the frontend +make frontend + +# Just the worker +make worker +``` + +### Checking Status + +```bash +# See what's running +make status + +# Stop everything +make stop-all +``` + +### Advantages +- ✅ Fastest iteration (hot reload) +- ✅ Easy debugging (direct process access) +- ✅ Low resource usage +- ✅ Direct log access + +### Disadvantages +- ❌ Doesn't test Docker build +- ❌ Environment may differ from production +- ❌ Requires local Python/Node setup + +--- + +## 2. Docker Compose Development + +**Best for:** Testing containerized setup, CI/CD verification + +```bash +# Start with dev profile +make dev + +# Or full stack +make full +``` + +### Configuration Files + +- `docker-compose.dev.yml` - Development setup +- `docker-compose.full.yml` - Full stack setup +- `docker-compose.yml` - Base configuration + +### Advantages +- ✅ Closer to production environment +- ✅ Isolated dependencies +- ✅ Easy to share exact environment + +### Disadvantages +- ❌ Slower rebuilds +- ❌ More complex debugging +- ❌ Higher resource usage + +--- + +## 3. Testing Production Docker Images + +**Best for:** Verifying Dockerfile changes before publishing + +### Build Locally + +```bash +# Build production image for your platform only +make docker-build-local +``` + +This creates two tags: +- `lfnovo/open_notebook:` (from pyproject.toml) +- `lfnovo/open_notebook:local` + +### Run Locally + +```bash +docker run -p 5055:5055 -p 3000:3000 lfnovo/open_notebook:local +``` + +### When to Use +- ✅ Before pushing to registry +- ✅ Testing Dockerfile changes +- ✅ Debugging production-specific issues +- ✅ Verifying build process + +--- + +## 4. Publishing Docker Images + +### Workflow + +```bash +# 1. Test locally first +make docker-build-local + +# 2. If successful, push version tag (no latest update) +make docker-push + +# 3. Test the pushed version in staging/production + +# 4. When ready, promote to latest +make docker-push-latest +``` + +### Available Commands + +| Command | What It Does | Updates Latest? | +|---------|--------------|-----------------| +| `make docker-build-local` | Build for current platform only | No registry push | +| `make docker-push` | Push version tags to registries | ❌ No | +| `make docker-push-latest` | Push version + update v1-latest | ✅ Yes | +| `make docker-release` | Full release (same as docker-push-latest) | ✅ Yes | + +### Publishing Details + +- **Platforms:** `linux/amd64`, `linux/arm64` +- **Registries:** Docker Hub + GitHub Container Registry +- **Image Variants:** Regular + Single-container (`-single`) +- **Version Source:** `pyproject.toml` + +### Creating Git Tags + +```bash +# Create and push git tag matching pyproject.toml version +make tag +``` + +--- + +## Code Quality + +```bash +# Run linter with auto-fix +make ruff + +# Run type checking +make lint + +# Run tests +uv run pytest tests/ + +# Clean cache directories +make clean-cache +``` + +--- + +## Common Development Tasks + +### Adding a New Feature + +1. Create feature branch +2. Develop using `make start-all` +3. Write tests +4. Run `make ruff` and `make lint` +5. Test with `make docker-build-local` +6. Create PR + +### Fixing a Bug + +1. Reproduce locally with `make start-all` +2. Add test case demonstrating bug +3. Fix the bug +4. Verify test passes +5. 
Check with `make docker-build-local`

### Updating Dependencies

```bash
# Add Python dependency
uv add package-name

# Update dependencies
uv sync

# Frontend dependencies
cd frontend && npm install package-name
```

### Database Migrations

Database migrations run **automatically** when the API starts.

1. Create migration file: `migrations/XXX_description.surql`
2. Write SurrealQL schema changes
3. (Optional) Create rollback: `migrations/XXX_description_down.surql`
4. Restart API - migration runs on startup

---

## Troubleshooting

### Services Won't Start

```bash
# Check status
make status

# Check database
docker compose ps surrealdb

# View logs
docker compose logs surrealdb

# Restart everything
make stop-all
make start-all
```

### Port Already in Use

```bash
# Find process using port
lsof -i :5055
lsof -i :3000
lsof -i :8000

# Kill stuck processes
make stop-all
```

### Database Connection Issues

```bash
# Verify SurrealDB is running
docker compose ps surrealdb

# Check connection settings in .env
cat .env | grep SURREAL
```

### Docker Build Fails

```bash
# Clean Docker cache
docker builder prune

# Reset buildx
make docker-buildx-reset

# Try local build first
make docker-build-local
```

---

## Project Structure

```
open-notebook/
├── api/              # FastAPI backend
├── frontend/         # Next.js React frontend
├── open_notebook/    # Python core library
│   ├── domain/       # Domain models
│   ├── graphs/       # LangGraph workflows
│   ├── ai/           # AI provider integration
│   └── database/     # SurrealDB operations
├── migrations/       # Database migrations
├── tests/            # Test suite
├── docs/             # User documentation
└── Makefile          # Development commands
```

See component-specific CLAUDE.md files for detailed architecture:
- [frontend/CLAUDE.md](frontend/CLAUDE.md)
- [api/CLAUDE.md](api/CLAUDE.md)
- [open_notebook/CLAUDE.md](open_notebook/CLAUDE.md)

---

## Environment Variables

### Required for Local Development

```bash
# .env file (variable names match .env.example)
SURREAL_URL=ws://localhost:8000/rpc
SURREAL_USER=root
SURREAL_PASSWORD=root
SURREAL_NAMESPACE=open_notebook
SURREAL_DATABASE=open_notebook

# AI Provider (at least one required)
OPENAI_API_KEY=sk-...
# OR
ANTHROPIC_API_KEY=sk-ant-...
# OR configure other providers (see docs/5-CONFIGURATION/)
```

See [docs/5-CONFIGURATION/](docs/5-CONFIGURATION/) for the complete configuration guide.

---

## Performance Tips

### Speed Up Local Development

1. **Use `make start-all`** instead of Docker for daily work
2. **Keep SurrealDB running** between sessions (`make database`)
3. **Use `make docker-build-local`** only when testing Dockerfile changes
4. 
**Skip multi-platform builds** until ready to publish + +### Reduce Resource Usage + +```bash +# Stop unused services +make stop-all + +# Clean up Docker +docker system prune -a + +# Clean Python cache +make clean-cache +``` + +--- + +## TODO: Sections to Add + +- [ ] Frontend development guide (hot reload, component structure) +- [ ] API development guide (adding endpoints, services) +- [ ] LangGraph workflow development +- [ ] Testing strategy and coverage +- [ ] Debugging tips (VSCode/PyCharm setup) +- [ ] CI/CD pipeline overview +- [ ] Release process checklist +- [ ] Common error messages and solutions + +--- + +## Resources + +- **Documentation:** https://open-notebook.ai +- **Discord:** https://discord.gg/37XJPXfz2w +- **Issues:** https://github.com/lfnovo/open-notebook/issues +- **Contributing:** [CONTRIBUTING.md](CONTRIBUTING.md) +- **Maintainer Guide:** [MAINTAINER_GUIDE.md](MAINTAINER_GUIDE.md) + +--- + +**Last Updated:** January 2025 diff --git a/README.md b/README.md index 5c973fdc..3d79ef11 100644 --- a/README.md +++ b/README.md @@ -24,13 +24,13 @@ Checkout our website »

- 📚 Get Started + 📚 Get Started · - 📖 User Guide + 📖 User Guide · - ✨ Features + ✨ Features · - 🚀 Deploy + 🚀 Deploy

@@ -68,19 +68,6 @@ Learn more about our project at [https://www.open-notebook.ai](https://www.open- --- -## ⚠️ IMPORTANT: v1.0 Breaking Changes - -**If you're upgrading from a previous version**, please note: - -- 🏷️ **Docker tags have changed**: The `latest` tag is now **frozen** at the last Streamlit version -- 🆕 **Use `v1-latest` tag** for the new React/Next.js version (recommended) -- 🔌 **Port 5055 required**: You must expose port 5055 for the API to work -- 📖 **Read the migration guide**: See [MIGRATION.md](MIGRATION.md) for detailed upgrade instructions - -**New users**: You can ignore this notice and proceed with the Quick Start below using the `v1-latest-single` tag. - ---- - ## 🆚 Open Notebook vs Google Notebook LM | Feature | Open Notebook | Google Notebook LM | Advantage | @@ -88,13 +75,12 @@ Learn more about our project at [https://www.open-notebook.ai](https://www.open- | **Privacy & Control** | Self-hosted, your data | Google cloud only | Complete data sovereignty | | **AI Provider Choice** | 16+ providers (OpenAI, Anthropic, Ollama, LM Studio, etc.) | Google models only | Flexibility and cost optimization | | **Podcast Speakers** | 1-4 speakers with custom profiles | 2 speakers only | Extreme flexibility | -| **Context Control** | 3 granular levels | All-or-nothing | Privacy and performance tuning | | **Content Transformations** | Custom and built-in | Limited options | Unlimited processing power | | **API Access** | Full REST API | No API | Complete automation | | **Deployment** | Docker, cloud, or local | Google hosted only | Deploy anywhere | -| **Citations** | Comprehensive with sources | Basic references | Research integrity | +| **Citations** | Basic references (will improve) | Comprehensive with sources | Research integrity | | **Customization** | Open source, fully customizable | Closed system | Unlimited extensibility | -| **Cost** | Pay only for AI usage | Monthly subscription + usage | Transparent and controllable | +| **Cost** | Pay only for AI usage | Free tier + Monthly subscription | Transparent and controllable | **Why Choose Open Notebook?** - 🔒 **Privacy First**: Your sensitive research stays completely private @@ -109,181 +95,58 @@ Learn more about our project at [https://www.open-notebook.ai](https://www.open- ## 🚀 Quick Start -**Docker Images Available:** -- **Docker Hub**: `lfnovo/open_notebook:v1-latest-single` -- **GitHub Container Registry**: `ghcr.io/lfnovo/open-notebook:v1-latest-single` +Choose your installation method: -Both registries contain identical images - choose whichever you prefer! +### 🐳 **Docker (Recommended)** -### Choose Your Setup: +**Best for most users** - Fast setup with Docker Compose: - - - - - -
+→ **[Docker Compose Installation Guide](docs/1-INSTALLATION/docker-compose.md)** +- Multi-container setup (recommended) +- 5-10 minutes setup time +- Requires Docker Desktop -#### 🏠 **Local Machine Setup** -Perfect if Docker runs on the **same computer** where you'll access Open Notebook. +**Quick Start:** +- Get an API key (OpenAI, Anthropic, Google, etc.) or setup Ollama +- Create docker-compose.yml (example in guide) +- Run: docker compose up -d +- Access: http://localhost:8502 -```bash -mkdir open-notebook && cd open-notebook - -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - -e OPENAI_API_KEY=your_key_here \ - -e SURREAL_URL="ws://localhost:8000/rpc" \ - -e SURREAL_USER="root" \ - -e SURREAL_PASSWORD="root" \ - -e SURREAL_NAMESPACE="open_notebook" \ - -e SURREAL_DATABASE="production" \ - lfnovo/open_notebook:v1-latest-single -``` +--- -**Access at:** http://localhost:8502 +### 💻 **From Source (Developers)** - +**For development and contributors:** -#### 🌐 **Remote Server Setup** -Use this for servers, Raspberry Pi, NAS, Proxmox, or any remote machine. +→ **[From Source Installation Guide](docs/1-INSTALLATION/from-source.md)** +- Clone and run locally +- 10-15 minutes setup time +- Requires: Python 3.11+, Node.js 18+, Docker, uv +**Quick Start:** ```bash -mkdir open-notebook && cd open-notebook - -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - -e OPENAI_API_KEY=your_key_here \ - -e API_URL=http://YOUR_SERVER_IP:5055 \ - -e SURREAL_URL="ws://localhost:8000/rpc" \ - -e SURREAL_USER="root" \ - -e SURREAL_PASSWORD="root" \ - -e SURREAL_NAMESPACE="open_notebook" \ - -e SURREAL_DATABASE="production" \ - lfnovo/open_notebook:v1-latest-single -``` - -**Replace `YOUR_SERVER_IP`** with your server's IP (e.g., `192.168.1.100`) or domain - -**Access at:** http://YOUR_SERVER_IP:8502 - -
- -> **⚠️ Critical Setup Notes:** -> -> **Both ports are required:** -> - **Port 8502**: Web interface (what you see in your browser) -> - **Port 5055**: API backend (required for the app to function) -> -> **API_URL must match how YOU access the server:** -> - ✅ Access via `http://192.168.1.100:8502` → set `API_URL=http://192.168.1.100:5055` -> - ✅ Access via `http://myserver.local:8502` → set `API_URL=http://myserver.local:5055` -> - ❌ Don't use `localhost` for remote servers - it won't work from other devices! - -### Using Docker Compose (Recommended for Easy Management) - -Create a `docker-compose.yml` file: - -```yaml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - # Or use: ghcr.io/lfnovo/open-notebook:v1-latest-single - ports: - - "8502:8502" # Web UI - - "5055:5055" # API (required!) - environment: - - OPENAI_API_KEY=your_key_here - # For remote access, uncomment and set your server IP/domain: - # - API_URL=http://192.168.1.100:5055 - # Database connection (required for single-container) - - SURREAL_URL=ws://localhost:8000/rpc - - SURREAL_USER=root - - SURREAL_PASSWORD=root - - SURREAL_NAMESPACE=open_notebook - - SURREAL_DATABASE=production - volumes: - - ./notebook_data:/app/data - - ./surreal_data:/mydata - restart: always -``` - -Start with: `docker compose up -d` - -**What gets created:** -``` -open-notebook/ -├── docker-compose.yml # Your configuration -├── notebook_data/ # Your notebooks and research content -└── surreal_data/ # Database files +git clone https://github.com/lfnovo/open-notebook.git +uv sync +make start-all ``` -### 🆘 Quick Troubleshooting +Access: http://localhost:3000 (dev) or http://localhost:8502 (production) -| Problem | Solution | -|---------|----------| -| **"Unable to connect to server"** | Set `API_URL` environment variable to match how you access the server (see remote setup above) | -| **Blank page or errors** | Ensure BOTH ports (8502 and 5055) are exposed in your docker command | -| **Works on server but not from other computers** | Don't use `localhost` in `API_URL` - use your server's actual IP address | -| **"404" or "config endpoint" errors** | Don't add `/api` to `API_URL` - use just `http://your-ip:5055` | -| **Still having issues?** | Check our [5-minute troubleshooting guide](docs/troubleshooting/quick-fixes.md) or [join Discord](https://discord.gg/37XJPXfz2w) | +--- -### How Open Notebook Works +### 📖 Need Help? -``` -┌─────────────────────────────────────────────────────────┐ -│ Your Browser │ -│ Access: http://your-server-ip:8502 │ -└────────────────┬────────────────────────────────────────┘ - │ - ▼ - ┌───────────────┐ - │ Port 8502 │ ← Next.js Frontend (what you see) - │ Frontend │ Also proxies API requests internally! 
- └───────┬───────┘ - │ proxies /api/* requests ↓ - ▼ - ┌───────────────┐ - │ Port 5055 │ ← FastAPI Backend (handles requests) - │ API │ - └───────┬───────┘ - │ - ▼ - ┌───────────────┐ - │ SurrealDB │ ← Database (internal, auto-configured) - │ (Port 8000) │ - └───────────────┘ -``` +- **🤖 AI Installation Assistant**: [CustomGPT to help you install](https://chatgpt.com/g/g-68776e2765b48191bd1bae3f30212631-open-notebook-installation-assistant) +- **🆘 Troubleshooting**: [5-minute troubleshooting guide](docs/6-TROUBLESHOOTING/quick-fixes.md) +- **💬 Community Support**: [Discord Server](https://discord.gg/37XJPXfz2w) +- **🐛 Report Issues**: [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) -**Key Points:** -- **v1.1+**: Next.js automatically proxies `/api/*` requests to the backend, simplifying reverse proxy setup -- Your browser loads the frontend from port 8502 -- The frontend needs to know where to find the API - when accessing remotely, set: `API_URL=http://your-server-ip:5055` -- **Behind reverse proxy?** You only need to proxy to port 8502 now! See [Reverse Proxy Guide](docs/deployment/reverse-proxy.md) +--- ## Star History [![Star History Chart](https://api.star-history.com/svg?repos=lfnovo/open-notebook&type=date&legend=top-left)](https://www.star-history.com/#lfnovo/open-notebook&type=date&legend=top-left) -### 🛠️ Full Installation -For development or customization: -```bash -git clone https://github.com/lfnovo/open-notebook -cd open-notebook -make start-all -``` - -### 📖 Need Help? -- **🤖 AI Installation Assistant**: We have a [CustomGPT built to help you install Open Notebook](https://chatgpt.com/g/g-68776e2765b48191bd1bae3f30212631-open-notebook-installation-assistant) - it will guide you through each step! -- **New to Open Notebook?** Start with our [Getting Started Guide](docs/getting-started/index.md) -- **Need installation help?** Check our [Installation Guide](docs/getting-started/installation.md) -- **Want to see it in action?** Try our [Quick Start Tutorial](docs/getting-started/quick-start.md) ## Provider Support Matrix @@ -329,36 +192,34 @@ Thanks to the [Esperanto](https://github.com/lfnovo/esperanto) library, we suppo - **📊 Fine-Grained Context Control**: Choose exactly what to share with AI models - **📎 Citations**: Get answers with proper source citations -### Three-Column Interface -1. **Sources**: Manage all your research materials -2. **Notes**: Create manual or AI-generated notes -3. 
**Chat**: Converse with AI using your content as context + +## Podcast Feature [![Check out our podcast sample](https://img.youtube.com/vi/D-760MlGwaI/0.jpg)](https://www.youtube.com/watch?v=D-760MlGwaI) ## 📚 Documentation ### Getting Started -- **[📖 Introduction](docs/getting-started/introduction.md)** - Learn what Open Notebook offers -- **[⚡ Quick Start](docs/getting-started/quick-start.md)** - Get up and running in 5 minutes -- **[🔧 Installation](docs/getting-started/installation.md)** - Comprehensive setup guide -- **[🎯 Your First Notebook](docs/getting-started/first-notebook.md)** - Step-by-step tutorial +- **[📖 Introduction](docs/0-START-HERE/index.md)** - Learn what Open Notebook offers +- **[⚡ Quick Start](docs/0-START-HERE/quick-start.md)** - Get up and running in 5 minutes +- **[🔧 Installation](docs/1-INSTALLATION/index.md)** - Comprehensive setup guide +- **[🎯 Your First Notebook](docs/0-START-HERE/first-notebook.md)** - Step-by-step tutorial ### User Guide -- **[📱 Interface Overview](docs/user-guide/interface-overview.md)** - Understanding the layout -- **[📚 Notebooks](docs/user-guide/notebooks.md)** - Organizing your research -- **[📄 Sources](docs/user-guide/sources.md)** - Managing content types -- **[📝 Notes](docs/user-guide/notes.md)** - Creating and managing notes -- **[💬 Chat](docs/user-guide/chat.md)** - AI conversations -- **[🔍 Search](docs/user-guide/search.md)** - Finding information +- **[📱 Interface Overview](docs/3-USER-GUIDE/interface-overview.md)** - Understanding the layout +- **[📚 Notebooks](docs/3-USER-GUIDE/notebooks.md)** - Organizing your research +- **[📄 Sources](docs/3-USER-GUIDE/sources.md)** - Managing content types +- **[📝 Notes](docs/3-USER-GUIDE/notes.md)** - Creating and managing notes +- **[💬 Chat](docs/3-USER-GUIDE/chat.md)** - AI conversations +- **[🔍 Search](docs/3-USER-GUIDE/search.md)** - Finding information ### Advanced Topics -- **[🎙️ Podcast Generation](docs/features/podcasts.md)** - Create professional podcasts -- **[🔧 Content Transformations](docs/features/transformations.md)** - Customize content processing -- **[🤖 AI Models](docs/features/ai-models.md)** - AI model configuration -- **[🔧 REST API Reference](docs/development/api-reference.md)** - Complete API documentation -- **[🔐 Security](docs/deployment/security.md)** - Password protection and privacy -- **[🚀 Deployment](docs/deployment/index.md)** - Complete deployment guides for all scenarios +- **[🎙️ Podcast Generation](docs/2-CORE-CONCEPTS/podcasts.md)** - Create professional podcasts +- **[🔧 Content Transformations](docs/2-CORE-CONCEPTS/transformations.md)** - Customize content processing +- **[🤖 AI Models](docs/4-AI-PROVIDERS/index.md)** - AI model configuration +- **[🔧 REST API Reference](docs/7-DEVELOPMENT/api-reference.md)** - Complete API documentation +- **[🔐 Security](docs/5-CONFIGURATION/security.md)** - Password protection and privacy +- **[🚀 Deployment](docs/1-INSTALLATION/index.md)** - Complete deployment guides for all scenarios

(back to top)

@@ -384,6 +245,12 @@ See the [open issues](https://github.com/lfnovo/open-notebook/issues) for a full

(back to top)

+## 📖 Need Help? +- **🤖 AI Installation Assistant**: We have a [CustomGPT built to help you install Open Notebook](https://chatgpt.com/g/g-68776e2765b48191bd1bae3f30212631-open-notebook-installation-assistant) - it will guide you through each step! +- **New to Open Notebook?** Start with our [Getting Started Guide](docs/0-START-HERE/index.md) +- **Need installation help?** Check our [Installation Guide](docs/1-INSTALLATION/index.md) +- **Want to see it in action?** Try our [Quick Start Tutorial](docs/0-START-HERE/quick-start.md) + ## 🤝 Community & Contributing ### Join the Community @@ -410,25 +277,12 @@ See our [Contributing Guide](CONTRIBUTING.md) for detailed information on how to Open Notebook is MIT licensed. See the [LICENSE](LICENSE) file for details. -## 📞 Contact - -**Luis Novo** - [@lfnovo](https://twitter.com/lfnovo) **Community Support**: - 💬 [Discord Server](https://discord.gg/37XJPXfz2w) - Get help, share ideas, and connect with users - 🐛 [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) - Report bugs and request features - 🌐 [Website](https://www.open-notebook.ai) - Learn more about the project -## 🙏 Acknowledgments - -Open Notebook is built on the shoulders of amazing open-source projects: - -* **[Podcast Creator](https://github.com/lfnovo/podcast-creator)** - Advanced podcast generation capabilities -* **[Surreal Commands](https://github.com/lfnovo/surreal-commands)** - Background job processing -* **[Content Core](https://github.com/lfnovo/content-core)** - Content processing and management -* **[Esperanto](https://github.com/lfnovo/esperanto)** - Multi-provider AI model abstraction -* **[Docling](https://github.com/docling-project/docling)** - Document processing and parsing -

(back to top)

diff --git a/api/CLAUDE.md b/api/CLAUDE.md new file mode 100644 index 00000000..66209701 --- /dev/null +++ b/api/CLAUDE.md @@ -0,0 +1,117 @@ +# API Module + +FastAPI-based REST backend exposing services for notebooks, sources, notes, chat, podcasts, and AI model management. + +## Purpose + +FastAPI application serving three architectural layers: routes (HTTP endpoints), services (business logic), and models (request/response schemas). Integrates LangGraph workflows (chat, ask, source_chat), SurrealDB persistence, and AI providers via Esperanto. + +## Architecture Overview + +**Three layers**: +1. **Routes** (`routers/*`): HTTP endpoints mapping to services +2. **Services** (`*_service.py`): Business logic orchestrating domain models, database, graphs, AI providers +3. **Models** (`models.py`): Pydantic request/response schemas with validation + +**Startup flow**: +- Load .env environment variables +- Initialize CORS middleware + password auth middleware +- Run database migrations via AsyncMigrationManager on lifespan startup +- Register all routers + +**Key services**: +- `chat_service.py`: Invokes chat graph with messages, context +- `podcast_service.py`: Orchestrates outline + transcript generation +- `sources_service.py`: Content ingestion, vectorization, metadata +- `notes_service.py`: Note creation, linking to sources/insights +- `transformations_service.py`: Applies transformations to content +- `models_service.py`: Manages AI provider/model configuration +- `episode_profiles_service.py`: Manages podcast speaker/episode profiles + +## Component Catalog + +### Main Application +- **main.py**: FastAPI app initialization, CORS setup, auth middleware, lifespan event, router registration +- **Lifespan handler**: Runs AsyncMigrationManager on startup (database schema migration) +- **Auth middleware**: PasswordAuthMiddleware protects endpoints (password-based access control) + +### Services (Business Logic) +- **chat_service.py**: Invokes chat.py graph; handles message history via SqliteSaver +- **podcast_service.py**: Generates outline (outline.jinja), then transcript (transcript.jinja) for episodes +- **sources_service.py**: Ingests files/URLs (content_core), extracts text, vectorizes, saves to SurrealDB +- **transformations_service.py**: Applies transformations via transformation.py graph +- **models_service.py**: Manages ModelManager config (AI provider overrides) +- **episode_profiles_service.py**: CRUD for EpisodeProfile and SpeakerProfile models +- **insights_service.py**: Generates and retrieves source insights +- **notes_service.py**: Creates notes linked to sources/insights + +### Models (Schemas) +- **models.py**: Pydantic schemas for request/response validation +- Request bodies: ChatRequest, CreateNoteRequest, PodcastGenerationRequest, etc. +- Response bodies: ChatResponse, NoteResponse, PodcastResponse, etc. +- Custom validators for enum fields, file paths, model references + +### Routers +- **routers/chat.py**: POST /chat +- **routers/source_chat.py**: POST /source/{source_id}/chat +- **routers/podcasts.py**: POST /podcasts, GET /podcasts/{id}, etc. 
+- **routers/notes.py**: POST /notes, GET /notes/{id} +- **routers/sources.py**: POST /sources, GET /sources/{id}, DELETE /sources/{id} +- **routers/models.py**: GET /models, POST /models/config +- **routers/transformations.py**: POST /transformations +- **routers/insights.py**: GET /sources/{source_id}/insights +- **routers/auth.py**: POST /auth/password (password-based auth) +- **routers/commands.py**: GET /commands/{command_id} (job status tracking) + +## Common Patterns + +- **Service injection via FastAPI**: Routers import services directly; no DI framework +- **Async/await throughout**: All DB queries, graph invocations, AI calls are async +- **SurrealDB transactions**: Services use repo_query, repo_create, repo_upsert from database layer +- **Config override pattern**: Models/config override via models_service passed to graph.ainvoke(config=...) +- **Error handling**: Services catch exceptions and return HTTP status codes (400 Bad Request, 404 Not Found, 500 Internal Server Error) +- **Logging**: loguru logger in main.py; services expected to log key operations +- **Response normalization**: All responses follow standard schema (data + metadata structure) + +## Key Dependencies + +- `fastapi`: FastAPI app, routers, HTTPException +- `pydantic`: Validation models with Field, field_validator +- `open_notebook.graphs`: chat, ask, source_chat, source, transformation graphs +- `open_notebook.database`: SurrealDB repository functions (repo_query, repo_create, repo_upsert) +- `open_notebook.domain`: Notebook, Source, Note, SourceInsight models +- `open_notebook.ai.provision`: provision_langchain_model() factory +- `ai_prompter`: Prompter for template rendering +- `content_core`: extract_content() for file/URL processing +- `esperanto`: AI provider client library (LLM, embeddings, TTS) +- `surreal_commands`: Job queue for async operations (podcast generation) +- `loguru`: Structured logging + +## Important Quirks & Gotchas + +- **Migration auto-run**: Database schema migrations run on every API startup (via lifespan); no manual migration steps +- **PasswordAuthMiddleware is basic**: Uses simple password check; production deployments should replace with OAuth/JWT +- **No request rate limiting**: No built-in rate limiting; deployment must add via proxy/middleware +- **Service state is stateless**: Services don't cache results; each request re-queries database/AI models +- **Graph invocation is blocking**: chat/podcast workflows may take minutes; no timeout handling in services +- **Command job fire-and-forget**: podcast_service.py submits jobs but doesn't wait (async job queue pattern) +- **Model override scoping**: Model config override via RunnableConfig is per-request only (not persistent) +- **CORS open by default**: main.py CORS settings allow all origins (restrict before production) +- **No OpenAPI security scheme**: API docs available without auth (disable before production) +- **Services don't validate user permission**: All endpoints trust authentication layer; no per-notebook permission checks + +## How to Add New Endpoint + +1. Create router file in `routers/` (e.g., `routers/new_feature.py`) +2. Import router into `main.py` and register: `app.include_router(new_feature.router, tags=["new_feature"])` +3. Create service in `new_feature_service.py` with business logic +4. Define request/response schemas in `models.py` (or create `new_feature_models.py`) +5. Implement router functions calling service methods +6. 
Test with `uv run uvicorn api.main:app --host 0.0.0.0 --port 5055` + +## Testing Patterns + +- **Interactive docs**: http://localhost:5055/docs (Swagger UI) +- **Direct service tests**: Import service, call methods directly with test data +- **Mock graphs**: Replace graph.ainvoke() with mock for testing service logic +- **Database: Use test database** (separate SurrealDB instance or mock repo_query) diff --git a/api/episode_profiles_service.py b/api/episode_profiles_service.py index 420690ef..22deb31c 100644 --- a/api/episode_profiles_service.py +++ b/api/episode_profiles_service.py @@ -7,7 +7,7 @@ from loguru import logger from api.client import api_client -from open_notebook.domain.podcast import EpisodeProfile +from open_notebook.podcasts.models import EpisodeProfile class EpisodeProfilesService: diff --git a/api/main.py b/api/main.py index f2445de3..d4746b87 100644 --- a/api/main.py +++ b/api/main.py @@ -1,5 +1,6 @@ # Load environment variables from dotenv import load_dotenv + load_dotenv() from contextlib import asynccontextmanager diff --git a/api/models_service.py b/api/models_service.py index 8196c610..956c8fd4 100644 --- a/api/models_service.py +++ b/api/models_service.py @@ -7,7 +7,7 @@ from loguru import logger from api.client import api_client -from open_notebook.domain.models import DefaultModels, Model +from open_notebook.ai.models import DefaultModels, Model class ModelsService: diff --git a/api/podcast_service.py b/api/podcast_service.py index 8bee41ef..e7c6dc20 100644 --- a/api/podcast_service.py +++ b/api/podcast_service.py @@ -6,7 +6,7 @@ from surreal_commands import get_command_status, submit_command from open_notebook.domain.notebook import Notebook -from open_notebook.domain.podcast import EpisodeProfile, PodcastEpisode, SpeakerProfile +from open_notebook.podcasts.models import EpisodeProfile, PodcastEpisode, SpeakerProfile class PodcastGenerationRequest(BaseModel): diff --git a/api/routers/embedding.py b/api/routers/embedding.py index 01613ae1..40b70d9b 100644 --- a/api/routers/embedding.py +++ b/api/routers/embedding.py @@ -3,7 +3,7 @@ from api.command_service import CommandService from api.models import EmbedRequest, EmbedResponse -from open_notebook.domain.models import model_manager +from open_notebook.ai.models import model_manager from open_notebook.domain.notebook import Note, Source router = APIRouter() diff --git a/api/routers/episode_profiles.py b/api/routers/episode_profiles.py index 076723a7..e35aa4ec 100644 --- a/api/routers/episode_profiles.py +++ b/api/routers/episode_profiles.py @@ -4,7 +4,7 @@ from loguru import logger from pydantic import BaseModel, Field -from open_notebook.domain.podcast import EpisodeProfile +from open_notebook.podcasts.models import EpisodeProfile router = APIRouter() diff --git a/api/routers/models.py b/api/routers/models.py index 261a4baf..7b2b31c9 100644 --- a/api/routers/models.py +++ b/api/routers/models.py @@ -11,7 +11,7 @@ ModelResponse, ProviderAvailabilityResponse, ) -from open_notebook.domain.models import DefaultModels, Model +from open_notebook.ai.models import DefaultModels, Model from open_notebook.exceptions import InvalidInputError router = APIRouter() diff --git a/api/routers/search.py b/api/routers/search.py index e6059ab5..1c817bee 100644 --- a/api/routers/search.py +++ b/api/routers/search.py @@ -6,7 +6,7 @@ from loguru import logger from api.models import AskRequest, AskResponse, SearchRequest, SearchResponse -from open_notebook.domain.models import Model, model_manager +from open_notebook.ai.models import 
Model, model_manager from open_notebook.domain.notebook import text_search, vector_search from open_notebook.exceptions import DatabaseOperationError, InvalidInputError from open_notebook.graphs.ask import graph as ask_graph diff --git a/api/routers/speaker_profiles.py b/api/routers/speaker_profiles.py index 3e3366d3..e8611fdf 100644 --- a/api/routers/speaker_profiles.py +++ b/api/routers/speaker_profiles.py @@ -4,7 +4,7 @@ from loguru import logger from pydantic import BaseModel, Field -from open_notebook.domain.podcast import SpeakerProfile +from open_notebook.podcasts.models import SpeakerProfile router = APIRouter() diff --git a/api/routers/transformations.py b/api/routers/transformations.py index 7242a302..6e3d455a 100644 --- a/api/routers/transformations.py +++ b/api/routers/transformations.py @@ -12,7 +12,7 @@ TransformationResponse, TransformationUpdate, ) -from open_notebook.domain.models import Model +from open_notebook.ai.models import Model from open_notebook.domain.transformation import DefaultPrompts, Transformation from open_notebook.exceptions import InvalidInputError from open_notebook.graphs.transformation import graph as transformation_graph diff --git a/batch_fix_services.py b/batch_fix_services.py deleted file mode 100644 index 4db32b66..00000000 --- a/batch_fix_services.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -"""Batch fix service files for mypy errors.""" -import re -from pathlib import Path - -SERVICE_FILES = [ - 'api/notes_service.py', - 'api/insights_service.py', - 'api/episode_profiles_service.py', - 'api/settings_service.py', - 'api/sources_service.py', - 'api/podcast_service.py', - 'api/command_service.py', -] - -BASE_DIR = Path('/Users/luisnovo/dev/projetos/open-notebook/open-notebook') - -for service_file in SERVICE_FILES: - file_path = BASE_DIR / service_file - if not file_path.exists(): - print(f"Skipping {service_file} - file not found") - continue - - content = file_path.read_text() - original_content = content - - # Pattern to find: var_name = api_client.method(args) - # Followed by: var_name["key"] or var_name.get("key") - lines = content.split('\n') - new_lines = [] - i = 0 - - while i < len(lines): - line = lines[i] - - # Check if this line has an api_client call assignment - match = re.match(r'(\s*)(\w+)\s*=\s*api_client\.(\w+)\((.*)\)\s*$', line) - if match and 'response = api_client' not in line: - indent = match.group(1) - var_name = match.group(2) - method_name = match.group(3) - args = match.group(4) - - # Look ahead to see if this variable is used with dict access - has_dict_access = False - for j in range(i+1, min(i+15, len(lines))): - next_line = lines[j] - if f'{var_name}["' in next_line or f"{var_name}['" in next_line or f'{var_name}.get(' in next_line: - has_dict_access = True - break - # Stop looking if we hit a blank line, new function, or new assignment - if (not next_line.strip() or - next_line.strip().startswith('def ') or - next_line.strip().startswith('class ') or - (re.match(r'\s*\w+\s*=', next_line) and var_name not in next_line)): - break - - if has_dict_access: - # Replace with response and isinstance check - new_lines.append(f'{indent}response = api_client.{method_name}({args})') - new_lines.append(f'{indent}{var_name} = response if isinstance(response, dict) else response[0]') - i += 1 - continue - - new_lines.append(line) - i += 1 - - new_content = '\n'.join(new_lines) - - # Check if content changed - if new_content != original_content: - file_path.write_text(new_content) - print(f"✓ Fixed {service_file}") - 
else: - print(f"- No changes needed for {service_file}") - -print("\nDone!") diff --git a/commands/CLAUDE.md b/commands/CLAUDE.md new file mode 100644 index 00000000..0b0eb617 --- /dev/null +++ b/commands/CLAUDE.md @@ -0,0 +1,49 @@ +# Commands Module + +**Purpose**: Defines async command handlers for long-running operations via `surreal-commands` job queue system. + +## Key Components + +- **`process_source_command`**: Ingests content through `source_graph`, creates embeddings (optional), and generates insights. Retries on transaction conflicts (exp. jitter, max 5×). +- **`embed_single_item_command`**: Embeds individual sources/notes/insights; splits content into chunks for vector storage. +- **`rebuild_embeddings_command`**: Bulk re-embed all/existing items with selective type filtering. +- **`generate_podcast_command`**: Creates podcasts via `podcast-creator` library using stored episode/speaker profiles. +- **`process_text_command`** (example): Test fixture for text operations (uppercase, lowercase, reverse, word_count). +- **`analyze_data_command`** (example): Test fixture for numeric aggregations. + +## Important Patterns + +- **Pydantic I/O**: All commands use `CommandInput`/`CommandOutput` subclasses for type safety and serialization. +- **Error handling**: Permanent errors return failure output; `RuntimeError` exceptions auto-retry via surreal-commands. +- **Model dumping**: Recursive `full_model_dump()` utility converts Pydantic models → dicts for DB/API responses. +- **Logging**: Uses `loguru.logger` throughout; logs execution start/end and key metrics (processing time, counts). +- **Time tracking**: All commands measure `start_time` → `processing_time` for monitoring. + +## Dependencies + +**External**: `surreal_commands` (command decorator, job queue), `loguru`, `pydantic`, `podcast_creator` +**Internal**: `open_notebook.domain.*` (Source, Note, Transformation), `open_notebook.graphs.source`, `open_notebook.ai.models` + +## Quirks & Edge Cases + +- **source_commands**: `ensure_record_id()` wraps command IDs for DB storage; transaction conflicts trigger exponential backoff retry (1-30s). Non-`RuntimeError` exceptions are permanent. +- **embedding_commands**: Queries DB directly for item state; chunk index must match source's chunk list. Model availability checked at command start. +- **podcast_commands**: Profiles loaded from SurrealDB by name (must exist); briefing can be extended with suffix. Episode records created mid-execution. +- **Example commands**: Accept optional `delay_seconds` for testing async behavior; not for production. + +## Code Example + +```python +@command("process_source", app="open_notebook", retry={...}) +async def process_source_command(input_data: SourceProcessingInput) -> SourceProcessingOutput: + start_time = time.time() + try: + transformations = [await Transformation.get(id) for id in input_data.transformations] + source = await Source.get(input_data.source_id) + result = await source_graph.ainvoke({...}) + return SourceProcessingOutput(success=True, ...) 
+    except RuntimeError as e:
+        raise  # re-raise so surreal-commands retries the job
+    except Exception as e:
+        return SourceProcessingOutput(success=False, error_message=str(e))
+```
diff --git a/commands/embedding_commands.py b/commands/embedding_commands.py
index 6e0445f8..ec0fb784 100644
--- a/commands/embedding_commands.py
+++ b/commands/embedding_commands.py
@@ -5,8 +5,8 @@
 from pydantic import BaseModel
 from surreal_commands import CommandInput, CommandOutput, command, submit_command
 
+from open_notebook.ai.models import model_manager
 from open_notebook.database.repository import ensure_record_id, repo_query
-from open_notebook.domain.models import model_manager
 from open_notebook.domain.notebook import Note, Source, SourceInsight
 from open_notebook.utils.text_utils import split_text
 
diff --git a/commands/podcast_commands.py b/commands/podcast_commands.py
index 1382abb6..2021f61b 100644
--- a/commands/podcast_commands.py
+++ b/commands/podcast_commands.py
@@ -8,7 +8,7 @@
 
 from open_notebook.config import DATA_FOLDER
 from open_notebook.database.repository import ensure_record_id, repo_query
-from open_notebook.domain.podcast import EpisodeProfile, PodcastEpisode, SpeakerProfile
+from open_notebook.podcasts.models import EpisodeProfile, PodcastEpisode, SpeakerProfile
 
 try:
     from podcast_creator import configure, create_podcast
diff --git a/docs/0-START-HERE/index.md b/docs/0-START-HERE/index.md
new file mode 100644
index 00000000..5f44f752
--- /dev/null
+++ b/docs/0-START-HERE/index.md
@@ -0,0 +1,63 @@
+# Open Notebook - Start Here
+
+**Open Notebook** is a privacy-focused AI research assistant. Upload documents, chat with AI, generate notes, and create podcasts—all with complete control over your data.
+
+## Choose Your Path
+
+### 🚀 I want to use OpenAI (Fastest)
+**5 minutes to running.** GPT, simple setup, powerful results.
+
+→ [OpenAI Quick Start](quick-start-openai.md)
+
+---
+
+### ☁️ I want to use other cloud AI (Anthropic, Google, OpenRouter, etc.)
+**5 minutes to running.** Choose from 15+ AI providers.
+
+→ [Cloud Providers Quick Start](quick-start-cloud.md)
+
+---
+
+### 🏠 I want to run locally (Ollama or LM Studio, completely private)
+**5 minutes to running.** Keep everything private, on your machine. No costs.
+
+→ [Local Quick Start](quick-start-local.md)
+
+---
+
+## What Can You Do?
+
+- 📄 **Upload Content**: PDFs, web links, audio, video, text
+- 🤖 **Chat with AI**: Ask questions about your documents with citations
+- 📝 **Generate Notes**: AI creates summaries and insights
+- 🎙️ **Create Podcasts**: Turn research into professional audio content
+- 🔍 **Search**: Full-text and semantic search across all content
+- ⚙️ **Transform**: Extract insights, analyze themes, create summaries
+
+## Why Open Notebook?
+
+| Feature | Open Notebook | Notebook LM |
+|---------|---|---|
+| **Privacy** | Self-hosted, your control | Cloud, Google's servers |
+| **AI Choice** | 15+ providers | Google's models only |
+| **Podcast Speakers** | 1-4 customizable | 2 only |
+| **Cost** | Free software (pay only for AI usage) | Free (Google keeps your data) |
+| **Offline** | Yes | No |
+
+## Prerequisites
+
+- **Docker**: All paths use Docker (free)
+- **AI Provider**: Either a cloud API key OR free local models (Ollama)
+
+---
+
+## Next Steps
+
+1. Pick your path above ⬆️
+2. Follow the 5-minute quick start
+3. Create your first notebook
+4. Start uploading documents!
+
+---
+
+**Need Help?** Join our [Discord community](https://discord.gg/37XJPXfz2w) or see [Full Documentation](../index.md).
diff --git a/docs/0-START-HERE/quick-start-cloud.md b/docs/0-START-HERE/quick-start-cloud.md new file mode 100644 index 00000000..1d048995 --- /dev/null +++ b/docs/0-START-HERE/quick-start-cloud.md @@ -0,0 +1,223 @@ +# Quick Start - Cloud AI Providers (5 minutes) + +Get Open Notebook running with **Anthropic, Google, Groq, or other cloud providers**. Same simplicity as OpenAI, with more choices. + +## Prerequisites + +1. **Docker Desktop** installed + - [Download here](https://www.docker.com/products/docker-desktop/) + - Already have it? Skip to step 2 + +2. **API Key** from your chosen provider: + - **OpenRouter** (100+ models, one key): https://openrouter.ai/keys + - **Anthropic (Claude)**: https://console.anthropic.com/ + - **Google (Gemini)**: https://aistudio.google.com/ + - **Groq** (fast, free tier): https://console.groq.com/ + - **Mistral**: https://console.mistral.ai/ + - **DeepSeek**: https://platform.deepseek.com/ + - **xAI (Grok)**: https://console.x.ai/ + +## Step 1: Create Configuration (1 min) + +Create a new folder `open-notebook` and add this file: + +**docker-compose.yml**: +```yaml +services: + surrealdb: + image: surrealdb/surrealdb:v2 + command: start --user root --pass password --bind 0.0.0.0:8000 memory + ports: + - "8000:8000" + + open_notebook: + image: lfnovo/open_notebook:v1-latest + ports: + - "8502:8502" # Web UI + - "5055:5055" # API + environment: + # Choose ONE provider (uncomment your choice): + + # OpenRouter - 100+ models with one API key + - OPENROUTER_API_KEY=sk-or-... + + # Anthropic (Claude) - Excellent reasoning + # - ANTHROPIC_API_KEY=sk-ant-... + + # Google (Gemini) - Large context, cost-effective + # - GOOGLE_API_KEY=... + + # Groq - Ultra-fast inference, free tier available + # - GROQ_API_KEY=gsk_... + + # Mistral - European provider, good quality + # - MISTRAL_API_KEY=... + + # Database (required) + - SURREAL_URL=ws://surrealdb:8000/rpc + - SURREAL_USER=root + - SURREAL_PASSWORD=password + - SURREAL_NAMESPACE=open_notebook + - SURREAL_DATABASE=open_notebook + volumes: + - ./notebook_data:/app/data + depends_on: + - surrealdb + restart: always + +``` + +**Edit the file:** +- Uncomment ONE provider and add your API key +- Comment out or remove the others + +--- + +## Step 2: Start Services (1 min) + +Open terminal in your `open-notebook` folder: + +```bash +docker compose up -d +``` + +Wait 15-20 seconds for services to start. + +--- + +## Step 3: Access Open Notebook (instant) + +Open your browser: +``` +http://localhost:8502 +``` + +You should see the Open Notebook interface! + +--- + +## Step 4: Configure Your Model (1 min) + +1. Go to **Settings** (gear icon) +2. Navigate to **Models** +3. Select your provider's model: + +| Provider | Recommended Model | Notes | +|----------|-------------------|-------| +| **OpenRouter** | `anthropic/claude-3.5-sonnet` | Access 100+ models | +| **Anthropic** | `claude-3-5-sonnet-latest` | Best reasoning | +| **Google** | `gemini-2.0-flash` | Large context, fast | +| **Groq** | `llama-3.3-70b-versatile` | Ultra-fast | +| **Mistral** | `mistral-large-latest` | Strong European option | + +4. Click **Save** + +--- + +## Step 5: Create Your First Notebook (1 min) + +1. Click **New Notebook** +2. Name: "My Research" +3. Click **Create** + +--- + +## Step 6: Add Content & Chat (2 min) + +1. Click **Add Source** +2. Choose **Web Link** +3. Paste any article URL +4. Wait for processing +5. Go to **Chat** and ask questions! 
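+
+While the source processes, you can optionally confirm from a terminal that both containers are healthy. A minimal check, assuming the compose file above (where the app service is named `open_notebook`):
+
+```bash
+# API health check - should return {"status": "healthy"}
+curl http://localhost:5055/health
+
+# Follow the app logs to watch source processing
+docker compose logs -f open_notebook
+```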
+
+---
+
+## Verification Checklist
+
+- [ ] Docker is running
+- [ ] You can access `http://localhost:8502`
+- [ ] Models are configured for your provider
+- [ ] You created a notebook
+- [ ] Chat works
+
+**All checked?** You're ready to research!
+
+---
+
+## Provider Comparison
+
+| Provider | Speed | Quality | Context | Cost |
+|----------|-------|---------|---------|------|
+| **OpenRouter** | Varies | Varies | Varies | Varies (100+ models) |
+| **Anthropic** | Medium | Excellent | 200K | $$$ |
+| **Google** | Fast | Very Good | 1M+ | $$ |
+| **Groq** | Ultra-fast | Good | 128K | $ (free tier) |
+| **Mistral** | Fast | Good | 128K | $$ |
+| **DeepSeek** | Medium | Very Good | 64K | $ |
+
+---
+
+## Using Multiple Providers
+
+You can enable multiple providers simultaneously:
+
+```yaml
+environment:
+  - OPENROUTER_API_KEY=sk-or-...
+  - ANTHROPIC_API_KEY=sk-ant-...
+  - GOOGLE_API_KEY=...
+  - GROQ_API_KEY=gsk_...
+```
+
+Then switch between them in **Settings** > **Models** as needed.
+
+---
+
+## Troubleshooting
+
+### "Model not found" Error
+
+1. Verify your API key is correct (no extra spaces)
+2. Check you have credits/access for the model
+3. Restart: `docker compose restart open_notebook`
+
+### "Cannot connect to server"
+
+```bash
+docker ps                # Check all services running
+docker compose logs      # View logs
+docker compose restart   # Restart everything
+```
+
+### Provider-Specific Issues
+
+**Anthropic**: Ensure key starts with `sk-ant-`
+**Google**: Use AI Studio key, not Cloud Console
+**Groq**: Free tier has rate limits; upgrade if needed
+
+---
+
+## Cost Estimates
+
+Approximate costs per 1K tokens:
+
+| Provider | Input | Output |
+|----------|-------|--------|
+| Anthropic (Sonnet) | $0.003 | $0.015 |
+| Google (Flash) | $0.0001 | $0.0004 |
+| Groq (Llama 70B) | Free tier available | - |
+| Mistral (Large) | $0.002 | $0.006 |
+
+Check provider websites for current pricing.
+
+---
+
+## Next Steps
+
+1. **Add Your Content**: PDFs, web links, documents
+2. **Explore Features**: Podcasts, transformations, search
+3. **Full Documentation**: [See all features](../3-USER-GUIDE/index.md)
+
+---
+
+**Need help?** Join our [Discord community](https://discord.gg/37XJPXfz2w)!
diff --git a/docs/0-START-HERE/quick-start-local.md b/docs/0-START-HERE/quick-start-local.md
new file mode 100644
index 00000000..ef0f3557
--- /dev/null
+++ b/docs/0-START-HERE/quick-start-local.md
@@ -0,0 +1,281 @@
+# Quick Start - Local & Private (5 minutes)
+
+Get Open Notebook running with **100% local AI** using Ollama. No cloud API keys needed, completely private.
+
+## Prerequisites
+
+1. **Docker Desktop** installed
+   - [Download here](https://www.docker.com/products/docker-desktop/)
+   - Already have it? Skip to step 2
+
+2. **Local LLM** - Choose one:
+   - **Ollama** (recommended): [Download here](https://ollama.ai/)
+   - **LM Studio** (GUI alternative): [Download here](https://lmstudio.ai)
+
+## Step 1: Choose Your Setup (1 min)
+
+### 🏠 Local Machine (Same Computer)
+Everything runs on your machine. Recommended for testing/learning.
+
+### 🌐 Remote Server (Raspberry Pi, NAS, Cloud VM)
+Run on a different computer, access from another. Needs network configuration; see the sketch below.
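+
+For the remote path, the one extra step is telling the frontend where to reach the API, since `localhost` only works on the server itself. A minimal sketch, assuming a hypothetical server IP of `192.168.1.100` (the `API_URL` variable is covered in the installation guide's environment reference):
+
+```bash
+# In docker-compose.yml, add API_URL to the open_notebook service's
+# environment so browsers on other machines can reach the backend:
+#
+#   environment:
+#     - API_URL=http://192.168.1.100:5055
+#
+# (replace 192.168.1.100 with your server's IP or hostname)
+# Then apply the change:
+docker compose up -d
+```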
+
+---
+
+## Step 2: Create Configuration (1 min)
+
+Create a new folder `open-notebook-local` and add this file:
+
+**docker-compose.yml**:
+```yaml
+services:
+  surrealdb:
+    image: surrealdb/surrealdb:v2
+    command: start --user root --pass password --bind 0.0.0.0:8000 memory
+    ports:
+      - "8000:8000"
+
+  open_notebook:
+    image: lfnovo/open_notebook:v1-latest-single
+    ports:
+      - "8502:8502"  # Web UI (React frontend)
+      - "5055:5055"  # API (required!)
+    environment:
+      # NO API KEYS NEEDED - Using Ollama (free, local)
+      - OLLAMA_API_BASE=http://ollama:11434
+
+      # Database (required)
+      - SURREAL_URL=ws://surrealdb:8000/rpc
+      - SURREAL_USER=root
+      - SURREAL_PASSWORD=password
+      - SURREAL_NAMESPACE=open_notebook
+      - SURREAL_DATABASE=open_notebook
+    volumes:
+      - ./notebook_data:/app/data
+      - ./surreal_data:/mydata
+    depends_on:
+      - surrealdb
+    restart: always
+
+  ollama:
+    image: ollama/ollama:latest
+    ports:
+      - "11434:11434"
+    volumes:
+      - ./ollama_models:/root/.ollama
+    environment:
+      # Optional: set GPU support if available
+      - OLLAMA_NUM_GPU=0
+    restart: always
+```
+
+**That's it!** No API keys, no secrets, completely private.
+
+---
+
+## Step 3: Start Services (1 min)
+
+Open terminal in your `open-notebook-local` folder:
+
+```bash
+docker compose up -d
+```
+
+Wait 10-15 seconds for all services to start.
+
+---
+
+## Step 4: Download a Model (2-3 min)
+
+Ollama needs at least one language model. Pick one:
+
+```bash
+# Fastest & smallest (recommended for testing)
+docker compose exec ollama ollama pull mistral
+
+# OR: Better quality but slower
+docker compose exec ollama ollama pull neural-chat
+
+# OR: Even better quality, more VRAM needed
+docker compose exec ollama ollama pull llama2
+```
+
+This downloads the model (1-5 minutes depending on your connection).
+
+---
+
+## Step 5: Access Open Notebook (instant)
+
+Open your browser:
+```
+http://localhost:8502
+```
+
+You should see the Open Notebook interface.
+
+---
+
+## Step 6: Configure Local Model (1 min)
+
+1. Click **Settings** (top right) → **Models**
+2. Set:
+   - **Language Model**: `ollama/mistral` (or whichever model you downloaded)
+   - **Embedding Model**: `ollama/nomic-embed-text` (auto-downloads if missing)
+3. Click **Save**
+
+---
+
+## Step 7: Create Your First Notebook (1 min)
+
+1. Click **New Notebook**
+2. Name: "My Private Research"
+3. Click **Create**
+
+---
+
+## Step 8: Add Local Content (1 min)
+
+1. Click **Add Source**
+2. Choose **Text**
+3. Paste some text or a local document
+4. Click **Add**
+
+---
+
+## Step 9: Chat With Your Content (1 min)
+
+1. Go to **Chat**
+2. Type: "What did you learn from this?"
+3. Click **Send**
+4. Watch as the local Ollama model responds!
+
+---
+
+## Verification Checklist
+
+- [ ] Docker is running
+- [ ] You can access `http://localhost:8502`
+- [ ] Models are configured
+- [ ] You created a notebook
+- [ ] Chat works with local model
+
+**All checked?** 🎉 You have a completely **private, offline** research assistant!
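+
+If anything in the checklist fails on the Ollama side, both of these should list the model you pulled (`/api/tags` is Ollama's standard model-listing endpoint):
+
+```bash
+# Ask the Ollama HTTP API which models are available locally
+curl http://localhost:11434/api/tags
+
+# Same check via the CLI inside the ollama service container
+docker compose exec ollama ollama list
+```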
+
+---
+
+## Advantages of Local Setup
+
+✅ **No API costs** - Free forever
+✅ **No internet required** - True offline capability
+✅ **Privacy first** - Your data never leaves your machine
+✅ **No subscriptions** - No monthly bills
+
+**Trade-off:** Slower than cloud models (depends on your CPU/GPU)
+
+---
+
+## Troubleshooting
+
+### "ollama: command not found"
+
+The `ollama` binary lives inside the Ollama container, and the container name depends on your Compose project. Let Compose resolve it for you:
+```bash
+docker ps  # (optional) confirm the Ollama container is running
+docker compose exec ollama ollama pull mistral
+```
+
+### Model Download Stuck
+
+Check internet connection and restart:
+```bash
+docker compose restart ollama
+```
+
+Then retry the model pull command.
+
+### "Address already in use" Error
+
+```bash
+docker compose down
+docker compose up -d
+```
+
+### Low Performance
+
+Check whether your models are actually using the GPU:
+```bash
+# Show loaded models and whether they run on GPU or CPU
+docker compose exec ollama ollama ps
+
+# Enable GPU in docker-compose.yml:
+# - OLLAMA_NUM_GPU=1
+```
+
+Then restart: `docker compose restart ollama`
+
+### Adding More Models
+
+```bash
+# List downloaded models
+docker compose exec ollama ollama list
+
+# Pull additional model
+docker compose exec ollama ollama pull neural-chat
+```
+
+---
+
+## Next Steps
+
+**Now that it's running:**
+
+1. **Add Your Own Content**: PDFs, documents, articles (see 3-USER-GUIDE)
+2. **Explore Features**: Podcasts, transformations, search
+3. **Full Documentation**: [See all features](../3-USER-GUIDE/index.md)
+4. **Scale Up**: Deploy to a server with better hardware for faster responses
+5. **Benchmark Models**: Try different models to find the speed/quality tradeoff you prefer
+
+---
+
+## Alternative: Using LM Studio Instead of Ollama
+
+**Prefer a GUI?** LM Studio is easier for non-technical users:
+
+1. Download LM Studio: https://lmstudio.ai
+2. Open the app, download a model from the library
+3. Go to "Local Server" tab, start server (port 1234)
+4. Update your docker-compose.yml:
+   ```yaml
+   environment:
+     - OPENAI_COMPATIBLE_BASE_URL=http://host.docker.internal:1234/v1
+     - OPENAI_COMPATIBLE_API_KEY=not-needed
+   ```
+5. Configure in Settings → Models → Select your LM Studio model
+
+**Note**: LM Studio runs outside Docker; use `host.docker.internal` to connect.
+
+---
+
+## Going Further
+
+- **Switch models**: Change in Settings → Models anytime
+- **Add more models**:
+  - Ollama: Run `ollama pull <model-name>`
+  - LM Studio: Download from the app library
+- **Deploy to server**: Same docker-compose.yml works anywhere
+- **Use cloud hybrid**: Keep some local models, add OpenAI/Anthropic for complex tasks
+
+---
+
+## Common Model Choices
+
+| Model | Speed | Quality | VRAM | Best For |
+|-------|-------|---------|------|----------|
+| **mistral** | Fast | Good | 4GB | Testing, general use |
+| **neural-chat** | Medium | Better | 6GB | Balanced, recommended |
+| **llama2** | Slow | Best | 8GB+ | Complex reasoning |
+| **phi** | Very Fast | Fair | 2GB | Minimal hardware |
+
+---
+
+**Need Help?** Join our [Discord community](https://discord.gg/37XJPXfz2w) - many users run local setups!
diff --git a/docs/0-START-HERE/quick-start-openai.md b/docs/0-START-HERE/quick-start-openai.md
new file mode 100644
index 00000000..a65fae12
--- /dev/null
+++ b/docs/0-START-HERE/quick-start-openai.md
@@ -0,0 +1,174 @@
+# Quick Start - OpenAI (5 minutes)
+
+Get Open Notebook running with OpenAI's GPT models. Fast, powerful, and simple.
+
+## Prerequisites
+
+1. **Docker Desktop** installed
+   - [Download here](https://www.docker.com/products/docker-desktop/)
+   - Already have it?
Skip to step 2
+
+2. **OpenAI API Key** (required)
+   - Go to https://platform.openai.com/api-keys
+   - Create account → Create new secret key
+   - Add at least $5 in credits to your account
+   - Copy the key (starts with `sk-`)
+
+## Step 1: Create Configuration (1 min)
+
+Create a new folder `open-notebook` and add this file:
+
+**docker-compose.yml**:
+```yaml
+services:
+  surrealdb:
+    image: surrealdb/surrealdb:v2
+    command: start --user root --pass password --bind 0.0.0.0:8000 memory
+    ports:
+      - "8000:8000"
+
+  open_notebook:
+    image: lfnovo/open_notebook:v1-latest
+    ports:
+      - "8502:8502"  # Web UI
+      - "5055:5055"  # API
+    environment:
+      # Your OpenAI key
+      - OPENAI_API_KEY=sk-...
+
+      # Database (required)
+      - SURREAL_URL=ws://surrealdb:8000/rpc
+      - SURREAL_USER=root
+      - SURREAL_PASSWORD=password
+      - SURREAL_NAMESPACE=open_notebook
+      - SURREAL_DATABASE=open_notebook
+    volumes:
+      - ./notebook_data:/app/data
+    depends_on:
+      - surrealdb
+    restart: always
+```
+
+**Edit the file:**
+- Replace `sk-...` with your actual OpenAI API key
+
+---
+
+## Step 2: Start Services (1 min)
+
+Open terminal in your `open-notebook` folder:
+
+```bash
+docker compose up -d
+```
+
+Wait 15-20 seconds for services to start.
+
+---
+
+## Step 3: Access Open Notebook (instant)
+
+Open your browser:
+```
+http://localhost:8502
+```
+
+You should see the Open Notebook interface!
+
+---
+
+## Step 4: Create Your First Notebook (1 min)
+
+1. Click **New Notebook**
+2. Name: "My Research"
+3. Click **Create**
+
+---
+
+## Step 5: Add a Source (1 min)
+
+1. Click **Add Source**
+2. Choose **Web Link**
+3. Paste: `https://en.wikipedia.org/wiki/Artificial_intelligence`
+4. Click **Add**
+5. Wait for processing (30-60 seconds)
+
+---
+
+## Step 6: Chat With Your Content (1 min)
+
+1. Go to **Chat**
+2. Type: "What is artificial intelligence?"
+3. Click **Send**
+4. Watch as GPT responds with information from your source!
+
+---
+
+## Verification Checklist
+
+- [ ] Docker is running
+- [ ] You can access `http://localhost:8502`
+- [ ] You created a notebook
+- [ ] You added a source
+- [ ] Chat works
+
+**All checked?** 🎉 You have a fully working AI research assistant!
+
+---
+
+## Using Different Models
+
+In your notebook, go to **Settings** → **Models** to choose:
+- `gpt-4o` - Best quality (recommended)
+- `gpt-4o-mini` - Fast and cheap (good for testing)
+
+---
+
+## Troubleshooting
+
+### "Port 8502 already in use"
+
+Change the port in docker-compose.yml:
+```yaml
+ports:
+  - "8503:8502"  # Use 8503 instead
+```
+
+Then access at `http://localhost:8503`
+
+### "API key not working"
+
+1. Double-check your API key (no extra spaces)
+2. Verify you added credits at https://platform.openai.com
+3. Restart: `docker compose restart open_notebook`
+
+### "Cannot connect to server"
+
+```bash
+docker ps                # Check all services running
+docker compose logs      # View logs
+docker compose restart   # Restart everything
+```
+
+---
+
+## Next Steps
+
+1. **Add Your Own Content**: PDFs, web links, documents
+2. **Explore Features**: Podcasts, transformations, search
+3. **Full Documentation**: [See all features](../3-USER-GUIDE/index.md)
+
+---
+
+## Cost Estimate
+
+OpenAI pricing (approximate):
+- **Conversation**: $0.01-0.10 per 1K tokens
+- **Embeddings**: $0.02 per 1M tokens
+- **Typical usage**: $1-5/month for light use, $20-50/month for heavy use
+
+Check https://openai.com/pricing for current rates.
+
+---
+
+**Need help?** Join our [Discord community](https://discord.gg/37XJPXfz2w)!
diff --git a/docs/1-INSTALLATION/docker-compose.md b/docs/1-INSTALLATION/docker-compose.md
new file mode 100644
index 00000000..8cc4e95d
--- /dev/null
+++ b/docs/1-INSTALLATION/docker-compose.md
@@ -0,0 +1,312 @@
+# Docker Compose Installation (Recommended)
+
+Multi-container setup with separate services. **Best for most users.**
+
+> **Alternative Registry:** All images are available on both Docker Hub (`lfnovo/open_notebook`) and GitHub Container Registry (`ghcr.io/lfnovo/open-notebook`). Use GHCR if Docker Hub is blocked or you prefer GitHub-native workflows.
+
+## Prerequisites
+
+- **Docker Desktop** installed ([Download](https://www.docker.com/products/docker-desktop/))
+- **5-10 minutes** of your time
+- **API key** for at least one AI provider (OpenAI recommended for beginners)
+
+## Step 1: Get an API Key (2 min)
+
+Choose at least one AI provider. **OpenAI recommended if you're unsure:**
+
+```
+OpenAI:     https://platform.openai.com/api-keys
+Anthropic:  https://console.anthropic.com/
+Google:     https://aistudio.google.com/
+Groq:       https://console.groq.com/
+```
+
+Add at least $5 in credits to your account.
+
+(Skip this if using Ollama for free local models)
+
+---
+
+## Step 2: Create Configuration (2 min)
+
+Create a folder `open-notebook` and add this file:
+
+**docker-compose.yml**:
+```yaml
+services:
+  surrealdb:
+    image: surrealdb/surrealdb:v2
+    command: start --user root --pass password --bind 0.0.0.0:8000 rocksdb:/mydata/database.db
+    ports:
+      - "8000:8000"
+    volumes:
+      - surreal_data:/mydata
+
+  open_notebook:
+    image: lfnovo/open_notebook:v1-latest
+    ports:
+      - "8502:8502"  # Web UI
+      - "5055:5055"  # API
+    environment:
+      # AI Provider (choose ONE)
+      - OPENAI_API_KEY=sk-...  # Your OpenAI key
+      # - ANTHROPIC_API_KEY=sk-ant-...  # Or Anthropic
+      # - GOOGLE_API_KEY=...  # Or Google
+
+      # Database
+      - SURREAL_URL=ws://surrealdb:8000/rpc
+      - SURREAL_USER=root
+      - SURREAL_PASSWORD=password
+      - SURREAL_NAMESPACE=open_notebook
+      - SURREAL_DATABASE=open_notebook
+    volumes:
+      - ./notebook_data:/app/data
+    depends_on:
+      - surrealdb
+    restart: always
+
+volumes:
+  surreal_data:
+```
+
+**Edit the file:**
+- Replace `sk-...` with your actual OpenAI API key
+- (Or use Anthropic, Google, Groq keys instead)
+- If you have multiple keys, uncomment the ones you want
+
+---
+
+## Step 3: Start Services (2 min)
+
+Open terminal in the `open-notebook` folder:
+
+```bash
+docker compose up -d
+```
+
+Wait 15-20 seconds for all services to start:
+```
+✅ surrealdb running on :8000
+✅ open_notebook running on :8502 (UI) and :5055 (API)
+```
+
+Check status:
+```bash
+docker compose ps
+```
+
+---
+
+## Step 4: Verify Installation (1 min)
+
+**API Health:**
+```bash
+curl http://localhost:5055/health
+# Should return: {"status": "healthy"}
+```
+
+**Frontend Access:**
+Open browser to:
+```
+http://localhost:8502
+```
+
+You should see the Open Notebook interface!
+
+---
+
+## Step 5: First Notebook (2 min)
+
+1. Click **New Notebook**
+2. Name: "My Research"
+3. Description: "Getting started"
+4. Click **Create**
+
+Done! You now have a fully working Open Notebook instance. 🎉
+
+---
+
+## Configuration
+
+### Using Different AI Providers
+
+Change the `environment` section in `docker-compose.yml`:
+
+```yaml
+# For Anthropic (Claude)
+- ANTHROPIC_API_KEY=sk-ant-...
+
+# For Google Gemini
+- GOOGLE_API_KEY=...
+
+# For Groq (fast, free tier available)
+- GROQ_API_KEY=...
+
+# For local Ollama (free, offline)
+- OLLAMA_API_BASE=http://ollama:11434
+```
+
+### Adding Ollama (Free Local Models)
+
+Add to `docker-compose.yml`:
+
+```yaml
+  ollama:
+    image: ollama/ollama:latest
+    ports:
+      - "11434:11434"
+    volumes:
+      - ollama_models:/root/.ollama
+    restart: always
+
+volumes:
+  surreal_data:
+  ollama_models:
+```
+
+Then point the `open_notebook` service at it:
+```yaml
+environment:
+  - OLLAMA_API_BASE=http://ollama:11434
+```
+
+Restart and pull a model:
+```bash
+docker compose restart
+docker compose exec ollama ollama pull mistral
+```
+
+---
+
+## Environment Variables Reference
+
+| Variable | Purpose | Example |
+|----------|---------|---------|
+| `OPENAI_API_KEY` | OpenAI API key | `sk-proj-...` |
+| `ANTHROPIC_API_KEY` | Anthropic/Claude key | `sk-ant-...` |
+| `SURREAL_URL` | Database connection | `ws://surrealdb:8000/rpc` |
+| `SURREAL_USER` | Database user | `root` |
+| `SURREAL_PASSWORD` | Database password | `password` |
+| `API_URL` | API external URL | `http://localhost:5055` |
+| `NEXT_PUBLIC_API_URL` | Frontend API URL | `http://localhost:5055` |
+
+---
+
+## Common Tasks
+
+### Stop Services
+```bash
+docker compose down
+```
+
+### View Logs
+```bash
+# All services
+docker compose logs -f
+
+# Specific service
+docker compose logs -f open_notebook
+```
+
+### Restart Services
+```bash
+docker compose restart
+```
+
+### Update to Latest Version
+```bash
+docker compose down
+docker compose pull
+docker compose up -d
+```
+
+### Remove All Data
+```bash
+docker compose down -v
+```
+
+---
+
+## Troubleshooting
+
+### "Cannot connect to API" Error
+
+1. Check if Docker is running:
+```bash
+docker ps
+```
+
+2. Check if services are running:
+```bash
+docker compose ps
+```
+
+3. Check API logs:
+```bash
+docker compose logs open_notebook
+```
+
+4. Wait longer - services can take 20-30 seconds to start on first run
+
+---
+
+### Port Already in Use
+
+If you get "Port 8502 already in use", change the port:
+
+```yaml
+ports:
+  - "8503:8502"  # Use 8503 instead
+  - "5055:5055"  # Keep API port same
+```
+
+Then access at `http://localhost:8503`
+
+---
+
+### API Key Not Working
+
+1. Double-check your API key in the file (no extra spaces)
+2. Verify key is valid at provider's website
+3. Check you added credits to your account
+4. Restart: `docker compose restart open_notebook`
+
+---
+
+### Database Connection Issues
+
+Check SurrealDB is running:
+```bash
+docker compose logs surrealdb
+```
+
+Reset database:
+```bash
+docker compose down -v
+docker compose up -d
+```
+
+---
+
+## Next Steps
+
+1. **Add Content**: Sources, notebooks, documents
+2. **Configure Models**: Settings → Models (choose your preferences)
+3. **Explore Features**: Chat, search, transformations
+4. **Read Guide**: [User Guide](../3-USER-GUIDE/index.md)
+
+---
+
+## Production Deployment
+
+For production use, see:
+- [Security Hardening](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/security.md)
+- [Reverse Proxy](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/reverse-proxy.md)
+
+---
+
+## Getting Help
+
+- **Discord**: [Community support](https://discord.gg/37XJPXfz2w)
+- **Issues**: [GitHub Issues](https://github.com/lfnovo/open-notebook/issues)
+- **Docs**: [Full documentation](../index.md)
diff --git a/docs/1-INSTALLATION/from-source.md b/docs/1-INSTALLATION/from-source.md
new file mode 100644
index 00000000..0aa9de32
--- /dev/null
+++ b/docs/1-INSTALLATION/from-source.md
@@ -0,0 +1,153 @@
+# From Source Installation
+
+Clone the repository and run locally.
**For developers and contributors.** + +## Prerequisites + +- **Python 3.11+** - [Download](https://www.python.org/) +- **Node.js 18+** - [Download](https://nodejs.org/) +- **Git** - [Download](https://git-scm.com/) +- **Docker** (for SurrealDB) - [Download](https://docker.com/) +- **uv** (Python package manager) - `curl -LsSf https://astral.sh/uv/install.sh | sh` +- API key from OpenAI or similar (or use Ollama for free) + +## Quick Setup (10 minutes) + +### 1. Clone Repository + +```bash +git clone https://github.com/lfnovo/open-notebook.git +cd open-notebook + +# If you forked it: +git clone https://github.com/YOUR_USERNAME/open-notebook.git +cd open-notebook +git remote add upstream https://github.com/lfnovo/open-notebook.git +``` + +### 2. Install Python Dependencies + +```bash +uv sync +uv pip install python-magic +``` + +### 3. Start SurrealDB + +```bash +# Terminal 1 +make database +# or: docker compose up surrealdb +``` + +### 4. Set Environment Variables + +```bash +cp .env.example .env +# Edit .env and add your API key: +# OPENAI_API_KEY=sk-... +# (or ANTHROPIC_API_KEY, GROQ_API_KEY, etc.) +``` + +### 5. Start API + +```bash +# Terminal 2 +make api +# or: uv run --env-file .env uvicorn api.main:app --host 0.0.0.0 --port 5055 +``` + +### 6. Start Frontend + +```bash +# Terminal 3 +cd frontend && npm install && npm run dev +``` + +### 7. Access + +- **Frontend**: http://localhost:3000 +- **API Docs**: http://localhost:5055/docs +- **Database**: http://localhost:8000 + +--- + +## Development Workflow + +### Code Quality + +```bash +# Format and lint Python +make ruff +# or: ruff check . --fix + +# Type checking +make lint +# or: uv run python -m mypy . +``` + +### Run Tests + +```bash +uv run pytest tests/ +``` + +### Common Commands + +```bash +# Start everything +make start-all + +# View API docs +open http://localhost:5055/docs + +# Check database migrations +# (Auto-run on API startup) + +# Clean up +make clean +``` + +--- + +## Troubleshooting + +### Python version too old + +```bash +python --version # Check version +uv sync --python 3.11 # Use specific version +``` + +### npm: command not found + +Install Node.js from https://nodejs.org/ + +### Database connection errors + +```bash +docker ps # Check SurrealDB running +docker logs surrealdb # View logs +``` + +### Port 5055 already in use + +```bash +# Use different port +uv run uvicorn api.main:app --port 5056 +``` + +--- + +## Next Steps + +1. Read [Development Guide](../7-DEVELOPMENT/quick-start.md) +2. See [Architecture Overview](../7-DEVELOPMENT/architecture.md) +3. Check [Contributing Guide](../7-DEVELOPMENT/contributing.md) + +--- + +## Getting Help + +- **Discord**: [Community](https://discord.gg/37XJPXfz2w) +- **Issues**: [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) diff --git a/docs/1-INSTALLATION/index.md b/docs/1-INSTALLATION/index.md new file mode 100644 index 00000000..95ad0a0c --- /dev/null +++ b/docs/1-INSTALLATION/index.md @@ -0,0 +1,153 @@ +# Installation Guide + +Choose your installation route based on your setup and use case. + +## Quick Decision: Which Route? 
### 🚀 I want the easiest setup (Recommended for most)
**→ [Docker Compose](docker-compose.md)** - Multi-container setup, production-ready
- ✅ All features working
- ✅ Clear separation of services
- ✅ Easy to scale
- ✅ Works on Mac, Windows, Linux
- ⏱️ 5 minutes to running

---

### 🏠 I want everything in one container (Simplified)
**→ [Single Container](single-container.md)** - All-in-one for simple deployments
- ✅ Minimal configuration
- ✅ Lower resource usage
- ✅ Good for shared hosting
- ✅ Works on PikaPods, Railway, etc.
- ⏱️ 3 minutes to running

---

### 👨‍💻 I want to develop/contribute (Developers only)
**→ [From Source](from-source.md)** - Clone repo, set up locally
- ✅ Full control over code
- ✅ Easy to debug
- ✅ Can modify and test
- ⚠️ Requires Python 3.11+, Node.js 18+
- ⏱️ 10 minutes to running

---

## System Requirements

### Minimum
- **RAM**: 4GB
- **Storage**: 2GB for app + space for documents
- **CPU**: Any modern processor
- **Network**: Internet connection (not required for fully local Ollama setups)

### Recommended
- **RAM**: 8GB+
- **Storage**: 10GB+ for documents and models
- **CPU**: Multi-core processor
- **GPU**: Optional (speeds up local AI models)

---

## AI Provider Options

### Cloud-Based (Pay-as-you-go)
- **OpenAI** - GPT-4, GPT-4o, fast and capable
- **Anthropic (Claude)** - Claude 3.5 Sonnet, excellent reasoning
- **Google Gemini** - Multimodal, cost-effective
- **Groq** - Ultra-fast inference
- **Others**: Mistral, DeepSeek, xAI, OpenRouter

**Cost**: Usually $0.01-$0.10 per 1K tokens, depending on model
**Speed**: Fast (sub-second)
**Privacy**: Your data is sent to the cloud

### Local (Free, Private)
- **Ollama** - Run open-source models locally
- **LM Studio** - Desktop app for local models
- **Hugging Face models** - Download and run

**Cost**: $0 (just electricity)
**Speed**: Depends on your hardware (slow to medium)
**Privacy**: 100% offline

---

## Choose a Route

**Already know which way to go?** Pick your installation path:

- [Docker Compose](docker-compose.md) - **Most users**
- [Single Container](single-container.md) - **Shared hosting**
- [From Source](from-source.md) - **Developers**

> **Privacy-first?** Any installation method works with Ollama for 100% local AI. See [Local Quick Start](../0-START-HERE/quick-start-local.md).

---

## Pre-Installation Checklist

Before installing, you'll need:

- [ ] **Docker** (for Docker routes) or **Python 3.11+ and Node.js 18+** (for source)
- [ ] **AI Provider API key** (OpenAI, Anthropic, etc.) OR willingness to use free local models
- [ ] **At least 4GB RAM** available
- [ ] **Stable internet** (or an offline setup with Ollama)

---

## Detailed Installation Instructions

### For Docker Users
1. Install [Docker Desktop](https://docker.com/products/docker-desktop)
2. Choose: [Docker Compose](docker-compose.md) or [Single Container](single-container.md)
3. Follow the step-by-step guide
4. Access at `http://localhost:8502`

### For Source Installation (Developers)
1. Have Python 3.11+, Node.js 18+, Git installed
2. Follow [From Source](from-source.md)
3. Run `make start-all`
4. Access at `http://localhost:3000` (frontend) or `http://localhost:5055` (API)

---

## After Installation

Once you're up and running:

1. **Configure Models** - Choose your AI provider in Settings
2. **Create First Notebook** - Start organizing research
3. **Add Sources** - PDFs, web links, documents
4. **Explore Features** - Chat, search, transformations
5. 
**Read Full Guide** - [User Guide](../3-USER-GUIDE/index.md) + +--- + +## Troubleshooting During Installation + +**Having issues?** Check the troubleshooting section in your chosen installation guide, or see [Quick Fixes](../6-TROUBLESHOOTING/quick-fixes.md). + +--- + +## Need Help? + +- **Discord**: [Join community](https://discord.gg/37XJPXfz2w) +- **GitHub Issues**: [Report problems](https://github.com/lfnovo/open-notebook/issues) +- **Docs**: See [Full Documentation](../index.md) + +--- + +## Production Deployment + +Installing for production use? See additional resources: + +- [Security Hardening](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/security.md) +- [Reverse Proxy Setup](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/reverse-proxy.md) +- [Performance Tuning](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/retry-configuration.md) + +--- + +**Ready to install?** Pick a route above! ⬆️ diff --git a/docs/1-INSTALLATION/single-container.md b/docs/1-INSTALLATION/single-container.md new file mode 100644 index 00000000..23214142 --- /dev/null +++ b/docs/1-INSTALLATION/single-container.md @@ -0,0 +1,121 @@ +# Single Container Installation + +All-in-one container setup. **Simpler than Docker Compose, but less flexible.** + +**Best for:** PikaPods, Railway, shared hosting, minimal setups + +> **Alternative Registry:** Images available on both Docker Hub (`lfnovo/open_notebook:v1-latest-single`) and GitHub Container Registry (`ghcr.io/lfnovo/open-notebook:v1-latest-single`). + +> ⚠️ **Note**: While this is a simple way to get started, we recommend [Docker Compose](docker-compose.md) for most users. Docker Compose is more flexible and will make it easier if we add more services to the setup in the future. This single-container option is best for platforms that specifically require it (PikaPods, Railway, etc.). + +## Prerequisites + +- Docker installed (for local testing) +- API key from OpenAI, Anthropic, or another provider +- 5 minutes + +## Quick Setup + +### For Local Testing (Docker) + +```yaml +# docker-compose.yml +services: + open_notebook: + image: lfnovo/open_notebook:v1-latest-single + ports: + - "8502:8502" # Web UI (React frontend) + - "5055:5055" # API + environment: + - OPENAI_API_KEY=sk-... + - SURREAL_URL=ws://localhost:8000/rpc + - SURREAL_USER=root + - SURREAL_PASSWORD=password + - SURREAL_NAMESPACE=open_notebook + - SURREAL_DATABASE=open_notebook + volumes: + - ./data:/app/data + restart: always +``` + +Run: +```bash +docker compose up -d +``` + +Access: `http://localhost:8502` + +### For Cloud Platforms + +**PikaPods:** +1. Click "New App" +2. Search "Open Notebook" +3. Set environment variables +4. Click "Deploy" + +**Railway:** +1. Create new project +2. Add `lfnovo/open_notebook:v1-latest-single` +3. Set environment variables +4. Deploy + +**Render:** +1. Create new Web Service +2. Use Docker image: `lfnovo/open_notebook:v1-latest-single` +3. Set environment variables in dashboard +4. Configure persistent disk for `/app/data` and `/mydata` + +**DigitalOcean App Platform:** +1. Create new app from Docker Hub +2. Use image: `lfnovo/open_notebook:v1-latest-single` +3. Set port to 8502 +4. Add environment variables +5. Configure persistent storage + +**Heroku:** +```bash +# Using heroku.yml +heroku container:push web +heroku container:release web +heroku config:set OPENAI_API_KEY=sk-... +``` + +**Coolify:** +1. Add new service → Docker Image +2. Image: `lfnovo/open_notebook:v1-latest-single` +3. Port: 8502 +4. 
Add environment variables +5. Enable persistent volumes +6. Coolify handles HTTPS automatically + +--- + +## Environment Variables + +| Variable | Purpose | Example | +|----------|---------|---------| +| `OPENAI_API_KEY` | API key | `sk-...` | +| `SURREAL_URL` | Database | `ws://localhost:8000/rpc` | +| `SURREAL_USER` | DB user | `root` | +| `SURREAL_PASSWORD` | DB password | `password` | +| `API_URL` | External URL (for remote access) | `https://myapp.example.com` | + +--- + +## Limitations vs Docker Compose + +| Feature | Single Container | Docker Compose | +|---------|------------------|-----------------| +| Setup time | 2 minutes | 5 minutes | +| Complexity | Minimal | Moderate | +| Services | All bundled | Separated | +| Scalability | Limited | Excellent | +| Memory usage | ~800MB | ~1.2GB | + +--- + +## Next Steps + +Same as Docker Compose setup - just access via `http://localhost:8502` (local) or your platform's URL (cloud). + +See [Docker Compose](docker-compose.md) for full post-install guide. diff --git a/docs/2-CORE-CONCEPTS/ai-context-rag.md b/docs/2-CORE-CONCEPTS/ai-context-rag.md new file mode 100644 index 00000000..5a2cec35 --- /dev/null +++ b/docs/2-CORE-CONCEPTS/ai-context-rag.md @@ -0,0 +1,450 @@ +# AI Context & RAG - How Open Notebook Uses Your Research + +Open Notebook uses different approaches to make AI models aware of your research depending on the feature. This section explains **RAG** (used in Ask) and **full-content context** (used in Chat). + +--- + +## The Problem: Making AI Aware of Your Data + +### Traditional Approaches (and their problems) + +**Option 1: Fine-Tuning** +- Train the model on your data +- Pro: Model becomes specialized +- Con: Expensive, slow, permanent (can't unlearn) + +**Option 2: Send Everything to Cloud** +- Upload all your data to ChatGPT/Claude API +- Pro: Works well, fast +- Con: Privacy nightmare, data leaves your control, expensive + +**Option 3: Ignore Your Data** +- Just use the base model without your research +- Pro: Private, free +- Con: AI doesn't know anything about your specific topic + +### Open Notebook's Dual Approach + +**For Chat**: Sends the entire selected content to the LLM +- Simple and transparent: You select sources, they're sent in full +- Maximum context: AI sees everything you choose +- You control which sources are included + +**For Ask (RAG)**: Retrieval-Augmented Generation +- RAG = Retrieval-Augmented Generation +- The insight: *Search your content, find relevant pieces, send only those* +- Automatic: AI decides what's relevant based on your question + +--- + +## How RAG Works: Three Stages + +### Stage 1: Content Preparation + +When you upload a source, Open Notebook prepares it for retrieval: + +``` +1. EXTRACT TEXT + PDF → text + URL → webpage text + Audio → transcribed text + Video → subtitles + transcription + +2. CHUNK INTO PIECES + Long documents → break into ~500-word chunks + Why? AI context has limits; smaller pieces are more precise + +3. CREATE EMBEDDINGS + Each chunk → semantic vector (numbers representing meaning) + Why? Allows finding chunks by similarity, not just keywords + +4. 
STORE IN DATABASE
   Chunks + embeddings + metadata → searchable storage
```

**Example:**
```
Source: "AI Safety Research 2026" (50-page PDF)
↓
Extracted: 50 pages of text
↓
Chunked: 150 chunks (~500 words each)
↓
Embedded: Each chunk gets a vector (1536 numbers for OpenAI)
↓
Stored: Ready for search
```

---

### Stage 2: Query Time (What You Search For)

When you ask a question, the system finds relevant content:

```
1. YOU ASK A QUESTION
   "What does the paper say about alignment?"

2. SYSTEM CONVERTS QUESTION TO EMBEDDING
   Your question → vector (same way chunks are vectorized)

3. SIMILARITY SEARCH
   Find chunks most similar to your question
   (using vector math, not keyword matching)

4. RETURN TOP RESULTS
   Usually top 5-10 most similar chunks

5. YOU GET BACK
   ✓ The relevant chunks
   ✓ Where they came from (sources + page numbers)
   ✓ Relevance scores
```

**Example:**
```
Q: "What does the paper say about alignment?"
↓
Q vector: [0.23, -0.51, 0.88, ..., 0.12]
↓
Search: Compare to all chunk vectors
↓
Results:
  - Chunk 47 (alignment section): similarity 0.94
  - Chunk 63 (safety approaches): similarity 0.88
  - Chunk 12 (related work): similarity 0.71
```

---

### Stage 3: Augmentation (How AI Uses It)

Now you have the relevant pieces. The AI uses them:

```
SYSTEM BUILDS A PROMPT:
  "You are an AI research assistant.

   The user has the following research materials:
   [CHUNK 47 CONTENT]
   [CHUNK 63 CONTENT]

   User question: 'What does the paper say about alignment?'

   Answer based on the above materials."

AI RESPONDS:
  "Based on the research materials, the paper approaches
   alignment through [pulls from chunks] and emphasizes
   [pulls from chunks]..."

SYSTEM ADDS CITATIONS:
  "- See research materials page 15 for approach details
   - See research materials page 23 for emphasis on X"
```

---

## Two Search Modes: Exact vs. Semantic

Open Notebook provides two different search strategies for different goals.

### 1. Text Search (Keyword Matching)

**How it works:**
- Uses BM25 ranking (a classic relevance-ranking algorithm used by many search engines)
- Finds chunks containing your keywords
- Ranks by relevance (how often keywords appear, position, etc.)

**When to use:**
- "I remember the exact phrase 'X' and want to find it"
- "I'm looking for a specific name or number"
- "I need the exact quote"

**Example:**
```
Search: "transformer architecture"
Results:
  1. Chunk with "transformer architecture" 3 times
  2. Chunk with "transformer" and "architecture" separately
  3. Chunk with "transformer-based models"
```

### 2. Vector Search (Semantic Similarity)

**How it works:**
- Converts your question to a vector (number embedding)
- Finds chunks with similar vectors
- No keywords needed—finds conceptually similar content

**When to use:**
- "Find content about X (without saying exact words)"
- "I'm exploring a concept"
- "Find similar ideas even if worded differently"

**Example:**
```
Search: "what's the mechanism for model understanding?"
Results (no "understanding" in any chunk):
  1. Chunk about interpretability and mechanistic analysis
  2. Chunk about feature analysis
  3. Chunk about attention mechanisms

Why? The vectors are semantically similar to your concept. 
+``` + +--- + +## Context Management: Your Control Panel + +Here's where Open Notebook is different: **You decide what the AI sees.** + +### The Three Levels + +| Level | What's Shared | Example Cost | Privacy | Use Case | +|-------|---------------|--------------|---------|----------| +| **Full Content** | Complete source text | 10,000 tokens | Low | Detailed analysis, close reading | +| **Summary Only** | AI-generated summary | 2,000 tokens | High | Background material, references | +| **Not in Context** | Nothing | 0 tokens | Max | Confidential, irrelevant, or archived | + +### How It Works + +**Full Content:** +``` +You: "What's the methodology in paper A?" +System: + - Searches paper A + - Retrieves full paper content (or large chunks) + - Sends to AI: "Here's paper A. Answer about methodology." + - AI analyzes complete content + - Result: Detailed, precise answer +``` + +**Summary Only:** +``` +You: "I want to chat using paper A and B" +System: + - For Paper A: Sends AI-generated summary (not full text) + - For Paper B: Sends full content (detailed analysis) + - AI sees 2 sources but in different detail levels + - Result: Uses summaries for context, details for focused content +``` + +**Not in Context:** +``` +You: "I have 10 sources but only want 5 in context" +System: + - Paper A-E: In context (sent to AI) + - Paper F-J: Not in context (AI can't see them, doesn't search them) + - AI never knows these 5 sources exist + - Result: Tight, focused context +``` + +### Why This Matters + +**Privacy**: You control what leaves your system +``` +Scenario: Confidential company docs + public research +Control: Public research in context → Confidential docs excluded +Result: AI never sees confidential content +``` + +**Cost**: You control token usage +``` +Scenario: 100 sources for background + 5 for detailed analysis +Control: Full content for 5 detailed, summaries for 95 background +Result: 80% lower token cost than sending everything +``` + +**Quality**: You control what the AI focuses on +``` +Scenario: 20 sources, question requires deep analysis +Control: Full content for relevant source, exclude others +Result: AI doesn't get distracted; gives better answer +``` + +--- + +## The Difference: Chat vs. Ask + +**IMPORTANT**: These use completely different approaches! + +### Chat: Full-Content Context (NO RAG) + +**How it works:** +``` +YOU: + 1. Select which sources to include in context + 2. Set context level (full/summary/excluded) + 3. Ask question + +SYSTEM: + - Takes ALL selected sources (respecting context levels) + - Sends the ENTIRE content to the LLM at once + - NO search, NO retrieval, NO chunking + - AI sees everything you selected + +AI: + - Responds based on the full content you provided + - Can reference any part of selected sources + - Conversational: context stays for follow-ups +``` + +**Use this when**: +- You know which sources are relevant +- You want conversational back-and-forth +- You want AI to see the complete context +- You're doing close reading or analysis + +**Advantages:** +- Simple and transparent +- AI sees everything (no missed content) +- Conversational flow + +**Limitations:** +- Limited by LLM context window +- You must manually select relevant sources +- Sends more tokens (higher cost with many sources) + +--- + +### Ask: RAG - Automatic Retrieval + +**How it works:** +``` +YOU: + Ask one complex question + +SYSTEM: + 1. Analyzes your question + 2. Searches across ALL your sources automatically + 3. Finds relevant chunks using vector similarity + 4. 
Retrieves only the most relevant pieces + 5. Sends ONLY those chunks to the LLM + 6. Synthesizes into comprehensive answer + +AI: + - Sees ONLY the retrieved chunks (not full sources) + - Answers based on what was found to be relevant + - One-shot answer (not conversational) +``` + +**Use this when**: +- You have many sources and don't know which are relevant +- You want the AI to search automatically +- You need a comprehensive answer to a complex question +- You want to minimize tokens sent to LLM + +**Advantages:** +- Automatic search (you don't pick sources) +- Works across many sources at once +- Cost-effective (sends only relevant chunks) + +**Limitations:** +- Not conversational (single question/answer) +- AI only sees retrieved chunks (might miss context) +- Search quality depends on how well question matches content + +--- + +## What This Means: Privacy by Design + +Open Notebook's RAG approach gives you something you don't get with ChatGPT or Claude directly: + +**You control the boundary between:** +- What stays private (on your system) +- What goes to AI (explicitly chosen) +- What the AI can see (context levels) + +### The Audit Trail + +Because everything is retrieved explicitly, you can ask: +- "Which sources did the AI use for this answer?" → See citations +- "What exactly did the AI see?" → See chunks in context level +- "Is the AI's claim actually in my sources?" → Verify citation + +This prevents hallucinations or misrepresentation better than most systems. + +--- + +## How Embeddings Work (Simplified) + +The magic of semantic search comes from embeddings. Here's the intuition: + +### The Idea +Instead of storing text, store it as a list of numbers (vectors) that represent "meaning." + +``` +Chunk: "The transformer uses attention mechanisms" +Vector: [0.23, -0.51, 0.88, 0.12, ..., 0.34] + (1536 numbers for OpenAI) + +Another chunk: "Attention allows models to focus on relevant parts" +Vector: [0.24, -0.48, 0.87, 0.15, ..., 0.35] + (similar numbers = similar meaning!) +``` + +### Why This Works +Words that are semantically similar produce similar vectors. So: +- "alignment" and "interpretability" have similar vectors +- "transformer" and "attention" have related vectors +- "cat" and "dog" are more similar than "cat" and "radiator" + +### How Search Works +``` +Your question: "How do models understand their decisions?" +Question vector: [0.25, -0.50, 0.86, 0.14, ..., 0.33] + +Compare to all stored vectors. Find the most similar: +- Chunk about interpretability: similarity 0.94 +- Chunk about explainability: similarity 0.91 +- Chunk about feature attribution: similarity 0.88 + +Return the top matches. +``` + +This is why semantic search finds conceptually similar content even when words are different. + +--- + +## Key Design Decisions + +### 1. Search, Don't Train +**Why?** Fine-tuning is slow and permanent. Search is flexible and reversible. + +### 2. Explicit Retrieval, Not Implicit Knowledge +**Why?** You can verify what the AI saw. You have audit trails. You control what leaves your system. + +### 3. Multiple Search Types +**Why?** Different questions need different search (keyword vs. semantic). Giving you both is more powerful. + +### 4. Context as a Permission System +**Why?** Not everything you save needs to reach AI. You control granularly. 
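
If you're comfortable reading code, the retrieval loop from Stages 1-3 and the embeddings discussion above can be sketched in a few lines. This is a minimal illustration of the idea, not Open Notebook's actual implementation: `embed` is a hypothetical callable standing in for whichever embedding provider you configured, and the chunk vectors are assumed to already exist in storage.

```python
# Minimal sketch of semantic retrieval, for illustration only.
from typing import Callable

import numpy as np


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """How strongly two embedding vectors point in the same direction."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))


def retrieve(
    question: str,
    chunk_vectors: dict[str, np.ndarray],  # chunk text -> stored embedding
    embed: Callable[[str], np.ndarray],    # hypothetical embedding call
    top_k: int = 5,
) -> list[tuple[float, str]]:
    """Embed the question, score every stored chunk, return the best matches."""
    q = embed(question)
    scored = [(cosine_similarity(q, vec), chunk) for chunk, vec in chunk_vectors.items()]
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return scored[:top_k]  # e.g. [(0.94, "chunk 47..."), (0.88, "chunk 63..."), ...]
```

Only the question is embedded at query time; the chunk vectors were computed once at ingestion (Stage 1) and come straight from storage, which is what keeps retrieval fast.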
+ +--- + +## Summary + +Open Notebook gives you **two ways** to work with AI: + +### Chat (Full-Content) +- Sends entire selected sources to LLM +- Manual control: you pick sources +- Conversational: back-and-forth dialog +- Transparent: you know exactly what AI sees +- Best for: focused analysis, close reading + +### Ask (RAG) +- Searches and retrieves relevant chunks automatically +- Automatic: AI finds what's relevant +- One-shot: single comprehensive answer +- Efficient: sends only relevant pieces +- Best for: broad questions across many sources + +**Both approaches:** +1. Keep your data private (doesn't leave your system by default) +2. Give you control (you choose which features to use) +3. Create audit trails (citations show what was used) +4. Support multiple AI providers + +**Coming Soon**: The community is working on adding RAG capabilities to Chat as well, giving you the best of both worlds. diff --git a/docs/2-CORE-CONCEPTS/chat-vs-transformations.md b/docs/2-CORE-CONCEPTS/chat-vs-transformations.md new file mode 100644 index 00000000..0cf401c4 --- /dev/null +++ b/docs/2-CORE-CONCEPTS/chat-vs-transformations.md @@ -0,0 +1,353 @@ +# Chat vs. Ask vs. Transformations - Which Tool for Which Job? + +Open Notebook offers different ways to work with your research. Understanding when to use each is key to using the system effectively. + +--- + +## The Three Interaction Modes + +### 1. CHAT - Conversational Exploration with Manual Context + +**What it is:** Have a conversation with AI about selected sources. + +**The flow:** +``` +1. You select which sources to include ("in context") +2. You ask a question +3. AI responds using ONLY those sources +4. You ask follow-up questions (context stays same) +5. You change sources or context level, then continue +``` + +**Context management:** You explicitly choose which sources the AI can see. + +**Conversational:** Multiple questions with shared history. + +**Example:** +``` +You: [Select sources: "paper1.pdf", "research_notes.txt"] + [Set context: Full content for paper1, Summary for notes] + +You: "What's the main argument in these sources?" +AI: "Paper 1 argues X [citation]. Your notes emphasize Y [citation]." + +You: "How do they differ?" +AI: "Paper 1 focuses on X [citation], while your notes highlight Y [citation]..." + +You: [Now select different sources] + +You: "Compare to this other perspective" +AI: "This new source takes a different approach..." +``` + +**Best for:** +- Exploring a focused topic with specific sources +- Having a dialogue (multiple back-and-forth questions) +- When you know which sources matter +- When you want tight control over what goes to AI + +--- + +### 2. ASK - Automated Comprehensive Search + +**What it is:** Ask one complex question, system automatically finds relevant content. + +**The flow:** +``` +1. You ask a comprehensive question +2. System analyzes the question +3. System automatically searches your sources +4. System retrieves relevant chunks +5. System synthesizes answer from all results +6. You get one detailed answer (not conversational) +``` + +**Context management:** Automatic. System figures out what's relevant. + +**Non-conversational:** One question → one answer. No follow-ups. + +**Example:** +``` +You: "How do these papers compare their approaches to alignment? + What does each one recommend?" 
+ +System: + - Breaks down the question into search strategies + - Searches all sources for alignment approaches + - Searches all sources for recommendations + - Retrieves top 10 relevant chunks + - Synthesizes: "Paper A recommends X [citation]. + Paper B recommends Y [citation]. + They differ in Z." + +You: [Get back one comprehensive answer] + [If you want to follow up, use Chat instead] +``` + +**Best for:** +- Comprehensive, one-time questions +- Comparing multiple sources at once +- When you want the system to decide what's relevant +- Complex questions that need multiple search angles +- When you don't need a back-and-forth conversation + +--- + +### 3. TRANSFORMATIONS - Template-Based Processing + +**What it is:** Apply a reusable template to a source and get structured output. + +**The flow:** +``` +1. You define a transformation (or choose a preset) + "Extract: main argument, methodology, limitations" + +2. You apply it to ONE source at a time + (You can repeat for other sources) + +3. For the source: + - Source content + transformation prompt → AI + - Result stored as new insight/note + +4. You get back + - Structured output (main argument, methodology, limitations) + - Saved as a note in your notebook +``` + +**Context management:** Works on one source at a time. + +**Reusable:** Apply the same template to different sources (one by one). + +**Note**: Currently processes one source at a time. Batch processing (multiple sources at once) is planned for a future release. + +**Example:** +``` +You: Define transformation + "For each academic paper, extract: + - Main research question + - Methodology used + - Key findings + - Limitations and gaps + - Recommended next research" + +You: Apply to paper 1 + +System: + - Runs the transformation on paper 1 + - Result stored as new note + +You: Apply same transformation to paper 2, 3, etc. + +After 10 papers: + - You have 10 structured notes with consistent format + - Perfect for writing a literature review or comparison +``` + +**Best for:** +- Extracting the same information from each source (run repeatedly) +- Creating structured summaries with consistent format +- Building a knowledge base of categorized insights +- When you want reusable templates you can apply to each source + +--- + +## Decision Tree: Which Tool to Use? + +``` +What are you trying to do? + +│ +├─→ "I want to have a conversation about this topic" +│ └─→ Is the conversation exploratory or fixed? 
+│ ├─→ Exploratory (I'll ask follow-ups) +│ │ └─→ USE: CHAT +│ │ +│ └─→ Fixed (One question → done) +│ └─→ Go to next question +│ +├─→ "I need to compare these sources or get a comprehensive answer" +│ └─→ USE: ASK +│ +├─→ "I want to extract the same info from each source (one at a time)" +│ └─→ USE: TRANSFORMATIONS (apply to each source) +│ +└─→ "I just want to read and search" + └─→ USE: Search (text or vector) + OR read your notes +``` + +--- + +## Side-by-Side Comparison + +| Aspect | CHAT | ASK | TRANSFORMATIONS | +|--------|------|-----|-----------------| +| **What's it for?** | Conversational exploration | Comprehensive Q&A | Template-based extraction | +| **# of questions** | Multiple (conversational) | One | One template per source | +| **Context control** | Manual (you choose) | Automatic (system searches) | One source at a time | +| **Conversational?** | Yes (follow-ups work) | No (one question only) | No (single operation) | +| **Output** | Natural conversation | Natural answer | Structured note | +| **Time** | Quick (back-and-forth) | Longer (comprehensive) | Per source | +| **Best when** | Exploring & uncertain | Need full picture | Want consistent format | +| **Model speed** | Any | Fast preferred | Any | + +--- + +## Workflow Examples + +### Example 1: Academic Research + +``` +Goal: Write literature review from 15 papers + +Step 1: TRANSFORMATIONS + - Define: "Extract abstract, methodology, findings, relevance" + - Apply to paper 1 → get structured note + - Apply to paper 2 → get structured note + - ... repeat for all 15 papers + - Result: 15 structured notes with consistent format + +Step 2: Read the notes + - Now you have consistent summaries + +Step 3: CHAT or ASK + - Chat: "Help me organize these by theme" + - Ask: "What are the common methodologies across these papers?" + +Step 4: Write your review + - Use the transformations as foundation + - Use chat/ask insights for structure +``` + +### Example 2: Product Research + +``` +Goal: Understand customer feedback from interviews + +Step 1: Add sources (interview transcripts) + +Step 2: ASK + - "What are the top 10 pain points mentioned?" + - Get comprehensive answer with citations + +Step 3: CHAT + - "Can you help me group these by severity?" + - Continue conversation to prioritize + +Step 4: TRANSFORMATIONS (optional) + - Define: "Extract: pain point, frequency, who mentioned it" + - Apply to each interview (one by one) + - Get structured data for analysis +``` + +### Example 3: Policy Analysis + +``` +Goal: Compare policy documents + +Step 1: Add all policy documents as sources + +Step 2: ASK + - "How do these policies differ on climate measures?" + - System searches all docs, gives comprehensive comparison + +Step 3: CHAT (if needed) + - "Which policy is most aligned with X goals?" + - Have discussion about trade-offs + +Step 4: Export notes + - Save AI responses as notes for reports +``` + +--- + +## Context Management: The Control Panel + +All three modes let you control what the AI sees. 
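
The subsections below spell out the per-mode details. As a rough sketch of the shared idea, here is how per-source context levels could translate into what the AI actually receives. This is illustrative only: the `Source` shape and `summary` field are invented for the example, not Open Notebook's real data model.

```python
# Illustrative only: assemble what the AI is allowed to see from
# per-source context levels (full / summary / excluded).
from dataclasses import dataclass


@dataclass
class Source:
    title: str
    full_text: str
    summary: str   # assumed to be AI-generated at ingestion
    level: str     # "full" | "summary" | "excluded"


def build_context(sources: list[Source]) -> str:
    parts = []
    for s in sources:
        if s.level == "full":
            parts.append(f"[{s.title}]\n{s.full_text}")           # complete text
        elif s.level == "summary":
            parts.append(f"[{s.title} (summary)]\n{s.summary}")   # condensed
        # "excluded" sources contribute nothing and never leave your system
    return "\n\n".join(parts)
```

The point is simply that the level is decided per source, before anything is sent to the model.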
+ +### In CHAT and TRANSFORMATIONS +``` +You choose: + - Which sources to include + - Context level for each: + ✓ Full Content (send complete text) + ✓ Summary Only (send AI summary, not full text) + ✓ Not in Context (exclude entirely) + +Example: + Paper A: Full Content (analyzing closely) + Paper B: Summary Only (background) + Paper C: Not in Context (confidential) +``` + +### In ASK +``` +Context is automatic: + - System searches ALL your sources + - Retrieves most relevant chunks + - Sends those to AI + +But you can: + - Search in specific notebook + - Filter by source type + - Use the results to decide context for follow-up Chat +``` + +--- + +## Model Selection + +Each mode works with different models: + +### CHAT +- **Any model** works fine +- Fast models (GPT-4o mini, Claude Haiku): Quick responses, good for conversation +- Powerful models (GPT-4o, Claude Sonnet): Better reasoning, better for complex topics + +### ASK +- **Fast models preferred** (because it processes multiple searches) +- Can use powerful models if you want deep synthesis +- Example: GPT-4 for strategy planning, GPT-4o-mini for quick facts + +### TRANSFORMATIONS +- **Any model** works +- Fast models (cost-effective for batch processing) +- Powerful models (better quality extractions) + +--- + +## Advanced: Chaining Modes Together + +You can combine these modes: + +``` +TRANSFORMATIONS → CHAT + 1. Use transformations to extract structured data + 2. Use chat to discuss the results + +ASK → TRANSFORMATIONS + 1. Use Ask to understand what matters + 2. Use Transformations to extract it from remaining sources + +CHAT → Save as Note → TRANSFORMATIONS + 1. Have conversation (Chat) + 2. Save good responses as notes + 3. Use those notes as context for transformations +``` + +--- + +## Summary: When to Use Each + +| Situation | Use | Why | +|-----------|-----|-----| +| "I want to explore a topic with follow-up questions" | **CHAT** | Conversational, you control context | +| "I need a comprehensive answer to one complex question" | **ASK** | Automatic search, synthesized answer | +| "I want consistent summaries from each source" | **TRANSFORMATIONS** | Template reuse, apply to each source | +| "I'm comparing two specific sources" | **CHAT** | Select just those 2, have discussion | +| "I need to categorize each source by X criteria" | **TRANSFORMATIONS** | Extract category from each source | +| "I want to understand the big picture across all sources" | **ASK** | Automatic comprehensive search | +| "I want to build a knowledge base" | **TRANSFORMATIONS** | Create structured note from each source | +| "I want to iterate on understanding" | **CHAT** | Multiple questions, refine thinking | + +The key insight: **Different questions need different tools.** Open Notebook gives you all three because research rarely fits one mode. diff --git a/docs/2-CORE-CONCEPTS/index.md b/docs/2-CORE-CONCEPTS/index.md new file mode 100644 index 00000000..4f005b3e --- /dev/null +++ b/docs/2-CORE-CONCEPTS/index.md @@ -0,0 +1,70 @@ +# Core Concepts - Understand the Mental Model + +Before diving into how to use Open Notebook, it's important to understand **how it thinks**. These core concepts explain the "why" behind the design. + +## The Five Mental Models + +### 1. [Notebooks, Sources, and Notes](notebooks-sources-notes.md) +How Open Notebook organizes your research. Understand the three-tier container structure and how information flows from raw materials to finished insights. + +**Key idea**: A notebook is a scoped research container. 
Sources are inputs (PDFs, URLs, etc.). Notes are outputs (your insights, AI-generated summaries, captured responses).

---

### 2. [AI Context & RAG](ai-context-rag.md)
How Open Notebook makes AI aware of your research - two different approaches.

**Key idea**: **Chat** sends entire selected sources to the LLM (full context, conversational). **Ask** uses RAG (retrieval-augmented generation) to automatically search and retrieve only relevant chunks. Different tools for different needs.

---

### 3. [Chat vs. Transformations](chat-vs-transformations.md)
Why Open Notebook has different interaction modes and when to use each one.

**Key idea**: Chat is conversational exploration (you control context). Transformations are insight extractions: they reduce content to smaller, denser pieces of concentrated information, which is much more suitable for an AI to use.

---

### 4. [Context Management](chat-vs-transformations.md#context-management-the-control-panel)
Your control panel for privacy and cost. Decide what data actually reaches AI.

**Key idea**: You choose three levels—not in context (private), summary only (condensed), or full content (complete access). This gives you fine-grained control.

---

### 5. [Podcasts Explained](podcasts-explained.md)
Why Open Notebook can turn research into audio and why this matters.

**Key idea**: Podcasts transform your research into a different consumption format. Instead of reading, someone can listen and absorb your insights passively.

---

## Read This Section If:

- **You're new to Open Notebook** — Start here to understand how the system works conceptually before learning the features
- **You're confused about Chat vs Ask** — Section 2 explains the difference (full-content vs RAG)
- **You're wondering when to use Chat vs Transformations** — Section 3 clarifies the differences
- **You want to understand privacy controls** — Section 4 shows you what you can control
- **You're curious about podcasts** — Section 5 explains the architecture and why it's different from competitors

---

## The Big Picture

Open Notebook is built on a simple insight: **Your research deserves to stay yours**.

That means:
- **Privacy by default** — Your data doesn't leave your infrastructure unless you explicitly choose
- **AI as a tool, not a gatekeeper** — You decide which sources the AI sees, not the AI deciding for you
- **Flexible consumption** — Read, listen, search, chat, or transform your research however makes sense

These core concepts explain how that works.

---

## Next Steps

1. **Just want to use it?** → Go to [User Guide](../3-USER-GUIDE/index.md)
2. **Want to understand it first?** → Read the 5 sections above (15 min)
3. **Setting up for the first time?** → Go to [Installation](../1-INSTALLATION/index.md)

diff --git a/docs/2-CORE-CONCEPTS/notebooks-sources-notes.md b/docs/2-CORE-CONCEPTS/notebooks-sources-notes.md new file mode 100644 index 00000000..7b3115cd --- /dev/null +++ b/docs/2-CORE-CONCEPTS/notebooks-sources-notes.md @@ -0,0 +1,284 @@
# Notebooks, Sources, and Notes - The Container Model

Open Notebook organizes research in three connected layers. Understanding this hierarchy is key to using the system effectively.
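
If it helps to see the containment as code, here is a conceptual sketch of the hierarchy diagrammed below. It is illustration only: the field names are invented for the example, and this is not the actual database schema.

```python
# Conceptual containment model only - not the real SurrealDB schema.
from dataclasses import dataclass, field


@dataclass
class Source:
    title: str  # raw material: PDF, URL, audio, video, text
    # immutable once added; processed into chunks + embeddings behind the scenes


@dataclass
class Note:
    text: str                                                   # your insight or an AI output
    cited_sources: list["Source"] = field(default_factory=list)  # audit trail


@dataclass
class Notebook:
    name: str
    description: str
    sources: list[Source] = field(default_factory=list)  # belong to exactly this notebook
    notes: list[Note] = field(default_factory=list)      # scoped here as well
```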
+ +## The Three-Layer Structure + +``` +┌─────────────────────────────────────┐ +│ NOTEBOOK (The Container) │ +│ "My AI Safety Research 2026" │ +├─────────────────────────────────────┤ +│ │ +│ SOURCES (The Raw Materials) │ +│ ├─ safety_paper.pdf │ +│ ├─ alignment_video.mp4 │ +│ └─ prompt_injection_article.html │ +│ │ +│ NOTES (The Processed Insights) │ +│ ├─ AI Summary (auto-generated) │ +│ ├─ Key Concepts (transformation) │ +│ ├─ My Research Notes (manual) │ +│ └─ Chat Insights (from conversation) +│ │ +└─────────────────────────────────────┘ +``` + +--- + +## 1. NOTEBOOKS - The Research Container + +### What Is a Notebook? + +A **notebook** is a *scoped container* for a research project or topic. It's your research workspace. + +Think of it like a physical notebook: everything inside is about the same topic, shares the same context, and builds toward the same goals. + +### What Goes In? + +- **A description** — "This notebook collects research on X topic" +- **Sources** — The raw materials you add +- **Notes** — Your insights and outputs +- **Conversation history** — Your chats and questions + +### Why This Matters + +**Isolation**: Each notebook is completely separate. Sources in Notebook A never appear in Notebook B. This lets you: +- Keep different research topics completely isolated +- Reuse source names across notebooks without conflicts +- Control which AI context applies to which research + +**Shared Context**: All sources and notes in a notebook inherit the notebook's context. If your notebook is titled "AI Safety 2026" with description "Focusing on alignment and interpretability," that context applies to all AI interactions within that notebook. + +**Parallel Projects**: You can have 10 notebooks running simultaneously. Each one is its own isolated research environment. + +### Example + +``` +Notebook: "Customer Research - Product Launch" +Description: "User interviews and feedback for Q1 2026 launch" + +→ All sources added to this notebook are about customer feedback +→ All notes generated are in that context +→ When you chat, the AI knows you're analyzing product launch feedback +→ Different from your "Market Analysis - Competitors" notebook +``` + +--- + +## 2. SOURCES - The Raw Materials + +### What Is a Source? + +A **source** is a *single piece of input material* — the raw content you bring in. Sources never change; they're just processed and indexed. + +### What Can Be a Source? + +- **PDFs** — Research papers, reports, documents +- **Web links** — Articles, blog posts, web pages +- **Audio files** — Podcasts, interviews, lectures +- **Video files** — Tutorials, presentations, recordings +- **Plain text** — Notes, transcripts, passages +- **Uploaded text** — Paste content directly + +### What Happens When You Add a Source? + +``` +1. EXTRACTION + File/URL → Extract text and metadata + (OCR for PDFs, web scraping for URLs, speech-to-text for audio) + +2. CHUNKING + Long text → Break into searchable chunks + (Prevents "too much context" in single query) + +3. EMBEDDING + Each chunk → Generate semantic vector + (Allows AI to find conceptually similar content) + +4. STORAGE + Chunks + vectors → Store in database + (Ready for search and retrieval) +``` + +### Key Properties + +**Immutable**: Once added, the source doesn't change. If you need a new version, add it as a new source. + +**Indexed**: Sources are automatically indexed for search (both text and semantic). + +**Scoped**: A source belongs to exactly one notebook. 
+ +**Referenceable**: Other sources and notes can reference this source by citation. + +### Example + +``` +Source: "openai_charter.pdf" +Type: PDF document + +What happens: +→ PDF is uploaded +→ Text is extracted (including images) +→ Text is split into 50 chunks (paragraphs, sections) +→ Each chunk gets an embedding vector +→ Now searchable by: "OpenAI's approach to safety" +``` + +--- + +## 3. NOTES - The Processed Insights + +### What Is a Note? + +A **note** is a *processed output* — something you created or AI created based on your sources. Notes are the "results" of your research work. + +### Types of Notes + +#### Manual Notes +You write them yourself. They're your original thinking, capturing: +- What you learned from sources +- Your analysis and interpretations +- Your next steps and questions + +#### AI-Generated Notes +Created by applying AI processing to sources: +- **Transformations** — Structured extraction (main points, key concepts, methodology) +- **Chat Responses** — Answers you saved from conversations +- **Ask Results** — Comprehensive answers saved to your notebook + +#### Captured Insights +Notes you explicitly saved from interactions: +- "Save this response as a note" +- "Save this transformation result" +- Convert any AI output into a permanent note + +### What Can Notes Contain? + +- **Text** — Your writing or AI-generated content +- **Citations** — References to specific sources +- **Metadata** — When created, how created (manual/AI), which sources influenced it +- **Tags** — Your categorization (optional but useful) + +### Why Notes Matter + +**Knowledge Accumulation**: Notes become your actual knowledge base. They're what you take away from the research. + +**Searchable**: Notes are searchable along with sources. "Find everything about X" includes your notes, not just sources. + +**Citable**: Notes can cite sources, creating an audit trail of where insights came from. + +**Shareable**: Notes are your outputs. You can share them, publish them, or build on them in other projects. + +--- + +## How They Connect: The Data Flow + +``` +YOU + │ + ├─→ Create Notebook ("AI Research") + │ + ├─→ Add Sources (papers, articles, videos) + │ └─→ System: Extract, embed, index + │ + ├─→ Search Sources (text or semantic) + │ └─→ System: Find relevant chunks + │ + ├─→ Apply Transformations (extract insights) + │ └─→ Creates Notes + │ + ├─→ Chat with Sources (explore with context control) + │ ├─→ Can save responses as Notes + │ └─→ Notes include citations + │ + ├─→ Ask Questions (automated comprehensive search) + │ ├─→ Can save results as Notes + │ └─→ Notes include citations + │ + └─→ Generate Podcast (transform notebook into audio) + └─→ Uses all sources + notes for content +``` + +--- + +## Key Design Decisions + +### 1. One Notebook Per Source + +Each source belongs to exactly one notebook. This creates clear boundaries: +- No ambiguity about which research project a source is in +- Easy to isolate or export a complete project +- Clean permissions model (if someone gets access to notebook, they get access to all its sources) + +### 2. Immutable Sources, Mutable Notes + +Sources never change (once added, always the same). But notes can be edited or deleted. Why? +- Sources are evidence → evidence shouldn't be altered +- Notes are your thinking → thinking evolves as you learn + +### 3. Explicit Context Control + +Sources don't automatically go to AI. 
You decide which sources are "in context" for each interaction: +- Chat: You manually select which sources to include +- Ask: System automatically figures out which sources to search +- Transformations: You choose which sources to transform + +This is different from systems that always send everything to AI. + +--- + +## Mental Models Explained + +### Notebook as Boundaries +Think of a notebook like a Git repository: +- Everything in it is about the same topic +- You can clone/fork it (copy to new project) +- It has clear entry/exit points +- You know exactly what's included + +### Sources as Evidence +Think of sources like exhibits in a legal case: +- Once filed, they don't change +- They can be cited and referenced +- They're the ground truth for what you're basing claims on +- Multiple sources can be cross-referenced + +### Notes as Synthesis +Think of notes like your case brief: +- You write them based on evidence +- They're your interpretation +- You can cite which evidence supports each claim +- They're what you actually share or act on + +--- + +## Common Questions + +### Can I move a source to a different notebook? +Not directly. Each source is tied to one notebook. If you want it in multiple notebooks, add it again (uploads are fast if it's already processed). + +### Can a note reference sources from a different notebook? +No. Notes stay within their notebook and reference sources within that notebook. This keeps boundaries clean. + +### What if I want to group sources within a notebook? +Use tags. You can tag sources ("primary research," "background," "methodology") and filter by tags. + +### Can I merge two notebooks? +Not built-in, but you can manually copy sources from one notebook to another by re-uploading them. + +--- + +## Summary + +| Concept | Purpose | Lifecycle | Scope | +|---------|---------|-----------|-------| +| **Notebook** | Container + context | Create once, configure | All its sources + notes | +| **Source** | Raw material | Add → Process → Store | One notebook | +| **Note** | Processed output | Create/capture → Edit → Share | One notebook | + +This three-layer model gives you: +- **Clear organization** (everything scoped to projects) +- **Privacy control** (isolated notebooks) +- **Audit trails** (notes cite sources) +- **Flexibility** (notes can be manual or AI-generated) diff --git a/docs/2-CORE-CONCEPTS/podcasts-explained.md b/docs/2-CORE-CONCEPTS/podcasts-explained.md new file mode 100644 index 00000000..2a95d315 --- /dev/null +++ b/docs/2-CORE-CONCEPTS/podcasts-explained.md @@ -0,0 +1,394 @@ +# Podcasts Explained - Research as Audio Dialogue + +Podcasts are Open Notebook's highest-level transformation: converting your research into audio dialogue for a different consumption pattern. + +--- + +## Why Podcasts Matter + +### The Problem +Research naturally accumulates as text: PDFs, articles, web pages, notes. This creates a friction point: + +**To consume research, you must:** +- Sit down at a desk +- Focus intently +- Read actively +- Take notes +- Set aside dedicated time + +**But much of life is passive time:** +- Commuting +- Exercising +- Doing dishes +- Driving +- Walking +- Idle moments + +### The Solution +Convert your research into audio dialogue so you can consume it passively. 
+ +``` +Before (Text-based): + Research pile → Must schedule reading time → Requires focus + +After (Podcast): + Research pile → Podcast → Can listen while commuting + → Absorb while exercising + → Understand while walking + → Engage without screen time +``` + +--- + +## What Makes It Special: Open Notebook vs. Competitors + +### Google Notebook LM Podcasts +- **Fixed format**: 2 hosts, always conversational +- **Limited customization**: You can't choose who the "hosts" are +- **One TTS voice per speaker**: Can't customize voices +- **Only uses cloud services**: No local options + +### Open Notebook Podcasts +- **Customizable format**: 1-4 speakers, you design them +- **Rich speaker profiles**: Create personas with backstories and expertise +- **Multiple TTS options**: + - OpenAI (natural, fast) + - Google TTS (high quality) + - ElevenLabs (beautiful voices, accents) + - Local TTS (privacy-first, no API calls) +- **Async generation**: Doesn't block your work +- **Full control**: Choose outline structure, tone, depth + +--- + +## How Podcast Generation Works + +### Stage 1: Content Selection + +You choose what goes into the podcast: +``` +Notebook content → Which sources? → Which notes? + → Which topics to focus on? + → Depth of coverage? +``` + +### Stage 2: Episode Profile + +You define how you want the podcast structured: +``` +Episode Profile +├─ Topic: "AI Safety Approaches" +├─ Length: 20 minutes +├─ Tone: Academic but accessible +├─ Format: Debate (2 speakers with opposing views) +├─ Audience: Researchers new to the field +└─ Focus areas: Main approaches, pros/cons, open questions +``` + +### Stage 3: Speaker Configuration + +You create speaker personas (1-4 speakers): + +``` +Speaker 1: "Expert Alex" +├─ Expertise: "Deep knowledge of alignment research" +├─ Personality: "Rigorous, academic, patient with explanation" +├─ Accent: (Optional) "British English" +└─ TTS Voice: "OpenAI Onyx" (or ElevenLabs, Google, etc.) + +Speaker 2: "Researcher Sam" +├─ Expertise: "Field observer, pragmatic perspective" +├─ Personality: "Curious, asks clarifying questions" +├─ Accent: "American English" +└─ TTS Voice: "ElevenLabs - thoughtful" +``` + +### Stage 4: Outline Generation + +System generates episode outline: +``` +EPISODE: "AI Safety Approaches" + +1. Introduction (2 min) + Alex: Introduces topic and speakers + Sam: What will we cover today? + +2. Main Approaches (8 min) + Alex: Explains top 3 approaches + Sam: Asks about tradeoffs + +3. Debate: Best approach? (6 min) + Alex: Advocates for approach A + Sam: Argues for approach B + +4. Open Questions (3 min) + Both: What's unsolved? + +5. Conclusion (1 min) + Recap and where to learn more +``` + +### Stage 5: Dialogue Generation + +System generates dialogue based on outline: +``` +Alex: "Today we're exploring three major approaches to AI alignment..." + +Sam: "That's a great start. Can you break down what we mean by alignment?" + +Alex: "Good question. Alignment means ensuring AI systems pursue the goals + we actually want them to pursue, not just what we literally asked for. + There's a classic example of a paperclip maximizer..." + +Sam: "Interesting. So it's about solving the intention problem?" + +Alex: "Exactly. And that's where the three approaches come in..." 
+``` + +### Stage 6: Text-to-Speech + +System converts dialogue to audio: +``` +Alex's text → OpenAI TTS → Alex's voice (audio file) +Sam's text → ElevenLabs TTS → Sam's voice (audio file) +Audio files → Mix together → Final podcast MP3 +``` + +--- + +## Key Architecture Decisions + +### 1. Asynchronous Processing +Podcasts are generated in the background. You upload → system processes → you download when ready. + +**Why?** Podcast generation takes time (10+ minutes for a 30-minute episode). Blocking would lock up your interface. + +### 2. Multi-Speaker Support +Unlike Google Notebook LM (always 2 hosts), you choose 1-4 speakers. + +**Why?** Different discussions work better with different formats: +- Expert monologue (1 speaker) +- Interview (2 speakers: host + expert) +- Debate (2 speakers: opposing views) +- Panel discussion (3-4 speakers: different expertise) + +### 3. Speaker Customization +You create rich speaker profiles, not just "Host A" and "Host B". + +**Why?** Makes podcasts more engaging and authentic. Different speakers bring different perspectives. + +### 4. Multiple TTS Providers +You're not locked into one voice provider. + +**Why?** +- Cost optimization (some providers cheaper) +- Quality preferences (some voices more natural) +- Privacy options (local TTS for sensitive content) +- Accessibility (different accents, genders, styles) + +### 5. Local TTS Option +Can generate podcasts entirely offline with local text-to-speech. + +**Why?** For sensitive research, never send audio to external APIs. + +--- + +## Use Cases Show Why This Matters + +### Academic Publishing +``` +Traditional: Academic paper → PDF +Problem: Hard to consume, linear reading required + +Open Notebook: +Research materials → Podcast (expert explaining methodology) + → Podcast (debate format: different interpretations) + → Different consumption for different audiences +``` + +### Content Creation +``` +Blog creator: Has research pile on a topic +Problem: Doesn't have time to write the article + +Solution: +Add research → Create podcast → Transcribe → Becomes article +OR: Podcast BECOMES the content (upload to podcast platforms) +``` + +### Educational Content +``` +Educator: Has reading materials for a course +Problem: Students don't read the papers + +Solution: +Create podcast with expert explaining papers +Students listen → Better engagement → Discussions can reference podcast +``` + +### Market Research +``` +Product manager: Has interviews with customers +Problem: Too many hours of audio to review + +Solution: +Create podcast with debate format (customer perspective vs. team perspective) +Much more engaging than raw transcripts +``` + +### Knowledge Transfer +``` +Domain expert: Leaving the organization +Problem: How to preserve expertise? + +Solution: +Create expert-mode podcast explaining frameworks, decision-making, context +New team member listens, gets context faster than reading 100 documents +``` + +--- + +## The Difference: Active vs. Passive Learning + +### Text-Based Research (Active) +- **Effort**: High (must focus, read, synthesize) +- **When**: Dedicated study time +- **Cost**: Time is expensive (can't multitask) +- **Best for**: Deep dives, precise information +- **Format**: Whatever you write (notes, articles, books) + +### Audio Podcast (Passive) +- **Effort**: Low (just listen) +- **When**: Anywhere, anytime +- **Cost**: Low (can multitask) +- **Best for**: Overview, context, exploration +- **Format**: Dialogue (more engaging than narration) + +**They complement each other:** +1. 
**First encounter**: Listen to podcast (passive, get context) +2. **Deep dive**: Read source materials (active, precise) +3. **Mastery**: Both together (understand big picture + details) + +--- + +## How Podcasts Fit Into Your Workflow + +``` +1. Build notebook (add sources) + ↓ +2. Apply transformations (extract insights) + ↓ +3. Chat/Ask (explore content) + ↓ +4. Decide on podcast + ├─→ Create speaker profiles + ├─→ Define episode profile + ├─→ Choose TTS provider + └─→ Generate podcast + ↓ +5. Listen while commuting/exercising + ↓ +6. Reference sources for deep dive + ↓ +7. Repeat for different formats/speakers/focus +``` + +--- + +## Advanced: Multiple Podcasts from Same Research + +You can create different podcasts from the same sources: + +### Example: AI Safety Research +``` +Podcast 1: "Expert Monologue" + Speaker: Researcher explaining field + Format: Educational, comprehensive + Audience: Students new to field + +Podcast 2: "Debate Format" + Speakers: Optimist vs. skeptic + Format: Discussion of tradeoffs + Audience: Advanced researchers + +Podcast 3: "Interview Format" + Speakers: Journalist + expert + Format: Q&A about practical applications + Audience: Industry practitioners +``` + +Each tells the same story from different angles. + +--- + +## Privacy & Data Considerations + +### Where Your Data Goes + +**Option 1: Cloud TTS (Faster, Higher Quality)** +``` +Your outline → API call to TTS provider + → Audio returned + → Stored in your notebook + +Provider sees: Your outlined script (not raw sources) +Privacy level: Medium (outline is shared, sources aren't) +``` + +**Option 2: Local TTS (Slower, Maximum Privacy)** +``` +Your outline → Local TTS engine (runs on your machine) + → Audio generated locally + → Stored in your notebook + +Provider sees: Nothing +Privacy level: Maximum (everything local) +``` + +### Recommendation +- **Sensitive research**: Use local TTS, no API calls +- **Less sensitive**: Use ElevenLabs or Google (both handle audio data professionally) +- **Mixed**: Use local TTS for speakers reading sensitive content + +--- + +## Cost Considerations + +### Cloud TTS Costs +| Provider | Cost | Quality | Speed | +|----------|------|---------|-------| +| OpenAI | ~$0.015 per minute | Good | Fast | +| Google | ~$0.004 per minute | Excellent | Fast | +| ElevenLabs | ~$0.10 per minute | Exceptional | Medium | +| Local TTS | Free | Basic | Slow | + +A 30-minute podcast costs: +- OpenAI: ~$0.45 +- Google: ~$0.12 +- ElevenLabs: ~$3.00 +- Local: Free (but slow) + +--- + +## Summary: Why Podcasts Are Special + +**Podcasts transform your research consumption:** + +| Aspect | Text | Podcast | +|--------|------|---------| +| **How consumed?** | Active reading | Passive listening | +| **Where consumed?** | Desk | Anywhere | +| **Multitasking** | Hard | Easy | +| **Time commitment** | Scheduled | Flexible | +| **Format** | Whatever | Natural dialogue | +| **Engagement** | Academic | Conversational | +| **Accessibility** | Text-based | Audio-based | + +**In Open Notebook specifically:** +- **Full customization** — you create speakers and format +- **Privacy options** — local TTS for sensitive content +- **Cost control** — choose TTS provider based on budget +- **Non-blocking** — generates in background +- **Multiple versions** — create different podcasts from same research + +This is why podcasts matter: they change *when* and *how* you can consume your research. 
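
As a footnote to the cost table above: the 30-minute estimates are simple per-minute arithmetic. A small helper makes that explicit (illustrative only; the rates are the approximate figures from the table, and real pricing varies by provider, voice, and plan).

```python
# Back-of-envelope TTS cost check using the table's approximate rates.
RATE_PER_MINUTE = {
    "OpenAI": 0.015,
    "Google": 0.004,
    "ElevenLabs": 0.10,
    "Local": 0.0,
}


def podcast_cost(minutes: float, provider: str) -> float:
    return minutes * RATE_PER_MINUTE[provider]


for provider in RATE_PER_MINUTE:
    print(f"{provider}: ${podcast_cost(30, provider):.2f}")
# OpenAI: $0.45, Google: $0.12, ElevenLabs: $3.00, Local: $0.00
```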
diff --git a/docs/3-USER-GUIDE/adding-sources.md b/docs/3-USER-GUIDE/adding-sources.md new file mode 100644 index 00000000..020d9e34 --- /dev/null +++ b/docs/3-USER-GUIDE/adding-sources.md @@ -0,0 +1,429 @@ +# Adding Sources - Getting Content Into Your Notebook + +Sources are the raw materials of your research. This guide covers how to add different types of content. + +--- + +## Quick-Start: Add Your First Source + +### Option 1: Upload a File (PDF, Word, etc.) + +``` +1. In your notebook, click "Add Source" +2. Select "Upload File" +3. Choose a file from your computer +4. Click "Upload" +5. Wait 30-60 seconds for processing +6. Done! Source appears in your notebook +``` + +### Option 2: Add a Web Link + +``` +1. Click "Add Source" +2. Select "Web Link" +3. Paste URL: https://example.com/article +4. Click "Add" +5. Wait for processing (usually faster than files) +6. Done! +``` + +### Option 3: Paste Text + +``` +1. Click "Add Source" +2. Select "Text" +3. Paste or type your content +4. Click "Save" +5. Done! Immediately available +``` + +--- + +## Supported File Types + +### Documents +- **PDF** (.pdf) — Best support, including scanned PDFs with OCR +- **Word** (.docx, .doc) — Full support +- **PowerPoint** (.pptx) — Slides converted to text +- **Excel** (.xlsx, .xls) — Spreadsheet data +- **EPUB** (.epub) — eBook files +- **Markdown** (.md, .txt) — Plain text formats +- **HTML** (.html, .htm) — Web page files + +**File size limits:** Up to ~100MB (varies by system) + +**Processing time:** 10 seconds - 2 minutes (depending on length and file type) + +### Audio & Video +- **Audio**: MP3, WAV, M4A, OGG, FLAC (~30 seconds - 3 minutes per hour) +- **Video**: MP4, AVI, MOV, MKV, WebM (~3-10 minutes per hour) +- **YouTube**: Direct URL support +- **Podcasts**: RSS feed URL + +**Automatic transcription**: Audio/video is transcribed to text automatically. This requires enabling speech-to-text in settings. + +### Web Content +- **Articles**: Blog posts, news articles, Medium +- **YouTube**: Full videos or playlists +- **PDFs online**: Direct PDF links +- **News**: News site articles + +**Just paste the URL** in "Web Link" section. + +### What Doesn't Work +- Paywalled content (WSJ, FT, etc.) — Can't extract +- Password-protected PDFs — Can't open +- Pure image files (.jpg, .png) — Except scanned PDFs which have OCR +- Very large files (>100MB) — Timeout + +--- + +## What Happens When You Add a Source + +The system automatically does four things: + +``` +1. EXTRACT TEXT + File/URL → Readable text + (PDFs get OCR if scanned) + (Videos get transcribed if enabled) + +2. BREAK INTO CHUNKS + Long text → ~500-word pieces + (So search finds specific parts, not whole document) + +3. CREATE EMBEDDINGS + Each chunk → Vector representation + (Enables semantic/concept search) + +4. INDEX & STORE + Everything → Database + (Ready to search and retrieve) +``` + +**Time to use:** After the progress bar completes, the source is ready immediately. Embeddings are created in the background. + +--- + +## Step-by-Step for Different Types + +### PDFs + +**Best practices:** +``` +Clean PDFs: + 1. Upload → Done + 2. Processing time: ~30-60 seconds + +Scanned/Image PDFs: + 1. Upload same way + 2. System auto-detects and uses OCR + 3. Processing time: ~2-3 minutes + 4. (Higher, due to OCR overhead) + +Large PDFs (50+ pages): + 1. Consider splitting into smaller files + 2. Or upload as-is (system handles it) + 3. 
Processing time scales with size +``` + +**Common issues:** +- "Can't extract text" → PDF is corrupted or has copy protection +- Solution: Try opening in Adobe. If it won't, the PDF is likely protected. + +### Web Links / Articles + +**Best practices:** +``` +1. Copy full URL from browser: https://example.com/article-title +2. Paste in "Web Link" +3. Click Add +4. Wait for extraction + +Processing time: Usually 5-15 seconds +``` + +**What works:** +- Standard web articles +- Blog posts +- News articles +- Wikipedia pages +- Medium posts +- Substack articles + +**What doesn't work:** +- Twitter threads (unreliable) +- Paywalled articles (can't access) +- JavaScript-heavy sites (content not extracted) + +**Pro tip:** If it doesn't work, copy the article text and paste as "Text" instead. + +### Audio Files + +**Best practices:** +``` +1. Ensure speech-to-text is enabled in Settings +2. Upload MP3, WAV, or M4A file +3. System automatically transcribes to text +4. Processing time: ~1 minute per 5 minutes of audio + +Example: + - 1-hour podcast → 12 minutes processing + - 10-minute recording → 2 minutes processing +``` + +**Quality matters:** +- Clear audio: Fast transcription +- Muffled/noisy audio: Slower, less accurate transcription +- Background noise: Try to minimize before uploading + +**Tip:** If audio quality is poor, the AI might misinterpret content. You can manually correct transcription if needed. + +### YouTube Videos + +**Best practices:** +``` +Two ways to add: + +Method 1: Direct URL + 1. Copy YouTube URL: https://www.youtube.com/watch?v=... + 2. Paste in "Web Link" + 3. Click Add + 4. System extracts captions (if available) + transcript + +Method 2: Playlist + 1. Paste playlist URL + 2. System adds all videos as separate sources + 3. Each video processed separately + 4. Takes longer (multiple videos) +``` + +**What's extracted:** +- Captions/subtitles (if available) +- Transcription (if captions aren't available) +- Basic metadata (title, channel, length) + +**Processing:** +- 10-minute video: ~2-3 minutes +- 1-hour video: ~10-15 minutes + +### Text / Paste Content + +**Best practices:** +``` +1. Select "Text" when adding source +2. Paste or type content +3. System processes immediately +4. No wait time needed + +Good for: + - Notes you want to reference + - Quotes from books + - Transcripts you have handy + - Quick research snippets +``` + +--- + +## Managing Your Sources + +### Viewing Source Details + +``` +Click on source → See: + - Original file name/title + - When it was added + - Size and format + - Processing status + - Number of chunks +``` + +### Organizing with Metadata + +You can add to each source: +- **Title**: Better name than original filename +- **Tags**: Category labels ("primary research", "background", "competitor analysis") +- **Description**: A few notes about what it contains + +**Why this matters:** +- Makes sources easier to find +- Helps when contextualizing for Chat +- Useful for organizing large notebooks + +### Searching Within Sources + +``` +After sources are added, you can: + +Text search: "Find exact phrase" +Vector search: "Find conceptually similar" + +Both search across all sources in notebook. 
+Results show: + - Which source + - Which section + - Relevance score +``` + +--- + +## Context Management: How Sources Get Used + +You control how AI accesses sources: + +### Three Levels (for Chat) + +**Full Content:** +``` +AI sees: Complete source text +Cost: 100% of tokens +Use when: Analyzing in detail, need precise citations +Example: "Analyze this methodology paper closely" +``` + +**Summary Only:** +``` +AI sees: AI-generated summary (not full text) +Cost: ~10-20% of tokens +Use when: Background material, reference context +Example: "Use this as context but focus on the main source" +``` + +**Not in Context:** +``` +AI sees: Nothing (excluded) +Cost: 0 tokens +Use when: Confidential, not relevant, or archived +Example: "Keep this in notebook but don't use in this conversation" +``` + +### How to Set Context (in Chat) + +``` +1. Go to Chat +2. Click "Select Context Sources" +3. For each source: + - Toggle ON/OFF (include/exclude) + - Choose level (Full/Summary/Excluded) +4. Click "Save" +5. Now chat uses these settings +``` + +--- + +## Common Mistakes + +| Mistake | What Happens | How to Fix | +|---------|--------------|-----------| +| Upload 200 sources at once | System gets slow, processing stalls | Add 10-20 at a time, wait for processing | +| Use full content for all sources | Token usage skyrockets, expensive | Use "Summary" or "Excluded" for background material | +| Add huge PDFs without splitting | Processing is slow, search results less precise | Consider splitting large PDFs into chapters | +| Forget source titles | Can't distinguish between similar sources | Rename sources with descriptive titles right after uploading | +| Don't tag sources | Hard to find and organize later | Add tags immediately: "primary", "background", etc. | +| Mix languages in one source | Transcription/embedding quality drops | Keep each language in separate sources | +| Use same source multiple times | Takes up space, creates confusion | Add once; reuse in multiple chats/notebooks | + +--- + +## Processing Status & Troubleshooting + +### What the Status Indicators Mean + +``` +🟡 Processing + → Source is being extracted and embedded + → Wait 30 seconds - 3 minutes depending on size + → Don't use in Chat yet + +🟢 Ready + → Source is processed and searchable + → Can use immediately in Chat + → Can apply transformations + +🔴 Error + → Something went wrong + → Common reasons: + - Unsupported file format + - File too large or corrupted + - Network timeout + +⚪ Not in Context + → Source added but excluded from Chat + → Still searchable, not sent to AI +``` + +### Common Errors & Solutions + +**"Unsupported file type"** +- You tried to upload a format not in the list (e.g., `.webp` image) +- Solution: Convert to supported format (PDF for documents, MP3 for audio) + +**"Processing timeout"** +- Very large file (>100MB) or very long audio +- Solution: Split into smaller pieces or try uploading again + +**"Transcription failed"** +- Audio quality too poor or language not detected +- Solution: Re-record with better quality, or paste text transcript manually + +**"Web link won't extract"** +- Website blocks automated access or uses JavaScript for content +- Solution: Copy the article text and paste as "Text" instead + +--- + +## Tips for Best Results + +### For PDFs +- Clean, digital PDFs work best +- Remove copy protection if present (legally) +- Scanned PDFs work but take longer + +### For Web Articles +- Use full URL including domain +- Avoid cookie/popup-laden sites +- If extraction fails, copy-paste text 
instead
+
+### For Audio
+- Clear, well-recorded audio transcribes better
+- Remove background noise if possible
+- YouTube videos usually have good transcriptions built-in
+
+### For Large Documents
+- Consider splitting into smaller sources
+- Gives more precise search results
+- Processing is faster for smaller pieces
+
+### For Organization
+- Name sources clearly (not "document_2.pdf")
+- Add tags immediately after uploading
+- Use descriptions for complex documents
+
+---
+
+## What Comes After: Using Your Sources
+
+Once you've added sources, you can:
+
+- **Chat** → Ask questions (see [Chat Effectively](chat-effectively.md))
+- **Search** → Find specific content (see [Search Effectively](search.md))
+- **Transformations** → Extract structured insights (see [Transformations](transformations.md))
+- **Ask** → Get comprehensive answers (see [Search Effectively](search.md))
+- **Podcasts** → Turn into audio (see [Creating Podcasts](creating-podcasts.md))
+
+---
+
+## Summary Checklist
+
+Before adding sources, confirm:
+
+- [ ] File is in a supported format
+- [ ] File is under 100MB (or split large files first)
+- [ ] Web links are full URLs (not shortened)
+- [ ] Audio files have clear speech (if transcription-dependent)
+- [ ] You've named each source clearly
+- [ ] You've added tags for organization
+- [ ] You understand context levels (Full/Summary/Excluded)
+
+Done! Sources are now ready for Chat, Search, Transformations, and more.
diff --git a/docs/3-USER-GUIDE/chat-effectively.md b/docs/3-USER-GUIDE/chat-effectively.md
new file mode 100644
index 00000000..d0775eba
--- /dev/null
+++ b/docs/3-USER-GUIDE/chat-effectively.md
@@ -0,0 +1,554 @@
+# Chat Effectively - Conversations with Your Research
+
+Chat is your main tool for exploratory questions and back-and-forth dialogue. This guide covers how to use it effectively.
+
+---
+
+## Quick-Start: Your First Chat
+
+```
+1. Go to your notebook
+2. Click "Chat"
+3. Select which sources to include (context)
+4. Type your question
+5. Click "Send"
+6. Read the response
+7. Ask a follow-up (context stays the same)
+8. Repeat until satisfied
+```
+
+That's it! But doing it *well* requires understanding how context works.
+
+---
+
+## Context Management: The Key to Good Chat
+
+Context controls **what the AI is allowed to see**. This is your most important control.
+
+### The Three Levels Explained
+
+**FULL CONTENT**
+- AI sees: Complete source text
+- Cost: 100% of the source's tokens
+- Best for: Detailed analysis, precise citations
+- Example: "Analyze this research paper closely"
+
+```
+You set: Paper A → Full Content
+AI sees: Every word of Paper A
+AI can: Cite specific sentences, notice nuances
+Result: Precise, detailed answers (higher cost)
+```
+
+**SUMMARY ONLY**
+- AI sees: AI-generated 200-word summary (not full text)
+- Cost: ~10-20% of full content cost
+- Best for: Background material, reference context
+- Example: "Use this for background, focus on the main paper"
+
+```
+You set: Paper B → Summary Only
+AI sees: Condensed summary, key points
+AI can: Reference main ideas but not details
+Result: Faster, cheaper answers (loses precision)
+```
+
+**NOT IN CONTEXT**
+- AI sees: Nothing
+- Cost: 0 tokens
+- Best for: Confidential, irrelevant, archived content
+- Example: "Keep this in notebook but don't use now"
+
+```
+You set: Paper C → Not in Context
+AI sees: Nothing (completely excluded)
+AI can: Never reference it
+Result: No cost, no privacy risk for that source
+```
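+
+The trade-off between the three levels is easiest to see as arithmetic. Below is a minimal sketch, assuming a 10,000-token source and the rough cost fractions described above; the app's actual token accounting will differ:
+
+```python
+# Rough share of a source's tokens sent to the model at each level,
+# per the descriptions above. Assumed fractions, for illustration only.
+CONTEXT_COST_FRACTION = {
+    "full_content": 1.00,    # complete source text
+    "summary_only": 0.15,    # ~10-20% of the full-content cost
+    "not_in_context": 0.00,  # source is never sent
+}
+
+def context_tokens(source_tokens: int, level: str) -> int:
+    """Approximate tokens one source adds to each chat request."""
+    return round(source_tokens * CONTEXT_COST_FRACTION[level])
+
+for level in CONTEXT_COST_FRACTION:
+    print(f"10,000-token source at {level}: ~{context_tokens(10_000, level):,} tokens")
+```
+
+### Setting Context (Step by Step)
+
+```
+1. 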
Click "Select Sources" + (Shows list of all sources in notebook) + +2. For each source: + □ Checkbox: Include or exclude + + Level dropdown: + ├─ Full Content + ├─ Summary Only + └─ Excluded + +3. Check your selections + Example: + ✓ Paper A (Full Content) - "Main focus" + ✓ Paper B (Summary Only) - "Background" + ✓ Paper C (Excluded) - "Keep private" + □ Paper D (Not included) - "Not relevant" + +4. Click "Save Context" + +5. Now chat uses these settings +``` + +### Context Strategies + +**Strategy 1: Minimalist** +- Main source: Full Content +- Everything else: Excluded +- Result: Focused, cheap, precise + +``` +Use when: + - Analyzing one source deeply + - Budget-conscious + - Want focused answers +``` + +**Strategy 2: Comprehensive** +- All sources: Full Content +- Result: All context considered, expensive + +``` +Use when: + - Comprehensive analysis + - Unlimited budget + - Want AI to see everything +``` + +**Strategy 3: Tiered** +- Primary sources: Full Content +- Secondary sources: Summary Only +- Background/reference: Excluded +- Result: Balanced cost/quality + +``` +Use when: + - Mix of important and reference material + - Want thorough but not expensive + - Most common strategy +``` + +**Strategy 4: Privacy-First** +- Sensitive docs: Excluded +- Public research: Full Content +- Result: Never send confidential data + +``` +Use when: + - Company confidential materials + - Personal sensitive data + - Complying with data protection +``` + +--- + +## Asking Effective Questions + +### Good Questions vs. Poor Questions + +**Poor Question** +``` +"What do you think?" + +Problems: +- Too vague (about what?) +- No context (what am I analyzing?) +- Can't verify answer (citing what?) + +Result: Generic, shallow answer +``` + +**Good Question** +``` +"Based on the paper's methodology section, +what are the three main limitations the authors acknowledge? +Please cite which pages mention each one." + +Strengths: +- Specific about what you want +- Clear scope (methodology section) +- Asks for citations +- Requires deep reading + +Result: Precise, verifiable, useful answer +``` + +### Question Patterns That Work + +**Factual Questions** +``` +"What does the paper say about X?" +"Who are the authors?" +"What year was this published?" + +Result: Simple, factual answers with citations +``` + +**Analysis Questions** +``` +"How does this approach differ from the traditional method?" +"What are the main assumptions underlying this argument?" +"Why do you think the author chose this methodology?" + +Result: Deeper thinking, comparison, critique +``` + +**Synthesis Questions** +``` +"How do these two sources approach the problem differently?" +"What's the common theme across all three papers?" +"If we combine these approaches, what would we get?" + +Result: Cross-source insights, connections +``` + +**Actionable Questions** +``` +"What are the practical implications of this research?" +"How could we apply these findings to our situation?" +"What's the next logical research direction?" + +Result: Practical, forward-looking answers +``` + +### The SPECIFIC Formula + +Good questions have: + +1. **SCOPE** - What are you analyzing? + "In this research paper..." + "Looking at these three articles..." + "Based on your experience..." + +2. **SPECIFICITY** - Exactly what do you want? + "...the methodology..." + "...main findings..." + "...recommended next steps..." + +3. **CONSTRAINT** - Any limits? + "...in 3 bullet points..." + "...with citations to page numbers..." + "...comparing these two approaches..." 
+ +4. **VERIFICATION** - How can you check it? + "...with specific quotes..." + "...cite your sources..." + "...link to the relevant section..." + +**Example:** +``` +Poor: "What about transformers?" +Good: "In this research paper on machine learning, + explain the transformer architecture in 2-3 sentences, + then cite which page describes the attention mechanism." +``` + +--- + +## Follow-Up Questions (The Real Power of Chat) + +Chat's strength is dialogue. You ask, get an answer, ask more. + +### Building on Responses + +``` +First question: +"What's the main finding?" + +AI: "The study shows X [citation]" + +Follow-up question: +"How does that compare to Y research?" + +AI: "The key difference is Z [citation]" + +Next question: +"Why do you think that difference matters?" + +AI: "Because it affects A, B, C [explained]" +``` + +### Iterating Toward Understanding + +``` +Round 1: Get overview +"What's this source about?" + +Round 2: Get details +"What's the most important part?" + +Round 3: Compare +"How does it relate to my notes on X?" + +Round 4: Apply +"What should I do with this information?" +``` + +### Changing Direction + +``` +Context stays same, but you ask new questions: + +Question 1: "What's the methodology?" +Question 2: "What are the limitations?" +Question 3: "What about the ethical implications?" +Question 4: "Who else has done similar work?" + +All in one conversation, reusing context. +``` + +### Adjusting Context Between Rounds + +``` +After question 3, you realize: +"I need more context from another source" + +1. Click "Adjust Context" +2. Add new source or change context level +3. Your conversation history stays +4. Continue asking with new context +``` + +--- + +## Citations and Verification + +Citations are how you verify that the AI's answer is accurate. + +### Understanding Citations + +``` +AI Response with Citation: +"The paper reports a 95% accuracy rate [see page 12]" + +What this means: +✓ The claim "95% accuracy rate" is from page 12 +✓ You can verify by reading page 12 +✓ If page 12 doesn't say that, the AI hallucinated +``` + +### Requesting Better Citations + +``` +If you get a response without citations: + +Ask: "Please cite the page number for that claim" +or: "Show me where you found that information" + +AI will: +- Find the citation +- Provide page numbers +- Show you the source +``` + +### Verification Workflow + +``` +1. Get answer from Chat +2. Check citation (which source? which page?) +3. Click citation link (if available) +4. See the actual text in source +5. Does it really say what AI claimed? + +If YES: Great, you can use this answer +If NO: The AI hallucinated, ask for correction +``` + +--- + +## Common Chat Patterns + +### Pattern 1: Deep Dive into One Source + +``` +1. Set context: One source (Full Content) +2. Question 1: Overview +3. Question 2: Main argument +4. Question 3: Evidence for argument +5. Question 4: Limitations +6. Question 5: Next steps + +Result: Complete understanding of one source +``` + +### Pattern 2: Comparative Analysis + +``` +1. Set context: 2-3 sources (all Full Content) +2. Question 1: What does each source say about X? +3. Question 2: How do they agree? +4. Question 3: How do they disagree? +5. Question 4: Which approach is stronger? + +Result: Understanding differences and trade-offs +``` + +### Pattern 3: Research Exploration + +``` +1. Set context: Many sources (mix of Full/Summary) +2. Question 1: What are the main perspectives? +3. Question 2: What's missing from these views? +4. 
Question 3: What questions does this raise?
+5. Question 4: What should I research next?
+
+Result: Understanding landscape and gaps
+```
+
+### Pattern 4: Problem Solving
+
+```
+1. Set context: Relevant sources (Full Content)
+2. Question 1: What's the problem?
+3. Question 2: What approaches exist?
+4. Question 3: Pros and cons of each?
+5. Question 4: Which would work best for [my situation]?
+
+Result: Decision-making informed by research
+```
+
+---
+
+## Optimizing for Cost
+
+Chat uses tokens for every response. Here's how to use them efficiently:
+
+### Reduce Token Usage
+
+**Minimize context**
+```
+Option A: All sources, Full Content
+  Cost per response: 5,000 tokens
+
+Option B: Only relevant sources, Summary Only
+  Cost per response: 1,000 tokens
+
+Savings: 80% cheaper, same conversation
+```
+
+**Shorter questions**
+```
+Verbose: "Could you please analyze the methodology
+          section of this paper and explain in detail
+          what the authors did?"
+
+Concise: "Summarize the methodology in 2-3 points."
+
+Savings: 20-30% per response
+```
+
+**Use cheaper models**
+```
+GPT-4o: ~$2.50 per 1M input tokens
+GPT-4o-mini: ~$0.15 per 1M input tokens
+Claude Sonnet: ~$3.00 per 1M input tokens
+
+(Approximate list prices; rates change often, so check your provider)
+
+For chat: Mini/Haiku models are usually fine
+For deep analysis: Sonnet/Opus worth the cost
+```
+
+### Budget Strategies
+
+**Exploration budget**
+- Use cheap model
+- Broad context (understand landscape)
+- Short questions
+- Result: Low cost, good overview
+
+**Analysis budget**
+- Use powerful model
+- Focused context (main source only)
+- Detailed questions
+- Result: Higher cost, deep insights
+
+**Synthesis budget**
+- Use powerful model for final synthesis
+- Multiple sources (Full Content)
+- Complex comparative questions
+- Result: Expensive but valuable output
+
+---
+
+## Troubleshooting Chat Issues
+
+### Poor Responses
+
+| Problem | Cause | Solution |
+|---------|-------|----------|
+| Generic answers | Vague question | Be specific (see question patterns) |
+| Missing context | Not enough in context | Add sources or change to Full Content |
+| Incorrect info | Source not in context | Add the relevant source |
+| Hallucinating | Model confused | Ask for citations, verify claims |
+| Shallow analysis | Wrong model | Switch to more powerful model |
+
+### High Costs
+
+| Problem | Cause | Solution |
+|---------|-------|----------|
+| Expensive per response | Too much context | Use Summary Only or exclude sources |
+| Many follow-ups | Exploratory chat | Use Ask instead for single comprehensive answer |
+| Long conversations | Keeping history | Archive old chats, start fresh |
+| Large sources | Full text in context | Use Summary Only for large documents |
+
+---
+
+## Best Practices
+
+### Before You Chat
+
+- [ ] Add sources you'll need
+- [ ] Decide context strategy (Tiered is usually best)
+- [ ] Choose model (cheaper for exploration, powerful for analysis)
+- [ ] Have a question in mind
+
+### During Chat
+
+- [ ] Ask specific questions (use SPECIFIC formula)
+- [ ] Check citations for factual claims
+- [ ] Follow up on unclear points
+- [ ] Adjust context if you need different sources
+
+### After Chat
+
+- [ ] Save good responses as notes
+- [ ] Archive conversation if you're done
+- [ ] Organize notes for future reference
+- [ ] Use insights in other features (Ask, Transformations, Podcasts)
+
+---
+
+## When to Use Chat vs. 
Ask + +**Use CHAT when:** +- You want a dialogue +- You're exploring a topic +- You'll ask multiple related questions +- You want to adjust context during conversation +- You're not sure exactly what you need + +**Use ASK when:** +- You have one specific question +- You want a comprehensive answer +- You want the system to auto-search +- You want one response, not dialogue +- You want maximum tokens spent on search + +--- + +## Summary: Chat as Conversation + +Chat is fundamentally different from asking ChatGPT directly: + +| Aspect | ChatGPT | Open Notebook Chat | +|--------|---------|-------------------| +| **Source control** | None (uses training) | You control which sources are visible | +| **Cost control** | Per token | Per token, but context is your choice | +| **Iteration** | Works | Works, with your sources changing dynamically | +| **Citations** | Made up often | Tied to your sources (verifiable) | +| **Privacy** | Your data to OpenAI | Your data stays local (unless you choose) | + +The key insight: **Chat is retrieval-augmented generation.** AI sees only what you put in context. You control the conversation and the information flow. + +That's why Chat is powerful for research. You're not just talking to an AI; you're having a conversation with your research itself. diff --git a/docs/3-USER-GUIDE/citations.md b/docs/3-USER-GUIDE/citations.md new file mode 100644 index 00000000..4995f394 --- /dev/null +++ b/docs/3-USER-GUIDE/citations.md @@ -0,0 +1,299 @@ +# Citations - Verify and Trust AI Responses + +Citations connect AI responses to your source materials. This guide covers how to use and verify them. + +--- + +## Why Citations Matter + +Every AI-generated response in Open Notebook includes citations to your sources. This lets you: + +- **Verify claims** - Check that AI actually read what it claims +- **Find original context** - See the full passage around a quote +- **Catch hallucinations** - Spot when AI makes things up +- **Build credibility** - Your notes have traceable sources + +--- + +## Quick Start: Using Citations + +### Reading Citations + +``` +AI Response: +"The study found a 95% accuracy rate [1] using the proposed method." + +[1] = Click to see source + +What happens when you click: +→ Opens the source document +→ Highlights the relevant section +→ You can verify the claim +``` + +### Requesting Better Citations + +If a response lacks citations, ask: + +``` +"Please cite the specific page or section for that claim." +"Where in the document does it say that?" +"Can you quote the exact text?" +``` + +--- + +## How Citations Work + +### Automatic Generation + +When AI references your sources, citations are generated automatically: + +``` +1. AI analyzes your question +2. Retrieves relevant source chunks +3. Generates response with inline citations +4. Links citations to original source locations +``` + +### Citation Format + +``` +Inline format: +"The researchers concluded X [1] and Y [2]." + +Reference list: +[1] Paper Title - Section 3.2 +[2] Report Name - Page 15 + +Clickable: Each [number] links to the source +``` + +--- + +## Verifying Citations + +### The Verification Workflow + +``` +Step 1: Read AI response + "The model achieved 95% accuracy [1]" + +Step 2: Click citation [1] + → Opens source document + → Shows relevant passage + +Step 3: Verify the claim + Does source actually say 95%? + Is context correct? + Any nuance missed? 
+ +Step 4: Trust or correct + ✓ Accurate → Use the insight + ✗ Wrong → Ask AI to correct +``` + +### What to Check + +| Check | Why | +|-------|-----| +| **Exact numbers** | AI sometimes rounds or misremembers | +| **Context** | Quote might mean something different in context | +| **Attribution** | Is this the source's claim or someone they cited? | +| **Completeness** | Did AI miss important caveats? | + +--- + +## Citations in Different Features + +### Chat Citations + +``` +Context: Sources you selected +Citations: Reference chunks used in response +Verification: Click to see original text +Save: Citations preserved when saving as note +``` + +### Ask Feature Citations + +``` +Context: Auto-searched across all sources +Citations: Multiple sources synthesized +Verification: Each source linked separately +Quality: Often more comprehensive than Chat +``` + +### Transformation Citations + +``` +Context: Single source being transformed +Citations: Points back to original document +Verification: Compare output to source +Use: When you need structured extraction +``` + +--- + +## Saving Citations + +### In Notes + +When you save an AI response as a note, citations are preserved: + +``` +Original response: +"According to the paper [1], the method works by..." + +Saved note includes: +- The text +- The citation link +- Reference to source document +``` + +### Exporting + +Citations work in exports: + +| Format | Citation Behavior | +|--------|-------------------| +| **Markdown** | Links preserved as `[text](link)` | +| **Copy/Paste** | Plain text with reference numbers | +| **PDF** | Clickable references (if supported) | + +--- + +## Citation Quality Tips + +### Get Better Citations + +**Be specific in questions:** +``` +Poor: "What does it say about X?" +Good: "What does page 15 say about X? Please quote directly." +``` + +**Request citation format:** +``` +"Include page numbers for each claim." +"Cite specific sections, not just document names." +``` + +**Use Full Content context:** +``` +Summary Only → Less precise citations +Full Content → Exact quotes possible +``` + +### When Citations Are Missing + +| Situation | Cause | Solution | +|-----------|-------|----------| +| No citations | AI used general knowledge | Ask: "Base your answer only on my sources" | +| Vague citations | Source not in Full Content | Change context level | +| Wrong citations | AI confused sources | Ask to verify with quotes | + +--- + +## Common Issues + +### "Citation doesn't match claim" + +``` +Problem: AI says X, but source says Y + +What happened: +- AI paraphrased incorrectly +- AI combined multiple sources confusingly +- Source was taken out of context + +Solution: +1. Click citation to see original +2. Note the discrepancy +3. Ask AI: "The source says Y, not X. Please correct." +``` + +### "Can't find cited section" + +``` +Problem: Citation link doesn't show relevant text + +What happened: +- Source was chunked differently than expected +- Information spread across multiple sections +- Processing missed some content + +Solution: +1. Search within source for key terms +2. Ask AI for more specific location +3. Re-process source if needed +``` + +### "No citations at all" + +``` +Problem: AI response has no source references + +What happened: +- Sources not in context +- Question asked for opinion/general knowledge +- Model didn't find relevant content + +Solution: +1. Check context settings +2. Rephrase: "Based on my sources, what..." +3. 
Add more relevant sources +``` + +--- + +## Best Practices + +### For Research Integrity + +1. **Always verify important claims** - Don't trust AI blindly +2. **Check context** - Quotes can be misleading out of context +3. **Note limitations** - AI might miss nuance +4. **Keep source access** - Don't delete sources you cite + +### For Academic Work + +1. **Use Full Content** for documents you'll cite +2. **Request specific page numbers** +3. **Cross-check with original sources** +4. **Document your verification process** + +### For Professional Use + +1. **Verify before sharing** - Check claims clients will see +2. **Keep citation trail** - Save notes with sources linked +3. **Be transparent** - Note when insights are AI-assisted + +--- + +## Summary + +``` +Citations = Your verification system + +How to use: +1. Read AI response +2. Note citation markers [1], [2], etc. +3. Click to see original source +4. Verify claim matches source +5. Trust verified insights + +When citations fail: +- Ask for specific quotes +- Change to Full Content +- Request page numbers +- Verify manually + +Why it matters: +- AI can hallucinate +- Context can change meaning +- Trust requires verification +- Good research needs sources +``` + +Citations aren't just references — they're your quality control. Use them to build research you can trust. diff --git a/docs/3-USER-GUIDE/creating-podcasts.md b/docs/3-USER-GUIDE/creating-podcasts.md new file mode 100644 index 00000000..e91eb54f --- /dev/null +++ b/docs/3-USER-GUIDE/creating-podcasts.md @@ -0,0 +1,676 @@ +# Creating Podcasts - Turn Research into Audio + +Podcasts let you consume your research passively. This guide covers the complete workflow from setup to download. + +--- + +## Quick-Start: Your First Podcast (5 Minutes) + +``` +1. Go to your notebook +2. Click "Generate Podcast" +3. Select sources to include +4. Choose a speaker profile (or use default) +5. Click "Generate" +6. Wait 3-10 minutes (non-blocking) +7. Download MP3 when ready +8. Done! +``` + +That's the minimum. Let's make it better. + +--- + +## Step-by-Step: The Complete Workflow + +### Step 1: Prepare Your Notebook + +``` +Before generating, make sure: + +✓ You have sources added + (At least 1-2 sources) + +✓ Sources have been processed + (Green "Ready" status) + +✓ Notes are organized + (If you want notes included) + +✓ You know your message + (What's the main story?) + +Typical preparation: 5-10 minutes +``` + +### Step 2: Choose Content + +``` +Click "Generate Podcast" + +You'll see: +- List of all sources in notebook +- List of all notes + +Select which to include: +☑ Paper A (primary source) +☑ Paper B (supporting source) +☐ Old note (not relevant) +✓ Analysis note (important) + +What to include: +- Primary sources: Always include +- Supporting sources: Usually include +- Notes: Include your analysis/insights +- Everything: Can overload podcast + +Recommended: 3-5 sources per podcast +``` + +### Step 3: Choose Episode Profile + +An episode profile defines the structure and tone. + +**Option A: Use Preset Profile** + +``` +Open Notebook provides templates: + +Academic Presentation (Monologue) +├─ 1 speaker +├─ Tone: Educational +└─ Format: Expert explaining topic + +Expert Interview (2-speaker) +├─ 2 speakers: Host + Expert +├─ Tone: Q&A, conversational +└─ Format: Interview with expert + +Debate Format (2-speaker) +├─ 2 speakers: Pro vs. 
Con +├─ Tone: Discussion, disagreement +└─ Format: Debate about the topic + +Panel Discussion (3-4 speaker) +├─ 3-4 speakers: Different perspectives +├─ Tone: Thoughtful discussion +└─ Format: Each brings different expertise + +Solo Explanation (Monologue) +├─ 1 speaker +├─ Tone: Conversational, friendly +└─ Format: Personal explanation +``` + +**Pick based on your content:** +- One main idea → Academic Presentation +- You want to explain → Solo Explanation +- Two competing views → Debate Format +- Multiple perspectives → Panel Discussion +- Want to explore → Expert Interview + +### Step 4: Customize Episode Profile (Optional) + +If presets don't fit, customize: + +``` +Episode Profile +├─ Title: "AI Safety in 2026" +├─ Description: "Exploring current approaches" +├─ Length target: 20 minutes +├─ Tone: "Academic but accessible" +├─ Focus areas: +│ ├─ Main approaches to alignment +│ ├─ Pros and cons comparison +│ └─ Open questions +├─ Audience: "Researchers new to field" +└─ Format: "Debate between two perspectives" + +How to set: +1. Click "Customize" +2. Edit each field +3. Click "Save Profile" +4. System uses your profile for outline generation +``` + +### Step 5: Create or Select Speakers + +Speakers are the "voice" of your podcast. + +**Option A: Use Preset Speakers** + +``` +Open Notebook provides templates: + +"Expert Alex" +- Expertise: Deep knowledge +- Personality: Rigorous, patient +- TTS: OpenAI (clear voice) + +"Curious Sam" +- Expertise: Curious newcomer +- Personality: Asks questions +- TTS: Google (natural voice) + +"Skeptic Jordan" +- Expertise: Critical perspective +- Personality: Challenges assumptions +- TTS: ElevenLabs (warm voice) + +For your first podcast: Use presets +For custom podcast: Create your own +``` + +**Option B: Create Custom Speakers** + +``` +Click "Add Speaker" + +Fill in: + +Name: "Dr. Research Expert" + +Expertise: +"20 years in AI safety research, + deep knowledge of alignment approaches" + +Personality: +"Rigorous, academic style, + explains clearly, asks good questions" + +Voice Configuration: +- TTS Provider: OpenAI / Google / ElevenLabs / Local +- Voice selection: Choose from available voices +- Accent (optional): British / American / etc. + +Example: +Name: Dr. Research Expert +Expertise: AI safety alignment research +Personality: Rigorous, academic but accessible +Voice: ElevenLabs - professional male voice +``` + +### Step 6: Generate Podcast + +``` +1. Review your setup: + Sources: ✓ Selected + Profile: ✓ Episode profile chosen + Speakers: ✓ Speakers configured + +2. Click "Generate Podcast" + +3. System begins: + - Analyzing your content + - Creating outline + - Writing dialogue + - Generating audio + - Mixing speakers + +4. Status shows progress: + 20% Outline generation + 40% Dialogue writing + 60% Audio synthesis + 80% Mixing + 100% Complete + +Processing time: +- 5 minutes of content: 3-5 minutes +- 15 minutes of content: 5-10 minutes +- 30 minutes of content: 10-20 minutes +``` + +### Step 7: Review and Download + +``` +When complete: + +Preview: +- Play audio sample +- Review transcript +- Check duration + +Options: +✓ Download as MP3 - Save to computer +✓ Stream directly - Listen in browser +✓ Share link - Get shareable URL (if public) +✓ Regenerate - Try different speakers/profile + +Download: +1. Click "Download as MP3" +2. Choose quality: 128kbps / 192kbps / 320kbps +3. Save file: podcast_[notebook]_[date].mp3 +4. Listen! 
+```
+
+---
+
+## Understanding What Happens Behind the Scenes
+
+### The Generation Pipeline
+
+```
+Stage 1: CONTENT ANALYSIS (1 minute)
+  Your sources → What's the main story?
+               → Key themes?
+               → Debate points?
+
+Stage 2: OUTLINE CREATION (2-3 minutes)
+  Themes → Episode structure
+         → Section breakdown
+         → Talking points
+
+Stage 3: DIALOGUE WRITING (2-3 minutes)
+  Outline → Convert to natural dialogue
+          → Add speaker personalities
+          → Create flow and transitions
+
+Stage 4: AUDIO SYNTHESIS (3-5 minutes per speaker)
+  Script + Speaker → Text-to-speech
+                   → Individual audio files
+                   → High quality audio
+
+Stage 5: MIXING & MASTERING (1-2 minutes)
+  Multiple audio → Combine speakers
+                 → Level audio
+                 → Add polish
+                 → Final MP3
+
+Total: 10-20 minutes for typical podcast
+```
+
+---
+
+## Text-to-Speech Providers
+
+Different providers, different qualities.
+
+### OpenAI (Recommended)
+
+```
+Voices: 6 options (Alloy, Echo, Fable, Onyx, Nova, Shimmer)
+Quality: Good, natural sounding
+Speed: Fast
+Cost: ~$0.015 per minute
+Best for: General purpose, natural speech
+Example: "I have to say, the research shows..."
+```
+
+### Google TTS
+
+```
+Voices: Many options, various accents
+Quality: Excellent, very natural
+Speed: Fast
+Cost: ~$0.004 per minute
+Best for: High quality output, accents
+Example: "The research demonstrates that..."
+```
+
+### ElevenLabs
+
+```
+Voices: 100+ voices, highly customizable
+Quality: Exceptional, very expressive
+Speed: Slower (5-10 seconds per phrase)
+Cost: ~$0.10 per minute
+Best for: Premium quality, emotional range
+Example: [Can convey emotion and tone]
+```
+
+### Local TTS (Free)
+
+```
+Voices: Limited, basic options
+Quality: Basic, robotic
+Speed: Depends on hardware (slow)
+Cost: Free (local processing)
+Best for: Privacy, testing, offline use
+Example: "The research shows..."
+Privacy: Everything stays on your computer
+```
+
+### Which Provider to Choose?
+
+```
+For your first podcast: Google (quality/cost balance)
+For privacy-sensitive: Local TTS (free, private)
+For premium quality: ElevenLabs (best voices)
+For budget: Google (cheapest quality option)
+For speed: OpenAI (fast generation)
+```
+
+---
+
+## Tips for Better Podcasts
+
+### Choose Right Profile
+
+```
+Single source analysis → Academic Presentation
+  "Explaining one paper to someone new"
+
+Comparing two approaches → Debate Format
+  "Pros and cons of different methods"
+
+Multiple sources + insights → Panel Discussion
+  "Different experts discussing topic"
+
+Narrative exploration → Expert Interview
+  "Host interviewing research expert"
+
+Personal take → Solo Explanation
+  "You explaining your analysis"
+```
+
+### Create Good Speakers
+
+```
+Good Speaker:
+✓ Clear expertise (know what they're talking about)
+✓ Distinct personality (not generic)
+✓ Good voice choice (matches personality)
+✓ Realistic backstory (feels like real person)
+
+Bad Speaker:
+✗ Generic expertise ("good at research")
+✗ No personality ("just reads")
+✗ Mismatched voice (deep voice for young person)
+✗ Contradicts personality (serious person uses casual voice)
+```
+
+### Focus Content
+
+```
+Better: Podcast on ONE specific topic
+  "How transformers work" (15 minutes, focused)
+
+Worse: Podcast on everything
+  "All of AI 2025" (2 hours, unfocused)
+
+Guideline:
+- 5-10 minutes: One narrow topic
+- 15-20 minutes: One broad topic
+- 30+ minutes: Multiple related subtopics
+
+Shorter is usually better for podcasts. 
+``` + +### Optimize Source Selection + +``` +Too much content: + "Here are all 20 papers" + → Podcast becomes 2+ hours + → Unfocused + → Low quality + +Right amount: + "Here are 3 key papers" + → Podcast is 15-20 minutes + → Focused + → High quality + +Rule: 3-5 sources per podcast + Remove long background papers + Keep focused on main topic +``` + +--- + +## Quality Troubleshooting + +### Audio Sounds Robotic + +**Problem**: TTS voice sounds unnatural + +**Solutions**: +``` +1. Switch provider: Try Google or ElevenLabs instead +2. Choose different voice: Some voices more natural +3. Shorter sentences: Very long sentences sound robotic +4. Adjust pacing: Ask for "natural, conversational pacing" +``` + +### Audio Sounds Unclear + +**Problem**: Hard to understand what's being said + +**Solutions**: +``` +1. Re-generate with different speaker +2. Try different TTS provider +3. Use speakers with clear accents +4. Lower background noise (if any) +5. Increase speech rate (if too slow) +``` + +### Missing Content + +**Problem**: Important information isn't in podcast + +**Solutions**: +``` +1. Include that source in content selection +2. Review generated outline (check before generating) +3. Regenerate with clearer profile instructions +4. Try different model (more thorough model) +``` + +### Speakers Don't Match + +**Problem**: Speakers sound like same person + +**Solutions**: +``` +1. Choose different TTS providers (OpenAI + Google) +2. Choose very different voice options +3. Increase personality differences in profile +4. Try different speaker count (2 vs 3 vs 4) +``` + +### Generation Failed + +**Problem**: "Podcast generation failed" + +**Solutions**: +``` +1. Check internet connection (especially TTS) +2. Try again (might be temporary issue) +3. Use local TTS (doesn't need internet) +4. Reduce source count (less to process) +5. Contact support if persistent +``` + +--- + +## Advanced: Multiple Podcasts from Same Research + +You can generate different podcasts from one notebook: + +``` +Podcast 1: Overview + Profile: Academic Presentation + Sources: Papers A, B, C + Speakers: One expert + Length: 15 minutes + +→ Use for "What's this about?" understanding + +Podcast 2: Deep Dive + Profile: Expert Interview + Sources: Paper A (Full) + B, C (Summary) + Speakers: Expert + Interviewer + Length: 30 minutes + +→ Use for detailed exploration + +Podcast 3: Debate + Profile: Debate Format + Sources: Papers A vs B (different approaches) + Speakers: Pro-A speaker + Pro-B speaker + Length: 20 minutes + +→ Use for comparing approaches +``` + +Each tells the same story from different angles. + +--- + +## Exporting and Sharing + +### Download MP3 + +``` +1. Generation complete +2. Click "Download" +3. Choose quality: + - 128 kbps: Smallest file, lower quality + - 192 kbps: Balanced (recommended) + - 320 kbps: Highest quality, largest file +4. Save to computer +5. Use in podcast app, upload to platform, etc. +``` + +### Export Transcript + +``` +1. Click "Export Transcript" +2. Get full dialogue as text +3. Useful for: + - Blog post content + - Show notes + - Searchable text version + - Accessibility +``` + +### Share Link + +``` +If podcast is public: +1. Click "Share" +2. Get shareable link +3. Others can listen/download +4. Useful for: + - Sharing with team + - Public distribution + - Embedding on website +``` + +### Publish to Podcast Platforms + +``` +If you want to distribute (future feature): +1. Download MP3 +2. Upload to platform (Spotify, Apple Podcasts, etc.) +3. 
Add metadata (title, description, episode notes)
+4. Your research becomes a published podcast!
+```
+
+---
+
+## Best Practices
+
+### Before Generation
+- [ ] Sources are processed and ready
+- [ ] You've chosen content to include
+- [ ] You have a clear episode profile
+- [ ] Speakers are well-defined
+- [ ] Content is focused (3-5 sources max)
+
+### During Generation
+- Don't close the browser (use background processing)
+- Check back in 5-15 minutes
+- Review transcript when complete
+- Listen to sample before downloading
+
+### After Generation
+- [ ] Download MP3 to computer
+- [ ] Save in organized folder
+- [ ] Add metadata (title, description, date)
+- [ ] Test listening in podcast app
+- [ ] Share with colleagues for feedback
+
+---
+
+## Use Cases
+
+### Academic Researcher
+```
+Podcast: Explaining your dissertation
+Speakers: You + colleague
+Content: Your papers + supporting research
+Use: Share with advisors, test explanations
+```
+
+### Content Creator
+```
+Podcast: Article-to-podcast conversion
+Speakers: Narrator + expert
+Content: Articles you've researched
+Use: Transform article into podcast version
+```
+
+### Team Research
+```
+Podcast: Weekly research updates
+Speakers: Multiple team members
+Content: This week's papers
+Use: Team updates, knowledge sharing
+```
+
+### Learning/Teaching
+```
+Podcast: Teaching material
+Speakers: Teacher + inquisitive student
+Content: Textbook + examples
+Use: Students learn while commuting
+```
+
+---
+
+## Cost Breakdown Example
+
+### Generate a 15-minute podcast with ElevenLabs
+
+```
+Generation (outline + dialogue):
+  No charge (included in service)
+
+Text-to-speech:
+  Billed on generated audio, which equals episode length
+  (speakers alternate, so 2 speakers still produce ~15 minutes total)
+  ElevenLabs: ~$0.10 per minute
+  Cost: 15 × $0.10 = $1.50
+
+Processing:
+  Included (no additional cost)
+
+Total: ~$1.50 per podcast
+
+Cheaper options:
+  With Google TTS: ~$0.06
+  With OpenAI: ~$0.23
+  With Local TTS: ~$0.00
+```
+
+---
+
+## Summary: Podcasts as Research Tool
+
+Podcasts transform how you consume research:
+
+```
+Before: Reading papers takes time, focus
+After: Listen while commuting, exercising, doing chores
+
+Before: Can't share complex research easily
+After: Share audio of your analysis
+
+Before: Different consumption styles isolated
+After: Same research, multiple formats (read/listen)
+```
+
+Podcasts aren't just for entertainment—they're a tool for making research more accessible, shareable, and consumable.
+
+That's why they're important for Open Notebook.
diff --git a/docs/3-USER-GUIDE/index.md b/docs/3-USER-GUIDE/index.md
new file mode 100644
index 00000000..d5ea020c
--- /dev/null
+++ b/docs/3-USER-GUIDE/index.md
@@ -0,0 +1,193 @@
+# User Guide - How to Use Open Notebook
+
+This guide covers practical, step-by-step usage of Open Notebook features. You already understand the concepts; now learn how to actually use them.
+
+> **Prerequisite**: Review [2-CORE-CONCEPTS](../2-CORE-CONCEPTS/index.md) first to understand the mental models (notebooks, sources, notes, chat, transformations, podcasts).
+
+---
+
+## Start Here
+
+### [Interface Overview](interface-overview.md)
+Learn the layout before diving in. Understand the three-panel design and where everything is.
+
+---
+
+## Seven Core Features
+
+### 1. [Adding Sources](adding-sources.md)
+How to bring content into your notebook. Supports PDFs, web links, audio, video, text, and more. 
+ +**Quick links:** +- Upload a PDF or document +- Add a web link or article +- Transcribe audio or video +- Paste text directly +- Common mistakes + fixes + +--- + +### 2. [Working with Notes](working-with-notes.md) +Creating, organizing, and using notes (both manual and AI-generated). + +**Quick links:** +- Create a manual note +- Save AI responses as notes +- Apply transformations to generate insights +- Organize with tags and naming +- Use notes across your notebook + +--- + +### 3. [Chat Effectively](chat-effectively.md) +Have conversations with AI about your sources. Manage context to control what AI sees. + +**Quick links:** +- Start your first chat +- Select which sources go in context +- Ask effective questions +- Use follow-ups productively +- Understand citations and verify claims + +--- + +### 4. [Creating Podcasts](creating-podcasts.md) +Convert your research into audio dialogue for passive consumption. + +**Quick links:** +- Create your first podcast +- Choose or customize speakers +- Select TTS provider +- Generate and download +- Common audio quality fixes + +--- + +### 5. [Search Effectively](search.md) +Two search modes: text-based (keyword) and vector-based (semantic). Know when to use each. + +**Quick links:** +- Text search vs vector search (when to use) +- Running effective searches +- Using the Ask feature for comprehensive answers +- Saving search results as notes +- Troubleshooting poor results + +--- + +### 6. [Transformations](transformations.md) +Batch-process sources with predefined templates. Extract the same insights from multiple documents. + +**Quick links:** +- Built-in transformation templates +- Creating custom transformations +- Applying to single or multiple sources +- Managing transformation output + +--- + +### 7. [Citations](citations.md) +Verify AI claims by tracing them back to source material. Understand the citation system. + +**Quick links:** +- Reading and clicking citations +- Verifying claims against sources +- Requesting better citations +- Saving cited content as notes + +--- + +## Which Feature for Which Task? + +``` +Task: "I want to explore a topic with follow-ups" +→ Use: Chat (add sources, select context, have conversation) + +Task: "I want one comprehensive answer" +→ Use: Search / Ask (system finds relevant content) + +Task: "I want to extract the same info from many sources" +→ Use: Transformations (define template, apply to all) + +Task: "I want summaries of all my sources" +→ Use: Transformations (with built-in summary template) + +Task: "I want to share my research in audio form" +→ Use: Podcasts (create speakers, generate episode) + +Task: "I want to find that quote I remember" +→ Use: Search / Text Search (keyword matching) + +Task: "I'm exploring a concept without knowing exact words" +→ Use: Search / Vector Search (semantic similarity) +``` + +--- + +## Quick-Start Checklist: First 15 Minutes + +**Step 1: Create a Notebook (1 min)** +- Name: Something descriptive ("Q1 Market Research", "AI Safety Papers", etc.) +- Description: 1-2 sentences about what you're researching +- This is your research container + +**Step 2: Add Your First Source (3 min)** +- Pick one: PDF, web link, or text +- Follow [Adding Sources](adding-sources.md) +- Wait for processing (usually 30-60 seconds) + +**Step 3: Chat About It (3 min)** +- Go to Chat +- Select your source (set context to "Full Content") +- Ask a simple question: "What are the main points?" +- See AI respond with citations + +**Step 4: Save Insight as Note (2 min)** +- Good response? 
Click "Save as Note" +- Name it something useful ("Main points from source X") +- Now you have a captured insight + +**Step 5: Explore More (6 min)** +- Add another source +- Chat about both together +- Ask a question that compares them +- Follow up with clarifying questions + +**Done!** You've used the core workflow: notebook → sources → chat → notes + +--- + +## Common Mistakes to Avoid + +| Mistake | Problem | Fix | +|---------|---------|-----| +| Adding everything to one notebook | No isolation between projects | Create separate notebooks for different topics | +| Expecting AI to know your context | Questions get generic answers | Describe your research focus in chat context | +| Forgetting to cite sources | You can't verify claims | Click citations to check source chunks | +| Using Chat for one-time questions | Slower than Ask | Use Ask for comprehensive Q&A, Chat for exploration | +| Adding huge PDFs without chunking | Slow processing, poor search | Break into multiple smaller sources if possible | +| Using same context for all chats | Expensive, unfocused | Adjust context level for each chat | +| Ignoring vector search | Only finding exact keywords | Use vector search to explore conceptually | + +--- + +## Next Steps + +1. **Follow each guide** in order (sources → notes → chat → podcasts → search) +2. **Create your first notebook** with real content +3. **Practice each feature** with your own research +4. **Return to CORE-CONCEPTS** if you need to understand the "why" + +--- + +## Getting Help + +- **Feature not working?** → Check the feature's guide (look for "Troubleshooting" section) +- **Error message?** → Check [6-TROUBLESHOOTING](../6-TROUBLESHOOTING/index.md) +- **Understanding how something works?** → Check [2-CORE-CONCEPTS](../2-CORE-CONCEPTS/index.md) +- **Setting up for the first time?** → Go back to [1-INSTALLATION](../1-INSTALLATION/index.md) +- **For developers** → See [7-DEVELOPMENT](../7-DEVELOPMENT/index.md) + +--- + +**Ready to start?** Pick the guide for what you want to do first! diff --git a/docs/3-USER-GUIDE/interface-overview.md b/docs/3-USER-GUIDE/interface-overview.md new file mode 100644 index 00000000..0ca78cf3 --- /dev/null +++ b/docs/3-USER-GUIDE/interface-overview.md @@ -0,0 +1,377 @@ +# Interface Overview - Finding Your Way Around + +Open Notebook uses a clean three-panel layout. This guide shows you where everything is. + +--- + +## The Main Layout + +``` +┌─────────────────────────────────────────────────────────────┐ +│ [Logo] Notebooks Search Podcasts Models Settings │ +├──────────────┬──────────────┬───────────────────────────────┤ +│ │ │ │ +│ SOURCES │ NOTES │ CHAT │ +│ │ │ │ +│ Your docs │ Your │ Talk to AI about │ +│ PDFs, URLs │ insights │ your sources │ +│ Videos │ summaries │ │ +│ │ │ │ +│ [+Add] │ [+Write] │ [Type here...] │ +│ │ │ │ +└──────────────┴──────────────┴───────────────────────────────┘ +``` + +--- + +## Navigation Bar + +The top navigation takes you to main sections: + +| Icon | Page | What It Does | +|------|------|--------------| +| **Notebooks** | Main workspace | Your research projects | +| **Search** | Ask & Search | Query across all notebooks | +| **Podcasts** | Audio generation | Manage podcast profiles | +| **Models** | AI configuration | Set up providers and models | +| **Settings** | Preferences | App configuration | + +--- + +## Left Panel: Sources + +Your research materials live here. 
+ +### What You'll See + +``` +┌─────────────────────────┐ +│ Sources (5) │ +│ [+ Add Source] │ +├─────────────────────────┤ +│ ┌─────────────────┐ │ +│ │ 📄 Paper.pdf │ │ +│ │ 🟢 Full Content │ │ +│ │ [⋮ Menu] │ │ +│ └─────────────────┘ │ +│ │ +│ ┌─────────────────┐ │ +│ │ 🔗 Article URL │ │ +│ │ 🟡 Summary Only │ │ +│ │ [⋮ Menu] │ │ +│ └─────────────────┘ │ +└─────────────────────────┘ +``` + +### Source Card Elements + +- **Icon** - File type (PDF, URL, video, etc.) +- **Title** - Document name +- **Context indicator** - What AI can see: + - 🟢 Full Content + - 🟡 Summary Only + - ⛔ Not in Context +- **Menu (⋮)** - Edit, transform, delete + +### Add Source Button + +Click to add: +- File upload (PDF, DOCX, etc.) +- Web URL +- YouTube video +- Plain text + +--- + +## Middle Panel: Notes + +Your insights and AI-generated content. + +### What You'll See + +``` +┌─────────────────────────┐ +│ Notes (3) │ +│ [+ Write Note] │ +├─────────────────────────┤ +│ ┌─────────────────┐ │ +│ │ 📝 My Analysis │ │ +│ │ Manual note │ │ +│ │ Jan 3, 2026 │ │ +│ └─────────────────┘ │ +│ │ +│ ┌─────────────────┐ │ +│ │ 🤖 Summary │ │ +│ │ From transform │ │ +│ │ Jan 2, 2026 │ │ +│ └─────────────────┘ │ +└─────────────────────────┘ +``` + +### Note Card Elements + +- **Icon** - Note type (manual 📝 or AI 🤖) +- **Title** - Note name +- **Origin** - How it was created +- **Date** - When created + +### Write Note Button + +Click to: +- Create manual note +- Add your own insights +- Markdown supported + +--- + +## Right Panel: Chat + +Your AI conversation space. + +### What You'll See + +``` +┌───────────────────────────────┐ +│ Chat │ +│ Session: Research Discussion │ +│ [+ New Session] [Sessions ▼] │ +├───────────────────────────────┤ +│ │ +│ You: What's the main │ +│ finding? │ +│ │ +│ AI: Based on the paper [1], │ +│ the main finding is... │ +│ [Save as Note] │ +│ │ +│ You: Tell me more about │ +│ the methodology. │ +│ │ +├───────────────────────────────┤ +│ Context: 3 sources (12K tok) │ +├───────────────────────────────┤ +│ [Type your message...] [↑] │ +└───────────────────────────────┘ +``` + +### Chat Elements + +- **Session selector** - Switch between conversations +- **Message history** - Your conversation +- **Save as Note** - Keep good responses +- **Context indicator** - What AI can see +- **Input field** - Type your questions + +--- + +## Context Indicators + +These show what AI can access: + +### Token Counter + +``` +Context: 3 sources (12,450 tokens) + ↑ ↑ + Sources Approximate cost indicator + included +``` + +### Per-Source Indicators + +| Indicator | Meaning | AI Access | +|-----------|---------|-----------| +| 🟢 Full Content | Complete text | Everything | +| 🟡 Summary Only | AI summary | Key points only | +| ⛔ Not in Context | Excluded | Nothing | + +Click any source to change its context level. 
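+
+A quick way to predict what the token counter will show before adding a large source: assume roughly 4 characters per token for English text. A minimal sketch of that heuristic (the app's indicator does its own counting, so treat the result as approximate):
+
+```python
+def estimate_tokens(text: str) -> int:
+    """Rough token count: ~4 characters per token for English text."""
+    return max(1, len(text) // 4)
+
+# Stand-ins for the extracted text of three sources in a notebook.
+extracted = {
+    "Paper.pdf": "word " * 8_000,
+    "Article URL": "word " * 2_000,
+    "My Analysis (note)": "word " * 400,
+}
+
+total = sum(estimate_tokens(text) for text in extracted.values())
+print(f"Context: {len(extracted)} sources (~{total:,} tokens)")
+```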
+ +--- + +## Podcasts Tab + +Inside a notebook, switch to Podcasts: + +``` +┌───────────────────────────────┐ +│ [Chat] [Podcasts] │ +├───────────────────────────────┤ +│ Episode Profile: [Select ▼] │ +│ │ +│ Speakers: │ +│ ├─ Host: Alex (OpenAI) │ +│ └─ Guest: Sam (Google) │ +│ │ +│ Include: │ +│ ☑ Paper.pdf │ +│ ☑ My Analysis (note) │ +│ ☐ Background article │ +│ │ +│ [Generate Podcast] │ +└───────────────────────────────┘ +``` + +--- + +## Settings Page + +Access via navigation bar → Settings: + +### Key Sections + +| Section | What It Controls | +|---------|------------------| +| **Processing** | Document and URL extraction engines | +| **Embedding** | Auto-embed settings | +| **Files** | Auto-delete uploads after processing | +| **YouTube** | Preferred transcript languages | + +--- + +## Models Page + +Configure AI providers: + +``` +┌───────────────────────────────────────┐ +│ Models │ +├───────────────────────────────────────┤ +│ Language Models │ +│ ┌─────────────────────────────────┐ │ +│ │ GPT-4o (OpenAI) [Edit] │ │ +│ │ Claude Sonnet (Anthropic) │ │ +│ │ Llama 3.3 (Ollama) [⭐] │ │ +│ └─────────────────────────────────┘ │ +│ [+ Add Model] │ +│ │ +│ Embedding Models │ +│ ┌─────────────────────────────────┐ │ +│ │ text-embedding-3-small [⭐] │ │ +│ └─────────────────────────────────┘ │ +│ │ +│ Text-to-Speech │ +│ ┌─────────────────────────────────┐ │ +│ │ OpenAI TTS [⭐] │ │ +│ │ Google TTS │ │ +│ └─────────────────────────────────┘ │ +└───────────────────────────────────────┘ +``` + +- **⭐** = Default model for that category +- **[Edit]** = Modify configuration +- **[+ Add]** = Add new model + +--- + +## Search Page + +Query across all notebooks: + +``` +┌───────────────────────────────────────┐ +│ Search │ +├───────────────────────────────────────┤ +│ [What are you looking for? ] [🔍] │ +│ │ +│ Search type: [Text ▼] [Vector ▼] │ +│ Search in: [Sources] [Notes] │ +├───────────────────────────────────────┤ +│ Results (15) │ +│ │ +│ 📄 Paper.pdf - Notebook: Research │ +│ "...the transformer model..." │ +│ │ +│ 📝 My Analysis - Notebook: Research │ +│ "...key findings include..." │ +└───────────────────────────────────────┘ +``` + +--- + +## Common Actions + +### Create a Notebook + +``` +Notebooks page → [+ New Notebook] → Enter name → Create +``` + +### Add a Source + +``` +Inside notebook → [+ Add Source] → Choose type → Upload/paste → Wait for processing +``` + +### Ask a Question + +``` +Inside notebook → Chat panel → Type question → Enter → Read response +``` + +### Save AI Response + +``` +Get good response → Click [Save as Note] → Edit title → Save +``` + +### Change Context Level + +``` +Click source → Context dropdown → Select level → Changes apply immediately +``` + +### Generate Podcast + +``` +Podcasts tab → Select profile → Choose sources → [Generate] → Wait → Download +``` + +--- + +## Keyboard Shortcuts + +| Key | Action | +|-----|--------| +| `Enter` | Send chat message | +| `Shift + Enter` | New line in chat | +| `Escape` | Close dialogs | +| `Ctrl/Cmd + F` | Browser find | + +--- + +## Mobile View + +On smaller screens, the three-panel layout stacks vertically: + +``` +┌─────────────────┐ +│ SOURCES │ +│ (tap to expand) +├─────────────────┤ +│ NOTES │ +│ (tap to expand) +├─────────────────┤ +│ CHAT │ +│ (always visible) +└─────────────────┘ +``` + +- Panels collapse to save space +- Tap headers to expand/collapse +- Chat remains accessible +- Full functionality preserved + +--- + +## Tips for Efficient Navigation + +1. 
**Use keyboard** - Enter sends messages, Escape closes dialogs +2. **Context first** - Set source context before chatting +3. **Sessions** - Create new sessions for different topics +4. **Search globally** - Use Search page to find across all notebooks +5. **Models page** - Bookmark your preferred models + +--- + +Now you know where everything is. Start with [Adding Sources](adding-sources.md) to begin your research! diff --git a/docs/3-USER-GUIDE/search.md b/docs/3-USER-GUIDE/search.md new file mode 100644 index 00000000..fa69351a --- /dev/null +++ b/docs/3-USER-GUIDE/search.md @@ -0,0 +1,475 @@ +# Search Effectively - Finding What You Need + +Search is your gateway into your research. This guide covers two search modes and when to use each. + +--- + +## Quick-Start: Find Something + +### Simple Search + +``` +1. Go to your notebook +2. Type in search box +3. See results (both sources and notes) +4. Click result to view source/note +5. Done! + +That works for basic searches. +But you can do much better... +``` + +--- + +## Two Search Modes Explained + +Open Notebook has two fundamentally different search approaches. + +### Search Type 1: TEXT SEARCH (Keyword Matching) + +**How it works:** +- You search for words: "transformer" +- System finds chunks containing "transformer" +- Ranked by relevance: frequency, position, context + +**Speed:** Very fast (instant) + +**When to use:** +- You remember exact words or phrases +- You're looking for specific terms +- You want precise keyword matches +- You need exact quotes + +**Example:** +``` +Search: "attention mechanism" +Results: + 1. "The attention mechanism allows..." (perfect match) + 2. "Attention and other mechanisms..." (partial match) + 3. "How mechanisms work in attention..." (includes words separately) + +All contain "attention" AND "mechanism" +Ranked by how close together they are +``` + +**What it finds:** +- Exact phrases: "transformer model" +- Individual words: transformer OR model (too broad) +- Names: "Vaswani et al." +- Numbers: "1994", "GPT-4" +- Technical terms: "LSTM", "convolution" + +**What it doesn't find:** +- Similar words: searching "attention" won't find "focus" +- Synonyms: searching "large" won't find "big" +- Concepts: searching "similarity" won't find "likeness" + +--- + +### Search Type 2: VECTOR SEARCH (Semantic/Concept Matching) + +**How it works:** +- Your search converted to embedding (vector) +- All chunks converted to embeddings +- System finds most similar embeddings +- Ranked by semantic similarity + +**Speed:** A bit slower (1-2 seconds) + +**When to use:** +- You're exploring a concept +- You don't know exact words +- You want semantically similar content +- You're discovering, not searching + +**Example:** +``` +Search: "What's the mechanism for understanding in models?" +(Notice: No chunk likely says exactly that) + +Results: + 1. "Mechanistic interpretability allows understanding..." (semantic match) + 2. "Feature attribution reveals how models work..." (conceptually similar) + 3. "Attention visualization shows model decisions..." 
(same topic) + +None contain your exact words +But all are semantically related +``` + +**What it finds:** +- Similar concepts: "understanding" + "interpretation" + "explainability" (all related) +- Paraphrases: "big" and "large" (same meaning) +- Related ideas: "safety" relates to "alignment" (connected concepts) +- Analogies: content about biological learning when searching "learning" + +**What it doesn't find:** +- Exact keywords: if you search a rare word, vector search might miss it +- Specific numbers: "1994" vs "1993" are semantically different +- Technical jargon: "LSTM" and "RNN" are different even if related + +--- + +## Decision: Text Search vs. Vector Search? + +``` +Question: "Do I remember the exact words?" + +→ YES: Use TEXT SEARCH + Example: "I remember the paper said 'attention is all you need'" + +→ NO: Use VECTOR SEARCH + Example: "I'm looking for content about how models process information" + +→ UNSURE: Try TEXT SEARCH first (faster) + If no results, try VECTOR SEARCH + +Text search: "I know what I'm looking for" +Vector search: "I'm exploring an idea" +``` + +--- + +## Step-by-Step: Using Each Search + +### Text Search + +``` +1. Go to search box +2. Type your keywords: "transformer", "attention", "2017" +3. Press Enter +4. Results appear (usually instant) +5. Click result to see context + +Results show: + - Which source contains it + - How many times it appears + - Relevance score + - Preview of surrounding text +``` + +### Vector Search + +``` +1. Go to search box +2. Type your concept: "How do models understand language?" +3. Choose "Vector Search" from dropdown +4. Press Enter +5. Results appear (1-2 seconds) +6. Click result to see context + +Results show: + - Semantically related chunks + - Similarity score (higher = more related) + - Preview of surrounding text + - Different sources mixed together +``` + +--- + +## The Ask Feature (Automated Search) + +Ask is different from simple search. It automatically searches, synthesizes, and answers. + +### How Ask Works + +``` +Stage 1: QUESTION UNDERSTANDING + "Compare the approaches in my papers" + → System: "This asks for comparison" + +Stage 2: SEARCH STRATEGY + → System: "I should search for each approach separately" + +Stage 3: PARALLEL SEARCHES + → Search 1: "Approach in paper A" + → Search 2: "Approach in paper B" + (Multiple searches happen at once) + +Stage 4: ANALYSIS & SYNTHESIS + → Per-result analysis: "Based on paper A, the approach is..." + → Per-result analysis: "Based on paper B, the approach is..." + → Final synthesis: "Comparing A and B: A differs from B in..." + +Result: Comprehensive answer, not just search results +``` + +### When to Use Ask vs. Simple Search + +| Task | Use | Why | +|------|-----|-----| +| "Find the quote about X" | **TEXT SEARCH** | Need exact words | +| "What does source A say about X?" | **TEXT SEARCH** | Direct, fast answer | +| "Find content about X" | **VECTOR SEARCH** | Semantic discovery | +| "Compare A and B" | **ASK** | Comprehensive synthesis | +| "What's the big picture?" | **ASK** | Full analysis needed | +| "How do these sources relate?" | **ASK** | Cross-source synthesis | +| "I remember something about X" | **TEXT SEARCH** | Recall memory | +| "I'm exploring the topic of X" | **VECTOR SEARCH** | Discovery mode | + +--- + +## Advanced Search Strategies + +### Strategy 1: Simple Search with Follow-Up + +``` +1. Text search: "attention mechanism" + Results: 50 matches + +2. Too many. Follow up with vector search: + "Why is attention useful?" 
(concept search) + Results: Most relevant papers/notes + +3. Better results with less noise +``` + +### Strategy 2: Ask for Comprehensive, Then Search for Details + +``` +1. Ask: "What are the main approaches to X?" + Result: Comprehensive answer about A, B, C + +2. Use that to identify specific sources + +3. Text search in those specific sources: + "Why did they choose method X?" + Result: Detailed information +``` + +### Strategy 3: Vector Search for Discovery, Text for Verification + +``` +1. Vector search: "How do transformers generalize?" + Results: Related conceptual papers + +2. Skim to understand landscape + +3. Text search in promising sources: + "generalization", "extrapolation", "transfer" + Results: Specific passages to read carefully +``` + +### Strategy 4: Combine Search with Chat + +``` +1. Vector search: "What's new in AI 2026?" + Results: Latest papers + +2. Go to Chat +3. Add those papers to context +4. Ask detailed follow-up questions +5. Get deep analysis of results +``` + +--- + +## Search Quality Issues & Fixes + +### Getting No Results + +| Problem | Cause | Solution | +|---------|-------|----------| +| Text search: no results | Word doesn't appear | Try vector search instead | +| Vector search: no results | Concept not in content | Try broader search term | +| Both empty | Content not in notebook | Add sources to notebook | +| | Sources not processed | Wait for processing to complete | + +### Getting Too Many Results + +| Problem | Cause | Solution | +|---------|-------|----------| +| 1000+ results | Search too broad | Be more specific | +| | All sources | Filter by source | +| | Keyword matches rare words | Use vector search instead | + +### Getting Wrong Results + +| Problem | Cause | Solution | +|---------|-------|----------| +| Results irrelevant | Search term has multiple meanings | Provide more context | +| | Using text search for concepts | Try vector search | +| Different meaning | Homonym (word means multiple things) | Add context (e.g., "attention mechanism") | + +### Getting Low Quality Results + +| Problem | Cause | Solution | +|---------|-------|----------| +| Results don't match intent | Vague search term | Be specific ("Who invented X?" vs "X") | +| | Concept not well-represented | Add more sources on that topic | +| | Vector embedding not trained on domain | Use text search as fallback | + +--- + +## Tips for Better Searches + +### For Text Search +1. **Be specific** — "attention mechanism" not just "attention" +2. **Use exact phrases** — Put quotes around: "attention is all you need" +3. **Include context** — "LSTM vs attention" not just "attention" +4. **Use technical terms** — These are usually more precise +5. **Try synonyms** — If first search fails, try related terms + +### For Vector Search +1. **Ask a question** — "What's the best way to X?" is better than "best way" +2. **Use natural language** — Explain what you're looking for +3. **Be specific about intent** — "Compare X and Y" not "X and Y" +4. **Include context** — "In machine learning, how..." vs just "how..." +5. **Think conceptually** — What idea are you exploring? + +### General Tips +1. **Start broad, then narrow** — "AI papers" → "transformers" → "attention mechanism" +2. **Try both search types** — Each finds different things +3. **Use Ask for complex questions** — Don't just search +4. **Save good results as notes** — Create knowledge base +5. 
**Filter by source if needed** — "Search in Paper A only"

---

## Search Examples

### Example 1: Finding a Specific Fact

**Goal:** "Find the date the transformer was introduced"

```
Step 1: Text search
   "transformer 2017" (or year you remember)

If that works: Done!

If no results: Try
   "attention is all you need" (famous paper title)

Check result for exact date
```

### Example 2: Exploring a Concept

**Goal:** "Find content about alignment and interpretability"

```
Step 1: Vector search
   "How do we make AI interpretable?"

Results: Papers on interpretability, transparency, alignment

Step 2: Review results
   See which papers are most relevant

Step 3: Deep dive
   Go to Chat, add top 2-3 papers
   Ask detailed questions about alignment
```

### Example 3: Comprehensive Answer

**Goal:** "How do different approaches to AI safety compare?"

```
Step 1: Ask
   "Compare the main approaches to AI safety in my sources"

Result: Comprehensive analysis comparing approaches

Step 2: Identify sources
   From answer, see which papers were most relevant

Step 3: Deep dive
   Text search in those papers:
   "limitations", "critiques", "open problems"

Step 4: Save as notes
   Create comparison note from Ask result
```

### Example 4: Finding Patterns

**Goal:** "Find all papers mentioning transformers"

```
Step 1: Text search
   "transformer"

Results: All papers mentioning "transformer"

Step 2: Vector search
   "neural network architecture for sequence processing"

Results: Papers that don't say "transformer" but discuss the same concept

Step 3: Combine
   Union of text + vector results shows full landscape

Step 4: Analyze
   Go to Chat with all results
   Ask: "What's common across all these?"
```

---

## Search in the Workflow

How search fits with other features:

```
SOURCES
   ↓
SEARCH (find what matters)
   ├─ Text search (precise)
   ├─ Vector search (exploration)
   └─ Ask (comprehensive)
   ↓
CHAT (explore with follow-ups)
   ↓
TRANSFORMATIONS (batch extract)
   ↓
NOTES (save insights)
```

### Workflow Example

```
1. Add 10 papers to notebook

2. Search: "What's the state of the art?"
   (Vector search explores landscape)

3. Ask: "Compare these 3 approaches"
   (Comprehensive synthesis)

4. Chat: Deep questions about winner
   (Follow-up exploration)

5. Save best insights as notes
   (Knowledge capture)

6. Transform remaining papers
   (Batch extraction for later)

7. Create podcast from notes + sources
   (Share findings)
```

---

## Summary: Know Your Search

**TEXT SEARCH** — "I know what I'm looking for"
- Fast, precise, keyword-based
- Use when you remember exact words/phrases
- Best for: Finding specific facts, quotes, technical terms
- Speed: Instant

**VECTOR SEARCH** — "I'm exploring an idea"
- Slow-ish, concept-based, semantic
- Use when you're discovering connections
- Best for: Concept exploration, related ideas, synonyms
- Speed: 1-2 seconds

**ASK** — "I want a comprehensive answer"
- Auto-searches, auto-analyzes, synthesizes
- Use for complex questions needing multiple sources
- Best for: Comparisons, big-picture questions, synthesis
- Speed: 10-30 seconds

Pick the right tool for your search goal, and you'll find what you need faster.
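
If you're curious what "semantic similarity" means mechanically, vector search boils down to comparing embedding vectors, usually by cosine similarity. Here's a toy sketch with made-up 3-dimensional vectors (real embeddings have hundreds or thousands of dimensions and come from your embedding model):

```python
import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    dot = sum(x * y for x, y in zip(a, b))
    return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(x * x for x in b)))

query = [0.9, 0.1, 0.3]  # made-up embedding of "How do models understand language?"
chunks = {
    "Mechanistic interpretability allows understanding...": [0.8, 0.2, 0.4],
    "The 2017 budget allocated funds to...":                [0.1, 0.9, 0.2],
}

# Rank chunks by similarity to the query, highest first
for text, vec in sorted(chunks.items(), key=lambda kv: -cosine_similarity(query, kv[1])):
    print(f"{cosine_similarity(query, vec):.2f}  {text}")
```

Text search, by contrast, would return nothing here unless your exact keywords appeared in a chunk.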
diff --git a/docs/3-USER-GUIDE/transformations.md b/docs/3-USER-GUIDE/transformations.md new file mode 100644 index 00000000..beac2576 --- /dev/null +++ b/docs/3-USER-GUIDE/transformations.md @@ -0,0 +1,402 @@ +# Transformations - Batch Processing Your Sources + +Transformations apply the same analysis to multiple sources at once. Instead of asking the same question repeatedly, define a template and run it across your content. + +--- + +## When to Use Transformations + +| Use Transformations When | Use Chat Instead When | +|-------------------------|----------------------| +| Same analysis on many sources | One-off questions | +| Consistent output format needed | Exploratory conversation | +| Batch processing | Follow-up questions needed | +| Creating structured notes | Context changes between questions | + +**Example**: You have 10 papers and want a summary of each. Transformation does it in one operation. + +--- + +## Quick Start: Your First Transformation + +``` +1. Go to your notebook +2. Click "Transformations" in navigation +3. Select a built-in template (e.g., "Summary") +4. Select sources to transform +5. Click "Apply" +6. Wait for processing +7. New notes appear automatically +``` + +--- + +## Built-in Transformations + +Open Notebook includes ready-to-use templates: + +### Summary + +``` +What it does: Creates a 200-300 word overview +Output: Key points, main arguments, conclusions +Best for: Quick reference, getting the gist +``` + +### Key Concepts + +``` +What it does: Extracts main ideas and terminology +Output: List of concepts with explanations +Best for: Learning new topics, building vocabulary +``` + +### Methodology + +``` +What it does: Extracts research approach +Output: How the study was conducted +Best for: Academic papers, research review +``` + +### Takeaways + +``` +What it does: Extracts actionable insights +Output: What you should do with this information +Best for: Business documents, practical guides +``` + +### Questions + +``` +What it does: Generates questions the source raises +Output: Open questions, gaps, follow-up research +Best for: Literature review, research planning +``` + +--- + +## Creating Custom Transformations + +### Step-by-Step + +``` +1. Go to "Transformations" page +2. Click "Create New" +3. Enter a name: "Academic Paper Analysis" +4. Write your prompt template: + + "Analyze this academic paper and extract: + + 1. **Research Question**: What problem does this address? + 2. **Hypothesis**: What did they predict? + 3. **Methodology**: How did they test it? + 4. **Key Findings**: What did they discover? (numbered list) + 5. **Limitations**: What caveats do the authors mention? + 6. **Future Work**: What do they suggest next? + + Be specific and cite page numbers where possible." + +5. Click "Save" +6. Your transformation appears in the list +``` + +### Prompt Template Tips + +**Be specific about format:** +``` +Good: "List 5 key points as bullet points" +Bad: "What are the key points?" +``` + +**Request structure:** +``` +Good: "Create sections for: Summary, Methods, Results" +Bad: "Tell me about this paper" +``` + +**Ask for citations:** +``` +Good: "Cite page numbers for each claim" +Bad: (no citation request) +``` + +**Set length expectations:** +``` +Good: "In 200-300 words, summarize..." +Bad: "Summarize this" +``` + +--- + +## Applying Transformations + +### To a Single Source + +``` +1. In Sources panel, click source menu (⋮) +2. Select "Transform" +3. Choose transformation template +4. Click "Apply" +5. 
Note appears when done +``` + +### To Multiple Sources (Batch) + +``` +1. Go to Transformations page +2. Select your template +3. Check multiple sources +4. Click "Apply to Selected" +5. Processing runs in parallel +6. One note per source created +``` + +### Processing Time + +| Sources | Typical Time | +|---------|--------------| +| 1 source | 30 seconds - 1 minute | +| 5 sources | 2-3 minutes | +| 10 sources | 4-5 minutes | +| 20+ sources | 8-10 minutes | + +Processing runs in background. You can continue working. + +--- + +## Transformation Examples + +### Literature Review Template + +``` +Name: Literature Review Entry + +Prompt: +"For this research paper, create a literature review entry: + +**Citation**: [Author(s), Year, Title, Journal] +**Research Question**: What problem is addressed? +**Methodology**: What approach was used? +**Sample**: What population/data was studied? +**Key Findings**: +1. [Finding with page citation] +2. [Finding with page citation] +3. [Finding with page citation] +**Strengths**: What did this study do well? +**Limitations**: What are the gaps? +**Relevance**: How does this connect to my research? + +Keep each section to 2-3 sentences." +``` + +### Meeting Notes Template + +``` +Name: Meeting Summary + +Prompt: +"From this meeting transcript, extract: + +**Attendees**: Who was present +**Date/Time**: When it occurred +**Key Decisions**: What was decided (numbered) +**Action Items**: +- [ ] Task (Owner, Due Date) +**Open Questions**: Unresolved issues +**Next Steps**: What happens next + +Format as clear, scannable notes." +``` + +### Competitor Analysis Template + +``` +Name: Competitor Analysis + +Prompt: +"Analyze this company/product document: + +**Company**: Name and overview +**Products/Services**: What they offer +**Target Market**: Who they serve +**Pricing**: If available +**Strengths**: Competitive advantages +**Weaknesses**: Gaps or limitations +**Opportunities**: How we compare +**Threats**: What they do better + +Be objective and cite specific details." +``` + +### Technical Documentation Template + +``` +Name: API Documentation Summary + +Prompt: +"Extract from this technical document: + +**Overview**: What does this do? (1-2 sentences) +**Authentication**: How to authenticate +**Key Endpoints**: +- Endpoint 1: [method] [path] - [purpose] +- Endpoint 2: ... +**Common Parameters**: Frequently used params +**Rate Limits**: If mentioned +**Error Codes**: Key error responses +**Example Usage**: Simple code example if possible + +Keep technical but concise." +``` + +--- + +## Managing Transformations + +### Edit a Transformation + +``` +1. Go to Transformations page +2. Find your template +3. Click "Edit" +4. Modify the prompt +5. Click "Save" +``` + +### Delete a Transformation + +``` +1. Go to Transformations page +2. Find the template +3. Click "Delete" +4. Confirm +``` + +### Reorder/Organize + +Built-in transformations appear first, then custom ones alphabetically. + +--- + +## Transformation Output + +### Where Results Go + +- Each source produces one note +- Notes appear in your notebook's Notes panel +- Notes are tagged with transformation name +- Original source is linked + +### Note Naming + +``` +Default: "[Transformation Name] - [Source Title]" +Example: "Summary - Research Paper 2025.pdf" +``` + +### Editing Output + +``` +1. Click the generated note +2. Click "Edit" +3. Refine the content +4. Save +``` + +--- + +## Best Practices + +### Template Design + +1. **Start specific** - Vague prompts give vague results +2. 
**Use formatting** - Headings, bullets, numbered lists +3. **Request citations** - Make results verifiable +4. **Set length** - Prevent overly long or short output +5. **Test first** - Run on one source before batch + +### Source Selection + +1. **Similar content** - Same transformation on similar sources +2. **Reasonable size** - Very long sources may need splitting +3. **Processed status** - Ensure sources are fully processed + +### Quality Control + +1. **Review samples** - Check first few outputs before trusting batch +2. **Edit as needed** - Transformations are starting points +3. **Iterate prompts** - Refine based on results + +--- + +## Common Issues + +### Generic Output + +**Problem**: Results are too vague +**Solution**: Make prompt more specific, add format requirements + +### Missing Information + +**Problem**: Key details not extracted +**Solution**: Explicitly ask for what you need in prompt + +### Inconsistent Format + +**Problem**: Each note looks different +**Solution**: Add clear formatting instructions to prompt + +### Too Long/Short + +**Problem**: Output doesn't match expectations +**Solution**: Specify word count or section lengths + +### Processing Fails + +**Problem**: Transformation doesn't complete +**Solution**: +- Check source is processed +- Try shorter/simpler prompt +- Process sources individually + +--- + +## Transformations vs. Chat vs. Ask + +| Feature | Transformations | Chat | Ask | +|---------|----------------|------|-----| +| **Input** | Predefined template | Your questions | Your question | +| **Scope** | One source at a time | Selected sources | Auto-searched | +| **Output** | Structured note | Conversation | Comprehensive answer | +| **Best for** | Batch processing | Exploration | One-shot answers | +| **Follow-up** | Run again | Ask more | New query | + +--- + +## Summary + +``` +Transformations = Batch AI Processing + +How to use: +1. Define template (or use built-in) +2. Select sources +3. Apply transformation +4. Get structured notes + +When to use: +- Same analysis on many sources +- Consistent output needed +- Building structured knowledge base +- Saving time on repetitive tasks + +Tips: +- Be specific in prompts +- Request formatting +- Test before batch +- Edit output as needed +``` + +Transformations turn repetitive analysis into one-click operations. Define once, apply many times. diff --git a/docs/3-USER-GUIDE/working-with-notes.md b/docs/3-USER-GUIDE/working-with-notes.md new file mode 100644 index 00000000..02f56230 --- /dev/null +++ b/docs/3-USER-GUIDE/working-with-notes.md @@ -0,0 +1,581 @@ +# Working with Notes - Capturing and Organizing Insights + +Notes are your processed knowledge. This guide covers how to create, organize, and use them effectively. + +--- + +## What Are Notes? + +Notes are your **research output** — the insights you capture from analyzing sources. They can be: + +- **Manual** — You write them yourself +- **AI-Generated** — From Chat responses, Ask results, or Transformations +- **Hybrid** — AI insight + your edits and additions + +Unlike sources (which never change), notes are mutable — you edit, refine, and organize them. + +--- + +## Quick-Start: Create Your First Note + +### Method 1: Manual Note (Write Yourself) + +``` +1. In your notebook, go to "Notes" section +2. Click "Create New Note" +3. Give it a title: "Key insights from source X" +4. Write your content (markdown supported) +5. Click "Save" +6. Done! Note appears in your notebook +``` + +### Method 2: Save from Chat + +``` +1. 
Have a Chat conversation +2. Get a good response from AI +3. Click "Save as Note" button under response +4. Give the note a title +5. Add any additional context +6. Click "Save" +7. Done! Note appears in your notebook +``` + +### Method 3: Apply Transformation + +``` +1. Go to "Transformations" +2. Select a template (or create custom) +3. Click "Apply to sources" +4. Select which sources to transform +5. Wait for processing +6. New notes automatically appear +7. Done! Each source produces one note +``` + +--- + +## Creating Manual Notes + +### Basic Structure + +``` +Title: "What you're capturing" + (Make it descriptive) + +Content: + - Main points + - Your analysis + - Questions raised + - Next steps + +Metadata: + - Tags: How to categorize + - Related sources: Which documents influenced this + - Date: Auto-added when created +``` + +### Markdown Support + +You can format notes with markdown: + +```markdown +# Heading +## Subheading +### Sub-subheading + +**Bold text** for emphasis +*Italic text* for secondary emphasis + +- Bullet lists +- Like this + +1. Numbered lists +2. Like this + +> Quotes and important callouts + +[Links work](https://example.com) +``` + +### Example Note Structure + +```markdown +# Key Findings from "AI Safety Paper 2025" + +## Main Argument +The paper argues that X approach is better than Y because... + +## Methodology +The authors use [methodology] to test this hypothesis. + +## Key Results +- Result 1: [specific finding with citation] +- Result 2: [specific finding with citation] +- Result 3: [specific finding with citation] + +## Gaps & Limitations +1. The paper assumes X, which might not hold in Y scenario +2. Limited to Z population/domain +3. Future work needed on A, B, C + +## My Thoughts +- This connects to previous research on... +- Potential application in... + +## Next Steps +- [ ] Read the referenced paper on X +- [ ] Find similar studies on Y +- [ ] Discuss implications with team +``` + +--- + +## AI-Generated Notes: Three Sources + +### 1. Save from Chat + +``` +Workflow: + Chat → Good response → "Save as Note" + → Edit if needed → Save + +When to use: + - AI response answers your question well + - You want to keep the answer for reference + - You're building a knowledge base from conversations + +Quality: + - Quality = quality of your Chat question + - Better context = better responses = better notes + - Ask specific questions for useful notes +``` + +### 2. Save from Ask + +``` +Workflow: + Ask → Comprehensive answer → "Save as Note" + → Edit if needed → Save + +When to use: + - You need a one-time comprehensive answer + - You want to save the synthesized result + - Building a knowledge base of comprehensive answers + +Quality: + - System automatically found relevant sources + - Results already have citations + - Often higher quality than Chat (more thorough) +``` + +### 3. 
Transformations (Batch Processing) + +``` +Workflow: + Define transformation → Apply to sources → Notes auto-created + → Review & edit → Organize + +Example Transformation: + Template: "Extract: main argument, methodology, key findings" + Apply to: 5 sources + Result: 5 new notes with consistent structure + +When to use: + - Same extraction from many sources + - Building structured knowledge base + - Creating consistent summaries +``` + +--- + +## Using Transformations for Batch Insights + +### Built-in Transformations + +Open Notebook comes with presets: + +**Summary** +``` +Extracts: Main points, key arguments, conclusions +Output: 200-300 word summary of source +Best for: Quick reference summaries +``` + +**Key Concepts** +``` +Extracts: Main ideas, concepts, terminology +Output: List of concepts with explanations +Best for: Learning and terminology +``` + +**Methodology** +``` +Extracts: Research approach, methods, data +Output: How the research was conducted +Best for: Academic sources, methodology review +``` + +**Takeaways** +``` +Extracts: Actionable insights, recommendations +Output: What you should do with this information +Best for: Practical/business sources +``` + +### How to Apply Transformation + +``` +1. Go to "Transformations" +2. Select a template +3. Click "Apply" +4. Select which sources (one or many) +5. Wait for processing (usually 30 seconds - 2 minutes) +6. New notes appear in your notebook +7. Edit if needed +``` + +### Create Custom Transformation + +``` +1. Click "Create Custom Transformation" +2. Write your extraction template: + + Example: + "For this academic paper, extract: + - Central research question + - Hypothesis tested + - Methodology used + - Key findings (numbered) + - Limitations acknowledged + - Recommendations for future work" + +3. Click "Save Template" +4. Apply to one or many sources +5. System generates notes with consistent structure +``` + +--- + +## Organizing Notes + +### Naming Conventions + +**Option 1: Date-based** +``` +2026-01-03 - Key points from X source +2026-01-04 - Comparison between A and B +Benefit: Easy to see what you did when +``` + +**Option 2: Topic-based** +``` +AI Safety - Alignment approaches +AI Safety - Interpretability research +Benefit: Groups by subject matter +``` + +**Option 3: Type-based** +``` +SUMMARY: Paper on X +QUESTION: What about Y? +INSIGHT: Connection between Z and W +Benefit: Easy to filter by type +``` + +**Option 4: Source-based** +``` +From: Paper A - Main insights +From: Video B - Interesting implications +Benefit: Easy to trace back to sources +``` + +**Best practice:** Combine approaches +``` +[Date] [Source] - [Topic] - [Type] +2026-01-03 - Paper A - AI Safety - Takeaways +``` + +### Using Tags + +Tags are labels for categorization. Add them when creating notes: + +``` +Example tags: + - "primary-research" (direct source analysis) + - "background" (supporting material) + - "methodology" (about research methods) + - "insights" (your original thinking) + - "questions" (open questions raised) + - "follow-up" (needs more work) + - "published" (ready to share/use) +``` + +**Benefits of tags:** +- Filter notes by tag +- Find all notes of a type +- Organize workflow (e.g., find all "follow-up" notes) + +### Note Linking & References + +You can reference sources within notes: + +```markdown +# Analysis of Paper A + +As shown in Paper A (see "main argument" section), +the authors argue that... 
+ +## Related Sources +- Paper B discusses similar approach +- Video C shows practical application +- My note on "Comparative analysis" has more +``` + +--- + +## Editing and Refining Notes + +### Improving AI-Generated Notes + +``` +AI Note: + "The paper discusses machine learning" + +What you might change: + "The paper proposes a supervised learning approach + to classification problems, using neural networks + with attention mechanisms (see pp. 15-18)." + +How to edit: + 1. Click note + 2. Click "Edit" + 3. Refine the content + 4. Click "Save" +``` + +### Adding Citations + +``` +When saving from Chat/Ask: + - Citations auto-added + - Shows which sources informed answer + - You can verify by clicking + +When manual notes: + - Add manually: "From Paper A, page 15: ..." + - Or reference: "As discussed in [source]" +``` + +--- + +## Searching Your Notes + +Notes are fully searchable: + +### Text Search +``` +Find exact phrase: "attention mechanism" +Results: All notes containing that phrase +Use when: Looking for specific terms or quotes +``` + +### Vector/Semantic Search +``` +Find concept: "How do models understand?" +Results: Notes about interpretability, mechanistic understanding, etc. +Use when: Exploring conceptually (words not exact) +``` + +### Combined Search +``` +Text search notes → Find keyword matches +Vector search notes → Find conceptual matches +Both work across sources + notes together +``` + +--- + +## Exporting and Sharing Notes + +### Options + +**Copy to clipboard** +``` +Click "Share" → "Copy" → Paste anywhere +Good for: Sharing one note via email/chat +``` + +**Export as Markdown** +``` +Click "Share" → "Export as MD" → Saves as .md file +Good for: Sharing with others, version control +``` + +**Create note collection** +``` +Select multiple notes → "Export collection" +→ Creates organized markdown document +Good for: Sharing a topic overview +``` + +**Publish to web** +``` +Click "Publish" → Get shareable link +Good for: Publishing publicly (if desired) +``` + +--- + +## Organizing Your Notebook's Notes + +### By Research Phase + +**Phase 1: Discovery** +- Initial summaries +- Questions raised +- Interesting findings + +**Phase 2: Deep Dive** +- Detailed analysis +- Comparative insights +- Methodology reviews + +**Phase 3: Synthesis** +- Connections across sources +- Original thinking +- Conclusions + +### By Content Type + +**Summaries** +- High-level overviews +- Generated by transformations +- Quick reference + +**Questions** +- Open questions +- Things to research more +- Gaps to fill + +**Insights** +- Your original analysis +- Connections made +- Conclusions reached + +**Tasks** +- Follow-up research +- Sources to add +- People to contact + +--- + +## Using Notes in Other Features + +### In Chat + +``` +You can reference notes: +"Based on my note 'Key findings from A', +how does this compare to B?" + +Notes become part of context. +Treated like sources but smaller/more focused. +``` + +### In Transformations + +``` +Notes can be transformed: +1. Select notes as input +2. Apply transformation +3. Get new derived notes + +Example: Transform 5 analysis notes → Create synthesis +``` + +### In Podcasts + +``` +Notes are used to create podcast content: +1. Generate podcast for notebook +2. System includes notes in content selection +3. Notes become part of episode outline +``` + +--- + +## Best Practices + +### For Manual Notes +1. **Write clearly** — Future you will appreciate it +2. **Add context** — Why this matters, not just what it says +3. 
**Link to sources** — You can verify later
4. **Date them** — Track your thinking over time
5. **Tag immediately** — Don't defer organization

### For AI-Generated Notes
1. **Review before saving** — Verify quality
2. **Edit for clarity** — AI might miss nuance
3. **Add your thoughts** — Make it your own
4. **Include citations** — Understand sources
5. **Organize right away** — While context is fresh

### For Organization
1. **Consistent naming** — Your future self will thank you
2. **Tag everything** — Makes filtering later much easier
3. **Link related notes** — Create knowledge network
4. **Review periodically** — Refactor as understanding evolves
5. **Archive old notes** — Keep working space clean

---

## Common Mistakes

| Mistake | Problem | Solution |
|---------|---------|----------|
| Saving every Chat response | Notebook becomes cluttered with low-quality notes | Only save good responses that answer your questions |
| Not adding tags | Can't find notes later | Tag immediately when creating |
| Vague note titles | Can't remember what's in them | Use descriptive titles, include key concept |
| Never linking notes together | Missed connections between ideas | Add references to related notes |
| Forgetting the source | Can't verify claims later | Always link back to source |
| Never editing AI notes | Notes stay generic AI responses | Refine for clarity and context |
| Creating one giant note | Too long to be useful | Split into focused notes by subtopic |

---

## Summary: Note Lifecycle

```
1. CREATE
   ├─ Manual: Write from scratch
   ├─ From Chat: Save good response
   ├─ From Ask: Save synthesis
   └─ From Transform: Batch process

2. EDIT & REFINE
   ├─ Improve clarity
   ├─ Add context
   ├─ Fix AI mistakes
   └─ Add citations

3. ORGANIZE
   ├─ Name clearly
   ├─ Add tags
   ├─ Link related
   └─ Categorize

4. USE
   ├─ Reference in Chat
   ├─ Transform for synthesis
   ├─ Export for sharing
   └─ Build on with new questions

5. MAINTAIN
   ├─ Periodically review
   ├─ Update as understanding grows
   ├─ Archive when done
   └─ Learn from organized knowledge
```

Your notes become your actual knowledge base. The more you invest in organizing them, the more valuable they become.
diff --git a/docs/4-AI-PROVIDERS/index.md b/docs/4-AI-PROVIDERS/index.md
new file mode 100644
index 00000000..c2918325
--- /dev/null
+++ b/docs/4-AI-PROVIDERS/index.md
@@ -0,0 +1,199 @@
# AI Providers - Comparison & Selection Guide

Open Notebook supports 15+ AI providers. This guide helps you **choose the right provider** for your needs.

> 💡 **Just want to set up a provider?** Skip to the [Configuration Guide](../5-CONFIGURATION/ai-providers.md) for detailed setup instructions.

---

## Quick Decision: Which Provider?
+ +### Cloud Providers (Easiest) + +**OpenAI (Recommended)** +- Cost: ~$0.03-0.15 per 1K tokens +- Speed: Very fast +- Quality: Excellent +- Best for: Most users (best quality/price balance) + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#openai) + +**Anthropic (Claude)** +- Cost: ~$0.80-3.00 per 1M tokens +- Speed: Fast +- Quality: Excellent +- Best for: Long context (200K tokens), reasoning, latest AI +- Advantage: Superior long-context handling + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#anthropic-claude) + +**Google Gemini** +- Cost: ~$0.075-0.30 per 1K tokens +- Speed: Very fast +- Quality: Good to excellent +- Best for: Multimodal (images, audio, video) +- Advantage: Longest context (up to 2M tokens) + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#google-gemini) + +**Groq (Ultra-Fast)** +- Cost: ~$0.05 per 1M tokens (cheapest) +- Speed: Ultra-fast (fastest available) +- Quality: Good +- Best for: Budget-conscious, transformations, speed-critical tasks +- Disadvantage: Limited model selection + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#groq) + +**OpenRouter (100+ Models)** +- Cost: Pay-per-model (varies widely) +- Speed: Varies by model +- Quality: Varies by model +- Best for: Model comparison, testing, unified billing +- Advantage: One API key for 100+ models from different providers + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#openrouter) + +### Local / Self-Hosted (Free) + +**Ollama (Recommended for Local)** +- Cost: Free (electricity only) +- Speed: Depends on hardware (slow on CPU, fast on GPU) +- Quality: Good (open-source models) +- Setup: 10 minutes +- Best for: Privacy-first, offline use +- Privacy: 100% local, nothing leaves your machine + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#ollama-recommended-for-local) + +**LM Studio (Alternative)** +- Cost: Free (electricity only) +- Speed: Depends on hardware +- Quality: Good (same models as Ollama) +- Setup: 15 minutes (GUI interface) +- Best for: Non-technical users who prefer GUI over CLI +- Privacy: 100% local + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#lm-studio-local-alternative) + +### Enterprise + +**Azure OpenAI** +- Cost: Same as OpenAI (usage-based) +- Speed: Very fast +- Quality: Excellent (same models as OpenAI) +- Setup: 10 minutes (more complex) +- Best for: Enterprise, compliance (HIPAA, SOC2), VPC integration + +→ [Setup Guide](../5-CONFIGURATION/ai-providers.md#azure-openai) + +--- + +## Comparison Table + +| Provider | Speed | Cost | Quality | Privacy | Setup | Context | +|----------|-------|------|---------|---------|-------|---------| +| **OpenAI** | Very Fast | $$ | Excellent | Low | 5 min | 128K | +| **Anthropic** | Fast | $$ | Excellent | Low | 5 min | 200K | +| **Google** | Very Fast | $$ | Good-Excellent | Low | 5 min | 2M | +| **Groq** | Ultra Fast | $ | Good | Low | 5 min | 32K | +| **OpenRouter** | Varies | Varies | Varies | Low | 5 min | Varies | +| **Ollama** | Slow-Medium | Free | Good | Max | 10 min | Varies | +| **LM Studio** | Slow-Medium | Free | Good | Max | 15 min | Varies | +| **Azure** | Very Fast | $$ | Excellent | High | 10 min | 128K | + +--- + +## Choosing Your Provider + +### I want the easiest setup +→ **OpenAI** — Most popular, best community support + +### I have unlimited budget +→ **OpenAI** — Best quality + +### I want to save money +→ **Groq** — Cheapest cloud ($0.05 per 1M tokens) + +### I want privacy/offline +→ **Ollama** — Free, local, private + +### I want a GUI (not CLI) +→ **LM Studio** — Desktop app + +### I'm in 
an enterprise +→ **Azure OpenAI** — Compliance, support + +### I need long context (200K+ tokens) +→ **Anthropic** — Best long-context model + +### I need multimodal (images, audio, video) +→ **Google Gemini** — Best multimodal support + +### I want access to many models with one API key +→ **OpenRouter** — 100+ models, unified billing + +--- + +## Ready to Set Up Your Provider? + +Now that you've chosen a provider, follow the detailed setup instructions: + +→ **[AI Providers Configuration Guide](../5-CONFIGURATION/ai-providers.md)** + +This guide includes: +- Step-by-step setup instructions for each provider +- Environment variable configuration +- Model selection and recommendations +- Provider-specific troubleshooting +- Hardware requirements (for local providers) +- Cost optimization tips + +--- + +## Cost Estimator + +### OpenAI +``` +Light use (10 chats/day): $1-5/month +Medium use (50 chats/day): $10-30/month +Heavy use (all-day use): $50-100+/month +``` + +### Anthropic +``` +Light use: $1-3/month +Medium use: $5-20/month +Heavy use: $20-50+/month +``` + +### Groq +``` +Light use: $0-1/month +Medium use: $2-5/month +Heavy use: $5-20/month +``` + +### Ollama +``` +Any use: Free (electricity only) +8GB GPU running 24/7: ~$10/month electricity +``` + +--- + +## Next Steps + +1. ✅ **You've chosen a provider** (from this comparison guide) +2. 📖 **Follow the setup guide**: [AI Providers Configuration](../5-CONFIGURATION/ai-providers.md) +3. ⚙️ **Configure your environment** (detailed in the setup guide) +4. 🧪 **Test your setup** in Settings → Models +5. 🚀 **Start using Open Notebook!** + +--- + +## Need Help? + +- **Setup issues?** See [AI Providers Configuration](../5-CONFIGURATION/ai-providers.md) for detailed troubleshooting per provider +- **General problems?** Check [Troubleshooting Guide](../6-TROUBLESHOOTING/index.md) +- **Questions?** Join [Discord community](https://discord.gg/37XJPXfz2w) diff --git a/docs/5-CONFIGURATION/advanced.md b/docs/5-CONFIGURATION/advanced.md new file mode 100644 index 00000000..cc98fb9c --- /dev/null +++ b/docs/5-CONFIGURATION/advanced.md @@ -0,0 +1,567 @@ +# Advanced Configuration + +Performance tuning, debugging, and advanced features. + +--- + +## Performance Tuning + +### Concurrency Control + +```env +# Max concurrent database operations (default: 5) +# Increase: Faster processing, more conflicts +# Decrease: Slower, fewer conflicts +SURREAL_COMMANDS_MAX_TASKS=5 +``` + +**Guidelines:** +- CPU: 2 cores → 2-3 tasks +- CPU: 4 cores → 5 tasks (default) +- CPU: 8+ cores → 10-20 tasks + +Higher concurrency = more throughput but more database conflicts (retries handle this). + +### Retry Strategy + +```env +# How to wait between retries +SURREAL_COMMANDS_RETRY_WAIT_STRATEGY=exponential_jitter + +# Options: +# - exponential_jitter (recommended) +# - exponential +# - fixed +# - random +``` + +For high-concurrency deployments, use `exponential_jitter` to prevent thundering herd. 
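
To see why jitter matters, compare the delays each strategy produces when several workers fail at the same moment. The sketch below is a generic illustration of exponential backoff with full jitter, not surreal-commands' actual implementation; the base and cap values are assumptions:

```python
import random

def retry_delay(attempt: int, base: float = 1.0, cap: float = 30.0, jitter: bool = True) -> float:
    """Exponential backoff: base * 2^attempt seconds, capped at `cap`."""
    delay = min(cap, base * (2 ** attempt))
    # Full jitter: sample uniformly from [0, delay], so workers that fail
    # together spread their retries out instead of colliding again in lockstep.
    return random.uniform(0, delay) if jitter else delay

for attempt in range(4):
    fixed = retry_delay(attempt, jitter=False)
    print(f"attempt {attempt}: no jitter {fixed:5.1f}s, with jitter {retry_delay(attempt):5.1f}s")
```

Without jitter, every colliding worker retries after exactly the same delay and they collide again; with jitter, the retries scatter across the window.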
+ +### Timeout Tuning + +```env +# Client timeout (default: 300 seconds) +API_CLIENT_TIMEOUT=300 + +# LLM timeout (default: 60 seconds) +ESPERANTO_LLM_TIMEOUT=60 +``` + +**Guideline:** Set `API_CLIENT_TIMEOUT` > `ESPERANTO_LLM_TIMEOUT` + buffer + +``` +Example: + ESPERANTO_LLM_TIMEOUT=120 + API_CLIENT_TIMEOUT=180 # 120 + 60 second buffer +``` + +--- + +## Batching + +### TTS Batch Size + +For podcast generation, control concurrent TTS requests: + +```env +# Default: 5 +TTS_BATCH_SIZE=2 +``` + +**Providers and recommendations:** +- OpenAI: 5 (can handle many concurrent) +- Google: 4 (good concurrency) +- ElevenLabs: 2 (limited concurrent requests) +- Local TTS: 1 (single-threaded) + +Lower = slower but more stable. Higher = faster but more load on provider. + +--- + +## Logging & Debugging + +### Enable Detailed Logging + +```bash +# Start with debug logging +RUST_LOG=debug # For Rust components +LOGLEVEL=DEBUG # For Python components +``` + +### Debug Specific Components + +```bash +# Only surreal operations +RUST_LOG=surrealdb=debug + +# Only langchain +LOGLEVEL=langchain:debug + +# Only specific module +RUST_LOG=open_notebook::database=debug +``` + +### LangSmith Tracing + +For debugging LLM workflows: + +```env +LANGCHAIN_TRACING_V2=true +LANGCHAIN_ENDPOINT="https://api.smith.langchain.com" +LANGCHAIN_API_KEY=your-key +LANGCHAIN_PROJECT="Open Notebook" +``` + +Then visit https://smith.langchain.com to see traces. + +--- + +## Port Configuration + +### Default Ports + +``` +Frontend: 8502 (Docker deployment) +Frontend: 3000 (Development from source) +API: 5055 +SurrealDB: 8000 +``` + +### Changing Frontend Port + +Edit `docker-compose.yml`: + +```yaml +services: + open-notebook: + ports: + - "8001:8502" # Change from 8502 to 8001 +``` + +Access at: `http://localhost:8001` + +API auto-detects to: `http://localhost:5055` ✓ + +### Changing API Port + +```yaml +services: + open-notebook: + ports: + - "127.0.0.1:8502:8502" # Frontend + - "5056:5055" # Change API from 5055 to 5056 + environment: + - API_URL=http://localhost:5056 # Update API_URL +``` + +Access API directly: `http://localhost:5056/docs` + +**Note:** When changing API port, you must set `API_URL` explicitly since auto-detection assumes port 5055. + +### Changing SurrealDB Port + +```yaml +services: + surrealdb: + ports: + - "8001:8000" # Change from 8000 to 8001 + environment: + - SURREAL_URL=ws://surrealdb:8001/rpc # Update connection URL +``` + +**Important:** Internal Docker network uses container name (`surrealdb`), not `localhost`. + +--- + +## SSL/TLS Configuration + +### Custom CA Certificate + +For self-signed certs on local providers: + +```env +ESPERANTO_SSL_CA_BUNDLE=/path/to/ca-bundle.pem +``` + +### Disable Verification (Development Only) + +```env +# WARNING: Only for testing/development +# Vulnerable to MITM attacks +ESPERANTO_SSL_VERIFY=false +``` + +--- + +## Multi-Provider Setup + +### Use Different Providers for Different Tasks + +```env +# Language model (main) +OPENAI_API_KEY=sk-proj-... + +# Embeddings (alternative) +# (Future: Configure different embedding provider) + +# TTS (different provider) +ELEVENLABS_API_KEY=... 
+``` + +### OpenAI-Compatible with Fallback + +```env +# Primary +OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 +OPENAI_COMPATIBLE_API_KEY=key1 + +# Can also set specific modality endpoints +OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 +OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8001/v1 +``` + +--- + +## Security Hardening + +### Change Default Credentials + +```env +# Don't use defaults in production +SURREAL_USER=your_secure_username +SURREAL_PASSWORD=$(openssl rand -base64 32) # Generate secure password +``` + +### Add Password Protection + +```env +# Protect your Open Notebook instance +OPEN_NOTEBOOK_PASSWORD=your_secure_password +``` + +### Use HTTPS + +```env +# Always use HTTPS in production +API_URL=https://mynotebook.example.com +``` + +### Firewall Rules + +Restrict access to your Open Notebook: +- Port 8502 (frontend): Only from your IP +- Port 5055 (API): Only from frontend +- Port 8000 (SurrealDB): Never expose to internet + +--- + +## Web Scraping & Content Extraction + +Open Notebook uses multiple services for content extraction: + +### Firecrawl + +For advanced web scraping: + +```env +FIRECRAWL_API_KEY=your-key +``` + +Get key from: https://firecrawl.dev/ + +### Jina AI + +Alternative web extraction: + +```env +JINA_API_KEY=your-key +``` + +Get key from: https://jina.ai/ + +--- + +## Environment Variable Groups + +### API Keys (Choose at least one) +```env +OPENAI_API_KEY +ANTHROPIC_API_KEY +GOOGLE_API_KEY +GROQ_API_KEY +MISTRAL_API_KEY +DEEPSEEK_API_KEY +OPENROUTER_API_KEY +XAI_API_KEY +``` + +### AI Provider Endpoints +```env +OLLAMA_API_BASE +OPENAI_COMPATIBLE_BASE_URL +AZURE_OPENAI_ENDPOINT +GEMINI_API_BASE_URL +``` + +### Database +```env +SURREAL_URL +SURREAL_USER +SURREAL_PASSWORD +SURREAL_NAMESPACE +SURREAL_DATABASE +``` + +### Performance +```env +SURREAL_COMMANDS_MAX_TASKS +SURREAL_COMMANDS_RETRY_ENABLED +SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS +SURREAL_COMMANDS_RETRY_WAIT_STRATEGY +SURREAL_COMMANDS_RETRY_WAIT_MIN +SURREAL_COMMANDS_RETRY_WAIT_MAX +``` + +### API Settings +```env +API_URL +INTERNAL_API_URL +API_CLIENT_TIMEOUT +ESPERANTO_LLM_TIMEOUT +``` + +### Audio/TTS +```env +ELEVENLABS_API_KEY +TTS_BATCH_SIZE +``` + +### Debugging +```env +LANGCHAIN_TRACING_V2 +LANGCHAIN_ENDPOINT +LANGCHAIN_API_KEY +LANGCHAIN_PROJECT +``` + +--- + +## Testing Configuration + +### Quick Test + +```bash +# Add test config +export OPENAI_API_KEY=sk-test-key +export API_URL=http://localhost:5055 + +# Test connection +curl http://localhost:5055/health + +# Test with sample +curl -X POST http://localhost:5055/api/chat \ + -H "Content-Type: application/json" \ + -d '{"message":"Hello"}' +``` + +### Validate Config + +```bash +# Check environment variables are set +env | grep OPENAI_API_KEY + +# Verify database connection +python -c "import os; print(os.getenv('SURREAL_URL'))" +``` + +--- + +## Troubleshooting Performance + +### High Memory Usage + +```env +# Reduce concurrency +SURREAL_COMMANDS_MAX_TASKS=2 + +# Reduce TTS batch size +TTS_BATCH_SIZE=1 +``` + +### High CPU Usage + +```env +# Check worker count +SURREAL_COMMANDS_MAX_TASKS + +# Reduce if maxed out: +SURREAL_COMMANDS_MAX_TASKS=5 +``` + +### Slow Responses + +```env +# Check timeout settings +API_CLIENT_TIMEOUT=300 + +# Check retry config +SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=3 +``` + +### Database Conflicts + +```env +# Reduce concurrency +SURREAL_COMMANDS_MAX_TASKS=3 + +# Use jitter strategy +SURREAL_COMMANDS_RETRY_WAIT_STRATEGY=exponential_jitter +``` + +--- + +## Backup & Restore + +### Data 
Locations + +| Path | Contents | +|------|----------| +| `./data` or `/app/data` | Uploads, podcasts, checkpoints | +| `./surreal_data` or `/mydata` | SurrealDB database files | + +### Quick Backup + +```bash +# Stop services (recommended for consistency) +docker compose down + +# Create timestamped backup +tar -czf backup-$(date +%Y%m%d-%H%M%S).tar.gz \ + notebook_data/ surreal_data/ + +# Restart services +docker compose up -d +``` + +### Automated Backup Script + +```bash +#!/bin/bash +# backup.sh - Run daily via cron + +BACKUP_DIR="/path/to/backups" +DATE=$(date +%Y%m%d-%H%M%S) + +# Create backup +tar -czf "$BACKUP_DIR/open-notebook-$DATE.tar.gz" \ + /path/to/notebook_data \ + /path/to/surreal_data + +# Keep only last 7 days +find "$BACKUP_DIR" -name "open-notebook-*.tar.gz" -mtime +7 -delete + +echo "Backup complete: open-notebook-$DATE.tar.gz" +``` + +Add to cron: +```bash +# Daily backup at 2 AM +0 2 * * * /path/to/backup.sh >> /var/log/open-notebook-backup.log 2>&1 +``` + +### Restore + +```bash +# Stop services +docker compose down + +# Remove old data (careful!) +rm -rf notebook_data/ surreal_data/ + +# Extract backup +tar -xzf backup-20240115-120000.tar.gz + +# Restart services +docker compose up -d +``` + +### Migration Between Servers + +```bash +# On source server +docker compose down +tar -czf open-notebook-migration.tar.gz notebook_data/ surreal_data/ + +# Transfer to new server +scp open-notebook-migration.tar.gz user@newserver:/path/ + +# On new server +tar -xzf open-notebook-migration.tar.gz +docker compose up -d +``` + +--- + +## Container Management + +### Common Commands + +```bash +# Start services +docker compose up -d + +# Stop services +docker compose down + +# View logs (all services) +docker compose logs -f + +# View logs (specific service) +docker compose logs -f api + +# Restart specific service +docker compose restart api + +# Update to latest version +docker compose down +docker compose pull +docker compose up -d + +# Check resource usage +docker stats + +# Check service health +docker compose ps +``` + +### Clean Up + +```bash +# Remove stopped containers +docker compose rm + +# Remove unused images +docker image prune + +# Full cleanup (careful!) +docker system prune -a +``` + +--- + +## Summary + +**Most deployments need:** +- One AI provider API key +- Default database settings +- Default timeouts + +**Tune performance only if:** +- You have specific bottlenecks +- High-concurrency workload +- Custom hardware (very fast or very slow) + +**Advanced features:** +- Firecrawl for better web scraping +- LangSmith for debugging workflows +- Custom CA bundles for self-signed certs diff --git a/docs/5-CONFIGURATION/ai-providers.md b/docs/5-CONFIGURATION/ai-providers.md new file mode 100644 index 00000000..9ca7aa3a --- /dev/null +++ b/docs/5-CONFIGURATION/ai-providers.md @@ -0,0 +1,494 @@ +# AI Providers - Configuration Reference + +Complete setup instructions for each AI provider. Pick the one you're using. + +--- + +## Cloud Providers (Recommended for Most) + +### OpenAI + +**Cost:** ~$0.03-0.15 per 1K tokens (varies by model) + +**Setup:** +```bash +1. Go to https://platform.openai.com/api-keys +2. Create account (if needed) +3. Create new API key (starts with "sk-proj-") +4. Add $5+ credits to account +5. Add to .env: + OPENAI_API_KEY=sk-proj-... +6. 
Restart services +``` + +**Environment Variable:** +``` +OPENAI_API_KEY=sk-proj-xxxxx +``` + +**Available Models (in Open Notebook):** +- `gpt-4o` — Best quality, fast (latest version) +- `gpt-4o-mini` — Fast, cheap, good for testing +- `o1` — Advanced reasoning model (slower, more expensive) +- `o1-mini` — Faster reasoning model + +**Recommended:** +- For general use: `gpt-4o` (best balance) +- For testing/cheap: `gpt-4o-mini` (90% cheaper) +- For complex reasoning: `o1` (best for hard problems) + +**Cost Estimate:** +``` +Light use: $1-5/month +Medium use: $10-30/month +Heavy use: $50-100+/month +``` + +**Troubleshooting:** +- "Invalid API key" → Check key starts with "sk-proj-" +- "Rate limit exceeded" → Wait or upgrade account +- "Model not available" → Try gpt-4o-mini instead + +--- + +### Anthropic (Claude) + +**Cost:** ~$0.80-3.00 per 1M tokens (cheaper than OpenAI for long context) + +**Setup:** +```bash +1. Go to https://console.anthropic.com/ +2. Create account or login +3. Go to API keys section +4. Create new API key (starts with "sk-ant-") +5. Add to .env: + ANTHROPIC_API_KEY=sk-ant-... +6. Restart services +``` + +**Environment Variable:** +``` +ANTHROPIC_API_KEY=sk-ant-xxxxx +``` + +**Available Models:** +- `claude-sonnet-4-5-20250929` — Latest, best quality (recommended) +- `claude-3-5-sonnet-20241022` — Previous generation, still excellent +- `claude-3-5-haiku-20241022` — Fast, cheap +- `claude-opus-4-5-20251101` — Most powerful, expensive + +**Recommended:** +- For general use: `claude-sonnet-4-5` (best overall, latest) +- For cheap: `claude-3-5-haiku` (80% cheaper) +- For complex: `claude-opus-4-5` (most capable) + +**Cost Estimate:** +``` +Sonnet: $3-20/month (typical use) +Haiku: $0.50-3/month +Opus: $10-50+/month +``` + +**Advantages:** +- Great long-context support (200K tokens) +- Excellent reasoning +- Fast processing + +**Troubleshooting:** +- "Invalid API key" → Check it starts with "sk-ant-" +- "Overloaded" → Anthropic is busy, retry later +- "Model unavailable" → Check model name is correct + +--- + +### Google Gemini + +**Cost:** ~$0.075-0.30 per 1K tokens (competitive with OpenAI) + +**Setup:** +```bash +1. Go to https://aistudio.google.com/app/apikey +2. Create account or login +3. Create new API key +4. Add to .env: + GOOGLE_API_KEY=AIzaSy... +5. Restart services +``` + +**Environment Variable:** +``` +GOOGLE_API_KEY=AIzaSy... +# Optional: override default endpoint +GEMINI_API_BASE_URL=https://generativelanguage.googleapis.com/v1beta/models +``` + +**Available Models:** +- `gemini-2.0-flash-exp` — Latest experimental, fastest (recommended) +- `gemini-2.0-flash` — Stable version, fast, cheap +- `gemini-1.5-pro-latest` — More capable, longer context +- `gemini-1.5-flash` — Previous generation, very cheap + +**Recommended:** +- For general use: `gemini-2.0-flash-exp` (best value, latest) +- For cheap: `gemini-1.5-flash` (very cheap) +- For complex/long context: `gemini-1.5-pro-latest` (2M token context) + +**Advantages:** +- Very long context (1M tokens) +- Multimodal (images, audio, video) +- Good for podcasts + +**Troubleshooting:** +- "API key invalid" → Get fresh key from aistudio.google.com +- "Quota exceeded" → Free tier limited, upgrade account +- "Model not found" → Check model name spelling + +--- + +### Groq + +**Cost:** ~$0.05 per 1M tokens (cheapest, but limited models) + +**Setup:** +```bash +1. Go to https://console.groq.com/keys +2. Create account or login +3. Create new API key +4. Add to .env: + GROQ_API_KEY=gsk_... +5. 
Restart services +``` + +**Environment Variable:** +``` +GROQ_API_KEY=gsk_xxxxx +``` + +**Available Models:** +- `llama-3.3-70b-versatile` — Best on Groq (recommended) +- `llama-3.1-70b-versatile` — Fast, capable +- `mixtral-8x7b-32768` — Good alternative +- `gemma2-9b-it` — Small, very fast + +**Recommended:** +- For quality: `llama-3.3-70b-versatile` (best overall) +- For speed: `gemma2-9b-it` (ultra-fast) +- For balance: `llama-3.1-70b-versatile` + +**Advantages:** +- Ultra-fast inference +- Very cheap +- Great for transformations/batch work + +**Disadvantages:** +- Limited model selection +- Smaller models than OpenAI/Anthropic + +**Troubleshooting:** +- "Rate limited" → Free tier has limits, upgrade +- "Model not available" → Check supported models list + +--- + +### OpenRouter + +**Cost:** Varies by model ($0.05-15 per 1M tokens) + +**Setup:** +```bash +1. Go to https://openrouter.ai/keys +2. Create account or login +3. Add credits to your account +4. Create new API key +5. Add to .env: + OPENROUTER_API_KEY=sk-or-... +6. Restart services +``` + +**Environment Variable:** +``` +OPENROUTER_API_KEY=sk-or-xxxxx +``` + +**Available Models (100+ options):** +- OpenAI: `openai/gpt-4o`, `openai/o1` +- Anthropic: `anthropic/claude-sonnet-4.5`, `anthropic/claude-3.5-haiku` +- Google: `google/gemini-2.0-flash-exp`, `google/gemini-1.5-pro` +- Meta: `meta-llama/llama-3.3-70b-instruct`, `meta-llama/llama-3.1-405b-instruct` +- Mistral: `mistralai/mistral-large-2411` +- DeepSeek: `deepseek/deepseek-chat` +- And many more... + +**Recommended:** +- For quality: `anthropic/claude-sonnet-4.5` (best overall) +- For speed/cost: `google/gemini-2.0-flash-exp` (very fast, cheap) +- For open-source: `meta-llama/llama-3.3-70b-instruct` +- For reasoning: `openai/o1` + +**Advantages:** +- One API key for 100+ models +- Unified billing +- Easy model comparison +- Access to models that may have waitlists elsewhere + +**Cost Estimate:** +``` +Light use: $1-5/month +Medium use: $10-30/month +Heavy use: Depends on models chosen +``` + +**Troubleshooting:** +- "Invalid API key" → Check it starts with "sk-or-" +- "Insufficient credits" → Add credits at openrouter.ai +- "Model not available" → Check model ID spelling (use full path) + +--- + +## Self-Hosted / Local + +### Ollama (Recommended for Local) + +**Cost:** Free (electricity only) + +**Setup:** +```bash +1. Install Ollama: https://ollama.ai +2. Run Ollama in background: + ollama serve + +3. Download a model: + ollama pull mistral + # or llama2, neural-chat, phi, etc. + +4. Add to .env: + OLLAMA_API_BASE=http://localhost:11434 + # If on different machine: + # OLLAMA_API_BASE=http://10.0.0.5:11434 + +5. 
Restart services
+```
+
+**Environment Variable:**
+```
+OLLAMA_API_BASE=http://localhost:11434
+```
+
+**Available Models:**
+- `llama3.3:70b` — Best quality (requires 40GB+ RAM)
+- `llama3.1:8b` — Recommended, balanced (8GB RAM)
+- `qwen2.5:7b` — Excellent for code and reasoning
+- `mistral:7b` — Good general purpose
+- `phi3:3.8b` — Small, fast (4GB RAM)
+- `gemma2:9b` — Google's model, balanced
+- Many more: `ollama list` to see available
+
+**Recommended:**
+- For quality (with GPU): `llama3.3:70b` (best)
+- For general use: `llama3.1:8b` (best balance)
+- For speed/low memory: `phi3:3.8b` (very fast)
+- For coding: `qwen2.5:7b` (excellent at code)
+
+**Hardware Requirements:**
+```
+GPU (NVIDIA/AMD):
+  8GB VRAM: Runs most models fine
+  6GB VRAM: Works, slower
+  4GB VRAM: Small models only
+
+CPU-only:
+  16GB+ RAM: Slow but works
+  8GB RAM: Very slow
+  4GB RAM: Not recommended
+```
+
+**Advantages:**
+- Completely private (runs locally)
+- Free (electricity only)
+- No API key needed
+- Works offline
+
+**Disadvantages:**
+- Slower than cloud (unless on GPU)
+- Smaller models than cloud
+- Requires local hardware
+
+**Troubleshooting:**
+- "Connection refused" → Ollama not running or wrong port
+- "Model not found" → Download it: `ollama pull modelname`
+- "Out of memory" → Use smaller model or add more RAM
+
+---
+
+### LM Studio (Local Alternative)
+
+**Cost:** Free
+
+**Setup:**
+```bash
+1. Download LM Studio: https://lmstudio.ai
+2. Open app
+3. Download a model from library
+4. Go to "Local Server" tab
+5. Start server (default port: 1234)
+6. Add to .env:
+   OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1
+   OPENAI_COMPATIBLE_API_KEY=not-needed
+7. Restart services
+```
+
+**Environment Variables:**
+```
+OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1
+OPENAI_COMPATIBLE_API_KEY=lm-studio  # Just a placeholder
+```
+
+**Advantages:**
+- GUI interface (easier than Ollama CLI)
+- Good model selection
+- Privacy-focused
+- Works offline
+
+**Disadvantages:**
+- Desktop app only (Mac/Windows/Linux)
+- Slower than cloud
+- Runs best with a local GPU (CPU works, but slowly)
+
+---
+
+### Custom OpenAI-Compatible
+
+For Text Generation WebUI, vLLM, or other OpenAI-compatible endpoints:
+
+```bash
+Add to .env:
+OPENAI_COMPATIBLE_BASE_URL=http://your-endpoint/v1
+OPENAI_COMPATIBLE_API_KEY=your-api-key
+```
+
+If you need different endpoints for different modalities:
+
+```bash
+# Language model
+OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:8000/v1
+OPENAI_COMPATIBLE_API_KEY_LLM=sk-...
+
+# Embeddings
+OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8001/v1
+OPENAI_COMPATIBLE_API_KEY_EMBEDDING=sk-...
+
+# TTS (text-to-speech)
+OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8002/v1
+OPENAI_COMPATIBLE_API_KEY_TTS=sk-...
+```
+
+---
+
+## Enterprise
+
+### Azure OpenAI
+
+**Cost:** Same as OpenAI (usage-based)
+
+**Setup:**
+```bash
+1. Create Azure OpenAI service in Azure portal
+2. Deploy a model (e.g., GPT-4o)
+3. Get your endpoint and key
+4. Add to .env:
+   AZURE_OPENAI_API_KEY=your-key
+   AZURE_OPENAI_ENDPOINT=https://your-name.openai.azure.com/
+   AZURE_OPENAI_API_VERSION=2024-12-01-preview
+5. 
Restart services
+```
+
+**Environment Variables:**
+```
+AZURE_OPENAI_API_KEY=xxxxx
+AZURE_OPENAI_ENDPOINT=https://your-instance.openai.azure.com/
+AZURE_OPENAI_API_VERSION=2024-12-01-preview
+
+# Optional: Different deployments for different modalities
+AZURE_OPENAI_API_KEY_LLM=xxxxx
+AZURE_OPENAI_ENDPOINT_LLM=https://your-instance.openai.azure.com/
+AZURE_OPENAI_API_VERSION_LLM=2024-12-01-preview
+```
+
+**Advantages:**
+- Enterprise support
+- VPC integration
+- Compliance (HIPAA, SOC2, etc.)
+
+**Disadvantages:**
+- More complex setup
+- Higher overhead
+- Requires Azure account
+
+---
+
+## Embeddings (For Search/Semantic Features)
+
+By default, Open Notebook uses the LLM provider's embeddings. To use a different provider:
+
+### OpenAI Embeddings (Default)
+```
+# Uses OpenAI's embedding model automatically
+# Requires OPENAI_API_KEY
+# No separate configuration needed
+```
+
+### Custom Embeddings
+```
+# For other embedding providers (future feature)
+EMBEDDING_PROVIDER=openai  # or custom
+```
+
+---
+
+## Choosing Your Provider
+
+**For simplicity (cloud-based, no provider juggling):**
+
+Use OpenAI
+- Cloud-based
+- Good quality
+- Reasonable cost
+- Simplest setup; supports all modes (text, embedding, TTS, STT, etc.)
+
+**For budget-conscious:** Groq, OpenRouter or Ollama
+- Groq: Super cheap cloud
+- Ollama: Free, but local
+- OpenRouter: easy, affordable access to many open-source models
+
+**For privacy-first:** Ollama or LM Studio and [Speaches](local-tts.md)
+- Everything stays local
+- Works offline
+- No API keys sent anywhere
+
+**For enterprise:** Azure OpenAI
+- Compliance
+- VPC integration
+- Support
+
+---
+
+## Next Steps
+
+1. **Choose your provider** from above
+2. **Get API key** (if cloud) or install locally (if Ollama)
+3. **Add to .env**
+4. **Restart services**
+5. **Go to Settings → Models** in Open Notebook
+6. **Verify it works** with a test chat
+
+Done!
+
+---
+
+## Related
+
+- **[Environment Reference](environment-reference.md)** - Complete list of all environment variables
+- **[Advanced Configuration](advanced.md)** - Timeouts, SSL, performance tuning
+- **[Ollama Setup](ollama.md)** - Detailed Ollama configuration guide
+- **[OpenAI-Compatible](openai-compatible.md)** - LM Studio and other compatible providers
+- **[Troubleshooting](../6-TROUBLESHOOTING/quick-fixes.md)** - Common issues and fixes
diff --git a/docs/5-CONFIGURATION/database.md b/docs/5-CONFIGURATION/database.md
new file mode 100644
index 00000000..c726ca1e
--- /dev/null
+++ b/docs/5-CONFIGURATION/database.md
@@ -0,0 +1,50 @@
+# Database - SurrealDB Configuration
+
+Open Notebook uses SurrealDB for its database needs.
+
+---
+
+## Default Configuration
+
+Open Notebook should work out of the box with SurrealDB as long as the environment variables are correctly set up.
+
+
+### DB running in the same docker compose as Open Notebook (recommended)
+
+The example below is for when you are running SurrealDB as a separate container in the same Docker Compose stack, which is the method described [here](../1-INSTALLATION/docker-compose.md) (and our recommended method).
+
+```env
+SURREAL_URL="ws://surrealdb:8000/rpc"
+SURREAL_USER="root"
+SURREAL_PASSWORD="root"
+SURREAL_NAMESPACE="open_notebook"
+SURREAL_DATABASE="open_notebook"
+```
+
+### DB running in the host machine and Open Notebook running in Docker
+
+If Open Notebook is running in Docker and SurrealDB is on your host machine, you need to point to the host machine's address. 
+
+```env
+SURREAL_URL="ws://your-machine-ip:8000/rpc" # or host.docker.internal
+SURREAL_USER="root"
+SURREAL_PASSWORD="root"
+SURREAL_NAMESPACE="open_notebook"
+SURREAL_DATABASE="open_notebook"
+```
+
+### Open Notebook and Surreal are running on the same machine
+
+If you are running both services locally, or if you are using the deprecated [single container setup](../1-INSTALLATION/single-container.md), use `localhost`:
+
+```env
+SURREAL_URL="ws://localhost:8000/rpc"
+SURREAL_USER="root"
+SURREAL_PASSWORD="root"
+SURREAL_NAMESPACE="open_notebook"
+SURREAL_DATABASE="open_notebook"
+```
+
+## Multiple databases
+
+A single SurrealDB instance can host multiple namespaces, and each namespace can hold multiple databases. So, if you want to set up multiple Open Notebook deployments for different users, you don't need to deploy multiple SurrealDB instances.
diff --git a/docs/5-CONFIGURATION/environment-reference.md b/docs/5-CONFIGURATION/environment-reference.md
new file mode 100644
index 00000000..b3cfeb89
--- /dev/null
+++ b/docs/5-CONFIGURATION/environment-reference.md
@@ -0,0 +1,320 @@
+# Complete Environment Reference
+
+Comprehensive list of all environment variables available in Open Notebook.
+
+---
+
+## API Configuration
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `API_URL` | No | Auto-detected | URL where frontend reaches API (e.g., http://localhost:5055) |
+| `INTERNAL_API_URL` | No | http://localhost:5055 | Internal API URL for Next.js server-side proxying |
+| `API_CLIENT_TIMEOUT` | No | 300 | Client timeout in seconds (how long to wait for API response) |
+| `OPEN_NOTEBOOK_PASSWORD` | No | None | Password to protect Open Notebook instance |
+
+---
+
+## AI Provider: OpenAI
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `OPENAI_API_KEY` | If using OpenAI | None | OpenAI API key (starts with `sk-`) |
+
+**Setup:** Get from https://platform.openai.com/api-keys
+
+---
+
+## AI Provider: Anthropic (Claude)
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `ANTHROPIC_API_KEY` | If using Anthropic | None | Claude API key (starts with `sk-ant-`) |
+
+**Setup:** Get from https://console.anthropic.com/
+
+---
+
+## AI Provider: Google Gemini
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `GOOGLE_API_KEY` | If using Google | None | Google API key for Gemini |
+| `GEMINI_API_BASE_URL` | No | Default endpoint | Override Gemini API endpoint |
+| `VERTEX_PROJECT` | If using Vertex AI | None | Google Cloud project ID |
+| `VERTEX_LOCATION` | If using Vertex AI | us-east5 | Vertex AI location |
+| `GOOGLE_APPLICATION_CREDENTIALS` | If using Vertex AI | None | Path to service account JSON |
+
+**Setup:** Get from https://aistudio.google.com/app/apikey
+
+---
+
+## AI Provider: Groq
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `GROQ_API_KEY` | If using Groq | None | Groq API key (starts with `gsk_`) |
+
+**Setup:** Get from https://console.groq.com/keys
+
+---
+
+## AI Provider: Mistral
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `MISTRAL_API_KEY` | If using Mistral | None | Mistral API key |
+
+**Setup:** Get from https://console.mistral.ai/
+
+---
+
+## AI Provider: DeepSeek
+
+| Variable | Required? 
| Default | Description | +|----------|-----------|---------|-------------| +| `DEEPSEEK_API_KEY` | If using DeepSeek | None | DeepSeek API key | + +**Setup:** Get from https://platform.deepseek.com/ + +--- + +## AI Provider: xAI + +| Variable | Required? | Default | Description | +|----------|-----------|---------|-------------| +| `XAI_API_KEY` | If using xAI | None | xAI API key | + +**Setup:** Get from https://console.x.ai/ + +--- + +## AI Provider: Ollama (Local) + +| Variable | Required? | Default | Description | +|----------|-----------|---------|-------------| +| `OLLAMA_API_BASE` | If using Ollama | None | Ollama endpoint (e.g., http://localhost:11434) | + +**Setup:** Install from https://ollama.ai + +--- + +## AI Provider: OpenRouter + +| Variable | Required? | Default | Description | +|----------|-----------|---------|-------------| +| `OPENROUTER_API_KEY` | If using OpenRouter | None | OpenRouter API key | +| `OPENROUTER_BASE_URL` | No | https://openrouter.ai/api/v1 | OpenRouter endpoint | + +**Setup:** Get from https://openrouter.ai/ + +--- + +## AI Provider: OpenAI-Compatible + +For self-hosted LLMs, LM Studio, or OpenAI-compatible endpoints: + +| Variable | Required? | Default | Description | +|----------|-----------|---------|-------------| +| `OPENAI_COMPATIBLE_BASE_URL` | If using compatible | None | Base URL for OpenAI-compatible endpoint | +| `OPENAI_COMPATIBLE_API_KEY` | If using compatible | None | API key for endpoint | +| `OPENAI_COMPATIBLE_BASE_URL_LLM` | No | Uses generic | Language model endpoint (overrides generic) | +| `OPENAI_COMPATIBLE_API_KEY_LLM` | No | Uses generic | Language model API key (overrides generic) | +| `OPENAI_COMPATIBLE_BASE_URL_EMBEDDING` | No | Uses generic | Embedding endpoint (overrides generic) | +| `OPENAI_COMPATIBLE_API_KEY_EMBEDDING` | No | Uses generic | Embedding API key (overrides generic) | +| `OPENAI_COMPATIBLE_BASE_URL_STT` | No | Uses generic | Speech-to-text endpoint (overrides generic) | +| `OPENAI_COMPATIBLE_API_KEY_STT` | No | Uses generic | STT API key (overrides generic) | +| `OPENAI_COMPATIBLE_BASE_URL_TTS` | No | Uses generic | Text-to-speech endpoint (overrides generic) | +| `OPENAI_COMPATIBLE_API_KEY_TTS` | No | Uses generic | TTS API key (overrides generic) | + +**Setup:** For LM Studio, typically: `OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1` + +--- + +## AI Provider: Azure OpenAI + +| Variable | Required? | Default | Description | +|----------|-----------|---------|-------------| +| `AZURE_OPENAI_API_KEY` | If using Azure | None | Azure OpenAI API key | +| `AZURE_OPENAI_ENDPOINT` | If using Azure | None | Azure OpenAI endpoint URL | +| `AZURE_OPENAI_API_VERSION` | No | 2024-12-01-preview | Azure OpenAI API version | +| `AZURE_OPENAI_API_KEY_LLM` | No | Uses generic | LLM-specific API key | +| `AZURE_OPENAI_ENDPOINT_LLM` | No | Uses generic | LLM-specific endpoint | +| `AZURE_OPENAI_API_VERSION_LLM` | No | Uses generic | LLM-specific API version | +| `AZURE_OPENAI_API_KEY_EMBEDDING` | No | Uses generic | Embedding-specific API key | +| `AZURE_OPENAI_ENDPOINT_EMBEDDING` | No | Uses generic | Embedding-specific endpoint | +| `AZURE_OPENAI_API_VERSION_EMBEDDING` | No | Uses generic | Embedding-specific API version | + +--- + +## AI Provider: VoyageAI (Embeddings) + +| Variable | Required? 
| Default | Description |
+|----------|-----------|---------|-------------|
+| `VOYAGE_API_KEY` | If using Voyage | None | Voyage AI API key (for embeddings) |
+
+**Setup:** Get from https://www.voyageai.com/
+
+---
+
+## Text-to-Speech (TTS)
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `ELEVENLABS_API_KEY` | If using ElevenLabs TTS | None | ElevenLabs API key for voice generation |
+| `TTS_BATCH_SIZE` | No | 5 | Concurrent TTS requests (1-5, depends on provider) |
+
+**Setup:** Get from https://elevenlabs.io/
+
+---
+
+## Database: SurrealDB
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `SURREAL_URL` | Yes | ws://surrealdb:8000/rpc | SurrealDB WebSocket connection URL |
+| `SURREAL_USER` | Yes | root | SurrealDB username |
+| `SURREAL_PASSWORD` | Yes | root | SurrealDB password |
+| `SURREAL_NAMESPACE` | Yes | open_notebook | SurrealDB namespace |
+| `SURREAL_DATABASE` | Yes | open_notebook | SurrealDB database name |
+
+---
+
+## Database: Retry Configuration
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `SURREAL_COMMANDS_RETRY_ENABLED` | No | true | Enable retries on failure |
+| `SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS` | No | 3 | Maximum retry attempts |
+| `SURREAL_COMMANDS_RETRY_WAIT_STRATEGY` | No | exponential_jitter | Retry wait strategy (exponential_jitter/exponential/fixed/random) |
+| `SURREAL_COMMANDS_RETRY_WAIT_MIN` | No | 1 | Minimum wait time between retries (seconds) |
+| `SURREAL_COMMANDS_RETRY_WAIT_MAX` | No | 30 | Maximum wait time between retries (seconds) |
+
+---
+
+## Database: Concurrency
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `SURREAL_COMMANDS_MAX_TASKS` | No | 5 | Maximum concurrent database tasks |
+
+---
+
+## LLM Timeouts & SSL
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `ESPERANTO_LLM_TIMEOUT` | No | 60 | LLM inference timeout in seconds |
+| `ESPERANTO_SSL_VERIFY` | No | true | Verify SSL certificates (false = development only) |
+| `ESPERANTO_SSL_CA_BUNDLE` | No | None | Path to custom CA certificate bundle |
+
+---
+
+## Content Extraction
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `FIRECRAWL_API_KEY` | No | None | Firecrawl API key for advanced web scraping |
+| `JINA_API_KEY` | No | None | Jina AI API key for web extraction |
+
+**Setup:**
+- Firecrawl: https://firecrawl.dev/
+- Jina: https://jina.ai/
+
+---
+
+## Debugging & Monitoring
+
+| Variable | Required? | Default | Description |
+|----------|-----------|---------|-------------|
+| `LANGCHAIN_TRACING_V2` | No | false | Enable LangSmith tracing |
+| `LANGCHAIN_ENDPOINT` | No | https://api.smith.langchain.com | LangSmith endpoint |
+| `LANGCHAIN_API_KEY` | No | None | LangSmith API key |
+| `LANGCHAIN_PROJECT` | No | Open Notebook | LangSmith project name |
+
+**Setup:** https://smith.langchain.com/
+
+---
+
+## Environment Variables by Use Case
+
+### Minimal Setup (Cloud Provider)
+```
+OPENAI_API_KEY (or ANTHROPIC_API_KEY, etc.)
+```
+
+### Local Development (Ollama)
+```
+OLLAMA_API_BASE=http://localhost:11434
+```
+
+### Production (OpenAI + Custom Domain)
+```
+OPENAI_API_KEY=sk-proj-... 
+API_URL=https://mynotebook.example.com
+SURREAL_USER=production_user
+SURREAL_PASSWORD=secure_password
+```
+
+### Self-Hosted Behind Reverse Proxy
+```
+OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1
+API_URL=https://mynotebook.example.com
+```
+
+### High-Performance Deployment
+```
+OPENAI_API_KEY=sk-proj-...
+SURREAL_COMMANDS_MAX_TASKS=10
+TTS_BATCH_SIZE=5
+API_CLIENT_TIMEOUT=600
+```
+
+### Debugging
+```
+OPENAI_API_KEY=sk-proj-...
+LANGCHAIN_TRACING_V2=true
+LANGCHAIN_API_KEY=your-key
+```
+
+---
+
+## Validation
+
+Check if a variable is set:
+
+```bash
+# Check single variable
+echo $OPENAI_API_KEY
+
+# Check multiple
+env | grep -E "OPENAI|API_URL"
+
+# Print all config
+env | grep -E "^[A-Z_]+=" | sort
+```
+
+---
+
+## Notes
+
+- **Case-sensitive:** `OPENAI_API_KEY` ≠ `openai_api_key`
+- **No spaces:** `OPENAI_API_KEY=sk-proj-...` not `OPENAI_API_KEY = sk-proj-...`
+- **Quote values:** Quote any value containing spaces or special characters: `OPEN_NOTEBOOK_PASSWORD="my secure pass"`
+- **Restart required:** Changes take effect after restarting services
+- **Secrets:** Don't commit API keys to git
+
+---
+
+## Quick Setup Checklist
+
+- [ ] Choose AI provider (OpenAI, Anthropic, Ollama, etc.)
+- [ ] Get API key if cloud provider
+- [ ] Add to .env or docker.env
+- [ ] Set `API_URL` if behind reverse proxy
+- [ ] Change `SURREAL_PASSWORD` in production
+- [ ] Verify with: `docker compose logs api | grep -i "error"`
+- [ ] Test in browser: Go to Settings → Models
+- [ ] Try a test chat
+
+Done!
diff --git a/docs/5-CONFIGURATION/index.md b/docs/5-CONFIGURATION/index.md
new file mode 100644
index 00000000..38f7eaa1
--- /dev/null
+++ b/docs/5-CONFIGURATION/index.md
@@ -0,0 +1,334 @@
+# Configuration - Essential Settings
+
+Configuration is how you customize Open Notebook for your specific setup. This section covers what you need to know.
+
+---
+
+## What Needs Configuration?
+
+Three things:
+
+1. **AI Provider** — Which LLM/embedding service you're using (OpenAI, Anthropic, Ollama, etc.)
+2. **Database** — How to connect to SurrealDB (usually pre-configured)
+3. **Server** — API URL, ports, timeouts (usually auto-detected)
+
+---
+
+## Quick Decision: Which Provider?
+
+### Option 1: Cloud Provider (Fastest)
+- **OpenRouter** (recommended: one API key for 100+ models)
+- **OpenAI** (GPT)
+- **Anthropic** (Claude)
+- **Google Gemini** (multi-modal, long context)
+- **Groq** (ultra-fast inference)
+
+Setup: Get API key → Set env var → Done
+
+→ Go to **[AI Providers Guide](ai-providers.md)**
+
+### Option 2: Local (Free & Private)
+- **Ollama** (open-source models, on your machine)
+
+→ Go to **[Ollama Setup](ollama.md)**
+
+### Option 3: OpenAI-Compatible
+- **LM Studio** (local)
+- **Custom endpoints**
+
+→ Go to **[OpenAI-Compatible Guide](openai-compatible.md)**
+
+---
+
+## Configuration File
+
+Use the right file depending on your setup.
+
+### `.env` (Local Development)
+
+You will only use .env if you are running Open Notebook locally.
+
+```
+Located in: project root
+Use for: Development on your machine
+Format: KEY=value, one per line
+```
+
+### `docker.env` (Docker Deployment)
+
+You will use this file to hold your environment variables if you are using Docker Compose and prefer not to put the variables directly in the compose file. 
+``` +Located in: project root (or ./docker) +Use for: Docker deployments +Format: Same as .env +Loaded by: docker-compose.yml +``` + +--- + +## Most Important Settings + +All of the settings provided below are to be placed inside your environment file (.env or docker.env depending on your setup). + + +### Surreal Database + +This is the database used by the app. + +``` +SURREAL_URL=ws://surrealdb:8000/rpc +SURREAL_USER=root +SURREAL_PASSWORD=root # Change in production! +SURREAL_NAMESPACE=open_notebook +SURREAL_DATABASE=open_notebook +``` + +> The only thing that is critical to not miss is the hostname in the `SURREAL_URL`. Check what URL to use based on your deployment, [here](database.md). + + +### AI Provider (API Key or URL) + +We need access to LLMs in order for the app to work. You can use any of the support AI Providers by adding their API Keys. + +``` +OPENAI_API_KEY=sk-... +ANTHROPIC_API_KEY=sk-ant-... +GOOGLE_API_KEY=... +OPENROUTER_API_KEY=... +``` + +Or, if you are planning to use only local providers, you can setup Ollama by configuring it's base URL. This will get you set and ready with text and embeddings in one go: + +``` +OLLAMA_BASE_URL=http://localhost:11434 +``` + +> A lot of people screw up on the Ollama BASE URL by not knowing how to point to their Ollama installation. if you are having trouble connecting to Ollama, see [here](ollama.md). + +You can also use LM Studio locally if you prefer by using it as an OpenAI compatible endpoint. + +``` +OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 +OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:1234/v1 +``` + +> For more installation on using OpenAI compatible endpoints, see [here](openai-compatible.md). + + +### API URL (If Behind Reverse Proxy) +You only need to worry about this if you are deploying on a proxy or if you are changing port information. Otherwise, skip this. + +``` +API_URL=https://your-domain.com +# Usually auto-detected. Only set if needed. +``` + +Auto-detection works for most setups. + +--- + +## Configuration by Scenario + +### Scenario 1: Docker on Localhost (Default) +```env +# In docker.env: +OPENAI_API_KEY=sk-... +# Everything else uses defaults +# Done! +``` + +### Scenario 2: Docker on Remote Server +```env +# In docker.env: +OPENAI_API_KEY=sk-... +API_URL=http://your-server-ip:5055 +``` + +### Scenario 3: Behind Reverse Proxy (Nginx/Cloudflare) +```env +# In docker.env: +OPENAI_API_KEY=sk-... +API_URL=https://your-domain.com +# The reverse proxy handles HTTPS +``` + +### Scenario 4: Using Ollama Locally +```env +# In .env: +OLLAMA_API_BASE=http://localhost:11434 +# No API key needed +``` + +### Scenario 5: Using Azure OpenAI +```env +# In docker.env: +AZURE_OPENAI_API_KEY=your-key +AZURE_OPENAI_ENDPOINT=https://your-instance.openai.azure.com/ +AZURE_OPENAI_API_VERSION=2024-12-01-preview +``` + +--- + +## Configuration Sections + +### [AI Providers](ai-providers.md) +- OpenAI configuration +- Anthropic configuration +- Google Gemini configuration +- Groq configuration +- Ollama configuration +- Azure OpenAI configuration +- OpenAI-compatible configuration + +### [Database](database.md) +- SurrealDB setup +- Connection strings +- Database vs. 
namespace
+- Running your own SurrealDB
+
+### [Advanced](advanced.md)
+- Ports and networking
+- Timeouts and concurrency
+- SSL/security
+- Retry configuration
+- Worker concurrency
+- Language models & embeddings
+- Speech-to-text & text-to-speech
+- Debugging and logging
+
+### [Reverse Proxy](reverse-proxy.md)
+- Nginx, Caddy, Traefik configs
+- Custom domain setup
+- SSL/HTTPS configuration
+- Coolify and other platforms
+
+### [Security](security.md)
+- Password protection
+- API authentication
+- Production hardening
+- Firewall configuration
+
+### [Local TTS](local-tts.md)
+- Speaches setup for local text-to-speech
+- GPU acceleration
+- Voice options
+- Docker networking
+
+### [Ollama](ollama.md)
+- Setting up and pointing to an Ollama server
+- Downloading models
+- Using embeddings
+
+### [OpenAI-Compatible Providers](openai-compatible.md)
+- LM Studio, vLLM, Text Generation WebUI
+- Connection configuration
+- Docker networking
+- Troubleshooting
+
+### [Complete Reference](environment-reference.md)
+- All environment variables
+- Grouped by category
+- What each one does
+- Default values
+
+---
+
+## How to Add Configuration
+
+### Method 1: Edit `.env` File (Development)
+
+```bash
+1. Open .env in your editor
+2. Find the section for your provider
+3. Uncomment and fill in your API key
+4. Save
+5. Restart services
+```
+
+### Method 2: Set Docker Environment (Deployment)
+
+```yaml
+# In docker-compose.yml:
+services:
+  api:
+    environment:
+      - OPENAI_API_KEY=sk-...
+      - API_URL=https://your-domain.com
+```
+
+### Method 3: Export Environment Variables
+
+```bash
+# In your terminal:
+export OPENAI_API_KEY=sk-...
+export API_URL=https://your-domain.com
+
+# Then start services
+docker compose up
+```
+
+### Method 4: Use docker.env File
+
+```bash
+1. Create/edit docker.env
+2. Add your configuration
+3. docker-compose automatically loads it
+4. docker compose up
+```
+
+---
+
+## Verification
+
+After configuration, verify it works:
+
+```
+1. Open your notebook
+2. Go to Settings → Models
+3. You should see your configured provider
+4. Try a simple Chat question
+5. If it responds, configuration is correct!
+```
+
+---
+
+## Common Mistakes
+
+| Mistake | Problem | Fix |
+|---------|---------|-----|
+| Forgetting the API key | Models not available | Add OPENAI_API_KEY (or your provider's) |
+| Wrong database URL | Can't start API | Check SURREAL_URL format |
+| Port 5055 not exposed | "Can't connect to server" | Expose 5055 in docker-compose |
+| Typo in env var | Settings ignored | Check spelling (case-sensitive!) |
+| Quote mismatch | Value cut off | Use quotes: OPENAI_API_KEY="sk-..." |
+| Forgetting to restart | Old config still used | Restart services after env changes |
+
+---
+
+## What Comes After Configuration
+
+Once configured:
+
+1. **[Quick Start](../0-START-HERE/index.md)** — Run your first notebook
+2. **[Installation](../1-INSTALLATION/index.md)** — Multi-route deployment guides
+3. **[User Guide](../3-USER-GUIDE/index.md)** — How to use each feature
+
+---
+
+## Getting Help
+
+- **Configuration error?** → Check [Troubleshooting](../6-TROUBLESHOOTING/quick-fixes.md)
+- **Provider-specific issue?** → Check [AI Providers](ai-providers.md)
+- **Need complete reference?** → See [Environment Reference](environment-reference.md)
+
+---
+
+## Summary
+
+**Minimal configuration to run:**
+1. Choose an AI provider (or use Ollama locally)
+2. Set API key in .env or docker.env
+3. Start services
+4. Done!
+
+Everything else is optional optimization. 
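+
+As a concrete example, a minimal `docker.env` for the default Docker setup might look like this (the OpenAI key is a placeholder; swap in your own provider's variable):
+
+```env
+# Any one supported AI provider
+OPENAI_API_KEY=sk-proj-your-key-here
+
+# Database defaults matching the docker-compose setup
+SURREAL_URL="ws://surrealdb:8000/rpc"
+SURREAL_USER="root"
+SURREAL_PASSWORD="root"
+SURREAL_NAMESPACE="open_notebook"
+SURREAL_DATABASE="open_notebook"
+```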
diff --git a/docs/5-CONFIGURATION/local-tts.md b/docs/5-CONFIGURATION/local-tts.md new file mode 100644 index 00000000..8c39cfca --- /dev/null +++ b/docs/5-CONFIGURATION/local-tts.md @@ -0,0 +1,339 @@ +# Local Text-to-Speech Setup + +Run text-to-speech locally for free, private podcast generation using OpenAI-compatible TTS servers. + +--- + +## Why Local TTS? + +| Benefit | Description | +|---------|-------------| +| **Free** | No per-character costs after setup | +| **Private** | Audio never leaves your machine | +| **Unlimited** | No rate limits or quotas | +| **Offline** | Works without internet | + +--- + +## Quick Start with Speaches + +[Speaches](https://github.com/speaches-ai/speaches) is an open-source, OpenAI-compatible TTS server. + +### Step 1: Create Docker Compose File + +Create a folder and add `docker-compose.yml`: + +```yaml +services: + speaches: + image: ghcr.io/speaches-ai/speaches:latest-cpu + container_name: speaches + ports: + - "8969:8000" + volumes: + - hf-hub-cache:/home/ubuntu/.cache/huggingface/hub + restart: unless-stopped + +volumes: + hf-hub-cache: +``` + +### Step 2: Start and Download Model + +```bash +# Start Speaches +docker compose up -d + +# Wait for startup +sleep 10 + +# Download voice model (~500MB) +docker compose exec speaches uv tool run speaches-cli model download speaches-ai/Kokoro-82M-v1.0-ONNX +``` + +### Step 3: Test + +```bash +curl "http://localhost:8969/v1/audio/speech" -s \ + -H "Content-Type: application/json" \ + --output test.mp3 \ + --data '{ + "input": "Hello! Local TTS is working.", + "model": "speaches-ai/Kokoro-82M-v1.0-ONNX", + "voice": "af_bella" + }' +``` + +Play `test.mp3` to verify. + +### Step 4: Configure Open Notebook + +**Docker deployment:** +```yaml +# In your Open Notebook docker-compose.yml +environment: + - OPENAI_COMPATIBLE_BASE_URL_TTS=http://host.docker.internal:8969/v1 +``` + +**Local development:** +```bash +export OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8969/v1 +``` + +### Step 5: Add Model in Open Notebook + +1. Go to **Settings** → **Models** +2. Click **Add Model** in Text-to-Speech section +3. Configure: + - **Provider**: `openai_compatible` + - **Model Name**: `speaches-ai/Kokoro-82M-v1.0-ONNX` + - **Display Name**: `Local TTS` +4. Click **Save** +5. 
Set as default if desired + +--- + +## Available Voices + +The Kokoro model includes multiple voices: + +### Female Voices +| Voice ID | Description | +|----------|-------------| +| `af_bella` | Clear, professional | +| `af_sarah` | Warm, friendly | +| `af_nicole` | Energetic, expressive | + +### Male Voices +| Voice ID | Description | +|----------|-------------| +| `am_adam` | Deep, authoritative | +| `am_michael` | Friendly, conversational | + +### British Accents +| Voice ID | Description | +|----------|-------------| +| `bf_emma` | British female, professional | +| `bm_george` | British male, formal | + +### Test Different Voices + +```bash +for voice in af_bella af_sarah am_adam am_michael; do + curl "http://localhost:8969/v1/audio/speech" -s \ + -H "Content-Type: application/json" \ + --output "test_${voice}.mp3" \ + --data "{ + \"input\": \"Hello, this is the ${voice} voice.\", + \"model\": \"speaches-ai/Kokoro-82M-v1.0-ONNX\", + \"voice\": \"${voice}\" + }" +done +``` + +--- + +## GPU Acceleration + +For faster generation with NVIDIA GPUs: + +```yaml +services: + speaches: + image: ghcr.io/speaches-ai/speaches:latest-cuda + container_name: speaches + ports: + - "8969:8000" + volumes: + - hf-hub-cache:/home/ubuntu/.cache/huggingface/hub + restart: unless-stopped + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] + +volumes: + hf-hub-cache: +``` + +--- + +## Docker Networking + +### Open Notebook in Docker (macOS/Windows) + +```bash +OPENAI_COMPATIBLE_BASE_URL_TTS=http://host.docker.internal:8969/v1 +``` + +### Open Notebook in Docker (Linux) + +```bash +# Option 1: Docker bridge IP +OPENAI_COMPATIBLE_BASE_URL_TTS=http://172.17.0.1:8969/v1 + +# Option 2: Host networking +docker run --network host ... 
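+# (with host networking, the container shares the host's network stack,
+# so Speaches on the host is reachable as http://localhost:8969/v1)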
+``` + +### Remote Server + +Run Speaches on a different machine: + +```bash +# On server, bind to all interfaces +# Then in Open Notebook: +OPENAI_COMPATIBLE_BASE_URL_TTS=http://server-ip:8969/v1 +``` + +--- + +## Multi-Speaker Podcasts + +Configure different voices for each speaker: + +``` +Speaker 1 (Host): + Model: speaches-ai/Kokoro-82M-v1.0-ONNX + Voice: af_bella + +Speaker 2 (Guest): + Model: speaches-ai/Kokoro-82M-v1.0-ONNX + Voice: am_adam + +Speaker 3 (Narrator): + Model: speaches-ai/Kokoro-82M-v1.0-ONNX + Voice: bf_emma +``` + +--- + +## Troubleshooting + +### Service Won't Start + +```bash +# Check logs +docker compose logs speaches + +# Verify port available +lsof -i :8969 + +# Restart +docker compose down && docker compose up -d +``` + +### Connection Refused + +```bash +# Test Speaches is running +curl http://localhost:8969/v1/models + +# From inside Open Notebook container +docker exec -it open-notebook curl http://host.docker.internal:8969/v1/models +``` + +### Model Not Found + +```bash +# List downloaded models +docker compose exec speaches uv tool run speaches-cli model list + +# Download if missing +docker compose exec speaches uv tool run speaches-cli model download speaches-ai/Kokoro-82M-v1.0-ONNX +``` + +### Poor Audio Quality + +- Try different voices +- Adjust speed: `"speed": 0.9` to `1.2` +- Check model downloaded completely +- Allocate more memory + +### Slow Generation + +| Solution | How | +|----------|-----| +| Use GPU | Switch to `latest-cuda` image | +| More CPU | Allocate more cores in Docker | +| Faster model | Use smaller/quantized models | +| SSD storage | Move Docker volumes to SSD | + +--- + +## Performance Tips + +### Recommended Specs + +| Component | Minimum | Recommended | +|-----------|---------|-------------| +| CPU | 2 cores | 4+ cores | +| RAM | 2 GB | 4+ GB | +| Storage | 5 GB | 10 GB (for multiple models) | +| GPU | None | NVIDIA (optional) | + +### Resource Limits + +```yaml +services: + speaches: + # ... other config + mem_limit: 4g + cpus: 2 +``` + +### Monitor Usage + +```bash +docker stats speaches +``` + +--- + +## Comparison: Local vs Cloud + +| Aspect | Local (Speaches) | Cloud (OpenAI/ElevenLabs) | +|--------|------------------|---------------------------| +| **Cost** | Free | $0.015-0.10/min | +| **Privacy** | Complete | Data sent to provider | +| **Speed** | Depends on hardware | Usually faster | +| **Quality** | Good | Excellent | +| **Setup** | Moderate | Simple API key | +| **Offline** | Yes | No | +| **Voices** | Limited | Many options | + +### When to Use Local + +- Privacy-sensitive content +- High-volume generation +- Development/testing +- Offline environments +- Cost control + +### When to Use Cloud + +- Premium quality needs +- Multiple languages +- Time-sensitive projects +- Limited hardware + +--- + +## Other Local TTS Options + +Any OpenAI-compatible TTS server works. The key is: + +1. Server implements `/v1/audio/speech` endpoint +2. Set `OPENAI_COMPATIBLE_BASE_URL_TTS` to server URL +3. 
Add model with provider `openai_compatible`
+
+---
+
+## Related
+
+- **[OpenAI-Compatible Providers](openai-compatible.md)** - General compatible provider setup
+- **[AI Providers](ai-providers.md)** - All provider configuration
+- **[Creating Podcasts](../3-USER-GUIDE/creating-podcasts.md)** - Using TTS for podcasts
diff --git a/docs/features/ollama.md b/docs/5-CONFIGURATION/ollama.md
similarity index 100%
rename from docs/features/ollama.md
rename to docs/5-CONFIGURATION/ollama.md
diff --git a/docs/5-CONFIGURATION/openai-compatible.md b/docs/5-CONFIGURATION/openai-compatible.md
new file mode 100644
index 00000000..9624a58c
--- /dev/null
+++ b/docs/5-CONFIGURATION/openai-compatible.md
@@ -0,0 +1,394 @@
+# OpenAI-Compatible Providers
+
+Use any server that implements the OpenAI API format with Open Notebook. This includes LM Studio, Text Generation WebUI, vLLM, and many others.
+
+---
+
+## What is OpenAI-Compatible?
+
+Many AI tools implement the same API format as OpenAI:
+
+```
+POST /v1/chat/completions
+POST /v1/embeddings
+POST /v1/audio/speech
+```
+
+Open Notebook can connect to any server using this format.
+
+---
+
+## Common Compatible Servers
+
+| Server | Use Case | URL |
+|--------|----------|-----|
+| **LM Studio** | Desktop GUI for local models | https://lmstudio.ai |
+| **Text Generation WebUI** | Full-featured local inference | https://github.com/oobabooga/text-generation-webui |
+| **vLLM** | High-performance serving | https://github.com/vllm-project/vllm |
+| **Ollama** | Simple local models | (Use native Ollama provider instead) |
+| **LocalAI** | Local AI inference | https://github.com/mudler/LocalAI |
+| **llama.cpp server** | Lightweight inference | https://github.com/ggerganov/llama.cpp |
+
+---
+
+## Quick Setup: LM Studio
+
+### Step 1: Install and Start LM Studio
+
+1. Download from https://lmstudio.ai
+2. Install and launch
+3. Download a model (e.g., Llama 3)
+4. Start the local server (default: port 1234)
+
+### Step 2: Configure Environment
+
+```bash
+# For language models
+export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1
+export OPENAI_COMPATIBLE_API_KEY=not-needed  # LM Studio doesn't require a key
+```
+
+### Step 3: Add Model in Open Notebook
+
+1. Go to **Settings** → **Models**
+2. Click **Add Model**
+3. Configure:
+   - **Provider**: `openai_compatible`
+   - **Model Name**: Your model name from LM Studio
+   - **Display Name**: `LM Studio - Llama 3`
+4. Click **Save**
+
+---
+
+## Environment Variables
+
+### Language Models (Chat)
+
+```bash
+OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1
+OPENAI_COMPATIBLE_API_KEY=optional-api-key
+```
+
+### Embeddings
+
+```bash
+OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:1234/v1
+OPENAI_COMPATIBLE_API_KEY_EMBEDDING=optional-api-key
+```
+
+### Text-to-Speech
+
+```bash
+OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8969/v1
+OPENAI_COMPATIBLE_API_KEY_TTS=optional-api-key
+```
+
+### Speech-to-Text
+
+```bash
+OPENAI_COMPATIBLE_BASE_URL_STT=http://localhost:9000/v1
+OPENAI_COMPATIBLE_API_KEY_STT=optional-api-key
+```
+
+---
+
+## Docker Networking
+
+When Open Notebook runs in Docker and your compatible server runs on the host:
+
+### macOS / Windows
+
+```bash
+OPENAI_COMPATIBLE_BASE_URL=http://host.docker.internal:1234/v1
+```
+
+### Linux
+
+```bash
+# Option 1: Docker bridge IP
+OPENAI_COMPATIBLE_BASE_URL=http://172.17.0.1:1234/v1
+
+# Option 2: Host networking mode
+docker run --network host ... 
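+# (with host networking, the server on the host is reachable
+# as http://localhost:1234/v1)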
+``` + +### Same Docker Network + +```yaml +# docker-compose.yml +services: + open-notebook: + # ... + environment: + - OPENAI_COMPATIBLE_BASE_URL=http://lm-studio:1234/v1 + + lm-studio: + # your LM Studio container + ports: + - "1234:1234" +``` + +--- + +## Text Generation WebUI Setup + +### Start with API Enabled + +```bash +python server.py --api --listen +``` + +### Configure Open Notebook + +```bash +OPENAI_COMPATIBLE_BASE_URL=http://localhost:5000/v1 +``` + +### Docker Compose Example + +```yaml +services: + text-gen: + image: atinoda/text-generation-webui:default + ports: + - "5000:5000" + - "7860:7860" + volumes: + - ./models:/app/models + command: --api --listen + + open-notebook: + image: lfnovo/open_notebook:v1-latest-single + environment: + - OPENAI_COMPATIBLE_BASE_URL=http://text-gen:5000/v1 + depends_on: + - text-gen +``` + +--- + +## vLLM Setup + +### Start vLLM Server + +```bash +python -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Llama-3.1-8B-Instruct \ + --port 8000 +``` + +### Configure Open Notebook + +```bash +OPENAI_COMPATIBLE_BASE_URL=http://localhost:8000/v1 +``` + +### Docker Compose with GPU + +```yaml +services: + vllm: + image: vllm/vllm-openai:latest + command: --model meta-llama/Llama-3.1-8B-Instruct + ports: + - "8000:8000" + volumes: + - ~/.cache/huggingface:/root/.cache/huggingface + deploy: + resources: + reservations: + devices: + - driver: nvidia + count: 1 + capabilities: [gpu] + + open-notebook: + image: lfnovo/open_notebook:v1-latest-single + environment: + - OPENAI_COMPATIBLE_BASE_URL=http://vllm:8000/v1 + depends_on: + - vllm +``` + +--- + +## Adding Models in Open Notebook + +### Via Settings UI + +1. Go to **Settings** → **Models** +2. Click **Add Model** in appropriate section +3. Select **Provider**: `openai_compatible` +4. Enter **Model Name**: exactly as the server expects +5. Enter **Display Name**: your preferred name +6. Click **Save** + +### Model Name Format + +The model name must match what your server expects: + +| Server | Model Name Format | +|--------|-------------------| +| LM Studio | As shown in LM Studio UI | +| vLLM | HuggingFace model path | +| Text Gen WebUI | As loaded in UI | +| llama.cpp | Model file name | + +--- + +## Testing Connection + +### Test API Endpoint + +```bash +# Test chat completions +curl http://localhost:1234/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "your-model-name", + "messages": [{"role": "user", "content": "Hello"}] + }' +``` + +### Test from Inside Docker + +```bash +docker exec -it open-notebook curl http://host.docker.internal:1234/v1/models +``` + +--- + +## Troubleshooting + +### Connection Refused + +``` +Problem: Cannot connect to server + +Solutions: +1. Verify server is running +2. Check port is correct +3. Test with curl directly +4. Check Docker networking (use host.docker.internal) +5. Verify firewall allows connection +``` + +### Model Not Found + +``` +Problem: Server returns "model not found" + +Solutions: +1. Check model is loaded in server +2. Verify exact model name spelling +3. List available models: curl http://localhost:1234/v1/models +4. Update model name in Open Notebook +``` + +### Slow Responses + +``` +Problem: Requests take very long + +Solutions: +1. Check server resources (RAM, GPU) +2. Use smaller/quantized model +3. Reduce context length +4. Enable GPU acceleration if available +``` + +### Authentication Errors + +``` +Problem: 401 or authentication failed + +Solutions: +1. Check if server requires API key +2. 
Set OPENAI_COMPATIBLE_API_KEY +3. Some servers need any non-empty key +``` + +### Timeout Errors + +``` +Problem: Request times out + +Solutions: +1. Model may be loading (first request slow) +2. Increase timeout settings +3. Check server logs for errors +4. Reduce request size +``` + +--- + +## Multiple Compatible Endpoints + +You can use different compatible servers for different purposes: + +```bash +# Chat model from LM Studio +OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 + +# Embeddings from different server +OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8080/v1 + +# TTS from Speaches +OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8969/v1 +``` + +Add each as a separate model in Open Notebook settings. + +--- + +## Performance Tips + +### Model Selection + +| Model Size | RAM Needed | Speed | +|------------|------------|-------| +| 7B | 8GB | Fast | +| 13B | 16GB | Medium | +| 70B | 64GB+ | Slow | + +### Quantization + +Use quantized models (Q4, Q5) for faster inference with less RAM: + +``` +llama-3-8b-q4_k_m.gguf → ~4GB RAM, fast +llama-3-8b-f16.gguf → ~16GB RAM, slower +``` + +### GPU Acceleration + +Enable GPU in your server for much faster inference: +- LM Studio: Settings → GPU layers +- vLLM: Automatic with CUDA +- llama.cpp: `--n-gpu-layers 35` + +--- + +## Comparison: Native vs Compatible + +| Aspect | Native Provider | OpenAI Compatible | +|--------|-----------------|-------------------| +| **Setup** | API key only | Server + configuration | +| **Models** | Provider's models | Any compatible model | +| **Cost** | Pay per token | Free (local) | +| **Speed** | Usually fast | Depends on hardware | +| **Features** | Full support | Basic features | + +Use OpenAI-compatible when: +- Running local models +- Using custom/fine-tuned models +- Privacy requirements +- Cost control + +--- + +## Related + +- **[Local TTS Setup](local-tts.md)** - Text-to-speech with Speaches +- **[AI Providers](ai-providers.md)** - All provider options +- **[Ollama Setup](ollama.md)** - Native Ollama integration diff --git a/docs/5-CONFIGURATION/reverse-proxy.md b/docs/5-CONFIGURATION/reverse-proxy.md new file mode 100644 index 00000000..ea88b46e --- /dev/null +++ b/docs/5-CONFIGURATION/reverse-proxy.md @@ -0,0 +1,738 @@ +# Reverse Proxy Configuration + +Deploy Open Notebook behind nginx, Caddy, Traefik, or other reverse proxies with custom domains and HTTPS. + +--- + +## Simplified Setup (v1.1+) + +Starting with v1.1, Open Notebook uses Next.js rewrites to simplify configuration. **You only need to proxy to one port** - Next.js handles internal API routing automatically. + +### How It Works + +``` +Browser → Reverse Proxy → Port 8502 (Next.js) + ↓ (internal proxy) + Port 5055 (FastAPI) +``` + +Next.js automatically forwards `/api/*` requests to the FastAPI backend, so your reverse proxy only needs one port! + +--- + +## Quick Configuration Examples + +### Nginx (Recommended) + +```nginx +server { + listen 443 ssl http2; + server_name notebook.example.com; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + + # Single location block - that's it! 
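+    # (the Upgrade/Connection headers below also keep WebSocket traffic working)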
+ location / { + proxy_pass http://open-notebook:8502; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + } +} + +# HTTP to HTTPS redirect +server { + listen 80; + server_name notebook.example.com; + return 301 https://$server_name$request_uri; +} +``` + +### Caddy + +```caddy +notebook.example.com { + reverse_proxy open-notebook:8502 +} +``` + +That's it! Caddy handles HTTPS automatically. + +### Traefik + +```yaml +services: + open-notebook: + image: lfnovo/open_notebook:v1-latest-single + environment: + - API_URL=https://notebook.example.com + labels: + - "traefik.enable=true" + - "traefik.http.routers.notebook.rule=Host(`notebook.example.com`)" + - "traefik.http.routers.notebook.entrypoints=websecure" + - "traefik.http.routers.notebook.tls.certresolver=myresolver" + - "traefik.http.services.notebook.loadbalancer.server.port=8502" + networks: + - traefik-network +``` + +### Coolify + +1. Create new service with `lfnovo/open_notebook:v1-latest-single` +2. Set port to **8502** +3. Add environment: `API_URL=https://your-domain.com` +4. Enable HTTPS in Coolify +5. Done! + +--- + +## Environment Variables + +```bash +# Required for reverse proxy setups +API_URL=https://your-domain.com + +# Optional: For multi-container deployments +# INTERNAL_API_URL=http://api-service:5055 +``` + +**Important**: Set `API_URL` to your public URL (with https://). + +--- + +## Understanding API_URL + +The frontend uses a three-tier priority system to determine the API URL: + +1. **Runtime Configuration** (Highest Priority): `API_URL` environment variable set at container runtime +2. **Build-time Configuration**: `NEXT_PUBLIC_API_URL` baked into the Docker image +3. **Auto-detection** (Fallback): Infers from the incoming HTTP request headers + +### Auto-Detection Details + +When `API_URL` is not set, the Next.js frontend: +- Analyzes the incoming HTTP request +- Extracts the hostname from the `host` header +- Respects the `X-Forwarded-Proto` header (for HTTPS behind reverse proxies) +- Constructs the API URL as `{protocol}://{hostname}:5055` +- Example: Request to `http://10.20.30.20:8502` → API URL becomes `http://10.20.30.20:5055` + +**Why set API_URL explicitly?** +- **Reliability**: Auto-detection can fail with complex proxy setups +- **HTTPS**: Ensures frontend uses `https://` when behind SSL-terminating proxy +- **Custom domains**: Works correctly with domain names instead of IP addresses +- **Port mapping**: Avoids exposing port 5055 in the URL when using reverse proxy + +**Important**: Don't include `/api` at the end - the system adds this automatically! 
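+
+To see auto-detection in action without a browser, you can simulate a proxied request with `curl` (a sketch; the host name and header values are illustrative):
+
+```bash
+# Simulate a request arriving through an SSL-terminating proxy
+curl -s -o /dev/null -w "%{http_code}\n" \
+  -H "Host: notebook.example.com" \
+  -H "X-Forwarded-Proto: https" \
+  http://localhost:8502/
+
+# With no API_URL set, the frontend would infer the API URL from these
+# headers as https://notebook.example.com:5055
+```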
+ +--- + +## Complete Docker Compose Example + +```yaml +services: + open-notebook: + image: lfnovo/open_notebook:v1-latest-single + container_name: open-notebook + environment: + - API_URL=https://notebook.example.com + - OPENAI_API_KEY=${OPENAI_API_KEY} + - OPEN_NOTEBOOK_PASSWORD=${OPEN_NOTEBOOK_PASSWORD} + volumes: + - ./notebook_data:/app/data + - ./surreal_data:/mydata + # Only expose to localhost (nginx handles public access) + ports: + - "127.0.0.1:8502:8502" + restart: unless-stopped + + nginx: + image: nginx:alpine + container_name: nginx-proxy + ports: + - "80:80" + - "443:443" + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf:ro + - ./ssl:/etc/nginx/ssl:ro + depends_on: + - open-notebook + restart: unless-stopped +``` + +--- + +## Full Nginx Configuration + +```nginx +events { + worker_connections 1024; +} + +http { + upstream notebook { + server open-notebook:8502; + } + + # HTTP redirect + server { + listen 80; + server_name notebook.example.com; + return 301 https://$server_name$request_uri; + } + + # HTTPS server + server { + listen 443 ssl http2; + server_name notebook.example.com; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers HIGH:!aNULL:!MD5; + + # Security headers + add_header X-Frame-Options DENY; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; + + # Proxy settings + location / { + proxy_pass http://notebook; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + + # Timeouts for long-running operations (podcasts, etc.) + proxy_read_timeout 300s; + proxy_connect_timeout 60s; + proxy_send_timeout 300s; + } + } +} +``` + +--- + +## Direct API Access (Optional) + +If external scripts or integrations need direct API access, route `/api/*` directly: + +```nginx +# Direct API access (for external integrations) +location /api/ { + proxy_pass http://open-notebook:5055/api/; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; +} + +# Frontend (handles all other traffic) +location / { + proxy_pass http://open-notebook:8502; + # ... same headers as above +} +``` + +**Note**: This is only needed for external API integrations. Browser traffic works fine with single-port setup. 
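+
+With that extra `/api/` block in place, an external script can call the API straight through the proxy. For example (the Bearer token is only needed if `OPEN_NOTEBOOK_PASSWORD` is set):
+
+```bash
+# External integration hitting the API through the reverse proxy
+curl -H "Authorization: Bearer your-password-here" \
+  https://notebook.example.com/api/config
+```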
+ +--- + +## Advanced Scenarios + +### Remote Server Access (LAN/VPS) + +Accessing Open Notebook from a different machine on your network: + +**Step 1: Get your server IP** +```bash +# On the server running Open Notebook: +hostname -I +# or +ifconfig | grep "inet " +# Note the IP (e.g., 192.168.1.100) +``` + +**Step 2: Configure API_URL** +```bash +# In docker-compose.yml or .env: +API_URL=http://192.168.1.100:5055 +``` + +**Step 3: Expose ports** +```yaml +services: + open-notebook: + image: lfnovo/open_notebook:v1-latest-single + environment: + - API_URL=http://192.168.1.100:5055 + ports: + - "8502:8502" + - "5055:5055" +``` + +**Step 4: Access from client machine** +```bash +# In browser on other machine: +http://192.168.1.100:8502 +``` + +**Troubleshooting**: +- Check firewall: `sudo ufw allow 8502 && sudo ufw allow 5055` +- Verify connectivity: `ping 192.168.1.100` from client machine +- Test port: `telnet 192.168.1.100 8502` from client machine + +--- + +### API on Separate Subdomain + +Host the API and frontend on different subdomains: + +**docker-compose.yml:** +```yaml +services: + open-notebook: + image: lfnovo/open_notebook:v1-latest-single + environment: + - API_URL=https://api.notebook.example.com + - OPENAI_API_KEY=${OPENAI_API_KEY} + # Don't expose ports (nginx handles routing) +``` + +**nginx.conf:** +```nginx +# Frontend server +server { + listen 443 ssl http2; + server_name notebook.example.com; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + + location / { + proxy_pass http://open-notebook:8502; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + } +} + +# API server (separate subdomain) +server { + listen 443 ssl http2; + server_name api.notebook.example.com; + + ssl_certificate /etc/nginx/ssl/fullchain.pem; + ssl_certificate_key /etc/nginx/ssl/privkey.pem; + + location / { + proxy_pass http://open-notebook:5055; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} +``` + +**Use case**: Separate DNS records, different rate limiting, or isolated API access control. 
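+
+If you prefer Caddy, the same two-subdomain split takes a few lines (a sketch, assuming the container name from the compose file above):
+
+```caddy
+notebook.example.com {
+    reverse_proxy open-notebook:8502
+}
+
+api.notebook.example.com {
+    reverse_proxy open-notebook:5055
+}
+```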
+ +--- + +### Multi-Container Deployment (Advanced) + +For complex deployments with separate frontend and API containers: + +**docker-compose.yml:** +```yaml +services: + frontend: + image: lfnovo/open_notebook_frontend:v1-latest + environment: + - API_URL=https://notebook.example.com + ports: + - "8502:8502" + + api: + image: lfnovo/open_notebook_api:v1-latest + environment: + - OPENAI_API_KEY=${OPENAI_API_KEY} + ports: + - "5055:5055" + depends_on: + - surrealdb + + surrealdb: + image: surrealdb/surrealdb:latest + command: start --log trace --user root --pass root file:/mydata/database.db + ports: + - "8000:8000" + volumes: + - ./surreal_data:/mydata +``` + +**nginx.conf:** +```nginx +http { + upstream frontend { + server frontend:8502; + } + + upstream api { + server api:5055; + } + + server { + listen 443 ssl http2; + server_name notebook.example.com; + + # API routes + location /api/ { + proxy_pass http://api/api/; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + # Frontend (catch-all) + location / { + proxy_pass http://frontend; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 'upgrade'; + proxy_cache_bypass $http_upgrade; + } + } +} +``` + +**Note**: Most users should use the single-container approach (`v1-latest-single`). Multi-container is only needed for custom scaling or isolation requirements. + +--- + +## SSL Certificates + +### Let's Encrypt with Certbot + +```bash +# Install certbot +sudo apt install certbot python3-certbot-nginx + +# Get certificate +sudo certbot --nginx -d notebook.example.com + +# Auto-renewal (usually configured automatically) +sudo certbot renew --dry-run +``` + +### Let's Encrypt with Caddy + +Caddy handles SSL automatically - no configuration needed! + +### Self-Signed (Development Only) + +```bash +openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout ssl/privkey.pem \ + -out ssl/fullchain.pem \ + -subj "/CN=localhost" +``` + +--- + +## Troubleshooting + +### "Unable to connect to server" + +1. **Check API_URL is set**: + ```bash + docker exec open-notebook env | grep API_URL + ``` + +2. **Verify reverse proxy reaches container**: + ```bash + curl -I http://localhost:8502 + ``` + +3. **Check browser console** (F12): + - Look for connection errors + - Check what URL it's trying to reach + +### Mixed Content Errors + +Frontend using HTTPS but trying to reach HTTP API: + +```bash +# Ensure API_URL uses https:// +API_URL=https://notebook.example.com # Not http:// +``` + +### WebSocket Issues + +Ensure your proxy supports WebSocket upgrades: + +```nginx +proxy_http_version 1.1; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection 'upgrade'; +``` + +### 502 Bad Gateway + +1. Check container is running: `docker ps` +2. Check container logs: `docker logs open-notebook` +3. 
Verify nginx can reach container (same network) + +### Timeout Errors + +Increase timeouts for long operations (podcast generation): + +```nginx +proxy_read_timeout 300s; +proxy_send_timeout 300s; +``` + +--- + +### How to Debug Configuration Issues + +**Step 1: Check browser console** (F12 → Console tab) +``` +Look for messages starting with 🔧 [Config] +These show the configuration detection process +You'll see which API URL is being used +``` + +**Example good output:** +``` +✅ [Config] Runtime API URL from server: https://your-domain.com +``` + +**Example bad output:** +``` +❌ [Config] Failed to fetch runtime config +⚠️ [Config] Using auto-detected URL: http://localhost:5055 +``` + +**Step 2: Test API directly** +```bash +# Should return JSON config +curl https://your-domain.com/api/config + +# Expected output: +{"openai_api_key_set":true,"anthropic_api_key_set":false,...} +``` + +**Step 3: Check Docker logs** +```bash +docker logs open-notebook + +# Look for: +# - Frontend startup: "▲ Next.js ready on http://0.0.0.0:8502" +# - API startup: "INFO: Uvicorn running on http://0.0.0.0:5055" +# - Connection errors or CORS issues +``` + +**Step 4: Verify environment variable** +```bash +docker exec open-notebook env | grep API_URL + +# Should show: +# API_URL=https://your-domain.com +``` + +--- + +### Frontend Adds `:5055` to URL (Versions ≤ 1.0.10) + +**Symptoms** (only in older versions): +- You set `API_URL=https://your-domain.com` +- Browser console shows: "Attempted URL: https://your-domain.com:5055/api/config" +- CORS errors with "Status code: (null)" + +**Root Cause:** +In versions ≤ 1.0.10, the frontend's config endpoint was at `/api/runtime-config`, which got intercepted by reverse proxies routing all `/api/*` requests to the backend. This prevented the frontend from reading the `API_URL` environment variable. + +**Solution:** +Upgrade to version 1.0.11 or later. The config endpoint has been moved to `/config` which avoids the `/api/*` routing conflict. + +**Verification:** +Check browser console (F12) - should see: `✅ [Config] Runtime API URL from server: https://your-domain.com` + +**If you can't upgrade**, explicitly configure the `/config` route: +```nginx +# Only needed for versions ≤ 1.0.10 +location = /config { + proxy_pass http://open-notebook:8502; + proxy_http_version 1.1; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; +} +``` + +--- + +### CORS Errors + +**Symptoms:** +``` +Access-Control-Allow-Origin header is missing +Cross-Origin Request Blocked +Response to preflight request doesn't pass access control check +``` + +**Possible Causes:** + +1. **Missing proxy headers**: + ```nginx + # Make sure these are set: + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Host $host; + ``` + +2. **API_URL protocol mismatch**: + ```bash + # Frontend is HTTPS, but API_URL is HTTP: + API_URL=http://notebook.example.com # ❌ Wrong + API_URL=https://notebook.example.com # ✅ Correct + ``` + +3. **Reverse proxy not forwarding `/api/*` correctly**: + ```nginx + # Make sure this works: + location /api/ { + proxy_pass http://open-notebook:5055/api/; # Note the trailing slash! 
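+        # (without the trailing slash, /api/config would be forwarded as /apiconfig)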
+ } + ``` + +--- + +### Missing Authorization Header + +**Symptoms:** +```json +{"detail": "Missing authorization header"} +``` + +This happens when: +- You have set `OPEN_NOTEBOOK_PASSWORD` for authentication +- You're trying to access `/api/config` directly without logging in first + +**Solution:** +This is **expected behavior**! The frontend handles authentication automatically. Just: +1. Access the frontend URL (not `/api/` directly) +2. Log in through the UI +3. The frontend will handle authorization headers for all API calls + +**For API integrations:** Include the password in the Authorization header: +```bash +curl -H "Authorization: Bearer your-password-here" \ + https://your-domain.com/api/config +``` + +--- + +### SSL/TLS Certificate Errors + +**Symptoms:** +- Browser shows "Your connection is not private" +- Certificate warnings +- Mixed content errors + +**Solutions:** + +1. **Use Let's Encrypt** (recommended): + ```bash + sudo certbot --nginx -d notebook.example.com + ``` + +2. **Check certificate paths** in nginx: + ```nginx + ssl_certificate /etc/nginx/ssl/fullchain.pem; # Full chain + ssl_certificate_key /etc/nginx/ssl/privkey.pem; # Private key + ``` + +3. **Verify certificate is valid**: + ```bash + openssl x509 -in /etc/nginx/ssl/fullchain.pem -text -noout + ``` + +4. **For development**, use self-signed (not for production): + ```bash + openssl req -x509 -nodes -days 365 -newkey rsa:2048 \ + -keyout ssl/privkey.pem -out ssl/fullchain.pem \ + -subj "/CN=localhost" + ``` + +--- + +## Best Practices + +1. **Always use HTTPS** in production +2. **Set API_URL explicitly** when using reverse proxies to avoid auto-detection issues +3. **Bind to localhost** (`127.0.0.1:8502`) and let proxy handle public access for security +4. **Enable security headers** (HSTS, X-Frame-Options, X-Content-Type-Options, X-XSS-Protection) +5. **Set up certificate renewal** for Let's Encrypt (usually automatic with certbot) +6. **Keep ports 5055 and 8502 accessible** from your reverse proxy container (use Docker networks) +7. **Use environment files** (`.env` or `docker.env`) to manage configuration securely +8. **Test your configuration** before going live: + - Check browser console for config messages + - Test API: `curl https://your-domain.com/api/config` + - Verify authentication works + - Check long-running operations (podcast generation) +9. **Monitor logs** regularly: `docker logs open-notebook` +10. **Don't include `/api` in API_URL** - the system adds this automatically + +--- + +## Legacy Configurations (Pre-v1.1) + +If you're running Open Notebook **version 1.0.x or earlier**, you may need to use the legacy two-port configuration where you explicitly route `/api/*` to port 5055. + +**Check your version:** +```bash +docker exec open-notebook cat /app/package.json | grep version +``` + +**If version < 1.1.0**, you may need: +- Explicit `/api/*` routing to port 5055 in reverse proxy +- Explicit `/config` endpoint routing for versions ≤ 1.0.10 +- See the "Frontend Adds `:5055` to URL" troubleshooting section above + +**Recommendation:** Upgrade to v1.1+ for simplified configuration and better performance. 
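+
+---
+
+## Smoke-Testing Your Deployment
+
+Whichever version you run, you can verify the two critical paths (the frontend catch-all and `/api/*` routing) from outside the server. A minimal sketch using Python's `requests` package; the domain is a placeholder to replace with your own:
+
+```python
+import requests
+
+DOMAIN = "https://notebook.example.com"
+
+# Frontend catch-all should return the UI
+frontend = requests.get(DOMAIN, timeout=10)
+print("frontend:", frontend.status_code)  # expect 200
+
+# /api/* should reach the backend; /api/config returns JSON
+config = requests.get(f"{DOMAIN}/api/config", timeout=10)
+print("api:", config.status_code, config.headers.get("content-type"))
+# With OPEN_NOTEBOOK_PASSWORD set, expect 401 {"detail": "Missing authorization header"}
+# Without a password, expect 200 and a JSON body like {"openai_api_key_set": true, ...}
+```
+
+If the second request returns HTML instead of JSON, your proxy is not forwarding `/api/*` to the container.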
+ +--- + +## Related + +- **[Security Configuration](security.md)** - Password protection and hardening +- **[Advanced Configuration](advanced.md)** - Ports, timeouts, and SSL settings +- **[Troubleshooting](../6-TROUBLESHOOTING/connection-issues.md)** - Connection problems +- **[Docker Deployment](../1-INSTALLATION/docker-compose.md)** - Complete deployment guide diff --git a/docs/5-CONFIGURATION/security.md b/docs/5-CONFIGURATION/security.md new file mode 100644 index 00000000..ddf389d8 --- /dev/null +++ b/docs/5-CONFIGURATION/security.md @@ -0,0 +1,333 @@ +# Security Configuration + +Protect your Open Notebook deployment with password authentication and production hardening. + +--- + +## When to Use Password Protection + +### Use it for: +- Public cloud deployments (PikaPods, Railway, DigitalOcean) +- Shared network environments +- Any deployment accessible beyond localhost + +### You can skip it for: +- Local development on your machine +- Private, isolated networks +- Single-user local setups + +--- + +## Quick Setup + +### Docker Deployment + +```yaml +# docker-compose.yml +services: + open_notebook: + image: lfnovo/open_notebook:v1-latest-single + environment: + - OPENAI_API_KEY=sk-... + - OPEN_NOTEBOOK_PASSWORD=your_secure_password + # ... rest of config +``` + +Or using environment file: + +```bash +# docker.env +OPENAI_API_KEY=sk-... +OPEN_NOTEBOOK_PASSWORD=your_secure_password +``` + +### Development Setup + +```bash +# .env +OPEN_NOTEBOOK_PASSWORD=your_secure_password +``` + +--- + +## Password Requirements + +### Good Passwords + +```bash +# Strong: 20+ characters, mixed case, numbers, symbols +OPEN_NOTEBOOK_PASSWORD=MySecure2024!Research#Tool +OPEN_NOTEBOOK_PASSWORD=Notebook$Dev$2024$Strong! + +# Generated (recommended) +OPEN_NOTEBOOK_PASSWORD=$(openssl rand -base64 24) +``` + +### Bad Passwords + +```bash +# DON'T use these +OPEN_NOTEBOOK_PASSWORD=password123 +OPEN_NOTEBOOK_PASSWORD=opennotebook +OPEN_NOTEBOOK_PASSWORD=admin +``` + +--- + +## How It Works + +### Frontend Protection + +1. Login form appears on first visit +2. Password stored in browser session +3. Session persists until browser closes +4. 
Clear browser data to log out + +### API Protection + +All API endpoints require authentication: + +```bash +# Authenticated request +curl -H "Authorization: Bearer your_password" \ + http://localhost:5055/api/notebooks + +# Unauthenticated (will fail) +curl http://localhost:5055/api/notebooks +# Returns: {"detail": "Missing authorization header"} +``` + +### Unprotected Endpoints + +These work without authentication: + +- `/health` - System health check +- `/docs` - API documentation +- `/openapi.json` - OpenAPI spec + +--- + +## API Authentication Examples + +### curl + +```bash +# List notebooks +curl -H "Authorization: Bearer your_password" \ + http://localhost:5055/api/notebooks + +# Create notebook +curl -X POST \ + -H "Authorization: Bearer your_password" \ + -H "Content-Type: application/json" \ + -d '{"name": "My Notebook", "description": "Research notes"}' \ + http://localhost:5055/api/notebooks + +# Upload file +curl -X POST \ + -H "Authorization: Bearer your_password" \ + -F "file=@document.pdf" \ + http://localhost:5055/api/sources/upload +``` + +### Python + +```python +import requests + +class OpenNotebookClient: + def __init__(self, base_url: str, password: str): + self.base_url = base_url + self.headers = {"Authorization": f"Bearer {password}"} + + def get_notebooks(self): + response = requests.get( + f"{self.base_url}/api/notebooks", + headers=self.headers + ) + return response.json() + + def create_notebook(self, name: str, description: str = None): + response = requests.post( + f"{self.base_url}/api/notebooks", + headers=self.headers, + json={"name": name, "description": description} + ) + return response.json() + +# Usage +client = OpenNotebookClient("http://localhost:5055", "your_password") +notebooks = client.get_notebooks() +``` + +### JavaScript/TypeScript + +```javascript +const API_URL = 'http://localhost:5055'; +const PASSWORD = 'your_password'; + +async function getNotebooks() { + const response = await fetch(`${API_URL}/api/notebooks`, { + headers: { + 'Authorization': `Bearer ${PASSWORD}` + } + }); + return response.json(); +} +``` + +--- + +## Production Hardening + +### Docker Security + +```yaml +services: + open_notebook: + image: lfnovo/open_notebook:v1-latest-single + ports: + - "127.0.0.1:8502:8502" # Bind to localhost only + environment: + - OPEN_NOTEBOOK_PASSWORD=your_secure_password + security_opt: + - no-new-privileges:true + deploy: + resources: + limits: + memory: 2G + cpus: "1.0" + restart: always +``` + +### Firewall Configuration + +```bash +# UFW (Ubuntu) +sudo ufw allow ssh +sudo ufw allow 80/tcp +sudo ufw allow 443/tcp +sudo ufw deny 8502/tcp # Block direct access +sudo ufw deny 5055/tcp # Block direct API access +sudo ufw enable + +# iptables +iptables -A INPUT -p tcp --dport 22 -j ACCEPT +iptables -A INPUT -p tcp --dport 80 -j ACCEPT +iptables -A INPUT -p tcp --dport 443 -j ACCEPT +iptables -A INPUT -p tcp --dport 8502 -j DROP +iptables -A INPUT -p tcp --dport 5055 -j DROP +``` + +### Reverse Proxy with SSL + +See [Reverse Proxy Configuration](reverse-proxy.md) for complete nginx/Caddy/Traefik setup with HTTPS. + +--- + +## Security Limitations + +Open Notebook's password protection provides **basic access control**, not enterprise-grade security: + +| Feature | Status | +|---------|--------| +| Password transmission | Plain text (use HTTPS!) 
| +| Password storage | In memory | +| User management | Single password for all | +| Session timeout | None (until browser close) | +| Rate limiting | None | +| Audit logging | None | + +### Risk Mitigation + +1. **Always use HTTPS** - Encrypt traffic with TLS +2. **Strong passwords** - 20+ characters, complex +3. **Network security** - Firewall, VPN for sensitive deployments +4. **Regular updates** - Keep containers and dependencies updated +5. **Monitoring** - Check logs for suspicious activity +6. **Backups** - Regular backups of data + +--- + +## Enterprise Considerations + +For deployments requiring advanced security: + +| Need | Solution | +|------|----------| +| SSO/OAuth | Implement OAuth2/SAML proxy | +| Role-based access | Custom middleware | +| Audit logging | Log aggregation service | +| Rate limiting | API gateway or nginx | +| Data encryption | Encrypt volumes at rest | +| Network segmentation | Docker networks, VPC | + +--- + +## Troubleshooting + +### Password Not Working + +```bash +# Check env var is set +docker exec open-notebook env | grep OPEN_NOTEBOOK_PASSWORD + +# Check logs +docker logs open-notebook | grep -i auth + +# Test API directly +curl -H "Authorization: Bearer your_password" \ + http://localhost:5055/health +``` + +### 401 Unauthorized Errors + +```bash +# Check header format +curl -v -H "Authorization: Bearer your_password" \ + http://localhost:5055/api/notebooks + +# Verify password matches +echo "Password length: $(echo -n $OPEN_NOTEBOOK_PASSWORD | wc -c)" +``` + +### Cannot Access After Setting Password + +1. Clear browser cache and cookies +2. Try incognito/private mode +3. Check browser console for errors +4. Verify password is correct in environment + +### Security Testing + +```bash +# Without password (should fail) +curl http://localhost:5055/api/notebooks +# Expected: {"detail": "Missing authorization header"} + +# With correct password (should succeed) +curl -H "Authorization: Bearer your_password" \ + http://localhost:5055/api/notebooks + +# Health check (should work without password) +curl http://localhost:5055/health +``` + +--- + +## Reporting Security Issues + +If you discover security vulnerabilities: + +1. **Do NOT open public issues** +2. Contact maintainers directly +3. Provide detailed information +4. Allow time for fixes before disclosure + +--- + +## Related + +- **[Reverse Proxy](reverse-proxy.md)** - HTTPS and SSL setup +- **[Advanced Configuration](advanced.md)** - Ports, timeouts, and SSL settings +- **[Environment Reference](environment-reference.md)** - All configuration options diff --git a/docs/6-TROUBLESHOOTING/ai-chat-issues.md b/docs/6-TROUBLESHOOTING/ai-chat-issues.md new file mode 100644 index 00000000..bc546a70 --- /dev/null +++ b/docs/6-TROUBLESHOOTING/ai-chat-issues.md @@ -0,0 +1,400 @@ +# AI & Chat Issues - Model Configuration & Quality + +Problems with AI models, chat, and response quality. + +--- + +## "Models not available" or "Models not showing" + +**Symptom:** Settings → Models shows empty, or "No models configured" + +**Cause:** Missing or invalid API key + +**Solutions:** + +### Solution 1: Add API Key +```bash +# Check .env has your API key: +cat .env | grep -i "OPENAI\|ANTHROPIC\|GOOGLE" + +# Should see something like: +# OPENAI_API_KEY=sk-proj-... 
+ +# If missing, add it: +OPENAI_API_KEY=sk-proj-your-key-here + +# Save and restart: +docker compose restart api + +# Wait 10 seconds, then refresh browser +``` + +### Solution 2: Check Key is Valid +```bash +# Test API key directly: +curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer sk-proj-..." + +# Should return list of models +# If error: key is invalid +``` + +### Solution 3: Switch Provider +```bash +# Try a different provider: +# Remove: OPENAI_API_KEY +# Add: ANTHROPIC_API_KEY=sk-ant-... + +# Restart and check Settings → Models +``` + +--- + +## "Invalid API key" or "Unauthorized" + +**Symptom:** Error when trying to chat: "Invalid API key" + +**Cause:** API key wrong, expired, or revoked + +**Solutions:** + +### Step 1: Verify Key Format +```bash +# OpenAI: Should start with sk-proj- +# Anthropic: Should start with sk-ant- +# Google: Should be AIzaSy... + +# Check in .env: +cat .env | grep OPENAI_API_KEY +``` + +### Step 2: Get Fresh Key +```bash +# Go to provider's dashboard: +# - OpenAI: https://platform.openai.com/api-keys +# - Anthropic: https://console.anthropic.com/ +# - Google: https://aistudio.google.com/app/apikey + +# Generate new key +# Copy exactly (no extra spaces) +``` + +### Step 3: Update .env +```bash +# Edit .env: +OPENAI_API_KEY=sk-proj-new-key-here +# No quotes needed, no spaces + +# Save and restart: +docker compose restart api +``` + +### Step 4: Verify in UI +``` +1. Open Open Notebook +2. Go to Settings → Models +3. Select your provider +4. Should show available models +``` + +--- + +## Chat Returns Generic/Bad Responses + +**Symptom:** AI responses are shallow, generic, or wrong + +**Cause:** Bad context, vague question, or wrong model + +**Solutions:** + +### Solution 1: Check Context +``` +1. In Chat, click "Select Sources" +2. Verify sources you want are CHECKED +3. Set them to "Full Content" (not "Summary Only") +4. Click "Save" +5. Try chat again +``` + +### Solution 2: Ask Better Question +``` +Bad: "What do you think?" +Good: "Based on the paper's methodology, what are 3 limitations?" + +Bad: "Tell me about X" +Good: "Summarize X in 3 bullet points with page citations" +``` + +### Solution 3: Use Stronger Model +``` +OpenAI: + Current: gpt-4o-mini → Switch to: gpt-4o + +Anthropic: + Current: claude-3-5-haiku → Switch to: claude-3-5-sonnet + +To change: +1. Settings → Models +2. Select model +3. Try chat again +``` + +### Solution 4: Add More Sources +``` +If: "Response seems incomplete" +Try: Add more relevant sources to provide context +``` + +--- + +## Chat is Very Slow + +**Symptom:** Chat responses take minutes + +**Cause:** Large context, slow model, or overloaded API + +**Solutions:** + +### Solution 1: Use Faster Model +```bash +Fastest: Groq (any model) +Fast: OpenAI gpt-4o-mini +Medium: Anthropic claude-3-5-haiku +Slow: Anthropic claude-3-5-sonnet + +Switch in: Settings → Models +``` + +### Solution 2: Reduce Context +``` +1. Chat → Select Sources +2. Uncheck sources you don't need +3. Or switch to "Summary Only" for background sources +4. 
Save and try again +``` + +### Solution 3: Increase Timeout +```bash +# In .env: +API_CLIENT_TIMEOUT=600 # 10 minutes + +# Restart: +docker compose restart +``` + +### Solution 4: Check System Load +```bash +# See if API is overloaded: +docker stats + +# If CPU >80% or memory >90%: +# Reduce: SURREAL_COMMANDS_MAX_TASKS=2 +# Restart: docker compose restart +``` + +--- + +## Chat Doesn't Remember History + +**Symptom:** Each message treated as separate, no context between questions + +**Cause:** Chat history not saved or new chat started + +**Solution:** + +``` +1. Make sure you're in same Chat (not new Chat) +2. Check Chat title at top +3. If it's blank, start new Chat with a title +4. Each named Chat keeps its history +5. If you start new Chat, history is separate +``` + +--- + +## "Rate limit exceeded" + +**Symptom:** Error: "Rate limit exceeded" or "Too many requests" + +**Cause:** Hit provider's API rate limit + +**Solutions:** + +### For Cloud Providers (OpenAI, Anthropic, etc.) + +**Immediate:** +- Wait 1-2 minutes +- Try again + +**Short term:** +- Use cheaper/smaller model +- Reduce concurrent operations +- Space out requests + +**Long term:** +- Upgrade your account +- Switch to different provider +- Use Ollama (local, no limits) + +### Check Account Status +``` +OpenAI: https://platform.openai.com/account/usage/overview +Anthropic: https://console.anthropic.com/account/billing/overview +Google: Google Cloud Console +``` + +### For Ollama (Local) +- No rate limits +- Use `ollama pull mistral` for best model +- Restart if hitting resource limits + +--- + +## "Context length exceeded" or "Token limit" + +**Symptom:** Error about too many tokens + +**Cause:** Sources too large for model + +**Solutions:** + +### Solution 1: Use Model with Longer Context +``` +Current: GPT-4o (128K tokens) → Switch to: Claude (200K tokens) +Current: Claude Haiku (200K) → Switch to: Gemini (1M tokens) + +To change: Settings → Models +``` + +### Solution 2: Reduce Context +``` +1. Select fewer sources +2. Or use "Summary Only" instead of "Full Content" +3. Or split large documents into smaller pieces +``` + +### Solution 3: For Ollama (Local) +```bash +# Use smaller model: +ollama pull phi # Very small +# Instead of: ollama pull neural-chat # Large +``` + +--- + +## "API call failed" or Timeout + +**Symptom:** Generic API error, response times out + +**Cause:** Provider API down, network issue, or slow service + +**Solutions:** + +### Check Provider Status +``` +OpenAI: https://status.openai.com/ +Anthropic: Check website +Google: Google Cloud Status +Groq: Check website +``` + +### Retry Operation +``` +1. Wait 30 seconds +2. Try again +``` + +### Use Different Model/Provider +``` +1. Settings → Models +2. Try different provider +3. If OpenAI down, use Anthropic +``` + +### Check Network +```bash +# Verify internet working: +ping google.com + +# Test API endpoint directly: +curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer YOUR_KEY" +``` + +--- + +## Responses Include Hallucinations + +**Symptom:** AI makes up facts that aren't in sources + +**Cause:** Sources not in context, or model guessing + +**Solutions:** + +### Solution 1: Verify Context +``` +1. Click citation in response +2. Check source actually says that +3. If not, sources weren't in context +4. 
Add source to context and try again +``` + +### Solution 2: Request Citations +``` +Ask: "Answer this with citations to specific pages" + +The AI will be more careful if asked for citations +``` + +### Solution 3: Use Stronger Model +``` +Weaker models hallucinate more +Switch to: GPT-4o or Claude Sonnet +``` + +--- + +## High API Costs + +**Symptom:** API bills are higher than expected + +**Cause:** Using expensive model, large context, many requests + +**Solutions:** + +### Use Cheaper Model +``` +Expensive: gpt-4o +Cheaper: gpt-4o-mini (10x cheaper) + +Expensive: Claude Sonnet +Cheaper: Claude Haiku (5x cheaper) + +Groq: Ultra cheap but fewer models +``` + +### Reduce Context +``` +In Chat: +1. Select fewer sources +2. Use "Summary Only" for background +3. Ask more specific questions +``` + +### Switch to Ollama (Free) +```bash +# Install Ollama +# Run: ollama serve +# Download: ollama pull mistral +# Set: OLLAMA_API_BASE=http://localhost:11434 +# Cost: Free! +``` + +--- + +## Still Having Chat Issues? + +- Try [Quick Fixes](quick-fixes.md) +- Try [Chat Effectively Guide](../3-USER-GUIDE/chat-effectively.md) +- Check logs: `docker compose logs api | grep -i "error"` +- Ask for help: [Troubleshooting Index](index.md#getting-help) diff --git a/docs/6-TROUBLESHOOTING/connection-issues.md b/docs/6-TROUBLESHOOTING/connection-issues.md new file mode 100644 index 00000000..b8c47f1e --- /dev/null +++ b/docs/6-TROUBLESHOOTING/connection-issues.md @@ -0,0 +1,447 @@ +# Connection Issues - Network & API Problems + +Frontend can't reach API or services won't communicate. + +--- + +## "Cannot connect to server" (Most Common) + +**What it looks like:** +- Browser shows error page +- "Unable to reach API" +- "Cannot connect to server" +- UI loads but can't create notebooks + +**Diagnosis:** + +```bash +# Check if API is running +docker ps | grep api +# Should see "api" service running + +# Check if API is responding +curl http://localhost:5055/health +# Should show: {"status":"ok"} + +# Check if frontend is running +docker ps | grep frontend +# Should see "frontend" or React service running +``` + +**Solutions:** + +### Solution 1: API Not Running +```bash +# Start API +docker compose up api -d + +# Wait 5 seconds +sleep 5 + +# Verify it's running +docker compose logs api | tail -20 +``` + +### Solution 2: Port Not Exposed +```bash +# Check docker-compose.yml has port mapping: +# api: +# ports: +# - "5055:5055" + +# If missing, add it and restart: +docker compose down +docker compose up -d +``` + +### Solution 3: API_URL Mismatch +```bash +# In .env, check API_URL: +cat .env | grep API_URL + +# Should match your frontend URL: +# Frontend: http://localhost:8502 +# API_URL: http://localhost:5055 + +# If wrong, fix it: +# API_URL=http://localhost:5055 +# Then restart: +docker compose restart frontend +``` + +### Solution 4: Firewall Blocking +```bash +# Verify port 5055 is accessible +netstat -tlnp | grep 5055 +# Should show port listening + +# If on different machine, try: +# Instead of localhost, use your IP: +API_URL=http://192.168.1.100:5055 +``` + +### Solution 5: Services Not Started +```bash +# Restart everything +docker compose restart + +# Wait 10 seconds +sleep 10 + +# Check all services +docker compose ps +# All should show "Up" +``` + +--- + +## Connection Refused + +**What it looks like:** +``` +Connection refused +ECONNREFUSED +Error: socket hang up +``` + +**Diagnosis:** +- API port (5055) not open +- API crashed +- Wrong IP/hostname + +**Solution:** + +```bash +# Step 1: Check if API is 
running
+docker ps | grep api
+
+# Step 2: Check if port is listening
+lsof -i :5055
+# or
+netstat -tlnp | grep 5055
+
+# Step 3: Check API logs
+docker compose logs api | tail -30
+# Look for errors
+
+# Step 4: Restart API
+docker compose restart api
+docker compose logs api | grep -i "error"
+```
+
+---
+
+## Timeout / Slow Connection
+
+**What it looks like:**
+- Page loads slowly
+- Request times out
+- "Gateway timeout" error
+
+**Causes:**
+- API is overloaded
+- Network is slow
+- Reverse proxy issue
+
+**Solutions:**
+
+### Check API Performance
+```bash
+# See CPU/memory usage
+docker stats
+
+# Check logs for slow operations
+docker compose logs api | grep "slow\|timeout"
+```
+
+### Reduce Load
+```bash
+# In .env:
+SURREAL_COMMANDS_MAX_TASKS=2
+API_CLIENT_TIMEOUT=600
+
+# Restart
+docker compose restart
+```
+
+### Check Network
+```bash
+# Test latency
+ping localhost
+
+# Test API directly
+time curl http://localhost:5055/health
+
+# Should be < 100ms
+```
+
+---
+
+## 502 Bad Gateway (Reverse Proxy)
+
+**What it looks like:**
+```
+502 Bad Gateway
+The server is temporarily unable to service the request
+```
+
+**Cause:** Reverse proxy can't reach API
+
+**Solutions:**
+
+### Check Backend is Running
+```bash
+# From the reverse proxy server
+curl http://localhost:5055/health
+
+# Should work
+```
+
+### Check Reverse Proxy Config
+```nginx
+# Nginx example (correct):
+location /api {
+    proxy_pass http://localhost:5055/api;
+    proxy_http_version 1.1;
+}
+
+# Common mistake (wrong):
+location /api {
+    proxy_pass http://localhost:5055/;  # Trailing slash strips the /api prefix
+}
+```
+
+### Set API_URL for HTTPS
+```bash
+# In .env:
+API_URL=https://yourdomain.com
+
+# Restart
+docker compose restart
+```
+
+---
+
+## Intermittent Disconnects
+
+**What it looks like:**
+- Works sometimes, fails other times
+- Sporadic "cannot connect" errors
+- Works then stops working
+
+**Cause:** Transient network issue or database conflicts
+
+**Solutions:**
+
+### Enable Retry Logic
+```bash
+# In .env:
+SURREAL_COMMANDS_RETRY_ENABLED=true
+SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=5
+SURREAL_COMMANDS_RETRY_WAIT_STRATEGY=exponential_jitter
+
+# Restart
+docker compose restart
+```
+
+### Reduce Concurrency
+```bash
+# In .env:
+SURREAL_COMMANDS_MAX_TASKS=2
+
+# Restart
+docker compose restart
+```
+
+### Check Network Stability
+```bash
+# Monitor connection
+ping google.com
+
+# Long-running test
+ping -c 100 google.com | grep "packet loss"
+# Should be 0% loss
+```
+
+---
+
+## Different Machine / Remote Access
+
+**You want to access Open Notebook from another computer**
+
+**Solution:**
+
+### Step 1: Get Your Machine IP
+```bash
+# On the server running Open Notebook:
+ifconfig | grep "inet "
+# or
+hostname -I
+# Note the IP (e.g., 192.168.1.100)
+```
+
+### Step 2: Update API_URL
+```bash
+# In .env:
+API_URL=http://192.168.1.100:5055
+
+# Restart
+docker compose restart
+```
+
+### Step 3: Access from Other Machine
+```bash
+# In browser on other machine:
+http://192.168.1.100:8502
+# (or your server IP)
+```
+
+### Step 4: Verify Port is Exposed
+```bash
+# On server:
+docker compose ps
+
+# Should show port mapping:
+# 0.0.0.0:8502->8502/tcp
+# 0.0.0.0:5055->5055/tcp
+```
+
+### If It Still Doesn't Work
+```bash
+# Check firewall on server
+sudo ufw status
+# May need to open ports:
+sudo ufw allow 8502
+sudo ufw allow 5055
+
+# Check on different machine:
+telnet 192.168.1.100 5055
+# Should connect
+```
+
+---
+
+## CORS Error (Browser Console)
+
+**What it looks like:**
+```
+Cross-Origin Request Blocked
+Access-Control-Allow-Origin +``` + +**In browser console (F12):** +``` +CORS policy: Response to preflight request doesn't pass access control check +``` + +**Cause:** Frontend and API URLs don't match + +**Solution:** + +```bash +# Check browser console error for what URLs are being used +# The error shows: +# - Requesting from: http://localhost:8502 +# - Trying to reach: http://localhost:5055 + +# Make sure API_URL matches: +API_URL=http://localhost:5055 + +# And protocol matches (http/https) +# Restart +docker compose restart frontend +``` + +--- + +## Testing Connection + +**Full diagnostic:** + +```bash +# 1. Services running? +docker compose ps +# All should show "Up" + +# 2. Ports listening? +netstat -tlnp | grep -E "8502|5055|8000" + +# 3. API responding? +curl http://localhost:5055/health + +# 4. Frontend accessible? +curl http://localhost:8502 | head + +# 5. Network OK? +ping google.com + +# 6. No firewall? +sudo ufw status | grep -E "5055|8502|8000" +``` + +--- + +## Checklist for Remote Access + +- [ ] Server IP noted (e.g., 192.168.1.100) +- [ ] Ports 8502, 5055, 8000 exposed in docker-compose +- [ ] API_URL set to server IP +- [ ] Firewall allows ports 8502, 5055, 8000 +- [ ] Can reach server from client machine (ping IP) +- [ ] All services running (docker compose ps) +- [ ] Can curl API from client (curl http://IP:5055/health) + +--- + +## SSL Certificate Errors + +**What it looks like:** +``` +[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed +Connection error when using HTTPS endpoints +Works with HTTP but fails with HTTPS +``` + +**Cause:** Self-signed certificates not trusted by Python's SSL verification + +**Solutions:** + +### Solution 1: Use Custom CA Bundle (Recommended) +```bash +# In .env: +ESPERANTO_SSL_CA_BUNDLE=/path/to/your/ca-bundle.pem + +# For Docker, mount the certificate: +# In docker-compose.yml: +volumes: + - /path/to/your/ca-bundle.pem:/certs/ca-bundle.pem:ro +environment: + - ESPERANTO_SSL_CA_BUNDLE=/certs/ca-bundle.pem +``` + +### Solution 2: Disable SSL Verification (Development Only) +```bash +# WARNING: Only use in trusted development environments +# In .env: +ESPERANTO_SSL_VERIFY=false +``` + +### Solution 3: Use HTTP Instead +If services are on a trusted local network, HTTP is acceptable: +```bash +# Change endpoint from https:// to http:// +OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 +``` + +> **Security Note:** Disabling SSL verification exposes you to man-in-the-middle attacks. Always prefer custom CA bundle or HTTP on trusted networks. + +--- + +## Still Having Issues? + +- Check [Quick Fixes](quick-fixes.md) +- Check [FAQ](faq.md) +- Check logs: `docker compose logs` +- Try restart: `docker compose restart` +- Check firewall: `sudo ufw status` +- Ask for help on [Discord](https://discord.gg/37XJPXfz2w) diff --git a/docs/6-TROUBLESHOOTING/faq.md b/docs/6-TROUBLESHOOTING/faq.md new file mode 100644 index 00000000..ff798ccc --- /dev/null +++ b/docs/6-TROUBLESHOOTING/faq.md @@ -0,0 +1,258 @@ +# Frequently Asked Questions + +Common questions about Open Notebook usage, configuration, and best practices. + +--- + +## General Usage + +### What is Open Notebook? + +Open Notebook is an open-source, privacy-focused alternative to Google's Notebook LM. It allows you to: +- Create and manage research notebooks +- Chat with your documents using AI +- Generate podcasts from your content +- Search across all your sources with semantic search +- Transform and analyze your content + +### How is it different from Google Notebook LM? 
+ +**Privacy**: Your data stays local by default. Only your chosen AI providers receive queries. +**Flexibility**: Support for 15+ AI providers (OpenAI, Anthropic, Google, local models, etc.) +**Customization**: Open source, so you can modify and extend functionality +**Control**: You control your data, models, and processing + +### Can I use Open Notebook offline? + +**Partially**: The application runs locally, but requires internet for: +- AI model API calls (unless using local models like Ollama) +- Web content scraping + +**Fully offline**: Possible with local models (Ollama) for basic functionality. + +### What file types are supported? + +**Documents**: PDF, DOCX, TXT, Markdown +**Web Content**: URLs, YouTube videos +**Media**: MP3, WAV, M4A (audio), MP4, AVI, MOV (video) +**Other**: Direct text input, CSV, code files + +### How much does it cost? + +**Software**: Free (open source) +**AI API costs**: Pay-per-use to providers: +- OpenAI: ~$0.50-5 per 1M tokens +- Anthropic: ~$3-75 per 1M tokens +- Google: Often free tier available +- Local models: Free after initial setup + +**Typical monthly costs**: $5-50 for moderate usage. + +--- + +## AI Models and Providers + +### Which AI provider should I choose? + +**For beginners**: OpenAI (reliable, well-documented) +**For privacy**: Local models (Ollama) or European providers (Mistral) +**For cost optimization**: Groq, Google (free tier), or OpenRouter +**For long context**: Anthropic (200K tokens) or Google Gemini (1M tokens) + +### Can I use multiple providers? + +**Yes**: Configure different providers for different tasks: +- OpenAI for chat +- Google for embeddings +- ElevenLabs for text-to-speech +- Anthropic for complex reasoning + +### What are the best model combinations? + +**Budget-friendly**: +- Language: `gpt-4o-mini` (OpenAI) or `deepseek-chat` +- Embedding: `text-embedding-3-small` (OpenAI) + +**High-quality**: +- Language: `claude-3-5-sonnet` (Anthropic) or `gpt-4o` (OpenAI) +- Embedding: `text-embedding-3-large` (OpenAI) + +**Privacy-focused**: +- Language: Local Ollama models (mistral, llama3) +- Embedding: Local embedding models + +### How do I optimize AI costs? + +**Model selection**: +- Use smaller models for simple tasks (gpt-4o-mini, claude-3-5-haiku) +- Use larger models only for complex reasoning +- Leverage free tiers when available + +**Usage optimization**: +- Use "Summary Only" context for background sources +- Ask more specific questions +- Use local models (Ollama) for frequent tasks + +--- + +## Data Management + +### Where is my data stored? + +**Local storage**: By default, all data is stored locally: +- Database: SurrealDB files in `surreal_data/` +- Uploads: Files in `data/uploads/` +- Podcasts: Generated audio in `data/podcasts/` +- No external data transmission (except to chosen AI providers) + +### How do I backup my data? + +```bash +# Create backup +tar -czf backup-$(date +%Y%m%d).tar.gz data/ surreal_data/ + +# Restore backup +tar -xzf backup-20240101.tar.gz +``` + +### Can I sync data between devices? + +**Currently**: No built-in sync functionality. +**Workarounds**: +- Use shared network storage for data directories +- Manual backup/restore between devices + +### What happens if I delete a notebook? + +**Soft deletion**: Notebooks are marked as archived, not permanently deleted. +**Recovery**: Archived notebooks can be restored from the database. + +--- + +## Best Practices + +### How should I organize my notebooks? 
+ +- **By topic**: Separate notebooks for different research areas +- **By project**: One notebook per project or course +- **By time period**: Monthly or quarterly notebooks + +**Recommended size**: 20-100 sources per notebook for best performance. + +### How do I get the best search results? + +- Use descriptive queries ("data analysis methods" not just "data") +- Combine multiple related terms +- Use natural language (ask questions as you would to a human) +- Try both text search (keywords) and vector search (concepts) + +### How can I improve chat responses? + +- Provide context: Reference specific sources or topics +- Be specific: Ask detailed questions rather than general ones +- Request citations: "Answer with page citations" +- Use follow-up questions: Build on previous responses + +### What are the security best practices? + +- Never share API keys publicly +- Use `OPEN_NOTEBOOK_PASSWORD` for public deployments +- Use HTTPS for production (via reverse proxy) +- Keep Docker images updated +- Encrypt backups if they contain sensitive data + +--- + +## Technical Questions + +### Can I use Open Notebook programmatically? + +**Yes**: Open Notebook provides a REST API: +- Full API documentation at `http://localhost:5055/docs` +- Support for all UI functionality +- Authentication via password header + +### Can I run Open Notebook in production? + +**Yes**: Designed for production use with: +- Docker deployment +- Security features (password protection) +- Monitoring and logging +- Reverse proxy support (nginx, Caddy, Traefik) + +### What are the system requirements? + +**Minimum**: +- 4GB RAM +- 2 CPU cores +- 10GB disk space + +**Recommended**: +- 8GB+ RAM +- 4+ CPU cores +- SSD storage +- For local models: 16GB+ RAM, GPU recommended + +--- + +## Timeout and Performance + +### Why do I get timeout errors? + +**Common causes**: +- Large context (too many sources) +- Slow AI provider +- Local models on CPU (slow) +- First request (model loading) + +**Solutions**: +```bash +# In .env: +API_CLIENT_TIMEOUT=600 # 10 minutes for slow setups +ESPERANTO_LLM_TIMEOUT=180 # 3 minutes for model inference +``` + +### Recommended timeouts by setup: + +| Setup | API_CLIENT_TIMEOUT | +|-------|-------------------| +| Cloud APIs (OpenAI, Anthropic) | 300 (default) | +| Local Ollama with GPU | 600 | +| Local Ollama with CPU | 1200 | +| Remote LM Studio | 900 | + +--- + +## Getting Help + +### My question isn't answered here + +1. Check the troubleshooting guides in this section +2. Search existing GitHub issues +3. Ask in the Discord community +4. Create a GitHub issue with detailed information + +### How do I report a bug? + +Include: +- Steps to reproduce +- Expected vs actual behavior +- Error messages and logs +- System information +- Configuration details (without API keys) + +Submit to: [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) + +### Where can I get help? 
+ +- **Discord**: https://discord.gg/37XJPXfz2w (fastest) +- **GitHub Issues**: Bug reports and feature requests +- **Documentation**: This docs site + +--- + +## Related + +- [Quick Fixes](quick-fixes.md) - Common issues with 1-minute solutions +- [AI & Chat Issues](ai-chat-issues.md) - Model and chat problems +- [Connection Issues](connection-issues.md) - Network and API problems diff --git a/docs/6-TROUBLESHOOTING/index.md b/docs/6-TROUBLESHOOTING/index.md new file mode 100644 index 00000000..0ab2471b --- /dev/null +++ b/docs/6-TROUBLESHOOTING/index.md @@ -0,0 +1,239 @@ +# Troubleshooting - Problem Solving Guide + +Having issues? Use this guide to diagnose and fix problems. + +--- + +## How to Use This Guide + +**Step 1: Identify your problem** +- What's the symptom? (error message, behavior, something not working?) +- When did it happen? (during install, while using, after update?) + +**Step 2: Find the right guide** +- Look below for your symptom +- Go to the specific troubleshooting guide + +**Step 3: Follow the steps** +- Guides are organized by symptom, not by root cause +- Each has diagnostic steps and solutions + +--- + +## Quick Problem Map + +### During Installation + +- **Docker won't start** → [Quick Fixes](quick-fixes.md#9-services-wont-start-or-docker-error) +- **Port already in use** → [Quick Fixes](quick-fixes.md#3-port-x-already-in-use) +- **Permission denied** → [Quick Fixes](quick-fixes.md#9-services-wont-start-or-docker-error) +- **Can't connect to database** → [Connection Issues](connection-issues.md) + +### When Starting + +- **API won't start** → [Quick Fixes](quick-fixes.md#9-services-wont-start-or-docker-error) +- **Frontend won't load** → [Connection Issues](connection-issues.md) +- **"Cannot connect to server" error** → [Connection Issues](connection-issues.md) + +### Settings / Configuration + +- **Models not showing** → [AI & Chat Issues](ai-chat-issues.md) +- **"Invalid API key"** → [AI & Chat Issues](ai-chat-issues.md) +- **Can't find Settings** → [Quick Fixes](quick-fixes.md) + +### Using Features + +- **Chat not working** → [AI & Chat Issues](ai-chat-issues.md) +- **Chat responses are slow** → [AI & Chat Issues](ai-chat-issues.md) +- **Chat gives bad answers** → [AI & Chat Issues](ai-chat-issues.md) + +### Adding Content + +- **Can't upload PDF** → [Quick Fixes](quick-fixes.md#4-cannot-process-file-or-unsupported-format) +- **File won't process** → [Quick Fixes](quick-fixes.md#4-cannot-process-file-or-unsupported-format) +- **Web link won't extract** → [Quick Fixes](quick-fixes.md#4-cannot-process-file-or-unsupported-format) + +### Search + +- **Search returns no results** → [Quick Fixes](quick-fixes.md#7-search-returns-nothing) +- **Search returns wrong results** → [Quick Fixes](quick-fixes.md#7-search-returns-nothing) + +### Podcasts + +- **Can't generate podcast** → [Quick Fixes](quick-fixes.md#8-podcast-generation-failed) +- **Podcast audio is robotic** → [Quick Fixes](quick-fixes.md#8-podcast-generation-failed) +- **Podcast generation times out** → [Quick Fixes](quick-fixes.md#8-podcast-generation-failed) + +--- + +## Troubleshooting by Error Message + +### "Cannot connect to server" +→ [Connection Issues](connection-issues.md) — Frontend can't reach API + +### "Invalid API key" +→ [AI & Chat Issues](ai-chat-issues.md) — Wrong or missing API key + +### "Models not available" +→ [AI & Chat Issues](ai-chat-issues.md) — Model not configured + +### "Connection refused" +→ [Connection Issues](connection-issues.md) — Service not running or port wrong + 
+### "Port already in use" +→ [Quick Fixes](quick-fixes.md#3-port-x-already-in-use) — Port conflict + +### "Permission denied" +→ [Quick Fixes](quick-fixes.md#9-services-wont-start-or-docker-error) — File permissions issue + +### "Unsupported file type" +→ [Quick Fixes](quick-fixes.md#4-cannot-process-file-or-unsupported-format) — File format not supported + +### "Processing timeout" +→ [Quick Fixes](quick-fixes.md#5-chat-is-very-slow) — File too large or slow processing + +--- + +## Troubleshooting by Component + +### Frontend (Browser/UI) +- Can't access UI → [Connection Issues](connection-issues.md) +- UI is slow → [Quick Fixes](quick-fixes.md) +- Button/feature missing → [Quick Fixes](quick-fixes.md) + +### API (Backend) +- API won't start → [Quick Fixes](quick-fixes.md#9-services-wont-start-or-docker-error) +- API errors in logs → [Quick Fixes](quick-fixes.md#9-services-wont-start-or-docker-error) +- API is slow → [Quick Fixes](quick-fixes.md) + +### Database +- Can't connect to database → [Connection Issues](connection-issues.md) +- Data lost after restart → [FAQ](faq.md#how-do-i-backup-my-data) + +### AI / Chat +- Chat not working → [AI & Chat Issues](ai-chat-issues.md) +- Bad responses → [AI & Chat Issues](ai-chat-issues.md) +- Cost too high → [AI & Chat Issues](ai-chat-issues.md#high-api-costs) + +### Sources +- Can't upload file → [Quick Fixes](quick-fixes.md#4-cannot-process-file-or-unsupported-format) +- File won't process → [Quick Fixes](quick-fixes.md#4-cannot-process-file-or-unsupported-format) + +### Podcasts +- Won't generate → [Quick Fixes](quick-fixes.md#8-podcast-generation-failed) +- Bad audio quality → [Quick Fixes](quick-fixes.md#8-podcast-generation-failed) + +--- + +## Diagnostic Checklist + +**When something isn't working:** + +- [ ] Check if services are running: `docker ps` +- [ ] Check logs: `docker compose logs api` (or frontend, surrealdb) +- [ ] Verify ports are exposed: `netstat -tlnp` or `lsof -i :5055` +- [ ] Test connectivity: `curl http://localhost:5055/health` +- [ ] Check environment variables: `docker inspect ` +- [ ] Try restarting: `docker compose restart` +- [ ] Check firewall/antivirus isn't blocking + +--- + +## Getting Help + +If you can't find the answer here: + +1. **Check the relevant guide** — Read completely, try all steps +2. **Check the FAQ** — [Frequently Asked Questions](faq.md) +3. **Search our Discord** — Others may have had same issue +4. **Check logs** — Most issues show error messages in logs +5. **Report on GitHub** — Include error message, steps to reproduce + +### How to Report an Issue + +Include: +1. Error message (exact) +2. Steps to reproduce +3. Logs: `docker compose logs` +4. Your setup: Docker/local, provider, OS +5. What you've already tried + +→ [Report on GitHub](https://github.com/lfnovo/open-notebook/issues) + +--- + +## Guides + +### [Quick Fixes](quick-fixes.md) +Top 10 most common issues with 1-minute solutions. + +### [Connection Issues](connection-issues.md) +Frontend can't reach API, network problems. + +### [AI & Chat Issues](ai-chat-issues.md) +Chat not working, bad responses, slow performance. + +### [FAQ](faq.md) +Frequently asked questions about usage, costs, and best practices. 
+ +--- + +## Common Solutions + +**Service won't start?** +```bash +# Check logs +docker compose logs + +# Restart everything +docker compose restart + +# Nuclear option: rebuild +docker compose down +docker compose up --build +``` + +**Port conflict?** +```bash +# Find what's using port 5055 +lsof -i :5055 +# Kill it or use different port +``` + +**Can't connect?** +```bash +# Test API directly +curl http://localhost:5055/health +# Should return: {"status":"ok"} +``` + +**Slow performance?** +```bash +# Check resource usage +docker stats + +# Reduce concurrency in .env +SURREAL_COMMANDS_MAX_TASKS=2 +``` + +**High costs?** +```bash +# Switch to cheaper model +# In Settings → Models → Choose gpt-4o-mini (OpenAI) +# Or use Ollama (free) +``` + +--- + +## Still Stuck? + +**Before asking for help:** +1. Read the relevant guide completely +2. Try all the steps +3. Check the logs +4. Restart services +5. Search existing issues on GitHub + +**Then:** +- **Discord**: https://discord.gg/37XJPXfz2w (fastest response) +- **GitHub Issues**: https://github.com/lfnovo/open-notebook/issues diff --git a/docs/6-TROUBLESHOOTING/quick-fixes.md b/docs/6-TROUBLESHOOTING/quick-fixes.md new file mode 100644 index 00000000..16ae9515 --- /dev/null +++ b/docs/6-TROUBLESHOOTING/quick-fixes.md @@ -0,0 +1,380 @@ +# Quick Fixes - Top 11 Issues & Solutions + +Common problems with 1-minute solutions. + +--- + +## #1: "Cannot connect to server" + +**Symptom:** Browser shows error "Cannot connect to server" or "Unable to reach API" + +**Cause:** Frontend can't reach API + +**Solution (1 minute):** + +```bash +# Step 1: Check if API is running +docker ps | grep api + +# Step 2: Verify port 5055 is accessible +curl http://localhost:5055/health + +# Expected output: {"status":"ok"} + +# If that doesn't work: +# Step 3: Restart services +docker compose restart + +# Step 4: Try again +# Open http://localhost:8502 in browser +``` + +**If still broken:** +- Check `API_URL` in .env (should match your frontend URL) +- See [Connection Issues](connection-issues.md) + +--- + +## #2: "Invalid API key" or "Models not showing" + +**Symptom:** Settings → Models shows "No models available" + +**Cause:** API key missing, wrong, or not set + +**Solution (1 minute):** + +```bash +# Step 1: Check your .env has API key +cat .env | grep OPENAI_API_KEY + +# Step 2: Verify it's correct (from https://platform.openai.com/api-keys) +# Should look like: sk-proj-xxx... 
+
+# Step 3: Restart services
+docker compose restart api
+
+# Step 4: Wait 10 seconds, then refresh browser
+# Go to Settings → Models
+
+# If still no models:
+# Check logs for error
+docker compose logs api | grep -i "api key\|error"
+```
+
+**If still broken:**
+- Make sure key has no extra spaces
+- Generate a fresh key from provider dashboard
+- See [AI & Chat Issues](ai-chat-issues.md)
+
+---
+
+## #3: "Port X already in use"
+
+**Symptom:** Docker error "Port 8502 is already allocated"
+
+**Cause:** Another service using that port
+
+**Solution (1 minute):**
+
+```bash
+# Option 1: Stop the other service
+# Find what's using port 8502
+lsof -i :8502
+# Kill it or close the app
+
+# Option 2: Use different port
+# Edit docker-compose.yml
+# Change: - "8502:8502"
+# To: - "8503:8502"
+
+# Then recreate the container (a plain restart won't apply port changes)
+docker compose up -d
+# Access at: http://localhost:8503
+```
+
+---
+
+## #4: "Cannot process file" or "Unsupported format"
+
+**Symptom:** Upload fails or says "File format not supported"
+
+**Cause:** File type not supported or too large
+
+**Solution (1 minute):**
+
+```bash
+# Check if file format is supported:
+# ✓ PDF, DOCX, PPTX, XLSX (documents)
+# ✓ MP3, WAV, M4A (audio)
+# ✓ MP4, AVI, MOV (video)
+# ✓ URLs/web links
+
+# ✗ Pure images (.jpg without OCR)
+# ✗ Files > 100MB
+
+# Try these:
+# - Convert to PDF if possible
+# - Split large files
+# - Try uploading again
+```
+
+---
+
+## #5: "Chat is very slow"
+
+**Symptom:** Chat responses take minutes or timeout
+
+**Cause:** Slow AI provider, large context, or overloaded system
+
+**Solution (1 minute):**
+
+```bash
+# Step 1: Check which model you're using
+# Settings → Models
+# Note the model name
+
+# Step 2: Try a cheaper/faster model
+# OpenAI: Switch to gpt-4o-mini (10x cheaper, slightly faster)
+# Anthropic: Switch to claude-3-5-haiku (fastest)
+# Groq: Use any model (ultra-fast)
+
+# Step 3: Reduce context
+# Chat: Select fewer sources
+# Use "Summary Only" instead of "Full Content"
+
+# Step 4: Check if API is overloaded
+docker stats
+# Look at CPU/memory usage
+```
+
+For a deeper dive, see [AI & Chat Issues](ai-chat-issues.md)
+
+---
+
+## #6: "Chat gives bad responses"
+
+**Symptom:** AI responses are generic, wrong, or irrelevant
+
+**Cause:** Bad context, vague question, or wrong model
+
+**Solution (1 minute):**
+
+```bash
+# Step 1: Make sure sources are in context
+# Click "Select Sources" in Chat
+# Verify relevant sources are checked and set to "Full Content"
+
+# Step 2: Ask a specific question
+# Bad: "What do you think?"
+# Good: "Based on the paper's methodology section, what are the 3 main limitations?"
+
+# Step 3: Try a more powerful model
+# OpenAI: Use gpt-4o (better reasoning)
+# Anthropic: Use claude-3-5-sonnet (best reasoning)
+
+# Step 4: Check citations
+# Click citations to verify AI actually saw those sources
+```
+
+For detailed help: See [Chat Effectively](../3-USER-GUIDE/chat-effectively.md)
+
+---
+
+## #7: "Search returns nothing"
+
+**Symptom:** Search shows 0 results even though content exists
+
+**Cause:** Wrong search type or poor query
+
+**Solution (1 minute):**
+
+```bash
+# Try a different search type:
+
+# If you searched with KEYWORDS:
+# Try VECTOR SEARCH instead
+# (Concept-based, not keyword-based)
+
+# If you searched for CONCEPTS:
+# Try TEXT SEARCH instead
+# (Look for specific words in your query)
+
+# Try simpler search:
+# Instead of: "How do transformers work in neural networks?"
+# Try: "transformers" or "neural networks" + +# Check sources are processed: +# Go to notebook +# All sources should show green "Ready" status +``` + +For detailed help: See [Search Effectively](../3-USER-GUIDE/search.md) + +--- + +## #8: "Podcast generation failed" + +**Symptom:** "Podcast generation failed" error + +**Cause:** Insufficient content, API quota, or network issue + +**Solution (1 minute):** + +```bash +# Step 1: Make sure you have content +# Select at least 1-2 sources +# Avoid single-sentence sources + +# Step 2: Try again +# Sometimes it's a temporary API issue +# Wait 30 seconds and retry + +# Step 3: Check your TTS provider has quota +# OpenAI: Check account has credits +# ElevenLabs: Check monthly quota +# Google: Check API quota + +# Step 4: Try different TTS provider +# In podcast generation, choose "Google" or "Local" +# instead of "ElevenLabs" +``` + +For detailed help: See [FAQ](faq.md) + +--- + +## #9: "Services won't start" or Docker error + +**Symptom:** Docker error when running `docker compose up` + +**Cause:** Corrupt configuration, permission issue, or resource issue + +**Solution (1 minute):** + +```bash +# Step 1: Check logs +docker compose logs + +# Step 2: Try restart +docker compose restart + +# Step 3: If that fails, rebuild +docker compose down +docker compose up --build + +# Step 4: Check disk space +df -h +# Need at least 5GB free + +# Step 5: Check Docker has enough memory +# Docker settings → Resources → Memory: 4GB+ +``` + +--- + +## #10: "Database says 'too many connections'" + +**Symptom:** Error about database connections + +**Cause:** Too many concurrent operations + +**Solution (1 minute):** + +```bash +# In .env, reduce concurrency: +SURREAL_COMMANDS_MAX_TASKS=2 + +# Then restart: +docker compose restart + +# This makes it slower but more stable +``` + +--- + +## #11: Slow Startup or Download Timeouts (China/Slow Networks) + +**Symptom:** Container crashes on startup, worker enters FATAL state, or pip/uv downloads fail + +**Cause:** Slow network or restricted access to Python package repositories + +**Solution:** + +### Increase Download Timeout +```yaml +# In docker-compose.yml environment: +environment: + - UV_HTTP_TIMEOUT=600 # 10 minutes (default is 30s) +``` + +### Use Chinese Mirrors (if in China) +```yaml +environment: + - UV_HTTP_TIMEOUT=600 + - UV_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple + - PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple +``` + +**Alternative Chinese mirrors:** +- Tsinghua: `https://pypi.tuna.tsinghua.edu.cn/simple` +- Aliyun: `https://mirrors.aliyun.com/pypi/simple/` +- Huawei: `https://repo.huaweicloud.com/repository/pypi/simple` + +**Note:** First startup may take several minutes while dependencies download. Subsequent starts will be faster. + +--- + +## Quick Troubleshooting Checklist + +When something breaks: + +- [ ] **Restart services:** `docker compose restart` +- [ ] **Check logs:** `docker compose logs` +- [ ] **Verify connectivity:** `curl http://localhost:5055/health` +- [ ] **Check .env:** API keys set? API_URL correct? +- [ ] **Check resources:** `docker stats` (CPU/memory) +- [ ] **Clear cache:** `docker system prune` (free space) +- [ ] **Rebuild if needed:** `docker compose up --build` + +--- + +## Nuclear Options (Last Resort) + +**Completely reset (will lose all data in Docker):** + +```bash +docker compose down -v +docker compose up --build +``` + +**Reset to defaults:** +```bash +# Backup your .env first! 
+cp .env .env.backup + +# Reset to example +cp .env.example .env + +# Edit with your API keys +# Restart +docker compose up +``` + +--- + +## Prevention Tips + +1. **Keep backups** — Export your notebooks regularly +2. **Monitor logs** — Check `docker compose logs` periodically +3. **Update regularly** — Pull latest image: `docker pull lfnovo/open_notebook:latest` +4. **Document changes** — Keep notes on what you configured +5. **Test after updates** — Verify everything works + +--- + +## Still Stuck? + +- **Look up your exact error** in [Troubleshooting Index](index.md) +- **Check the FAQ** in [FAQ](faq.md) +- **Check logs:** `docker compose logs | head -50` +- **Ask for help:** [Discord](https://discord.gg/37XJPXfz2w) or [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) diff --git a/docs/7-DEVELOPMENT/api-reference.md b/docs/7-DEVELOPMENT/api-reference.md new file mode 100644 index 00000000..5cfa1712 --- /dev/null +++ b/docs/7-DEVELOPMENT/api-reference.md @@ -0,0 +1,213 @@ +# API Reference + +Complete REST API for Open Notebook. All endpoints are served from the API backend (default: `http://localhost:5055`). + +**Base URL**: `http://localhost:5055` (development) or environment-specific production URL + +**Interactive Docs**: Use FastAPI's built-in Swagger UI at `http://localhost:5055/docs` for live testing and exploration. This is the primary reference for all endpoints, request/response schemas, and real-time testing. + +--- + +## Quick Start + +### 1. Authentication + +Simple password-based (development only): + +```bash +curl http://localhost:5055/api/notebooks \ + -H "Authorization: Bearer your_password" +``` + +**⚠️ Production**: Replace with OAuth/JWT. See [Security Configuration](../5-CONFIGURATION/security.md) for details. + +### 2. Base API Flow + +Most operations follow this pattern: +1. Create a **Notebook** (container for research) +2. Add **Sources** (PDFs, URLs, text) +3. Query via **Chat** or **Search** +4. View results and **Notes** + +### 3. 
Testing Endpoints + +Instead of memorizing endpoints, use the interactive API docs: +- Navigate to `http://localhost:5055/docs` +- Try requests directly in the browser +- See request/response schemas in real-time +- Test with your own data + +--- + +## API Endpoints Overview + +### Main Resource Types + +**Notebooks** - Research projects containing sources and notes +- `GET/POST /notebooks` - List and create +- `GET/PUT/DELETE /notebooks/{id}` - Read, update, delete + +**Sources** - Content items (PDFs, URLs, text) +- `GET/POST /sources` - List and add content +- `GET /sources/{id}` - Fetch source details +- `POST /sources/{id}/retry` - Retry failed processing +- `GET /sources/{id}/download` - Download original file + +**Notes** - User-created or AI-generated research notes +- `GET/POST /notes` - List and create +- `GET/PUT/DELETE /notes/{id}` - Read, update, delete + +**Chat** - Conversational AI interface +- `GET/POST /chat/sessions` - Manage chat sessions +- `POST /chat/execute` - Send message and get response +- `POST /chat/context/build` - Prepare context for chat + +**Search** - Find content by text or semantic similarity +- `POST /search` - Full-text or vector search +- `POST /ask` - Ask a question (search + synthesize) + +**Transformations** - Custom prompts for extracting insights +- `GET/POST /transformations` - Create custom extraction rules +- `POST /sources/{id}/insights` - Apply transformation to source + +**Models** - Configure AI providers +- `GET /models` - Available models +- `GET /models/defaults` - Current defaults +- `POST /models/config` - Set defaults + +**Health & Status** +- `GET /health` - Health check +- `GET /commands/{id}` - Track async operations + +--- + +## Authentication + +### Current (Development) + +All requests require password header: + +```bash +curl -H "Authorization: Bearer your_password" http://localhost:5055/api/notebooks +``` + +Password configured via `OPEN_NOTEBOOK_PASSWORD` environment variable. + +> **📖 See [Security Configuration](../5-CONFIGURATION/security.md)** for complete authentication setup, API examples, and production hardening. + +### Production + +**⚠️ Not secure.** Replace with: +- OAuth 2.0 (recommended) +- JWT tokens +- API keys + +See [Security Configuration](../5-CONFIGURATION/security.md) for production setup. 
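+
+### Example: Authenticated Request in Python
+
+A minimal sketch of the same flow in Python, assuming the `requests` package is installed and `OPEN_NOTEBOOK_PASSWORD` is exported in your shell; the endpoint and header format follow the Quick Start above:
+
+```python
+import os
+
+import requests
+
+BASE_URL = "http://localhost:5055"
+# The Bearer token is simply the server's OPEN_NOTEBOOK_PASSWORD
+HEADERS = {"Authorization": f"Bearer {os.environ['OPEN_NOTEBOOK_PASSWORD']}"}
+
+resp = requests.get(f"{BASE_URL}/api/notebooks", headers=HEADERS, timeout=30)
+if resp.status_code == 401:
+    # The API answers {"detail": "Missing authorization header"} without valid auth
+    raise SystemExit("Authentication failed - check OPEN_NOTEBOOK_PASSWORD")
+resp.raise_for_status()
+print(resp.json())
+```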
+
+---
+
+## Common Patterns
+
+All examples below assume the `Authorization: Bearer` header from the Quick Start when `OPEN_NOTEBOOK_PASSWORD` is set; it is omitted here for brevity.
+
+### Pagination
+
+```bash
+# List sources with limit/offset
+curl 'http://localhost:5055/api/sources?limit=20&offset=10'
+```
+
+### Filtering & Sorting
+
+```bash
+# Filter by notebook, sort by date
+curl 'http://localhost:5055/api/sources?notebook_id=notebook:abc&sort_by=created&sort_order=asc'
+```
+
+### Async Operations
+
+Some operations (source processing, podcast generation) return immediately with a command ID:
+
+```bash
+# Submit async operation
+curl -X POST http://localhost:5055/api/sources -F async_processing=true
+# Response: {"id": "source:src001", "command_id": "command:cmd123"}
+
+# Poll status
+curl http://localhost:5055/api/commands/command:cmd123
+```
+
+### Streaming Responses
+
+The `/ask` endpoint streams responses as Server-Sent Events:
+
+```bash
+curl -N 'http://localhost:5055/api/ask' \
+  -H "Content-Type: application/json" \
+  -d '{"question": "What is AI?"}'
+
+# Outputs: data: {"type":"strategy",...}
+#          data: {"type":"answer",...}
+#          data: {"type":"final_answer",...}
+```
+
+### Multipart File Upload
+
+```bash
+curl -X POST http://localhost:5055/api/sources \
+  -F "type=upload" \
+  -F "notebook_id=notebook:abc" \
+  -F "file=@document.pdf"
+```
+
+---
+
+## Error Handling
+
+All errors return JSON with status code:
+
+```json
+{"detail": "Notebook not found"}
+```
+
+### Common Status Codes
+
+| Code | Meaning | Example |
+|------|---------|---------|
+| 200 | Success | Operation completed |
+| 400 | Bad Request | Invalid input |
+| 404 | Not Found | Resource doesn't exist |
+| 409 | Conflict | Resource already exists |
+| 500 | Server Error | Database/processing error |
+
+---
+
+## Tips for Developers
+
+1. **Start with interactive docs** (`http://localhost:5055/docs`) - this is the definitive reference
+2. **Enable logging** for debugging (check API logs: `docker logs open-notebook`)
+3. **Streaming endpoints** require special handling (Server-Sent Events, not standard JSON)
+4. **Async operations** return immediately; always poll status before assuming completion (see the polling example at the end of this page)
+5. **Vector search** requires an embedding model to be configured (check `/models`)
+6. **Model overrides** are per-request; set in body, not config
+7. **CORS enabled** in development; configure for production
+
+---
+
+## Learning Path
+
+1. **Authentication**: Add the `Authorization: Bearer your_password` header to all requests
+2. **Create a notebook**: `POST /notebooks` with name and description
+3. **Add a source**: `POST /sources` with file, URL, or text
+4. **Query your content**: `POST /chat/execute` to ask questions
+5. **Explore advanced features**: Search, transformations, streaming
+
+---
+
+## Production Considerations
+
+- Replace password auth with OAuth/JWT (see [Security](../5-CONFIGURATION/security.md))
+- Add rate limiting via reverse proxy (Nginx, CloudFlare, Kong)
+- Enable CORS restrictions (currently allows all origins)
+- Use HTTPS via reverse proxy (see [Reverse Proxy](../5-CONFIGURATION/reverse-proxy.md))
+- Set up API versioning strategy (currently implicit)
+
+See [Security Configuration](../5-CONFIGURATION/security.md) and [Reverse Proxy Setup](../5-CONFIGURATION/reverse-proxy.md) for complete production setup.
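+
+---
+
+## Example: Polling Async Operations
+
+Tip 4 above says to poll async commands rather than assume completion. A minimal Python sketch of that loop - the `status` field name and its terminal values are assumptions here, so check the interactive docs at `/docs` for the exact command schema:
+
+```python
+import time
+
+import requests
+
+BASE_URL = "http://localhost:5055"
+HEADERS = {"Authorization": "Bearer your_password"}
+
+
+def wait_for_command(command_id: str, timeout: float = 600.0, interval: float = 2.0) -> dict:
+    """Poll a command record until it reaches a terminal state or the timeout expires."""
+    deadline = time.monotonic() + timeout
+    while time.monotonic() < deadline:
+        resp = requests.get(f"{BASE_URL}/api/commands/{command_id}", headers=HEADERS, timeout=30)
+        resp.raise_for_status()
+        command = resp.json()
+        # Terminal status values are assumed; adjust to the real schema from /docs
+        if command.get("status") in ("completed", "failed", "error"):
+            return command
+        time.sleep(interval)
+    raise TimeoutError(f"Command {command_id} did not finish within {timeout}s")
+
+
+# Usage with the async upload example above:
+# result = wait_for_command("command:cmd123")
+```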
diff --git a/docs/7-DEVELOPMENT/architecture.md b/docs/7-DEVELOPMENT/architecture.md new file mode 100644 index 00000000..3ef2f8a6 --- /dev/null +++ b/docs/7-DEVELOPMENT/architecture.md @@ -0,0 +1,891 @@ +# Open Notebook Architecture + +## High-Level Overview + +Open Notebook follows a three-tier architecture with clear separation of concerns: + +``` +┌─────────────────────────────────────────────────────────┐ +│ Your Browser │ +│ Access: http://your-server-ip:8502 │ +└────────────────┬────────────────────────────────────────┘ + │ + ▼ + ┌───────────────┐ + │ Port 8502 │ ← Next.js Frontend (what you see) + │ Frontend │ Also proxies API requests internally! + └───────┬───────┘ + │ proxies /api/* requests ↓ + ▼ + ┌───────────────┐ + │ Port 5055 │ ← FastAPI Backend (handles requests) + │ API │ + └───────┬───────┘ + │ + ▼ + ┌───────────────┐ + │ SurrealDB │ ← Database (internal, auto-configured) + │ (Port 8000) │ + └───────────────┘ +``` + +**Key Points:** +- **v1.1+**: Next.js automatically proxies `/api/*` requests to the backend, simplifying reverse proxy setup +- Your browser loads the frontend from port 8502 +- The frontend needs to know where to find the API - when accessing remotely, set: `API_URL=http://your-server-ip:5055` +- **Behind reverse proxy?** You only need to proxy to port 8502 now! See [Reverse Proxy Configuration](../5-CONFIGURATION/reverse-proxy.md) + +--- + +## Detailed Architecture + +Open Notebook is built on a **three-tier, async-first architecture** designed for scalability, modularity, and multi-provider AI flexibility. The system separates concerns across frontend, API, and database layers, with LangGraph powering intelligent workflows and Esperanto enabling seamless integration with 8+ AI providers. + +**Core Philosophy**: +- Privacy-first: Users control their data and AI provider choice +- Async/await throughout: Non-blocking operations for responsive UX +- Domain-Driven Design: Clear separation between domain models, repositories, and orchestrators +- Multi-provider flexibility: Swap AI providers without changing application code +- Self-hosted capable: All components deployable in isolated environments + +--- + +## Three-Tier Architecture + +### Layer 1: Frontend (React/Next.js @ port 3000) + +**Purpose**: Responsive, interactive user interface for research, notes, chat, and podcast management. 
+ +**Technology Stack**: +- **Framework**: Next.js 15 with React 19 +- **Language**: TypeScript with strict type checking +- **State Management**: Zustand (lightweight store) + TanStack Query (server state) +- **Styling**: Tailwind CSS + Shadcn/ui component library +- **Build Tool**: Webpack (bundled via Next.js) + +**Key Responsibilities**: +- Render notebooks, sources, notes, chat sessions, and podcasts +- Handle user interactions (create, read, update, delete operations) +- Manage complex UI state (modals, file uploads, real-time search) +- Stream responses from API (chat, podcast generation) +- Display embeddings, vector search results, and insights + +**Communication Pattern**: +- All data fetched via REST API (async requests to port 5055) +- Configured base URL: `http://localhost:5055` (dev) or environment-specific (prod) +- TanStack Query handles caching, refetching, and data synchronization +- Zustand stores global state (user, notebooks, selected context) +- CORS enabled on API side for cross-origin requests + +**Component Architecture**: +- `/src/app/`: Next.js App Router (pages, layouts) +- `/src/components/`: Reusable React components (buttons, forms, cards) +- `/src/hooks/`: Custom hooks (useNotebook, useChat, useSearch) +- `/src/lib/`: Utility functions, API clients, validators +- `/src/styles/`: Global CSS, Tailwind config + +--- + +### Layer 2: API (FastAPI @ port 5055) + +**Purpose**: RESTful backend exposing operations on notebooks, sources, notes, chat sessions, and AI models. + +**Technology Stack**: +- **Framework**: FastAPI 0.104+ (async Python web framework) +- **Language**: Python 3.11+ +- **Validation**: Pydantic v2 (request/response schemas) +- **Logging**: Loguru (structured JSON logging) +- **Testing**: Pytest (unit and integration tests) + +**Architecture**: +``` +FastAPI App (main.py) + ├── Routers (HTTP endpoints) + │ ├── routers/notebooks.py (CRUD operations) + │ ├── routers/sources.py (content ingestion, upload) + │ ├── routers/notes.py (note management) + │ ├── routers/chat.py (conversation sessions) + │ ├── routers/search.py (full-text + vector search) + │ ├── routers/transformations.py (custom transformations) + │ ├── routers/models.py (AI model configuration) + │ └── routers/*.py (11 additional routers) + │ + ├── Services (business logic) + │ ├── *_service.py (orchestration, graph invocation) + │ ├── command_service.py (async job submission) + │ └── middleware (auth, logging) + │ + ├── Models (Pydantic schemas) + │ └── models.py (validation, serialization) + │ + └── Lifespan (startup/shutdown) + └── AsyncMigrationManager (database schema migrations) +``` + +**Key Responsibilities**: +1. **HTTP Interface**: Accept REST requests, validate, return JSON responses +2. **Business Logic**: Orchestrate domain models, repository operations, and workflows +3. **Async Job Queue**: Submit long-running tasks (podcast generation, source processing) +4. **Database Migrations**: Run schema updates on startup +5. **Error Handling**: Catch exceptions, return appropriate HTTP status codes +6. **Logging**: Track operations for debugging and monitoring + +**Startup Flow**: +1. Load `.env` environment variables +2. Initialize FastAPI app with CORS + auth middleware +3. Run AsyncMigrationManager (creates/updates database schema) +4. Register all routers (20+ endpoints) +5. 
Server ready on port 5055
+
+**Request-Response Cycle**:
+```
+HTTP Request → Router → Service → Domain/Repository → SurrealDB
+                          ↓
+                   LangGraph (optional)
+                          ↓
+Response ← Pydantic serialization ← Service ← Result
+```
+
+---
+
+### Layer 3: Database (SurrealDB @ port 8000)
+
+**Purpose**: Graph database with built-in vector embeddings, semantic search, and relationship management.
+
+**Technology Stack**:
+- **Database**: SurrealDB (multi-model, ACID transactions)
+- **Query Language**: SurrealQL (SQL-like syntax with graph operations)
+- **Async Driver**: Official async Python client (Rust-backed)
+- **Migrations**: Sequential `.surql` files in `/migrations/`, applied automatically on API startup
+
+**Core Tables**:
+
+| Table | Purpose | Key Fields |
+|-------|---------|-----------|
+| `notebook` | Research project container | id, name, description, archived, created, updated |
+| `source` | Content item (PDF, URL, text) | id, title, full_text, topics, asset, created, updated |
+| `source_embedding` | Vector embeddings for semantic search | id, source, embedding, chunk_text, chunk_index |
+| `note` | User- or AI-created research notes | id, title, content, note_type (human/ai), created, updated |
+| `chat_session` | Conversation session | id, notebook_id, title, messages (JSON), created, updated |
+| `transformation` | Custom transformation rules | id, name, description, prompt, created, updated |
+| `source_insight` | Transformation output | id, source_id, insight_type, content, created, updated |
+| `reference` | Relationship: source → notebook | out (source), in (notebook) |
+| `artifact` | Relationship: note → notebook | out (note), in (notebook) |
+
+**Relationship Graph**:
+```
+Notebook
+  ↓ (referenced_by)
+Source
+  ├→ SourceEmbedding (1:many for chunked text)
+  ├→ SourceInsight (1:many for transformation outputs)
+  └→ Note (via artifact relationship)
+       ├→ Embedding (semantic search)
+       └→ Topics (tags)
+
+ChatSession
+  ├→ Notebook
+  └→ Messages (stored as JSON array)
+```
+
+**Vector Search Capability**:
+- Embeddings stored natively in SurrealDB
+- Full-text search on `source.full_text` and `note.content`
+- Cosine similarity search on embedding vectors
+- Semantic search integrates with the search endpoint
+
+**Connection Management**:
+- Async connection pooling (configurable size)
+- Transaction support for multi-record operations
+- Schema auto-validation via migrations
+- Query timeout protection (prevents runaway queries)
+
+---
+
+## Tech Stack Rationale
+
+### Why Python + FastAPI?
+
+**Python**:
+- Rich AI/ML ecosystem (LangChain, LangGraph, transformers, scikit-learn)
+- Rapid prototyping and deployment
+- Extensive async support (asyncio, async/await)
+- Strong type hints (Pydantic, mypy)
+
+**FastAPI**:
+- Modern, async-first framework
+- Automatic OpenAPI documentation (Swagger UI @ /docs)
+- Built-in request validation (Pydantic)
+- Excellent performance for a Python framework (on par with Node.js and Go in independent benchmarks)
+- Easy middleware/dependency injection
+
+### Why Next.js + React + TypeScript?
+ +**Next.js**: +- Full-stack React framework with SSR/SSG +- File-based routing (intuitive project structure) +- Built-in API routes (optional backend co-location) +- Optimized image/code splitting +- Easy deployment (Vercel, Docker, self-hosted) + +**React 19**: +- Component-based UI (reusable, testable) +- Excellent tooling and community +- Client-side state management (Zustand) +- Server-side state sync (TanStack Query) + +**TypeScript**: +- Type safety catches errors at compile time +- Better IDE autocomplete and refactoring +- Documentation via types (self-documenting code) +- Easier onboarding for new contributors + +### Why SurrealDB? + +**SurrealDB**: +- Native graph database (relationships are first-class) +- Built-in vector embeddings (no separate vector DB) +- ACID transactions (data consistency) +- Multi-model (relational + document + graph) +- Full-text search + semantic search in one query +- Self-hosted (unlike managed Pinecone/Weaviate) +- Flexible SurrealQL (SQL-like syntax) + +**Alternative Considered**: PostgreSQL + pgvector (more mature but separate extensions) + +### Why Esperanto for AI Providers? + +**Esperanto Library**: +- Unified interface to 8+ LLM providers (OpenAI, Anthropic, Google, Groq, Ollama, Mistral, DeepSeek, xAI) +- Multi-provider embeddings (OpenAI, Google, Ollama, Mistral, Voyage) +- TTS/STT integration (OpenAI, Groq, ElevenLabs, Google) +- Smart provider selection (fallback logic, cost optimization) +- Per-request model override support +- Local Ollama support (completely self-hosted option) + +**Alternative Considered**: LangChain's provider abstraction (more verbose, less flexible) + +--- + +## LangGraph Workflows + +LangGraph is a state machine library that orchestrates multi-step AI workflows. Open Notebook uses five core workflows: + +### 1. **Source Processing Workflow** (`open_notebook/graphs/source.py`) + +**Purpose**: Ingest content (PDF, URL, text) and prepare for search/insights. + +**Flow**: +``` +Input (file/URL/text) + ↓ +Extract Content (content-core library) + ↓ +Clean & tokenize text + ↓ +Generate Embeddings (Esperanto) + ↓ +Create SourceEmbedding records (chunked + indexed) + ↓ +Extract Topics (LLM summarization) + ↓ +Save to SurrealDB + ↓ +Output (Source record with embeddings) +``` + +**State Dict**: +```python +{ + "content_state": {"file_path" | "url" | "content": str}, + "source_id": str, + "full_text": str, + "embeddings": List[Dict], + "topics": List[str], + "notebook_ids": List[str], +} +``` + +**Invoked By**: Sources API (`POST /sources`) + +--- + +### 2. **Chat Workflow** (`open_notebook/graphs/chat.py`) + +**Purpose**: Conduct multi-turn conversations with AI model, referencing notebook context. + +**Flow**: +``` +User Message + ↓ +Build Context (selected sources/notes) + ↓ +Add Message to Session + ↓ +Create Chat Prompt (system + history + context) + ↓ +Call LLM (via Esperanto) + ↓ +Stream Response + ↓ +Save AI Message to ChatSession + ↓ +Output (complete message) +``` + +**State Dict**: +```python +{ + "session_id": str, + "messages": List[BaseMessage], + "context": Dict[str, Any], # sources, notes, snippets + "response": str, + "model_override": Optional[str], +} +``` + +**Key Features**: +- Message history persisted in SurrealDB (SqliteSaver checkpoint) +- Context building via `build_context_for_chat()` utility +- Token counting to prevent overflow +- Per-message model override support + +**Invoked By**: Chat API (`POST /chat/execute`) + +--- + +### 3. 
**Ask Workflow** (`open_notebook/graphs/ask.py`) + +**Purpose**: Answer user questions by searching sources and synthesizing responses. + +**Flow**: +``` +User Question + ↓ +Plan Search Strategy (LLM generates searches) + ↓ +Execute Searches (vector + text search) + ↓ +Score & Rank Results + ↓ +Provide Answers (LLM synthesizes from results) + ↓ +Stream Responses + ↓ +Output (final answer) +``` + +**State Dict**: +```python +{ + "question": str, + "strategy": SearchStrategy, + "answers": List[str], + "final_answer": str, + "sources_used": List[Source], +} +``` + +**Streaming**: Uses `astream()` to emit updates in real-time (strategy → answers → final answer) + +**Invoked By**: Search API (`POST /ask` with streaming) + +--- + +### 4. **Transformation Workflow** (`open_notebook/graphs/transformation.py`) + +**Purpose**: Apply custom transformations to sources (extract summaries, key points, etc). + +**Flow**: +``` +Source + Transformation Rule + ↓ +Generate Prompt (Jinja2 template) + ↓ +Call LLM + ↓ +Parse Output + ↓ +Create SourceInsight record + ↓ +Output (insight with type + content) +``` + +**Example Transformations**: +- Summary (5-sentence overview) +- Key Points (bulleted list) +- Quotes (notable excerpts) +- Q&A (generated questions and answers) + +**Invoked By**: Sources API (`POST /sources/{id}/insights`) + +--- + +### 5. **Prompt Workflow** (`open_notebook/graphs/prompt.py`) + +**Purpose**: Generic LLM task execution (e.g., auto-generate note titles, analyze content). + +**Flow**: +``` +Input Text + Prompt + ↓ +Call LLM (simple request-response) + ↓ +Output (completion) +``` + +**Used For**: Note title generation, content analysis, etc. + +--- + +## AI Provider Integration Pattern + +### ModelManager: Centralized Factory + +Located in `open_notebook/ai/models.py`, ModelManager handles: + +1. **Provider Detection**: Check environment variables for available providers +2. **Model Selection**: Choose best model based on context size and task +3. **Fallback Logic**: If primary provider unavailable, try backup +4. **Cost Optimization**: Prefer cheaper models for simple tasks +5. **Token Calculation**: Estimate cost before LLM call + +**Usage**: +```python +from open_notebook.ai.provision import provision_langchain_model + +# Get best LLM for context size +model = await provision_langchain_model( + task="chat", # or "search", "extraction" + model_override="anthropic/claude-opus-4", # optional + context_size=8000, # estimated tokens +) + +# Invoke model +response = await model.ainvoke({"input": prompt}) +``` + +### Multi-Provider Support + +**LLM Providers**: +- OpenAI (gpt-4, gpt-4-turbo, gpt-3.5-turbo) +- Anthropic (claude-opus, claude-sonnet, claude-haiku) +- Google (gemini-pro, gemini-1.5) +- Groq (mixtral, llama-2) +- Ollama (local models) +- Mistral (mistral-large, mistral-medium) +- DeepSeek (deepseek-chat) +- xAI (grok) + +**Embedding Providers**: +- OpenAI (text-embedding-3-large, text-embedding-3-small) +- Google (embedding-001) +- Ollama (local embeddings) +- Mistral (mistral-embed) +- Voyage (voyage-large-2) + +**TTS Providers**: +- OpenAI (tts-1, tts-1-hd) +- Groq (no TTS, fallback to OpenAI) +- ElevenLabs (multilingual voices) +- Google TTS (text-to-speech) + +### Per-Request Override + +Every LangGraph invocation accepts a `config` parameter to override models: + +```python +result = await graph.ainvoke( + input={...}, + config={ + "configurable": { + "model_override": "anthropic/claude-opus-4" # Use Claude instead + } + } +) +``` + +--- + +## Design Patterns + +### 1. 
**Domain-Driven Design (DDD)**
+
+**Domain Objects** (`open_notebook/domain/`):
+- `Notebook`: Research container with relationships to sources/notes
+- `Source`: Content item (PDF, URL, text) with embeddings
+- `Note`: User-created or AI-generated research note
+- `ChatSession`: Conversation history for a notebook
+- `Transformation`: Custom rule for extracting insights
+
+**Repository Pattern**:
+- Database access layer (`open_notebook/database/repository.py`)
+- `repo_query()`: Execute SurrealQL queries
+- `repo_create()`: Insert records
+- `repo_upsert()`: Merge records
+- `repo_delete()`: Remove records
+
+**Entity Methods**:
+```python
+# Domain methods (business logic)
+notebook = await Notebook.get(id)
+await notebook.save()
+notes = await notebook.get_notes()
+sources = await notebook.get_sources()
+```
+
+### 2. **Async-First Architecture**
+
+**All I/O is async**:
+- Database queries: `await repo_query(...)`
+- LLM calls: `await model.ainvoke(...)`
+- File I/O: `await upload_file.read()`
+- Graph invocations: `await graph.ainvoke(...)`
+
+**Benefits**:
+- Non-blocking request handling (FastAPI serves multiple concurrent requests)
+- Better resource utilization (I/O waiting doesn't block CPU)
+- Natural fit for Python async/await syntax
+
+**Example**:
+```python
+@router.post("/sources")
+async def create_source(source_data: SourceCreate):
+    # All operations are non-blocking
+    source = Source(title=source_data.title)
+    await source.save()  # async database operation
+    await graph.ainvoke({...})  # async LangGraph invocation
+    return SourceResponse(...)
+```
+
+### 3. **Service Pattern**
+
+Services orchestrate domain objects, repositories, and workflows:
+
+```python
+# api/notebook_service.py
+class NotebookService:
+    async def get_notebook_with_stats(self, notebook_id: str) -> dict:
+        notebook = await Notebook.get(notebook_id)
+        sources = await notebook.get_sources()
+        notes = await notebook.get_notes()
+        return {
+            "notebook": notebook,
+            "source_count": len(sources),
+            "note_count": len(notes),
+        }
+```
+
+**Responsibilities**:
+- Validate inputs (Pydantic)
+- Orchestrate database operations
+- Invoke workflows (LangGraph graphs)
+- Handle errors and return appropriate status codes
+- Log operations
+
+### 4. **Streaming Pattern**
+
+For long-running operations (ask workflow, podcast generation), stream results as Server-Sent Events:
+
+```python
+import json
+
+from fastapi.responses import StreamingResponse
+
+@router.post("/ask", response_class=StreamingResponse)
+async def ask(request: AskRequest):
+    async def stream_response():
+        async for chunk in ask_graph.astream(input={...}):
+            yield f"data: {json.dumps(chunk)}\n\n"
+    return StreamingResponse(stream_response(), media_type="text/event-stream")
+```
+
+### 5. **Job Queue Pattern**
+
+For async background tasks (source processing), use the Surreal-Commands job queue:
+
+```python
+# Submit job
+command_id = await CommandService.submit_command_job(
+    app="open_notebook",
+    command="process_source",
+    input={...}
+)
+
+# Poll status
+status = await source.get_status()
+```
+
+---
+
+## Service Communication Patterns
+
+### Frontend → API
+
+1. **REST requests** (HTTP GET/POST/PUT/DELETE)
+2. **JSON request/response bodies**
+3. **Standard HTTP status codes** (200, 400, 404, 500)
+4. **Optional streaming** (Server-Sent Events for long operations)
+
+**Example**:
+```typescript
+// Frontend
+const response = await fetch("http://localhost:5055/sources", {
+  method: "POST",
+  body: formData,  // multipart/form-data for file upload
+});
+const source = await response.json();
+```
+
+### API → SurrealDB
+
+1.
**SurrealQL queries** (similar to SQL) +2. **Async driver** with connection pooling +3. **Type-safe record IDs** (record_id syntax) +4. **Transaction support** for multi-step operations + +**Example**: +```python +# API +result = await repo_query( + "SELECT * FROM source WHERE notebook = $notebook_id", + {"notebook_id": ensure_record_id(notebook_id)} +) +``` + +### API → AI Providers (via Esperanto) + +1. **Esperanto unified interface** +2. **Per-request provider override** +3. **Automatic fallback on failure** +4. **Token counting and cost estimation** + +**Example**: +```python +# API +model = await provision_langchain_model(task="chat") +response = await model.ainvoke({"input": prompt}) +``` + +### API → Job Queue (Surreal-Commands) + +1. **Async job submission** +2. **Fire-and-forget pattern** +3. **Status polling via `/commands/{id}` endpoint** +4. **Job completion callbacks (optional)** + +**Example**: +```python +# Submit async source processing +command_id = await CommandService.submit_command_job(...) + +# Client polls status +response = await fetch(f"http://localhost:5055/commands/{command_id}") +status = await response.json() # returns { status: "running|queued|completed|failed" } +``` + +--- + +## Database Schema Overview + +### Core Schema Structure + +**Tables** (20+): +- Notebooks (with soft-delete via `archived` flag) +- Sources (content + metadata) +- SourceEmbeddings (vector chunks) +- Notes (user-created + AI-generated) +- ChatSessions (conversation history) +- Transformations (custom rules) +- SourceInsights (transformation outputs) +- Relationships (notebook→source, notebook→note) + +**Migrations**: +- Automatic on API startup +- Located in `/migrations/` directory +- Numbered sequentially (001_*.surql, 002_*.surql, etc) +- Tracked in `_sbl_migrations` table +- Rollback via `_down.surql` files (manual) + +### Relationship Model + +**Graph Relationships**: +``` +Notebook + ← reference ← Source (many:many) + ← artifact ← Note (many:many) + +Source + → source_embedding (one:many) + → source_insight (one:many) + → embedding (via source_embedding) + +ChatSession + → messages (JSON array in database) + → notebook_id (reference to Notebook) + +Transformation + → source_insight (one:many) +``` + +**Query Example** (get all sources in a notebook with counts): +```sql +SELECT id, title, + count(<-reference.in) as note_count, + count(<-embedding.in) as embedded_chunks +FROM source +WHERE notebook = $notebook_id +ORDER BY updated DESC +``` + +--- + +## Key Architectural Decisions + +### 1. **Async Throughout** + +All I/O operations are non-blocking to maximize concurrency and responsiveness. + +**Trade-off**: Slightly more complex code (async/await syntax) vs. high throughput. + +### 2. **Multi-Provider from Day 1** + +Built-in support for 8+ AI providers prevents vendor lock-in. + +**Trade-off**: Added complexity in ModelManager vs. flexibility and cost optimization. + +### 3. **Graph-First Workflows** + +LangGraph state machines for complex multi-step operations (ask, chat, transformations). + +**Trade-off**: Steeper learning curve vs. maintainable, debuggable workflows. + +### 4. **Self-Hosted Database** + +SurrealDB for graph + vector search in one system (no external dependencies). + +**Trade-off**: Operational responsibility vs. simplified architecture and cost savings. + +### 5. **Job Queue for Long-Running Tasks** + +Async job submission (source processing, podcast generation) prevents request timeouts. + +**Trade-off**: Eventual consistency vs. responsive user experience. 
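+
+These decisions compound in practice: because the repository layer is async, independent reads can be issued concurrently rather than serially. A minimal sketch, assuming the `repo_query` helper described above (the `WHERE` clauses are simplified for illustration):
+
+```python
+# Fetch a notebook's sources and notes concurrently; with async I/O the
+# two queries overlap instead of running back-to-back.
+import asyncio
+
+from open_notebook.database.repository import repo_query
+
+
+async def load_dashboard(notebook_id: str) -> dict:
+    sources, notes = await asyncio.gather(
+        repo_query("SELECT * FROM source WHERE notebook = $id", {"id": notebook_id}),
+        repo_query("SELECT * FROM note WHERE notebook = $id", {"id": notebook_id}),
+    )
+    return {"sources": sources, "notes": notes}
+```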
+ +--- + +## Important Quirks & Gotchas + +### API Startup + +- **Migrations run automatically** on every startup; check logs for errors +- **SurrealDB must be running** before starting API (connection test in lifespan) +- **Auth middleware is basic** (password-only); upgrade to OAuth/JWT for production + +### Database Operations + +- **Record IDs use SurrealDB syntax** (table:id format, e.g., "notebook:abc123") +- **ensure_record_id()** helper prevents malformed IDs +- **Soft deletes** via `archived` field (data not removed, just marked inactive) +- **Timestamps in ISO 8601 format** (created, updated fields) + +### LangGraph Workflows + +- **State persistence** via SqliteSaver in `/data/sqlite-db/` +- **No built-in timeout**; long workflows may block requests (use streaming for UX) +- **Model fallback** automatic if primary provider unavailable +- **Checkpoint IDs** must be unique per session (avoid collisions) + +### AI Provider Integration + +- **Esperanto library** handles all provider APIs (no direct API calls) +- **Per-request override** via RunnableConfig (temporary, not persistent) +- **Cost estimation** via token counting (not 100% accurate, use for guidance) +- **Fallback logic** tries cheaper models if primary fails + +### File Uploads + +- **Stored in `/data/uploads/`** directory (not database) +- **Unique filename generation** prevents overwrites (counter suffix) +- **Content-core library** extracts text from 50+ file types +- **Large files** may block API briefly (sync content extraction) + +--- + +## Performance Considerations + +### Optimization Strategies + +1. **Connection Pooling**: SurrealDB async driver with configurable pool size +2. **Query Caching**: TanStack Query on frontend (client-side caching) +3. **Embedding Reuse**: Vector search uses pre-computed embeddings +4. **Chunking**: Sources split into chunks for better search relevance +5. **Async Operations**: Non-blocking I/O for high concurrency +6. **Lazy Loading**: Frontend requests only needed data (pagination) + +### Bottlenecks + +1. **LLM Calls**: Latency depends on provider (typically 1-30 seconds) +2. **Embedding Generation**: Time proportional to content size and provider +3. **Vector Search**: Similarity computation over all embeddings +4. **Content Extraction**: Sync operation in source processing + +### Monitoring + +- **API Logs**: Check loguru output for errors and slow operations +- **Database Queries**: SurrealDB metrics available via admin UI +- **Token Usage**: Estimated via `estimate_tokens()` utility +- **Job Status**: Poll `/commands/{id}` for async operations + +--- + +## Extension Points + +### Adding a New Workflow + +1. Create `open_notebook/graphs/workflow_name.py` +2. Define StateDict and node functions +3. Build graph with `.add_node()` / `.add_edge()` +4. Create service in `api/workflow_service.py` +5. Register router in `api/main.py` +6. Add tests in `tests/test_workflow.py` + +### Adding a New Data Model + +1. Create model in `open_notebook/domain/model_name.py` +2. Inherit from BaseModel (domain object) +3. Implement `save()`, `get()`, `delete()` methods (CRUD) +4. Add repository functions if complex queries needed +5. Create database migration in `migrations/` +6. Add API routes and models in `api/` + +### Adding a New AI Provider + +1. Configure Esperanto for new provider (see .env.example) +2. ModelManager automatically detects via environment variables +3. Override via per-request config (no code changes needed) +4. 
Test fallback logic if provider unavailable
+
+---
+
+## Deployment Considerations
+
+### Development
+
+- All services on localhost (3000, 5055, 8000)
+- Auto-reload on file changes (Next.js, FastAPI)
+- Hot-reload database migrations
+- Open API docs at http://localhost:5055/docs
+
+### Production
+
+- **Frontend**: Deploy to Vercel, Netlify, or Docker
+- **API**: Docker container (see Dockerfile)
+- **Database**: SurrealDB container or managed service
+- **Environment**: Secure .env file with API keys
+- **SSL/TLS**: Reverse proxy (Nginx, Cloudflare)
+- **Rate Limiting**: Add at the proxy layer
+- **Auth**: Replace PasswordAuthMiddleware with OAuth/JWT
+- **Monitoring**: Log aggregation (CloudWatch, DataDog, etc)
+
+---
+
+## Summary
+
+Open Notebook's architecture provides a solid foundation for privacy-focused, AI-powered research. The separation of concerns (frontend/API/database), async-first design, and multi-provider flexibility enable rapid development and easy deployment. LangGraph workflows orchestrate complex AI tasks, while Esperanto abstracts provider details. The result is a scalable, maintainable system that puts users in control of their data and AI provider choice.
diff --git a/docs/7-DEVELOPMENT/code-standards.md b/docs/7-DEVELOPMENT/code-standards.md
new file mode 100644
index 00000000..a0108003
--- /dev/null
+++ b/docs/7-DEVELOPMENT/code-standards.md
@@ -0,0 +1,375 @@
+# Code Standards
+
+This document outlines coding standards and best practices for Open Notebook contributions. All code should follow these guidelines to ensure consistency, readability, and maintainability.
+
+## Python Standards
+
+### Code Formatting
+
+We follow **PEP 8** with some specific guidelines:
+
+- Use **Ruff** for linting and formatting
+- Maximum line length: **88 characters**
+- Use **double quotes** for strings
+- Use **trailing commas** in multi-line structures
+
+### Type Hints
+
+Always use type hints for function parameters and return values:
+
+```python
+from typing import List, Optional, Dict, Any
+from pydantic import BaseModel
+
+async def process_content(
+    content: str,
+    options: Optional[Dict[str, Any]] = None
+) -> ProcessedContent:
+    """Process content with optional configuration."""
+    # Implementation
+```
+
+### Async/Await Patterns
+
+Use async/await consistently throughout the codebase:
+
+```python
+# Good
+import aiohttp
+from typing import Any, Dict
+
+async def fetch_data(url: str) -> Dict[str, Any]:
+    async with aiohttp.ClientSession() as session:
+        async with session.get(url) as response:
+            return await response.json()
+
+# Bad - mixing sync and async
+def fetch_data(url: str) -> Dict[str, Any]:
+    loop = asyncio.get_event_loop()
+    return loop.run_until_complete(async_fetch(url))
+```
+
+### Error Handling
+
+Use structured error handling with custom exceptions:
+
+```python
+from open_notebook.exceptions import DatabaseOperationError, InvalidInputError
+
+async def create_notebook(name: str, description: str) -> Notebook:
+    """Create a new notebook with validation."""
+    if not name.strip():
+        raise InvalidInputError("Notebook name cannot be empty")
+
+    try:
+        notebook = Notebook(name=name, description=description)
+        await notebook.save()
+        return notebook
+    except Exception as e:
+        raise DatabaseOperationError(f"Failed to create notebook: {str(e)}")
+```
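+
+The examples above import `DatabaseOperationError` and `InvalidInputError` from `open_notebook.exceptions`. If you are adding a new error type, the module plausibly looks like the minimal sketch below; the project's real hierarchy and base-class name may differ, so check the module before extending it:
+
+```python
+# open_notebook/exceptions.py (illustrative shape only; the base
+# class name is an assumption, not the project's actual definition)
+class OpenNotebookError(Exception):
+    """Base class for application-level errors."""
+
+
+class InvalidInputError(OpenNotebookError):
+    """Raised when user-supplied data fails validation."""
+
+
+class DatabaseOperationError(OpenNotebookError):
+    """Raised when a database read or write fails."""
+```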
### Documentation (Google-style Docstrings)
+
+Use Google-style docstrings for all functions, classes, and modules:
+
+```python
+async def vector_search(
+    query: str,
+    limit: int = 10,
+    minimum_score: float = 0.2
+) -> List[SearchResult]:
+    """Perform vector search across embedded content.
+
+    Args:
+        query: Search query string
+        limit: Maximum number of results to return
+        minimum_score: Minimum similarity score for results
+
+    Returns:
+        List of search results sorted by relevance score
+
+    Raises:
+        InvalidInputError: If query is empty or limit is invalid
+        DatabaseOperationError: If search operation fails
+    """
+    # Implementation
+```
+
+#### Module Docstrings
+```python
+"""
+Notebook domain model and operations.
+
+This module contains the core Notebook class and related operations for
+managing research notebooks within the Open Notebook system.
+"""
+```
+
+#### Class Docstrings
+```python
+class Notebook(BaseModel):
+    """A research notebook containing sources, notes, and chat sessions.
+
+    Notebooks are the primary organizational unit in Open Notebook, allowing
+    users to group related research materials and maintain separate contexts
+    for different projects.
+
+    Attributes:
+        name: The notebook's display name
+        description: Optional description of the notebook's purpose
+        archived: Whether the notebook is archived (default: False)
+        created: Timestamp of creation
+        updated: Timestamp of last update
+    """
+```
+
+#### Function Docstrings
+```python
+async def create_notebook(
+    name: str,
+    description: str = "",
+    user_id: Optional[str] = None
+) -> Notebook:
+    """Create a new notebook with validation.
+
+    Args:
+        name: The notebook name (required, non-empty)
+        description: Optional notebook description
+        user_id: Optional user ID for multi-user deployments
+
+    Returns:
+        The created notebook instance
+
+    Raises:
+        InvalidInputError: If name is empty or invalid
+        DatabaseOperationError: If creation fails
+
+    Example:
+        ```python
+        notebook = await create_notebook(
+            name="AI Research",
+            description="Research on AI applications"
+        )
+        ```
+    """
+```
+
+## FastAPI Standards
+
+### Router Organization
+
+Organize endpoints by domain:
+
+```python
+# api/routers/notebooks.py
+from fastapi import APIRouter, HTTPException, Query
+from typing import List, Optional
+
+router = APIRouter()
+
+@router.get("/notebooks", response_model=List[NotebookResponse])
+async def get_notebooks(
+    archived: Optional[bool] = Query(None, description="Filter by archived status"),
+    order_by: str = Query("updated desc", description="Order by field and direction"),
+):
+    """Get all notebooks with optional filtering and ordering."""
+    # Implementation
+```
+
+### Request/Response Models
+
+Use Pydantic models for validation:
+
+```python
+from pydantic import BaseModel, Field
+from typing import Optional
+
+class NotebookCreate(BaseModel):
+    name: str = Field(..., description="Name of the notebook", min_length=1)
+    description: str = Field(default="", description="Description of the notebook")
+
+class NotebookResponse(BaseModel):
+    id: str
+    name: str
+    description: str
+    archived: bool
+    created: str
+    updated: str
+```
+
+### Error Handling
+
+Use consistent error responses, mapping domain exceptions to HTTP status codes inside the endpoint handler:
+
+```python
+from fastapi import HTTPException
+from loguru import logger
+
+@router.get("/notebooks/{notebook_id}")
+async def get_notebook(notebook_id: str):
+    try:
+        return await some_operation()
+    except InvalidInputError as e:
+        raise HTTPException(status_code=400, detail=str(e))
+    except DatabaseOperationError as e:
+        logger.error(f"Database error: {str(e)}")
+        raise HTTPException(status_code=500, detail="Internal server error")
+```
+
+### API Documentation
+
+Use FastAPI's automatic documentation features:
+
+```python
+@router.post(
+    "/notebooks",
+    response_model=NotebookResponse,
+    summary="Create a new notebook",
+    description="Create a new notebook with the specified name and description.",
+    responses={
+        201: {"description": "Notebook created successfully"},
+        400: {"description": "Invalid input data"},
+        500: {"description": "Internal server error"}
+    }
+)
+async def create_notebook(notebook: NotebookCreate):
+    """Create a new notebook."""
+    # Implementation
+```
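+
+Endpoints defined this way are straightforward to test with FastAPI's `TestClient`. A hedged sketch, assuming the app object lives in `api.main` and that auth middleware is disabled or stubbed in the test environment:
+
+```python
+# tests/test_notebooks.py (illustrative)
+from fastapi.testclient import TestClient
+
+from api.main import app
+
+client = TestClient(app)
+
+
+def test_create_notebook_rejects_empty_name():
+    # min_length=1 on NotebookCreate.name makes FastAPI return a 422
+    # validation error before the handler runs.
+    response = client.post("/notebooks", json={"name": "", "description": "x"})
+    assert response.status_code == 422
+```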
## Database Standards
+
+### SurrealDB Patterns
+
+Use the repository pattern consistently:
+
+```python
+from typing import Any, Dict, List
+
+from open_notebook.database.repository import repo_create, repo_query, repo_update
+
+# Create records
+async def create_notebook(data: Dict[str, Any]) -> Dict[str, Any]:
+    """Create a new notebook record."""
+    return await repo_create("notebook", data)
+
+# Query with parameters
+async def find_notebooks_by_user(user_id: str) -> List[Dict[str, Any]]:
+    """Find notebooks for a specific user."""
+    return await repo_query(
+        "SELECT * FROM notebook WHERE user_id = $user_id",
+        {"user_id": user_id}
+    )
+
+# Update records
+async def update_notebook(notebook_id: str, data: Dict[str, Any]) -> Dict[str, Any]:
+    """Update a notebook record."""
+    return await repo_update("notebook", notebook_id, data)
+```
+
+### Schema Management
+
+Use migrations for schema changes:
+
+```surrealql
+-- migrations/8.surrealql
+DEFINE TABLE IF NOT EXISTS new_feature SCHEMAFULL;
+DEFINE FIELD IF NOT EXISTS name ON TABLE new_feature TYPE string;
+DEFINE FIELD IF NOT EXISTS description ON TABLE new_feature TYPE option<string>;
+DEFINE FIELD IF NOT EXISTS created ON TABLE new_feature TYPE datetime DEFAULT time::now();
+DEFINE FIELD IF NOT EXISTS updated ON TABLE new_feature TYPE datetime DEFAULT time::now();
+```
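+
+Once the migration has run, access the new table through the same repository helpers rather than raw driver calls. A minimal sketch for the hypothetical `new_feature` table above; the function names are illustrative:
+
+```python
+from typing import Any, Dict, List, Optional
+
+from open_notebook.database.repository import repo_create, repo_query
+
+
+async def create_feature(name: str, description: Optional[str] = None) -> Dict[str, Any]:
+    """Insert a new_feature record; created/updated default in the schema."""
+    return await repo_create("new_feature", {"name": name, "description": description})
+
+
+async def list_features() -> List[Dict[str, Any]]:
+    """Return all new_feature records, newest first."""
+    return await repo_query("SELECT * FROM new_feature ORDER BY created DESC", {})
+```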
## TypeScript Standards
+
+### Basic Guidelines
+
+Follow TypeScript best practices:
+
+- Enable strict mode in `tsconfig.json`
+- Use proper type annotations for all variables and functions
+- Avoid the `any` type unless absolutely necessary
+- Use `interface` for object shapes, `type` for unions and other advanced types
+
+### Component Structure
+
+- Use functional components with hooks
+- Keep components focused and single-responsibility
+- Extract reusable logic into custom hooks
+- Use proper TypeScript types for props
+
+### Error Handling
+
+- Handle errors explicitly
+- Provide meaningful error messages
+- Log errors appropriately
+- Don't suppress errors silently
+
+## Code Quality Tools
+
+We use these tools to maintain code quality:
+
+- **Ruff**: Linting and code formatting
+  - Run with: `uv run ruff check . --fix`
+  - Format with: `uv run ruff format .`
+
+- **MyPy**: Static type checking
+  - Run with: `uv run python -m mypy .`
+
+- **Pytest**: Testing framework
+  - Run with: `uv run pytest`
+
+## Common Patterns
+
+### Async Database Operations
+
+```python
+async def get_notebook_with_sources(notebook_id: str) -> Notebook:
+    """Retrieve notebook with all related sources."""
+    notebook_data = await repo_query(
+        "SELECT * FROM notebook WHERE id = $id",
+        {"id": notebook_id}
+    )
+    if not notebook_data:
+        raise InvalidInputError(f"Notebook {notebook_id} not found")
+
+    sources_data = await repo_query(
+        "SELECT * FROM source WHERE notebook_id = $notebook_id",
+        {"notebook_id": notebook_id}
+    )
+
+    return Notebook(
+        **notebook_data[0],
+        sources=[Source(**s) for s in sources_data]
+    )
+```
+
+### Model Validation
+
+Use Pydantic v2 validators (`field_validator` replaces the deprecated v1 `validator`):
+
+```python
+from pydantic import BaseModel, field_validator
+
+class NotebookInput(BaseModel):
+    name: str
+    description: str = ""
+
+    @field_validator('name')
+    @classmethod
+    def name_not_empty(cls, v: str) -> str:
+        if not v.strip():
+            raise ValueError('Name cannot be empty')
+        return v.strip()
+```
+
+## Code Review Checklist
+
+Before submitting code for review, ensure:
+
+- [ ] Code follows PEP 8 / TypeScript best practices
+- [ ] Type hints are present for all functions
+- [ ] Docstrings are complete and accurate
+- [ ] Error handling is appropriate
+- [ ] Tests are included and passing
+- [ ] No debug code (console.logs, print statements) left behind
+- [ ] Commit messages are clear and follow conventions
+- [ ] Documentation is updated if needed
+
+---
+
+**See also:**
+- [Testing Guide](testing.md) - How to write tests
+- [Contributing Guide](contributing.md) - Overall contribution workflow
diff --git a/docs/7-DEVELOPMENT/contributing.md b/docs/7-DEVELOPMENT/contributing.md
new file mode 100644
index 00000000..d2d8968a
--- /dev/null
+++ b/docs/7-DEVELOPMENT/contributing.md
@@ -0,0 +1,201 @@
+# Contributing to Open Notebook
+
+Thank you for your interest in contributing to Open Notebook! We welcome contributions from developers of all skill levels. This guide will help you understand our contribution workflow and what makes a good contribution.
+
+## 🚨 Issue-First Workflow
+
+**To maintain project coherence and avoid wasted effort, please follow this process:**
+
+1. **Create an issue first** - Before writing any code, create an issue describing the bug or feature
+2. **Propose your solution** - Explain how you plan to implement the fix or feature
+3. **Wait for assignment** - A maintainer will review and assign the issue to you if approved
+4. **Only then start coding** - This ensures your work aligns with the project's vision and architecture
+
+**Why this process?**
+- Prevents duplicate work
+- Ensures solutions align with our architecture and design principles
+- Saves your time by getting feedback before coding
+- Helps maintainers manage the project direction
+
+> ⚠️ **Pull requests without an assigned issue may be closed**, even if the code is good. We want to respect your time by making sure work is aligned before it starts.
+
+## Code of Conduct
+
+By participating in this project, you are expected to uphold our Code of Conduct. Be respectful, constructive, and collaborative.
+
+## How Can I Contribute?
+
+### Reporting Bugs
+
+1. **Search existing issues** - Check if the bug was already reported
+2. **Create a bug report** - Use the [Bug Report template](https://github.com/lfnovo/open-notebook/issues/new?template=bug_report.yml)
+3.
**Provide details** - Include: + - Steps to reproduce + - Expected vs actual behavior + - Logs, screenshots, or error messages + - Your environment (OS, Docker version, Open Notebook version) +4. **Indicate if you want to fix it** - Check the "I would like to work on this" box if you're interested + +### Suggesting Features + +1. **Search existing issues** - Check if the feature was already suggested +2. **Create a feature request** - Use the [Feature Request template](https://github.com/lfnovo/open-notebook/issues/new?template=feature_request.yml) +3. **Explain the value** - Describe why this feature would be helpful +4. **Propose implementation** - If you have ideas on how to implement it, share them +5. **Indicate if you want to build it** - Check the "I would like to work on this" box if you're interested + +### Contributing Code (Pull Requests) + +**IMPORTANT: Follow the issue-first workflow above before starting any PR** + +Once your issue is assigned: + +1. **Fork the repo** and create your branch from `main` +2. **Understand our vision and principles** - Read [design-principles.md](design-principles.md) to understand what guides our decisions +3. **Follow our architecture** - Refer to the architecture documentation to understand project structure +4. **Write quality code** - Follow the standards outlined in [code-standards.md](code-standards.md) +5. **Test your changes** - See [testing.md](testing.md) for test guidelines +6. **Update documentation** - If you changed functionality, update the relevant docs +7. **Create your PR**: + - Reference the issue number (e.g., "Fixes #123") + - Describe what changed and why + - Include screenshots for UI changes + - Keep PRs focused - one issue per PR + +### What Makes a Good Contribution? + +✅ **We love PRs that:** +- Solve a real problem described in an issue +- Follow our architecture and coding standards +- Include tests and documentation +- Are well-scoped (focused on one thing) +- Have clear commit messages + +❌ **We may close PRs that:** +- Don't have an associated approved issue +- Introduce breaking changes without discussion +- Conflict with our architectural vision +- Lack tests or documentation +- Try to solve multiple unrelated problems + +## Git Commit Messages + +- Use the present tense ("Add feature" not "Added feature") +- Use the imperative mood ("Move cursor to..." not "Moves cursor to...") +- Limit the first line to 72 characters or less +- Reference issues and pull requests liberally after the first line + +## Development Workflow + +### Branch Strategy + +We use a **feature branch workflow**: + +1. **Main Branch**: `main` - production-ready code +2. **Feature Branches**: `feature/description` - new features +3. **Bug Fixes**: `fix/description` - bug fixes +4. **Documentation**: `docs/description` - documentation updates + +### Making Changes + +1. **Create a feature branch**: +```bash +git checkout -b feature/amazing-new-feature +``` + +2. **Make your changes** following our coding standards + +3. **Test your changes**: +```bash +# Run tests +uv run pytest + +# Run linting +uv run ruff check . + +# Run formatting +uv run ruff format . +``` + +4. **Commit your changes**: +```bash +git add . +git commit -m "feat: add amazing new feature" +``` + +5. 
**Push and create PR**: +```bash +git push origin feature/amazing-new-feature +# Then create a Pull Request on GitHub +``` + +### Keeping Your Fork Updated + +```bash +# Fetch upstream changes +git fetch upstream + +# Switch to main and merge +git checkout main +git merge upstream/main + +# Push to your fork +git push origin main +``` + +## Pull Request Process + +When you create a pull request: + +1. **Link your issue** - Reference the issue number in PR description +2. **Describe your changes** - Explain what changed and why +3. **Provide test evidence** - Screenshots, test results, or logs +4. **Check PR template** - Ensure you've completed all required sections +5. **Wait for review** - A maintainer will review your PR within a week + +### PR Review Expectations + +- Code review feedback is about the code, not the person +- Be open to suggestions and alternative approaches +- Address review comments with clarity and respect +- Ask questions if feedback is unclear + +## Current Priority Areas + +We're actively looking for contributions in these areas: + +1. **Frontend Enhancement** - Help improve the Next.js/React UI with real-time updates and better UX +2. **Testing** - Expand test coverage across all components +3. **Performance** - Async processing improvements and caching +4. **Documentation** - API examples and user guides +5. **Integrations** - New content sources and AI providers + +## Getting Help + +### Community Support + +- **Discord**: [Join our Discord server](https://discord.gg/37XJPXfz2w) for real-time help +- **GitHub Discussions**: For longer-form questions and ideas +- **GitHub Issues**: For bug reports and feature requests + +### Documentation References + +- [Design Principles](design-principles.md) - Understanding our project vision +- [Code Standards](code-standards.md) - Coding guidelines by language +- [Testing Guide](testing.md) - How to write tests +- [Development Setup](development-setup.md) - Getting started locally + +## Recognition + +We recognize contributions through: + +- **GitHub credits** on releases +- **Community recognition** in Discord +- **Contribution statistics** in project analytics +- **Maintainer consideration** for active contributors + +--- + +Thank you for contributing to Open Notebook! Your contributions help make research more accessible and private for everyone. + +For questions about this guide or contributing in general, please reach out on [Discord](https://discord.gg/37XJPXfz2w) or open a GitHub Discussion. diff --git a/DESIGN_PRINCIPLES.md b/docs/7-DEVELOPMENT/design-principles.md similarity index 100% rename from DESIGN_PRINCIPLES.md rename to docs/7-DEVELOPMENT/design-principles.md diff --git a/docs/7-DEVELOPMENT/development-setup.md b/docs/7-DEVELOPMENT/development-setup.md new file mode 100644 index 00000000..377df327 --- /dev/null +++ b/docs/7-DEVELOPMENT/development-setup.md @@ -0,0 +1,409 @@ +# Local Development Setup + +This guide walks you through setting up Open Notebook for local development. Follow these steps to get the full stack running on your machine. 
+
+## Prerequisites
+
+Before you start, ensure you have the following installed:
+
+- **Python 3.11+** - Check with: `python --version`
+- **uv** (recommended) or **pip** - Install from: https://github.com/astral-sh/uv
+- **SurrealDB** - Via Docker or binary (see below)
+- **Docker** (optional) - For containerized database
+- **Node.js 18+** (optional) - For frontend development
+- **Git** - For version control
+
+## Step 1: Clone and Initial Setup
+
+```bash
+# Clone your fork (replace <your-username>; clone the main repository
+# directly if you only need a read-only copy)
+git clone https://github.com/<your-username>/open-notebook.git
+cd open-notebook
+
+# Add the upstream remote to keep your fork updated
+git remote add upstream https://github.com/lfnovo/open-notebook.git
+```
+
+## Step 2: Install Python Dependencies
+
+```bash
+# Using uv (recommended)
+uv sync
+
+# Or using pip
+pip install -e .
+```
+
+## Step 3: Environment Variables
+
+Create a `.env` file in the project root with your configuration:
+
+```bash
+# Copy from example
+cp .env.example .env
+```
+
+Edit `.env` with your settings:
+
+```bash
+# Database
+SURREAL_URL=ws://localhost:8000/rpc
+SURREAL_USER=root
+SURREAL_PASSWORD=password
+SURREAL_NAMESPACE=open_notebook
+SURREAL_DATABASE=development
+
+# AI Providers (add your API keys)
+OPENAI_API_KEY=sk-...
+ANTHROPIC_API_KEY=sk-ant-...
+GOOGLE_API_KEY=AI...
+GROQ_API_KEY=gsk-...
+
+# Application
+OPEN_NOTEBOOK_PASSWORD=  # Optional password protection
+DEBUG=true
+LOG_LEVEL=DEBUG
+```
+
+### AI Provider Keys
+
+You'll need at least one AI provider. Popular options:
+
+- **OpenAI** - https://platform.openai.com/api-keys
+- **Anthropic (Claude)** - https://console.anthropic.com/
+- **Google** - https://ai.google.dev/
+- **Groq** - https://console.groq.com/
+
+For local development, you can also use:
+- **Ollama** - Run locally without API keys (see "Local Ollama" below)
+
+## Step 4: Start SurrealDB
+
+### Option A: Using Docker (Recommended)
+
+```bash
+# Start SurrealDB in memory
+docker run -d --name surrealdb -p 8000:8000 \
+  surrealdb/surrealdb:v2 start \
+  --user root --pass password \
+  --bind 0.0.0.0:8000 memory
+
+# Or with persistent storage
+docker run -d --name surrealdb -p 8000:8000 \
+  -v surrealdb_data:/data \
+  surrealdb/surrealdb:v2 start \
+  --user root --pass password \
+  --bind 0.0.0.0:8000 file:/data/surreal.db
+```
+
+### Option B: Using Make
+
+```bash
+make database
+```
+
+### Option C: Using Docker Compose
+
+```bash
+docker compose up -d surrealdb
+```
+
+### Verify SurrealDB is Running
+
+```bash
+# Should show server information
+curl http://localhost:8000/
+```
+
+## Step 5: Run Database Migrations
+
+Database migrations run automatically when you start the API. The first startup will apply any pending migrations.
+
+To verify migrations manually:
+
+```bash
+# API will run migrations on startup
+uv run python -m api.main
+```
+
+Check the logs - you should see messages like:
+```
+Running migration 001_initial_schema
+Running migration 002_add_vectors
+...
+Migrations completed successfully +``` + +## Step 6: Start the API Server + +In a new terminal window: + +```bash +# Terminal 2: Start API (port 5055) +uv run --env-file .env uvicorn api.main:app --host 0.0.0.0 --port 5055 + +# Or using the shortcut +make api +``` + +You should see: +``` +INFO: Application startup complete +INFO: Uvicorn running on http://0.0.0.0:5055 +``` + +### Verify API is Running + +```bash +# Check health endpoint +curl http://localhost:5055/health + +# View API documentation +open http://localhost:5055/docs +``` + +## Step 7: Start the Frontend (Optional) + +If you want to work on the frontend, start Next.js in another terminal: + +```bash +# Terminal 3: Start Next.js frontend (port 3000) +cd frontend +npm install # First time only +npm run dev +``` + +You should see: +``` +> next dev + ▲ Next.js 15.x + - Local: http://localhost:3000 +``` + +### Access the Frontend + +Open your browser to: http://localhost:3000 + +## Verification Checklist + +After setup, verify everything is working: + +- [ ] **SurrealDB**: `curl http://localhost:8000/` returns content +- [ ] **API**: `curl http://localhost:5055/health` returns `{"status": "ok"}` +- [ ] **API Docs**: `open http://localhost:5055/docs` works +- [ ] **Database**: API logs show migrations completing +- [ ] **Frontend** (optional): `http://localhost:3000` loads + +## Starting Services Together + +### Quick Start All Services + +```bash +make start-all +``` + +This starts SurrealDB, API, and frontend in one command. + +### Individual Terminals (Recommended for Development) + +**Terminal 1 - Database:** +```bash +make database +``` + +**Terminal 2 - API:** +```bash +make api +``` + +**Terminal 3 - Frontend:** +```bash +cd frontend && npm run dev +``` + +## Development Tools Setup + +### Pre-commit Hooks (Optional but Recommended) + +Install git hooks to automatically check code quality: + +```bash +uv run pre-commit install +``` + +Now your commits will be checked before they're made. + +### Code Quality Commands + +```bash +# Lint Python code (auto-fix) +make ruff +# or: ruff check . --fix + +# Type check Python code +make lint +# or: uv run python -m mypy . + +# Run tests +uv run pytest + +# Run tests with coverage +uv run pytest --cov=open_notebook +``` + +## Common Development Tasks + +### Running Tests + +```bash +# Run all tests +uv run pytest + +# Run specific test file +uv run pytest tests/test_notebooks.py + +# Run with coverage report +uv run pytest --cov=open_notebook --cov-report=html +``` + +### Creating a Feature Branch + +```bash +# Create and switch to new branch +git checkout -b feature/my-feature + +# Make changes, then commit +git add . +git commit -m "feat: add my feature" + +# Push to your fork +git push origin feature/my-feature +``` + +### Updating from Upstream + +```bash +# Fetch latest changes +git fetch upstream + +# Rebase your branch +git rebase upstream/main + +# Push updated branch +git push origin feature/my-feature -f +``` + +## Troubleshooting + +### "Connection refused" on SurrealDB + +**Problem**: API can't connect to SurrealDB + +**Solutions**: +1. Check if SurrealDB is running: `docker ps | grep surrealdb` +2. Verify URL in `.env`: Should be `ws://localhost:8000/rpc` +3. Restart SurrealDB: `docker stop surrealdb && docker rm surrealdb` +4. 
Then restart with: `docker run -d --name surrealdb -p 8000:8000 surrealdb/surrealdb:v2 start --user root --pass password --bind 0.0.0.0:8000 memory`
+
+### "Address already in use"
+
+**Problem**: Port 5055 or 3000 is already in use
+
+**Solutions**:
+```bash
+# Find the process using the port
+lsof -i :5055  # Check port 5055
+
+# Kill the process (macOS/Linux)
+kill -9 <PID>
+
+# Or use a different port
+uvicorn api.main:app --port 5056
+```
+
+### Module not found errors
+
+**Problem**: Import errors when running the API
+
+**Solutions**:
+```bash
+# Reinstall dependencies
+uv sync
+
+# Or with pip
+pip install -e .
+```
+
+### Database migration failures
+
+**Problem**: API fails to start with migration errors
+
+**Solutions**:
+1. Check SurrealDB is running: `curl http://localhost:8000/`
+2. Check credentials in `.env` match your SurrealDB setup
+3. Check logs for the specific migration error: `make api 2>&1 | grep -i migration`
+4. Verify the database exists: Check the SurrealDB console at http://localhost:8000/
+
+### Migrations not applying
+
+**Problem**: Database schema seems outdated
+
+**Solutions**:
+1. Restart the API - migrations run on startup: `make api`
+2. Check the logs show "Migrations completed successfully"
+3. Verify the `/migrations/` folder exists and has files
+4. Check SurrealDB is writable and not in read-only mode
+
+## Optional: Local Ollama Setup
+
+For testing with local AI models:
+
+```bash
+# Install Ollama from https://ollama.ai
+
+# Pull a model (e.g., Mistral 7B)
+ollama pull mistral
+
+# Add to .env
+OLLAMA_BASE_URL=http://localhost:11434
+```
+
+Then in your code, you can use Ollama through the Esperanto library.
+
+## Optional: Docker Development Environment
+
+Run the entire stack in Docker:
+
+```bash
+# Start all services
+docker compose --profile multi up
+
+# Logs
+docker compose logs -f
+
+# Stop services
+docker compose down
+```
+
+## Next Steps
+
+After setup is complete:
+
+1. **Read the Contributing Guide** - [contributing.md](contributing.md)
+2. **Explore the Architecture** - Check the documentation
+3. **Find an Issue** - Look for "good first issue" on GitHub
+4. **Set Up Pre-commit** - Install git hooks for code quality
+5. **Join Discord** - https://discord.gg/37XJPXfz2w
+
+## Getting Help
+
+If you get stuck:
+
+- **Discord**: [Join our server](https://discord.gg/37XJPXfz2w) for real-time help
+- **GitHub Issues**: Check existing issues for similar problems
+- **GitHub Discussions**: Ask questions in discussions
+- **Documentation**: See [code-standards.md](code-standards.md) and [testing.md](testing.md)
+
+---
+
+**Ready to contribute?** Go to [contributing.md](contributing.md) for the contribution workflow.
diff --git a/docs/7-DEVELOPMENT/index.md b/docs/7-DEVELOPMENT/index.md
new file mode 100644
index 00000000..81826bc8
--- /dev/null
+++ b/docs/7-DEVELOPMENT/index.md
@@ -0,0 +1,96 @@
+# Development
+
+Welcome to the Open Notebook development documentation! Whether you're contributing code, understanding our architecture, or maintaining the project, you'll find guidance here.
+ +## 🎯 Pick Your Path + +### 👨‍💻 I Want to Contribute Code + +Start with **[Contributing Guide](contributing.md)** for the workflow, then check: +- **[Quick Start](quick-start.md)** - Clone, install, verify in 5 minutes +- **[Development Setup](development-setup.md)** - Complete local environment guide +- **[Code Standards](code-standards.md)** - How to write code that fits our style +- **[Testing](testing.md)** - How to write and run tests + +**First time?** Check out our [Contributing Guide](contributing.md) for the issue-first workflow. + +--- + +### 🏗️ I Want to Understand the Architecture + +**[Architecture Overview](architecture.md)** covers: +- 3-tier system design +- Tech stack and rationale +- Key components and workflows +- Design patterns we use + +For deeper dives, check `/open_notebook/` CLAUDE.md for component-specific guidance. + +--- + +### 👨‍🔧 I'm a Maintainer + +**[Maintainer Guide](maintainer-guide.md)** covers: +- Issue triage and management +- Pull request review process +- Communication templates +- Best practices + +--- + +## 📚 Quick Links + +| Document | For | Purpose | +|---|---|---| +| [Quick Start](quick-start.md) | New developers | Clone, install, and verify setup (5 min) | +| [Development Setup](development-setup.md) | Local development | Complete environment setup guide | +| [Contributing](contributing.md) | Code contributors | Workflow: issue → code → PR | +| [Code Standards](code-standards.md) | Writing code | Style guides for Python, FastAPI, DB | +| [Testing](testing.md) | Testing code | How to write and run tests | +| [Architecture](architecture.md) | Understanding system | System design, tech stack, workflows | +| [Design Principles](design-principles.md) | All developers | What guides our decisions | +| [API Reference](api-reference.md) | Building integrations | Complete REST API documentation | +| [Maintainer Guide](maintainer-guide.md) | Maintainers | Managing issues, PRs, releases | + +--- + +## 🚀 Current Development Priorities + +We're actively looking for help with: + +1. **Frontend Enhancement** - Improve Next.js/React UI with real-time updates +2. **Performance** - Async processing and caching optimizations +3. **Testing** - Expand test coverage across components +4. **Documentation** - API examples and developer guides +5. **Integrations** - New content sources and AI providers + +See GitHub Issues labeled `good first issue` or `help wanted`. + +--- + +## 💬 Getting Help + +- **Discord**: [Join our server](https://discord.gg/37XJPXfz2w) for real-time discussions +- **GitHub Discussions**: For architecture questions +- **GitHub Issues**: For bugs and features + +Don't be shy! We're here to help new contributors succeed. + +--- + +## 📖 Additional Resources + +### External Documentation +- [FastAPI Docs](https://fastapi.tiangolo.com/) +- [SurrealDB Docs](https://surrealdb.com/docs) +- [LangChain Docs](https://python.langchain.com/) +- [Next.js Docs](https://nextjs.org/docs) + +### Our Libraries +- [Esperanto](https://github.com/lfnovo/esperanto) - Multi-provider AI abstraction +- [Content Core](https://github.com/lfnovo/content-core) - Content processing +- [Podcast Creator](https://github.com/lfnovo/podcast-creator) - Podcast generation + +--- + +Ready to get started? Head over to **[Quick Start](quick-start.md)**! 
🎉 diff --git a/docs/7-DEVELOPMENT/maintainer-guide.md b/docs/7-DEVELOPMENT/maintainer-guide.md new file mode 100644 index 00000000..6fd94fed --- /dev/null +++ b/docs/7-DEVELOPMENT/maintainer-guide.md @@ -0,0 +1,408 @@ +# Maintainer Guide + +This guide is for project maintainers to help manage contributions effectively while maintaining project quality and vision. + +## Table of Contents + +- [Issue Management](#issue-management) +- [Pull Request Review](#pull-request-review) +- [Common Scenarios](#common-scenarios) +- [Communication Templates](#communication-templates) + +## Issue Management + +### When a New Issue is Created + +**1. Initial Triage** (within 24-48 hours) + +- Add appropriate labels: + - `bug`, `enhancement`, `documentation`, etc. + - `good first issue` for beginner-friendly tasks + - `needs-triage` until reviewed + - `help wanted` if you'd welcome community contributions + +- Quick assessment: + - Is it clear and well-described? + - Is it aligned with project vision? (See [design-principles.md](design-principles.md)) + - Does it duplicate an existing issue? + +**2. Initial Response** + +```markdown +Thanks for opening this issue! We'll review it and get back to you soon. + +[If it's a bug] In the meantime, have you checked our troubleshooting guide? + +[If it's a feature] You might find our [design principles](design-principles.md) helpful for understanding what we're building toward. +``` + +**3. Decision Making** + +Ask yourself: +- Does this align with our [design principles](design-principles.md)? +- Is this something we want in the core project, or better as a plugin/extension? +- Do we have the capacity to support this feature long-term? +- Will this benefit most users, or just a specific use case? + +**4. Issue Assignment** + +If the contributor checked "I am a developer and would like to work on this": + +**For Accepted Issues:** +```markdown +Great idea! This aligns well with our goals, particularly [specific design principle]. + +I see you'd like to work on this. Before you start: + +1. Please share your proposed approach/solution +2. Review our [Contributing Guide](contributing.md) and [Design Principles](design-principles.md) +3. Once we agree on the approach, I'll assign this to you + +Looking forward to your thoughts! +``` + +**For Issues Needing Clarification:** +```markdown +Thanks for offering to work on this! Before we proceed, we need to clarify a few things: + +1. [Question 1] +2. [Question 2] + +Once we have these details, we can discuss the best approach. +``` + +**For Issues Not Aligned with Vision:** +```markdown +Thank you for the suggestion and for offering to work on this! + +After reviewing against our [design principles](design-principles.md), we've decided not to pursue this in the core project because [specific reason]. + +However, you might be able to achieve this through [alternative approach, if applicable]. + +We appreciate your interest in contributing! Feel free to check out our [open issues](link) for other ways to contribute. 
+``` + +### Labels to Use + +**Priority:** +- `priority: critical` - Security issues, data loss bugs +- `priority: high` - Major functionality broken +- `priority: medium` - Annoying bugs, useful features +- `priority: low` - Nice to have, edge cases + +**Status:** +- `needs-triage` - Not yet reviewed by maintainer +- `needs-info` - Waiting for more information from reporter +- `needs-discussion` - Requires community/team discussion +- `ready` - Approved and ready to be worked on +- `in-progress` - Someone is actively working on this +- `blocked` - Cannot proceed due to external dependency + +**Type:** +- `bug` - Something is broken +- `enhancement` - New feature or improvement +- `documentation` - Documentation improvements +- `question` - General questions +- `refactor` - Code cleanup/restructuring + +**Difficulty:** +- `good first issue` - Good for newcomers +- `help wanted` - Community contributions welcome +- `advanced` - Requires deep codebase knowledge + +## Pull Request Review + +### Initial PR Review Checklist + +**Before diving into code:** + +- [ ] Is there an associated approved issue? +- [ ] Does the PR reference the issue number? +- [ ] Is the PR description clear about what changed and why? +- [ ] Did the contributor check the relevant boxes in the PR template? +- [ ] Are there tests? Screenshots (for UI changes)? + +**Red Flags** (may require closing PR): +- No associated issue +- Issue was not assigned to contributor +- PR tries to solve multiple unrelated problems +- Breaking changes without discussion +- Conflicts with project vision + +### Code Review Process + +**1. High-Level Review** + +- Does the approach align with our architecture? +- Is the solution appropriately scoped? +- Are there simpler alternatives? +- Does it follow our design principles? + +**2. Code Quality Review** + +Python: +- [ ] Follows PEP 8 +- [ ] Has type hints +- [ ] Has docstrings +- [ ] Proper error handling +- [ ] No security vulnerabilities + +TypeScript/Frontend: +- [ ] Follows TypeScript best practices +- [ ] Proper component structure +- [ ] No console.logs left in production code +- [ ] Accessible UI components + +**3. Testing Review** + +- [ ] Has appropriate test coverage +- [ ] Tests are meaningful (not just for coverage percentage) +- [ ] Tests pass locally and in CI +- [ ] Edge cases are tested + +**4. Documentation Review** + +- [ ] Code is well-commented +- [ ] Complex logic is explained +- [ ] User-facing documentation updated (if applicable) +- [ ] API documentation updated (if API changed) +- [ ] Migration guide provided (if breaking change) + +### Providing Feedback + +**Positive Feedback** (important!): +```markdown +Thanks for this PR! I really like [specific thing they did well]. + +[Feedback on what needs to change] +``` + +**Requesting Changes:** +```markdown +This is a great start! A few things to address: + +1. **[High-level concern]**: [Explanation and suggested approach] +2. **[Code quality issue]**: [Specific example and fix] +3. **[Testing gap]**: [What scenarios need coverage] + +Let me know if you have questions about any of this! +``` + +**Suggesting Alternative Approach:** +```markdown +I appreciate the effort you put into this! However, I'm concerned about [specific issue]. + +Have you considered [alternative approach]? It might be better because [reasons]. + +What do you think? 
+``` + +## Common Scenarios + +### Scenario 1: Good Code, Wrong Approach + +**Situation**: Contributor wrote quality code, but solved the problem in a way that doesn't fit our architecture. + +**Response:** +```markdown +Thank you for this PR! The code quality is great, and I can see you put thought into this. + +However, I'm concerned that this approach [specific architectural concern]. In our architecture, we [explain the pattern we follow]. + +Would you be open to refactoring this to [suggested approach]? I'm happy to provide guidance on the specifics. + +Alternatively, if you don't have time for a refactor, I can take over and finish this up (with credit to you, of course). + +Let me know what you prefer! +``` + +### Scenario 2: PR Without Assigned Issue + +**Situation**: Contributor submitted PR without going through issue approval process. + +**Response:** +```markdown +Thanks for the PR! I appreciate you taking the time to contribute. + +However, to maintain project coherence, we require all PRs to be linked to an approved issue that was assigned to the contributor. This is explained in our [Contributing Guide](contributing.md). + +This helps us: +- Ensure work aligns with project vision +- Prevent duplicate efforts +- Discuss approach before implementation + +Could you please: +1. Create an issue describing this change +2. Wait for it to be reviewed and assigned to you +3. We can then reopen this PR or you can create a new one + +Sorry for the inconvenience - this process helps us manage the project effectively. +``` + +### Scenario 3: Feature Request Not Aligned with Vision + +**Situation**: Well-intentioned feature that doesn't fit project goals. + +**Response:** +```markdown +Thank you for this suggestion! I can see how this would be useful for [specific use case]. + +After reviewing against our [design principles](design-principles.md), we've decided not to include this in the core project because [specific reason - e.g., "it conflicts with our 'Simplicity Over Features' principle" or "it would require dependencies that conflict with our privacy-first approach"]. + +Some alternatives: +- [If applicable] This could be built as a plugin/extension +- [If applicable] This functionality might be achievable through [existing feature] +- [If applicable] You might be interested in [other tool] which is designed for this use case + +We appreciate your contribution and hope you understand. Feel free to check our roadmap or open issues for other ways to contribute! +``` + +### Scenario 4: Contributor Ghosts After Feedback + +**Situation**: You requested changes, but contributor hasn't responded in 2+ weeks. + +**After 2 weeks:** +```markdown +Hey there! Just checking in on this PR. Do you have time to address the feedback, or would you like someone else to take over? + +No pressure either way - just want to make sure this doesn't fall through the cracks. +``` + +**After 1 month with no response:** +```markdown +Thanks again for starting this work! Since we haven't heard back, I'm going to close this PR for now. + +If you want to pick this up again in the future, feel free to reopen it or create a new PR. Alternatively, I'll mark the issue as available for someone else to work on. + +We appreciate your contribution! +``` + +Then: +- Close the PR +- Unassign the issue +- Add `help wanted` label to the issue + +### Scenario 5: Breaking Changes Without Discussion + +**Situation**: PR introduces breaking changes that weren't discussed. + +**Response:** +```markdown +Thanks for this PR! 
However, I notice this introduces breaking changes that weren't discussed in the original issue. + +Breaking changes require: +1. Prior discussion and approval +2. Migration guide for users +3. Deprecation period (when possible) +4. Clear documentation of the change + +Could we discuss the breaking changes first? Specifically: +- [What breaks and why] +- [Who will be affected] +- [Migration path] + +We may need to adjust the approach to minimize impact on existing users. +``` + +## Communication Templates + +### Closing a PR (Misaligned with Vision) + +```markdown +Thank you for taking the time to contribute! We really appreciate it. + +After careful review, we've decided not to merge this PR because [specific reason related to design principles]. + +This isn't a reflection on your code quality - it's about maintaining focus on our core goals as outlined in [design-principles.md](design-principles.md). + +We'd love to have you contribute in other ways! Check out: +- Good first issues +- Help wanted issues +- Our roadmap + +Thanks again for your interest in Open Notebook! +``` + +### Closing a Stale Issue + +```markdown +We're closing this issue due to inactivity. If this is still relevant, feel free to reopen it with updated information. + +Thanks! +``` + +### Asking for More Information + +```markdown +Thanks for reporting this! To help us investigate, could you provide: + +1. [Specific information needed] +2. [Logs, screenshots, etc.] +3. [Steps to reproduce] + +This will help us understand the issue better and find a solution. +``` + +### Thanking a Contributor + +```markdown +Merged! + +Thank you so much for this contribution, @username! [Specific thing they did well]. + +This will be included in the next release. +``` + +## Best Practices + +### Be Kind and Respectful + +- Thank contributors for their time and effort +- Assume good intentions +- Be patient with newcomers +- Explain *why*, not just *what* + +### Be Clear and Direct + +- Don't leave ambiguity about next steps +- Be specific about what needs to change +- Explain architectural decisions +- Set clear expectations + +### Be Consistent + +- Apply the same standards to all contributors +- Follow the process you've defined +- Document decisions for future reference + +### Be Protective of Project Vision + +- It's okay to say "no" +- Prioritize long-term maintainability +- Don't accept features you can't support +- Keep the project focused + +### Be Responsive + +- Respond to issues within 48 hours (even just to acknowledge) +- Review PRs within a week when possible +- Keep contributors updated on status +- Close stale issues/PRs to keep things tidy + +## When in Doubt + +Ask yourself: +1. Does this align with our [design principles](design-principles.md)? +2. Will we be able to maintain this feature long-term? +3. Does this benefit most users, or just an edge case? +4. Is there a simpler alternative? +5. Would I want to support this in 2 years? + +If you're unsure, it's perfectly fine to: +- Ask for input from other maintainers +- Start a discussion issue +- Sleep on it before making a decision + +--- + +**Remember**: Good maintainership is about balancing openness to contributions with protection of project vision. You're not being mean by saying "no" to things that don't fit - you're being a responsible steward of the project. 
diff --git a/docs/7-DEVELOPMENT/quick-start.md b/docs/7-DEVELOPMENT/quick-start.md new file mode 100644 index 00000000..f6417eab --- /dev/null +++ b/docs/7-DEVELOPMENT/quick-start.md @@ -0,0 +1,128 @@ +# Quick Start - Development + +Get Open Notebook running locally in 5 minutes. + +## Prerequisites + +- **Python 3.11+** +- **Git** +- **uv** (package manager) - install with `curl -LsSf https://astral.sh/uv/install.sh | sh` +- **Docker** (optional, for SurrealDB) + +## 1. Clone the Repository (2 min) + +```bash +# Fork the repository on GitHub first, then clone your fork +git clone https://github.com/YOUR_USERNAME/open-notebook.git +cd open-notebook + +# Add upstream remote for updates +git remote add upstream https://github.com/lfnovo/open-notebook.git +``` + +## 2. Install Dependencies (2 min) + +```bash +# Install Python dependencies +uv sync + +# Verify uv is working +uv --version +``` + +## 3. Start Services (1 min) + +In separate terminal windows: + +```bash +# Terminal 1: Start SurrealDB (database) +make database +# or: docker run -d --name surrealdb -p 8000:8000 surrealdb/surrealdb:v2 start --user root --pass password --bind 0.0.0.0:8000 memory + +# Terminal 2: Start API (backend on port 5055) +make api +# or: uv run --env-file .env uvicorn api.main:app --host 0.0.0.0 --port 5055 + +# Terminal 3: Start Frontend (UI on port 3000) +cd frontend && npm run dev +``` + +## 4. Verify Everything Works (instant) + +- **API Health**: http://localhost:5055/health → should return `{"status": "ok"}` +- **API Docs**: http://localhost:5055/docs → interactive API documentation +- **Frontend**: http://localhost:3000 → Open Notebook UI + +**All three show up?** ✅ You're ready to develop! + +--- + +## Next Steps + +- **First Issue?** Pick a [good first issue](https://github.com/lfnovo/open-notebook/issues?q=label%3A%22good+first+issue%22) +- **Understand the code?** Read [Architecture Overview](architecture.md) +- **Make changes?** Follow [Contributing Guide](contributing.md) +- **Setup details?** See [Development Setup](development-setup.md) + +--- + +## Troubleshooting + +### "Port 5055 already in use" +```bash +# Find what's using the port +lsof -i :5055 + +# Use a different port +uv run uvicorn api.main:app --port 5056 +``` + +### "Can't connect to SurrealDB" +```bash +# Check if SurrealDB is running +docker ps | grep surrealdb + +# Restart it +make database +``` + +### "Python version is too old" +```bash +# Check your Python version +python --version # Should be 3.11+ + +# Use Python 3.11 specifically +uv sync --python 3.11 +``` + +### "npm: command not found" +```bash +# Install Node.js from https://nodejs.org/ +# Then install frontend dependencies +cd frontend && npm install +``` + +--- + +## Common Development Commands + +```bash +# Run tests +uv run pytest + +# Format code +make ruff + +# Type checking +make lint + +# Run the full stack +make start-all + +# View API documentation +open http://localhost:5055/docs +``` + +--- + +Need more help? See [Development Setup](development-setup.md) for details or join our [Discord](https://discord.gg/37XJPXfz2w). diff --git a/docs/7-DEVELOPMENT/testing.md b/docs/7-DEVELOPMENT/testing.md new file mode 100644 index 00000000..ce12fa1c --- /dev/null +++ b/docs/7-DEVELOPMENT/testing.md @@ -0,0 +1,423 @@ +# Testing Guide + +This document provides guidelines for writing tests in Open Notebook. Testing is critical to maintaining code quality and preventing regressions. 
+ +## Testing Philosophy + +### What to Test + +Focus on testing the things that matter most: + +- **Business Logic** - Core domain models and their operations +- **API Contracts** - HTTP endpoint behavior and error handling +- **Critical Workflows** - End-to-end flows that users depend on +- **Data Persistence** - Database operations and data integrity +- **Error Conditions** - How the system handles failures gracefully + +### What NOT to Test + +Don't waste time testing framework code: + +- Framework functionality (FastAPI, React, etc.) +- Third-party library implementation +- Simple getters/setters without logic +- View/presentation layer rendering (unless it contains logic) + +## Test Structure + +We use **pytest** with async support for all Python tests: + +```python +import pytest +from httpx import AsyncClient +from api.main import app +from open_notebook.domain.notebook import Notebook + +@pytest.mark.asyncio +async def test_create_notebook(): + """Test notebook creation.""" + notebook = Notebook(name="Test Notebook", description="Test description") + await notebook.save() + + assert notebook.id is not None + assert notebook.name == "Test Notebook" + assert notebook.created is not None + +@pytest.mark.asyncio +async def test_api_create_notebook(): + """Test notebook creation via API.""" + async with AsyncClient(app=app, base_url="http://test") as client: + response = await client.post( + "/api/notebooks", + json={"name": "Test Notebook", "description": "Test description"} + ) + assert response.status_code == 200 + data = response.json() + assert data["name"] == "Test Notebook" +``` + +## Test Categories + +### 1. Unit Tests + +Test individual functions and methods in isolation: + +```python +@pytest.mark.asyncio +async def test_notebook_validation(): + """Test that notebook name validation works.""" + with pytest.raises(InvalidInputError): + Notebook(name="", description="test") + +@pytest.mark.asyncio +async def test_notebook_archive(): + """Test notebook archiving.""" + notebook = Notebook(name="Test", description="") + await notebook.archive() + assert notebook.archived is True +``` + +**Location**: `tests/unit/` + +### 2. Integration Tests + +Test component interactions and database operations: + +```python +@pytest.mark.asyncio +async def test_create_notebook_with_sources(): + """Test creating a notebook and adding sources.""" + notebook = await create_notebook(name="Research", description="") + source = await add_source(notebook_id=notebook.id, url="https://example.com") + + retrieved = await get_notebook_with_sources(notebook.id) + assert len(retrieved.sources) == 1 + assert retrieved.sources[0].id == source.id +``` + +**Location**: `tests/integration/` + +### 3. API Tests + +Test HTTP endpoints and error responses: + +```python +@pytest.mark.asyncio +async def test_get_notebooks_endpoint(): + """Test GET /notebooks endpoint.""" + async with AsyncClient(app=app, base_url="http://test") as client: + response = await client.get("/api/notebooks") + assert response.status_code == 200 + data = response.json() + assert isinstance(data, list) + +@pytest.mark.asyncio +async def test_create_notebook_validation(): + """Test that invalid input is rejected.""" + async with AsyncClient(app=app, base_url="http://test") as client: + response = await client.post( + "/api/notebooks", + json={"name": "", "description": ""} + ) + assert response.status_code == 400 +``` + +**Location**: `tests/api/` + +### 4.
Database Tests + +Test data persistence and query correctness: + +```python +@pytest.mark.asyncio +async def test_save_and_retrieve_notebook(): + """Test saving and retrieving a notebook from database.""" + notebook = Notebook(name="Test", description="desc") + await notebook.save() + + retrieved = await Notebook.get(notebook.id) + assert retrieved.name == "Test" + assert retrieved.description == "desc" + +@pytest.mark.asyncio +async def test_query_by_criteria(): + """Test querying notebooks by criteria.""" + await create_notebook("Active", "") + await create_notebook("Archived", "") + + active = await repo_query( + "SELECT * FROM notebook WHERE archived = false" + ) + assert len(active) >= 1 +``` + +**Location**: `tests/database/` + +## Running Tests + +### Run All Tests + +```bash +uv run pytest +``` + +### Run Specific Test File + +```bash +uv run pytest tests/test_notebooks.py +``` + +### Run Specific Test Function + +```bash +uv run pytest tests/test_notebooks.py::test_create_notebook +``` + +### Run with Coverage Report + +```bash +uv run pytest --cov=open_notebook +``` + +### Run Only Unit Tests + +```bash +uv run pytest tests/unit/ +``` + +### Run Only Integration Tests + +```bash +uv run pytest tests/integration/ +``` + +### Run Tests in Verbose Mode + +```bash +uv run pytest -v +``` + +### Run Tests with Output + +```bash +uv run pytest -s +``` + +## Test Fixtures + +Use pytest fixtures for common setup and teardown: + +```python +import pytest + +@pytest.fixture +async def test_notebook(): + """Create a test notebook.""" + notebook = Notebook(name="Test Notebook", description="Test description") + await notebook.save() + yield notebook + await notebook.delete() + +@pytest.fixture +async def api_client(): + """Create an API test client.""" + async with AsyncClient(app=app, base_url="http://test") as client: + yield client + +@pytest.fixture +async def test_notebook_with_sources(test_notebook): + """Create a test notebook with sample sources.""" + source1 = Source(notebook_id=test_notebook.id, url="https://example.com") + source2 = Source(notebook_id=test_notebook.id, url="https://example.org") + await source1.save() + await source2.save() + + test_notebook.sources = [source1, source2] + yield test_notebook + + # Cleanup + await source1.delete() + await source2.delete() +``` + +## Best Practices + +### 1. Write Descriptive Test Names + +```python +# Good - clearly describes what is being tested +async def test_create_notebook_with_valid_name_succeeds(): + ... + +# Bad - vague about what's being tested +async def test_notebook(): + ... +``` + +### 2. Use Docstrings + +```python +@pytest.mark.asyncio +async def test_vector_search_returns_sorted_results(): + """Test that vector search results are sorted by relevance score.""" + # Implementation +``` + +### 3. Test Edge Cases + +```python +@pytest.mark.asyncio +async def test_search_with_empty_query(): + """Test that empty query raises error.""" + with pytest.raises(InvalidInputError): + await vector_search("") + +@pytest.mark.asyncio +async def test_search_with_very_long_query(): + """Test that very long query is handled.""" + long_query = "x" * 10000 + results = await vector_search(long_query) + assert isinstance(results, list) + +@pytest.mark.asyncio +async def test_search_with_special_characters(): + """Test that special characters are handled.""" + results = await vector_search("@#$%^&*()") + assert isinstance(results, list) +``` + +### 4. 
Use Assertions Effectively + +```python +# Good - specific assertions +assert notebook.name == "Test" +assert len(notebook.sources) == 3 +assert notebook.created is not None + +# Less good - too broad +assert notebook is not None +assert notebook # ambiguous what's being tested +``` + +### 5. Test Both Success and Failure Cases + +```python +@pytest.mark.asyncio +async def test_create_notebook_success(): + """Test successful notebook creation.""" + notebook = await create_notebook(name="Research", description="AI") + assert notebook.id is not None + assert notebook.name == "Research" + +@pytest.mark.asyncio +async def test_create_notebook_empty_name_fails(): + """Test that empty name raises error.""" + with pytest.raises(InvalidInputError): + await create_notebook(name="", description="") + +@pytest.mark.asyncio +async def test_create_notebook_duplicate_fails(): + """Test that duplicate names are handled.""" + await create_notebook(name="Research", description="") + with pytest.raises(DuplicateError): + await create_notebook(name="Research", description="") +``` + +### 6. Keep Tests Independent + +```python +# Good - test is self-contained +@pytest.mark.asyncio +async def test_archive_notebook(): + notebook = Notebook(name="Test", description="") + await notebook.save() + await notebook.archive() + assert notebook.archived is True + +# Bad - depends on another test's state +@pytest.mark.asyncio +async def test_archive_existing_notebook(): + # Assumes test_create_notebook ran first + await notebook.archive() # notebook undefined +``` + +### 7. Use Fixtures for Reusable Setup + +```python +# Instead of repeating setup: +@pytest.fixture +async def client_with_auth(api_client, mock_auth): + """Client with authentication set up.""" + api_client.headers.update({"Authorization": f"Bearer {mock_auth.token}"}) + yield api_client + +@pytest.mark.asyncio +async def test_protected_endpoint(client_with_auth): + """Test protected endpoint.""" + response = await client_with_auth.get("/api/protected") + assert response.status_code == 200 +``` + +## Coverage Goals + +- Aim for 70%+ overall coverage +- 90%+ coverage for critical business logic +- Don't obsess over 100% - focus on meaningful tests +- Use `--cov` flag to check coverage: `uv run pytest --cov=open_notebook` + +## Async Test Patterns + +### Testing Async Functions + +```python +@pytest.mark.asyncio +async def test_async_operation(): + """Test async function.""" + result = await some_async_function() + assert result is not None +``` + +### Testing Concurrent Operations + +```python +import asyncio + +@pytest.mark.asyncio +async def test_concurrent_notebook_creation(): + """Test creating multiple notebooks concurrently.""" + tasks = [ + create_notebook(f"Notebook {i}", "") + for i in range(10) + ] + notebooks = await asyncio.gather(*tasks) + assert len(notebooks) == 10 + assert all(n.id for n in notebooks) +``` + +## Common Testing Errors + +### Error: "event loop is closed" + +Solution: Use the async fixture properly: +```python +@pytest.fixture +async def notebook(): # Use async fixture + notebook = Notebook(name="Test", description="") + await notebook.save() + yield notebook + await notebook.delete() +``` + +### Error: "object is not awaitable" + +Solution: Make sure you're using await: +```python +# Wrong +result = create_notebook("Test", "") + +# Right +result = await create_notebook("Test", "") +``` + +--- + +**See also:** +- [Code Standards](code-standards.md) - Code formatting and style +- [Contributing Guide](contributing.md) - Overall contribution
workflow diff --git a/docs/assets/add_source.png b/docs/assets/add_source.png deleted file mode 100644 index 59213e9d..00000000 Binary files a/docs/assets/add_source.png and /dev/null differ diff --git a/docs/assets/ai_note.png b/docs/assets/ai_note.png deleted file mode 100644 index 3709a9f8..00000000 Binary files a/docs/assets/ai_note.png and /dev/null differ diff --git a/docs/assets/asset_list.png b/docs/assets/asset_list.png deleted file mode 100644 index 87327a80..00000000 Binary files a/docs/assets/asset_list.png and /dev/null differ diff --git a/docs/assets/context.png b/docs/assets/context.png deleted file mode 100644 index ada373f2..00000000 Binary files a/docs/assets/context.png and /dev/null differ diff --git a/docs/assets/hero.svg b/docs/assets/hero.svg deleted file mode 100644 index 87013478..00000000 --- a/docs/assets/hero.svg +++ /dev/null @@ -1,60 +0,0 @@ \ No newline at end of file diff --git a/docs/assets/human_note.png b/docs/assets/human_note.png deleted file mode 100644 index 397d3927..00000000 Binary files a/docs/assets/human_note.png and /dev/null differ diff --git a/docs/assets/new_notebook.png b/docs/assets/new_notebook.png deleted file mode 100644 index 6e90a441..00000000 Binary files a/docs/assets/new_notebook.png and /dev/null differ diff --git a/docs/assets/podcast.png b/docs/assets/podcast.png deleted file mode 100644 index 8af36d2c..00000000 Binary files a/docs/assets/podcast.png and /dev/null differ diff --git a/docs/assets/podcast_listen.png b/docs/assets/podcast_listen.png deleted file mode 100644 index eaf26c9d..00000000 Binary files a/docs/assets/podcast_listen.png and /dev/null differ diff --git a/docs/assets/podcast_template.png b/docs/assets/podcast_template.png deleted file mode 100644 index 00a508ec..00000000 Binary files a/docs/assets/podcast_template.png and /dev/null differ diff --git a/docs/assets/search.png b/docs/assets/search.png deleted file mode 100644 index db199c9a..00000000 Binary files a/docs/assets/search.png and /dev/null differ diff --git a/docs/assets/transformations.png b/docs/assets/transformations.png deleted file mode 100644 index 707bb253..00000000 Binary files a/docs/assets/transformations.png and /dev/null differ diff --git a/docs/assets/whilte_logo.png b/docs/assets/whilte_logo.png deleted file mode 100644 index 400d040e..00000000 Binary files a/docs/assets/whilte_logo.png and /dev/null differ diff --git a/docs/deployment/development.md b/docs/deployment/development.md deleted file mode 100644 index 9b1e2f9f..00000000 --- a/docs/deployment/development.md +++ /dev/null @@ -1,512 +0,0 @@ -# Development Setup - -This guide covers setting up Open Notebook for local development, contributing to the project, and running from source code.
- -## 🎯 Who This Guide Is For - -This setup is ideal if you want to: -- **Contribute to Open Notebook** - Fix bugs, add features, or improve documentation -- **Customize the application** - Modify the code for your specific needs -- **Understand the codebase** - Learn how Open Notebook works internally -- **Develop integrations** - Build custom plugins or extensions - -## 🛠️ Prerequisites - -### System Requirements - -- **Python 3.11+** - Required for the application -- **Node.js 18+** - For frontend development (if contributing to UI) -- **Git** - For version control -- **Docker** - For SurrealDB and optional services - -### Development Tools - -- **Code editor** - VS Code, PyCharm, or your preferred IDE -- **Terminal** - Command line access -- **Web browser** - For testing the application - -## 📥 Installation - -### Step 1: Clone the Repository - -```bash -git clone https://github.com/lfnovo/open-notebook.git -cd open-notebook -``` - -### Step 2: Python Environment Setup - -Open Notebook uses **uv** for dependency management: - -```bash -# Install uv (if not already installed) -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Create and activate virtual environment -uv venv -source .venv/bin/activate # On Windows: .venv\Scripts\activate - -# Install dependencies -uv sync -``` - -### Step 3: Database Setup - -#### Option A: Docker SurrealDB (Recommended) - -```bash -# Start SurrealDB with Docker -docker run -d \ - --name surrealdb-dev \ - -p 8000:8000 \ - surrealdb/surrealdb:v2 \ - start --log trace --user root --pass root memory -``` - -#### Option B: Local SurrealDB Installation - -```bash -# Install SurrealDB locally -curl -sSf https://install.surrealdb.com | sh - -# Start SurrealDB -surreal start --log trace --user root --pass root memory -``` - -### Step 4: Environment Configuration - -Create a `.env` file in the project root: - -```env -# Database Configuration -SURREAL_URL=ws://localhost:8000/rpc -SURREAL_USER=root -SURREAL_PASSWORD=root -SURREAL_NAMESPACE=open_notebook -SURREAL_DATABASE=development - -# Required: At least one AI provider -OPENAI_API_KEY=sk-your-openai-key - -# Optional: Additional providers for testing -ANTHROPIC_API_KEY=sk-ant-your-anthropic-key -GOOGLE_API_KEY=your-google-key -GROQ_API_KEY=gsk_your-groq-key - -# Optional: Development settings -LOG_LEVEL=DEBUG -ENABLE_ANALYTICS=false -``` - -### Step 5: Frontend Setup - -Install frontend dependencies: - -```bash -cd frontend -npm install -cd .. -``` - -> **Note**: Database migrations now run automatically when the API starts. No manual migration step is required. 
- -### Step 6: Start the Application - -#### Option A: Full Stack with Make - -```bash -# Start all services (recommended for development) -make start-all -``` - -This starts: -- **SurrealDB** (if not already running) -- **FastAPI backend** on port 5055 -- **Background worker** for async tasks -- **React frontend** on port 8502 - -#### Option B: Individual Services - -Start services separately for debugging: - -```bash -# Terminal 1: Start the API -uv run python api/main.py - -# Terminal 2: Start the background worker -uv run python -m open_notebook.worker - -# Terminal 3: Start the React frontend -cd frontend && npm run dev -``` - -## 🔧 Development Workflow - -### Project Structure - -``` -open-notebook/ -├── api/ # FastAPI backend -│ ├── routers/ # API routes -│ └── main.py # API entry point -├── frontend/ # React frontend (Next.js) -│ ├── src/ # React components and pages -│ └── public/ # Static assets -├── open_notebook/ # Core application -│ ├── domain/ # Business logic -│ ├── database/ # Database layer -│ └── graphs/ # LangGraph workflows -├── prompts/ # Jinja2 templates -├── docs/ # Documentation -└── tests/ # Test files -``` - -### Development Commands - -```bash -# Install new dependencies -uv add package-name - -# Run tests -uv run pytest - -# Run linting -uv run ruff check -uv run ruff format - -# Type checking -uv run mypy . - -# Start development server -make start-dev -``` - -### Making Changes - -1. **Create a branch** for your feature/fix: - ```bash - git checkout -b feature/your-feature-name - ``` - -2. **Make your changes** in the appropriate files - -3. **Test your changes**: - ```bash - uv run pytest - ``` - -4. **Format code**: - ```bash - uv run ruff format - ``` - -5. **Commit your changes**: - ```bash - git add . - git commit -m "feat: your descriptive commit message" - ``` - -6. 
**Push and create a pull request**: - ```bash - git push origin feature/your-feature-name - ``` - -## 🧪 Testing - -### Running Tests - -```bash -# Run all tests -uv run pytest - -# Run specific test file -uv run pytest tests/test_specific.py - -# Run with coverage -uv run pytest --cov=open_notebook - -# Run integration tests -uv run pytest tests/integration/ -``` - -### Test Structure - -``` -tests/ -├── unit/ # Unit tests -├── integration/ # Integration tests -├── fixtures/ # Test fixtures -└── conftest.py # Test configuration -``` - -### Writing Tests - -```python -# Example test file -import pytest -from open_notebook.domain.notebook import Notebook - -def test_notebook_creation(): - notebook = Notebook(name="Test Notebook", description="Test") - assert notebook.name == "Test Notebook" - assert notebook.description == "Test" -``` - -## 🚀 Building and Deployment - -### Local Docker Build - -```bash -# Build multi-container version -make docker-build-dev - -# Build single-container version -make docker-build-single-dev - -# Test the built image -docker run -p 8502:8502 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - open_notebook:v1-latest -``` - -### Production Build - -```bash -# Build with multi-platform support -make docker-build - -# Build and push to registry -make docker-push -``` - -## 🔍 Debugging - -### Common Development Issues - -#### Database Connection Errors - -```bash -# Check if SurrealDB is running -docker ps | grep surrealdb - -# Check SurrealDB logs -docker logs surrealdb-dev - -# Test connection -curl -X POST http://localhost:8000/sql \ - -H "Content-Type: application/json" \ - -d '{"sql": "SELECT * FROM VERSION"}' -``` - -#### API Not Starting - -```bash -# Check Python environment -uv run python --version - -# Check dependencies -uv run pip list | grep fastapi - -# Start with debug mode -uv run python api/main.py --debug -``` - -#### Frontend Issues - -```bash -# Check Node.js and npm versions -node --version -npm --version - -# Reinstall frontend dependencies -cd frontend -rm -rf node_modules package-lock.json -npm install - -# Start frontend in development mode -npm run dev -``` - -### Debugging Tools - -#### VS Code Configuration - -Create `.vscode/launch.json`: - -```json -{ - "version": "0.2.0", - "configurations": [ - { - "name": "FastAPI", - "type": "python", - "request": "launch", - "program": "api/main.py", - "console": "integratedTerminal", - "cwd": "${workspaceFolder}", - "env": { - "PYTHONPATH": "${workspaceFolder}" - } - }, - { - "name": "React Frontend", - "type": "node", - "request": "launch", - "cwd": "${workspaceFolder}/frontend", - "runtimeExecutable": "npm", - "runtimeArgs": ["run", "dev"], - "console": "integratedTerminal" - } - ] -} -``` - -#### Python Debugging - -```python -# Add breakpoints in code -import pdb; pdb.set_trace() - -# Or use debugger -import debugpy -debugpy.listen(5678) -debugpy.wait_for_client() -``` - -## 📝 Code Style and Standards - -### Python Style Guide - -- **Formatting**: Use `ruff format` for code formatting -- **Linting**: Use `ruff check` for linting -- **Type hints**: Use type hints for all functions -- **Docstrings**: Document all public functions and classes - -### Example Code Style - -```python -from typing import List, Optional -from pydantic import BaseModel - -class Notebook(BaseModel): - """A notebook for organizing research sources.""" - - name: str - description: Optional[str] = None - sources: List[str] = [] - - def add_source(self, source_id: str) -> None: - """Add a source to the 
notebook. - - Args: - source_id: The ID of the source to add - """ - if source_id not in self.sources: - self.sources.append(source_id) -``` - -### Commit Message Format - -Follow conventional commits: - -``` -feat: add new podcast generation feature -fix: resolve database connection issue -docs: update deployment guide -refactor: improve source processing logic -test: add tests for notebook creation -``` - -## 🤝 Contributing - -### Before Contributing - -1. **Read the contribution guidelines** in `CONTRIBUTING.md` -2. **Join the Discord** for discussion: [discord.gg/37XJPXfz2w](https://discord.gg/37XJPXfz2w) -3. **Check existing issues** to avoid duplicates -4. **Discuss major changes** before implementing - -### Contribution Process - -1. **Fork the repository** on GitHub -2. **Create a feature branch** from `main` -3. **Make your changes** following the coding standards -4. **Add tests** for new functionality -5. **Update documentation** as needed -6. **Submit a pull request** with a clear description - -### Areas for Contribution - -- **Frontend Development** - Modern React/Next.js UI improvements -- **Backend Features** - API endpoints, new functionality -- **AI Integrations** - New model providers, better prompts -- **Documentation** - Guides, tutorials, API docs -- **Testing** - Unit tests, integration tests -- **Bug Fixes** - Resolve existing issues - -## 📚 Development Resources - -### Documentation - -- **[API Documentation](../api-reference.md)** - REST API reference -- **[Architecture Guide](../architecture.md)** - System architecture -- **[Plugin Development](../plugins.md)** - Creating custom plugins - -### External Resources - -- **[SurrealDB Documentation](https://surrealdb.com/docs)** - Database queries and schema -- **[FastAPI Documentation](https://fastapi.tiangolo.com/)** - API framework -- **[Next.js Documentation](https://nextjs.org/docs)** - React framework -- **[LangChain Documentation](https://python.langchain.com/)** - AI workflows - -### Getting Help - -- **[Discord Server](https://discord.gg/37XJPXfz2w)** - Real-time development help -- **[GitHub Discussions](https://github.com/lfnovo/open-notebook/discussions)** - Design discussions -- **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Bug reports and feature requests - -## 🔄 Maintenance - -### Keeping Your Fork Updated - -```bash -# Add upstream remote -git remote add upstream https://github.com/lfnovo/open-notebook.git - -# Fetch upstream changes -git fetch upstream - -# Merge upstream changes -git checkout main -git merge upstream/main -``` - -### Dependency Updates - -```bash -# Update dependencies -uv sync --upgrade - -# Check for security issues -uvx pip-audit - -# Update pre-commit hooks -pre-commit autoupdate -``` - -### Database Migrations - -Database migrations now run automatically when the API starts. When you need to create new migrations: - -```bash -# Create new migration file -# Add your migration to migrations/ folder with incremental number - -# Migrations are automatically applied on API startup -uv run python api/main.py -``` - ---- - -**Ready to contribute?** Start by forking the repository and following the installation steps above. Join our Discord for real-time help and discussion!
\ No newline at end of file diff --git a/docs/deployment/docker.md b/docs/deployment/docker.md deleted file mode 100644 index 2997c1a2..00000000 --- a/docs/deployment/docker.md +++ /dev/null @@ -1,528 +0,0 @@ -# Docker Deployment Guide - -**The complete Docker setup guide for Open Notebook - from beginner to advanced configurations.** - -This guide covers everything you need to deploy Open Notebook using Docker, from a simple single-provider setup to advanced multi-provider configurations with local models. - -## 📋 What You'll Get - -Open Notebook is a powerful AI-powered research and note-taking tool that: -- Modern Next.js/React interface for a smooth user experience -- Helps you organize research across multiple notebooks -- Lets you chat with your documents using AI -- Supports 16+ AI providers (OpenAI, Anthropic, Google, Ollama, and more) -- Creates AI-generated podcasts from your content -- Works with PDFs, web links, videos, audio files, and more - -## 📦 Docker Image Registries - -Open Notebook images are available from two registries: - -- **GitHub Container Registry (GHCR)**: `ghcr.io/lfnovo/open-notebook` - Hosted on GitHub, no Docker Hub account needed -- **Docker Hub**: `lfnovo/open_notebook` - Traditional Docker registry - -Both registries contain identical images. Choose based on your preference: -- Use **GHCR** if you prefer GitHub-native workflows or Docker Hub is blocked -- Use **Docker Hub** if you're already using it or prefer the traditional registry - -All examples in this guide use Docker Hub (`lfnovo/open_notebook`), but you can replace it with `ghcr.io/lfnovo/open-notebook` anywhere. - -## 🚀 Quick Start (5 Minutes) - -### Step 1: Install Docker - -#### Windows -1. Download Docker Desktop from [docker.com](https://www.docker.com/products/docker-desktop/) -2. Run the installer and follow the setup wizard -3. Restart your computer when prompted -4. Launch Docker Desktop - -#### macOS -1. Download Docker Desktop from [docker.com](https://www.docker.com/products/docker-desktop/) -2. Choose Intel or Apple Silicon based on your Mac -3. Drag Docker to Applications folder -4. Open Docker from Applications - -#### Linux (Ubuntu/Debian) -```bash -sudo apt update -sudo apt install docker.io docker-compose -sudo systemctl start docker -sudo systemctl enable docker -sudo usermod -aG docker $USER -``` -Log out and log back in after installation. - -### Step 2: Get Your OpenAI API Key - -OpenAI provides everything you need to get started: -- **Text generation** for chat and notes -- **Embeddings** for search functionality -- **Text-to-speech** for podcast generation -- **Speech-to-text** for audio transcription - -1. Go to [platform.openai.com](https://platform.openai.com/) -2. Create an account or sign in -3. Navigate to **API Keys** in the sidebar -4. Click **"Create new secret key"** -5. Name your key (e.g., "Open Notebook") -6. Copy the key (starts with "sk-") -7. **Save it safely** - you won't see it again! - -**Important**: Add at least $5 in credits to your OpenAI account before using the API. - -### Step 3: Deploy Open Notebook - -1. **Create a project directory**: - ```bash - mkdir open-notebook - cd open-notebook - ``` - -2. **Create `docker-compose.yml`**: - ```yaml - services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" # Frontend - - "5055:5055" # API - environment: - - OPENAI_API_KEY=your_openai_key_here - volumes: - - ./notebook_data:/app/data - - ./surreal_data:/mydata - restart: always - ``` - -3. 
**Create `docker.env` file** (optional but recommended): - ```env - # Required: Your OpenAI API key - OPENAI_API_KEY=sk-your-actual-key-here - - # Optional: Security for public deployments - OPEN_NOTEBOOK_PASSWORD=your_secure_password - - # Database settings (auto-configured) - SURREAL_URL=ws://localhost:8000/rpc - SURREAL_USER=root - SURREAL_PASSWORD=root - SURREAL_NAMESPACE=open_notebook - SURREAL_DATABASE=production - ``` - -4. **Start Open Notebook**: - ```bash - docker compose up -d - ``` - -5. **Access the application**: - - **Next.js UI**: http://localhost:8502 - Modern, responsive interface - - **API Documentation**: http://localhost:5055/docs - Full REST API access - - You should see the Open Notebook interface! - -**Alternative: Using GHCR** -To use GitHub Container Registry instead, simply replace the image name: -```yaml -services: - open_notebook: - image: ghcr.io/lfnovo/open-notebook:v1-latest-single - # ... rest of configuration stays the same -``` - -### Step 4: Configure Your Models - -Before creating your first notebook, configure your AI models: - -1. Click **"⚙️ Settings"** in the sidebar -2. Click **"🤖 Models"** tab -3. Configure these recommended models: - - **Language Model**: `gpt-5-mini` (cost-effective) - - **Embedding Model**: `text-embedding-3-small` (required for search) - - **Text-to-Speech**: `gpt-4o-mini-tts` (for podcast generation) - - **Speech-to-Text**: `whisper-1` (for audio transcription) -4. Click **"Save"** after configuring all models - -### Step 5: Create Your First Notebook - -1. Click **"Create New Notebook"** -2. Give it a name (e.g., "My Research") -3. Add a description -4. Click **"Create"** -5. Add your first source (web link, PDF, or text) -6. Start chatting with your content! - -## 🔧 Advanced Configuration - -### Multi-Container Setup - -For production deployments or development, use the multi-container setup: - -```yaml -services: - surrealdb: - image: surrealdb/surrealdb:v2 - ports: - - "8000:8000" - command: start --log trace --user root --pass root memory - restart: always - - open_notebook: - image: lfnovo/open_notebook:v1-latest - # Or use: ghcr.io/lfnovo/open-notebook:v1-latest - ports: - - "8502:8502" # Next.js Frontend - - "5055:5055" # REST API - env_file: - - ./docker.env - volumes: - - ./notebook_data:/app/data - depends_on: - - surrealdb - restart: always -``` - -### Environment Configuration - -Create a comprehensive `docker.env` file: - -```env -# Required: Database connection -SURREAL_URL=ws://surrealdb:8000/rpc -SURREAL_USER=root -SURREAL_PASSWORD=root -SURREAL_NAMESPACE=open_notebook -SURREAL_DATABASE=production - -# Required: At least one AI provider -OPENAI_API_KEY=sk-your-openai-key - -# Optional: Additional AI providers -ANTHROPIC_API_KEY=sk-ant-your-anthropic-key -GOOGLE_API_KEY=your-google-key -GROQ_API_KEY=gsk_your-groq-key - -# Optional: Security -OPEN_NOTEBOOK_PASSWORD=your_secure_password - -# Optional: Advanced features -ELEVENLABS_API_KEY=your-elevenlabs-key -``` - -## 🌟 Advanced Provider Setup - -### OpenRouter (100+ Models) - -OpenRouter gives you access to virtually every AI model through a single API: - -1. **Get your API key** at [openrouter.ai](https://openrouter.ai/keys) -2. **Add to your `docker.env`**: - ```env - OPENROUTER_API_KEY=sk-or-your-openrouter-key - ``` -3. **Restart the container**: - ```bash - docker compose restart - ``` -4. 
**Configure models** in Models - -**Recommended OpenRouter models**: -- `anthropic/claude-3-haiku` - Fast and cost-effective -- `google/gemini-pro` - Good reasoning capabilities -- `meta-llama/llama-3-8b-instruct` - Open source option - -### Ollama (Local Models) - -Run AI models locally for complete privacy: - -1. **Install Ollama** on your host machine from [ollama.ai](https://ollama.ai) -2. **Start Ollama**: - ```bash - ollama serve - ``` -3. **Download models**: - ```bash - ollama pull llama2 # 7B model (~4GB) - ollama pull mistral # 7B model (~4GB) - ollama pull llama2:13b # 13B model (~8GB) - ``` -4. **Find your IP address**: - - Windows: `ipconfig` (look for IPv4 Address) - - macOS/Linux: `ifconfig` or `ip addr show` -5. **Configure Open Notebook**: - ```env - OLLAMA_API_BASE=http://192.168.1.100:11434 - ``` - Replace `192.168.1.100` with your actual IP. - -6. **Restart and configure** models in Models - -### Other Providers - -**Anthropic (Direct)**: -```env -ANTHROPIC_API_KEY=sk-ant-your-key -``` - -**Google Gemini**: -```env -GOOGLE_API_KEY=AIzaSy-your-key -``` - -**Groq (Fast Inference)**: -```env -GROQ_API_KEY=gsk_your-key -``` - -## 🔒 Security & Production - -### Password Protection - -For public deployments, always set a password: - -```env -OPEN_NOTEBOOK_PASSWORD=your_secure_password -``` - -This protects both the web interface and API endpoints. - -### Production Best Practices - -1. **Use HTTPS**: Deploy behind a reverse proxy with SSL -2. **Regular Updates**: Keep containers updated -3. **Monitor Resources**: Set up resource limits -4. **Backup Data**: Regular backups of volumes -5. **Network Security**: Configure firewall rules - -### Example Production Setup - -```yaml -services: - surrealdb: - image: surrealdb/surrealdb:v2 - ports: - - "127.0.0.1:8000:8000" # Bind to localhost only - command: start --log warn --user root --pass root file:///mydata/database.db - volumes: - - ./surreal_data:/mydata - restart: always - deploy: - resources: - limits: - memory: 1G - cpus: "0.5" - - open_notebook: - image: lfnovo/open_notebook:v1-latest - ports: - - "127.0.0.1:8502:8502" - - "127.0.0.1:5055:5055" - env_file: - - ./docker.env - volumes: - - ./notebook_data:/app/data - depends_on: - - surrealdb - restart: always - deploy: - resources: - limits: - memory: 2G - cpus: "1.0" -``` - -## 🛠️ Management & Maintenance - -### Container Management - -```bash -# Start services -docker compose up -d - -# Stop services -docker compose down - -# View logs -docker compose logs -f - -# Restart specific service -docker compose restart open_notebook - -# Update to latest version -docker compose pull -docker compose up -d -``` - -### Data Management - -```bash -# Backup data -tar -czf backup-$(date +%Y%m%d).tar.gz notebook_data surreal_data - -# Restore data -tar -xzf backup-20240101.tar.gz - -# Clean up old containers -docker system prune -a -``` - -### Monitoring - -```bash -# Check resource usage -docker stats - -# Check service health -docker compose ps - -# View detailed logs -docker compose logs --tail=100 -f open_notebook -``` - -## 📊 Performance Optimization - -### Resource Allocation - -**Minimum requirements**: -- 2GB RAM -- 2 CPU cores -- 10GB storage - -**Recommended for production**: -- 4GB+ RAM -- 4+ CPU cores -- 50GB+ storage - -### Model Selection Tips - -**For cost optimization**: -- Use OpenRouter for expensive models -- Use Ollama for simple tasks -- Monitor usage at provider dashboards - -**For performance**: -- Use Groq for fast inference -- Use local models for privacy -- 
Use OpenAI for reliability - -## 🔍 Troubleshooting - -### Common Issues - -**Port conflicts**: -```bash -# Check what's using port 8502 -lsof -i :8502 - -# Use a different host port: edit the ports mapping -# in docker-compose.yml (e.g. "8503:8502"), then restart -docker compose up -d -``` - -**API key errors**: -1. Verify keys are set correctly in `docker.env` -2. Check you have credits with your AI provider -3. Ensure no extra spaces in the key - -**Database connection issues**: -1. Check SurrealDB container is running -2. Verify database files are writable -3. Try restarting containers - -**Out of memory errors**: -1. Increase Docker memory allocation -2. Use smaller models -3. Monitor resource usage - -### Getting Help - -1. **Check logs**: `docker compose logs -f` -2. **Verify environment**: `docker compose config` -3. **Test connectivity**: `docker compose exec open_notebook ping surrealdb` -4. **Join Discord**: [discord.gg/37XJPXfz2w](https://discord.gg/37XJPXfz2w) -5. **GitHub Issues**: [github.com/lfnovo/open-notebook/issues](https://github.com/lfnovo/open-notebook/issues) - -## 🎯 Next Steps - -After successful deployment: - -1. **Create your first notebook** - Start with a simple research project -2. **Explore features** - Try podcasts, transformations, and search -3. **Optimize models** - Experiment with different providers -4. **Join the community** - Share your experience and get help - -## 📚 Complete Configuration Reference - -### All Environment Variables - -```env -# Database Configuration -SURREAL_URL=ws://surrealdb:8000/rpc -SURREAL_USER=root -SURREAL_PASSWORD=root -SURREAL_NAMESPACE=open_notebook -SURREAL_DATABASE=production - -# Required: At least one AI provider -OPENAI_API_KEY=sk-your-openai-key - -# Optional: Additional AI providers -ANTHROPIC_API_KEY=sk-ant-your-anthropic-key -GOOGLE_API_KEY=your-google-key -GROQ_API_KEY=gsk_your-groq-key -OPENROUTER_API_KEY=sk-or-your-openrouter-key -OLLAMA_API_BASE=http://192.168.1.100:11434 - -# Optional: Advanced TTS -ELEVENLABS_API_KEY=your-elevenlabs-key - -# Optional: Security -OPEN_NOTEBOOK_PASSWORD=your_secure_password - -# Optional: Advanced settings -LOG_LEVEL=INFO -MAX_UPLOAD_SIZE=100MB -ENABLE_ANALYTICS=false -``` - -### Complete Docker Compose - -```yaml -version: '3.8' -services: - surrealdb: - image: surrealdb/surrealdb:v2 - ports: - - "8000:8000" - command: start --log warn --user root --pass root file:///mydata/database.db - volumes: - - ./surreal_data:/mydata - restart: always - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:8000/health"] - interval: 30s - timeout: 10s - retries: 3 - - open_notebook: - image: lfnovo/open_notebook:v1-latest - ports: - - "8502:8502" # Next.js Frontend - - "5055:5055" # REST API - env_file: - - ./docker.env - volumes: - - ./notebook_data:/app/data - depends_on: - surrealdb: - condition: service_healthy - restart: always - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:5055/health"] - interval: 30s - timeout: 10s - retries: 3 -``` - ---- - -**Ready to get started?** Follow the Quick Start section above and you'll be up and running in 5 minutes! \ No newline at end of file diff --git a/docs/deployment/index.md b/docs/deployment/index.md deleted file mode 100644 index 221fc8d8..00000000 --- a/docs/deployment/index.md +++ /dev/null @@ -1,184 +0,0 @@ -# Deployment Guide - -This section provides comprehensive guides for deploying Open Notebook in different environments, from simple local setups to production deployments.
- -## 🚀 Quick Start - -**New to Open Notebook?** Start with the [Docker Setup Guide](docker.md) - it's the fastest way to get up and running. - -## 📋 Deployment Options - -### 1. [Docker Deployment](docker.md) -**Recommended for most users** -- Complete beginner-friendly guide -- Single-container and multi-container options -- Supports all major AI providers -- Perfect for local development and testing - -### 2. [Single Container Deployment](single-container.md) -**Best for platforms like PikaPods** -- All-in-one container solution -- Simplified deployment process -- Ideal for cloud hosting platforms -- Lower resource requirements - -### 3. [Development Setup](development.md) -**For contributors and advanced users** -- Local development environment -- Source code installation -- Development tools and debugging -- Contributing to the project - -### 4. [Reverse Proxy Configuration](reverse-proxy.md) -**For production deployments with custom domains** -- nginx, Caddy, Traefik configurations -- Custom domain setup -- SSL/HTTPS configuration -- Runtime API URL configuration - -### 5. [Security Configuration](security.md) -**Essential for public deployments** -- Password protection setup -- Security best practices -- Production deployment considerations -- Troubleshooting security issues - -### 6. [Retry Configuration](retry-configuration.md) -**For reliable background job processing** -- Automatic retry for transient failures -- Database transaction conflict handling -- Embedding provider failure recovery -- Performance tuning and monitoring - -## 🎯 Choose Your Deployment Method - -### Use Docker Setup if: -- You're new to Open Notebook -- You want the easiest setup experience -- You need multiple AI provider support -- You're running locally or on a private server - -### Use Single Container if: -- You're deploying on PikaPods, Railway, or similar platforms -- You want the simplest possible deployment -- You have resource constraints -- You don't need to scale services independently - -### Use Reverse Proxy Setup if: -- You're deploying with a custom domain -- You need HTTPS/SSL encryption -- You're using nginx, Caddy, or Traefik -- You want to expose only specific ports publicly - -### Use Development Setup if: -- You want to contribute to the project -- You need to modify the source code -- You're developing integrations or plugins -- You want to understand the codebase - -## 📚 Additional Resources - -### Before You Start -- **[System Requirements](#system-requirements)** - Hardware and software needs -- **[API Keys Guide](#api-keys)** - Getting keys from AI providers -- **[Environment Variables](#environment-variables)** - Configuration reference - -### After Deployment -- **[First Notebook Guide](../getting-started/first-notebook.md)** - Create your first research project -- **[User Guide](../user-guide/index.md)** - Learn all the features -- **[Troubleshooting](../troubleshooting/index.md)** - Common issues and solutions - -## 🔧 System Requirements - -### Minimum Requirements -- **Memory**: 2GB RAM -- **CPU**: 2 cores -- **Storage**: 10GB free space -- **Network**: Internet connection for AI providers - -### Recommended Requirements -- **Memory**: 4GB+ RAM -- **CPU**: 4+ cores -- **Storage**: 50GB+ free space -- **Network**: Stable high-speed internet - -### Platform Support -- **Linux**: Ubuntu 20.04+, CentOS 7+, or similar -- **Windows**: Windows 10+ with WSL2 (for Docker) -- **macOS**: macOS 10.14+ -- **Docker**: Version 20.10+ required - -## 🔑 API Keys - -Open Notebook supports 
multiple AI providers. You'll need at least one: - -### Required for Basic Functionality -- **OpenAI**: For GPT models, embeddings, and TTS - - Get your key at [platform.openai.com](https://platform.openai.com) - - Provides: Language models, embeddings, speech services - -### Optional Providers -- **Anthropic**: For Claude models -- **Google**: For Gemini models -- **Groq**: For fast inference -- **Ollama**: For local models (no API key needed) - -See the [Model Providers Guide](../model-providers.md) for detailed setup instructions. - -## 🌍 Environment Variables - -### Core Configuration -```bash -# Database (auto-configured in Docker) -SURREAL_URL=ws://localhost:8000/rpc -SURREAL_USER=root -SURREAL_PASSWORD=root -SURREAL_NAMESPACE=open_notebook -SURREAL_DATABASE=production - -# Security (optional) -OPEN_NOTEBOOK_PASSWORD=your_secure_password -``` - -### AI Provider Keys -```bash -# OpenAI (recommended) -OPENAI_API_KEY=sk-... - -# Additional providers (optional) -ANTHROPIC_API_KEY=sk-ant-... -GOOGLE_API_KEY=AIzaSy... -GROQ_API_KEY=gsk_... -OLLAMA_API_BASE=http://localhost:11434 -``` - -## 🆘 Getting Help - -### Community Support -- **[Discord Server](https://discord.gg/37XJPXfz2w)** - Real-time help and discussion -- **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Bug reports and feature requests -- **[GitHub Discussions](https://github.com/lfnovo/open-notebook/discussions)** - Questions and ideas - -### Documentation -- **[User Guide](../user-guide/index.md)** - Complete feature documentation -- **[Troubleshooting](../troubleshooting/index.md)** - Common issues and solutions -- **[API Reference](../api-reference.md)** - REST API documentation - -## 📞 Support - -Having trouble with deployment? Here's how to get help: - -1. **Check the troubleshooting section** in each deployment guide -2. **Search existing issues** on GitHub -3. **Ask on Discord** for real-time help -4. **Create a GitHub issue** for bugs or feature requests - -Remember to include: -- Your operating system and version -- Deployment method used -- Error messages (if any) -- Steps to reproduce the issue - ---- - -**Ready to deploy?** Choose your deployment method above and follow the step-by-step guide! \ No newline at end of file diff --git a/docs/deployment/retry-configuration.md b/docs/deployment/retry-configuration.md deleted file mode 100644 index 397ef61b..00000000 --- a/docs/deployment/retry-configuration.md +++ /dev/null @@ -1,345 +0,0 @@ -# Retry Configuration Guide - -Open Notebook includes automatic retry capabilities for background commands to handle transient failures gracefully. This guide explains how retry works and how to configure it for your deployment. - -## Overview - -The retry system (powered by surreal-commands v1.2.0+) automatically retries failed commands when they encounter transient errors like: - -- **Database transaction conflicts** during concurrent operations -- **Network failures** when calling external APIs (embedding providers, LLMs) -- **Request timeouts** to external services -- **Rate limits** from third-party APIs - -Permanent errors (invalid input, authentication failures, etc.) are **not** retried and fail immediately. - -## How It Works - -### Architecture - -``` -Command Execution - ↓ -Try to execute - ↓ -Success? → Done - ↓ -Transient Error? (RuntimeError, ConnectionError, TimeoutError) - ↓ -Retry with backoff - ↓ -Max attempts reached? 
- ↓ -Final failure → Report error -``` - -### Retry Strategies - -**Exponential Jitter** (default, recommended): -- Waits: 1s → ~2s → ~4s → ~8s → ~16s (with randomization) -- Prevents "thundering herd" when many workers retry simultaneously -- Best for: Database conflicts, concurrent operations - -**Exponential**: -- Waits: 1s → 2s → 4s → 8s → 16s (predictable) -- Good for: API rate limits (predictable backoff helps with quota reset) - -**Fixed**: -- Waits: 2s → 2s → 2s → 2s → 2s (constant) -- Best for: Quick recovery scenarios - -**Random**: -- Waits: Random between min and max -- Use when: You want unpredictable retry timing - -## Global Configuration - -Configure default retry behavior for **all** commands via environment variables in your `.env` file: - -```bash -# Enable/disable retry globally (default: true) -SURREAL_COMMANDS_RETRY_ENABLED=true - -# Maximum retry attempts before giving up (default: 3) -SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=3 - -# Wait strategy between retry attempts (default: exponential_jitter) -# Options: exponential_jitter, exponential, fixed, random -SURREAL_COMMANDS_RETRY_WAIT_STRATEGY=exponential_jitter - -# Minimum wait time between retries in seconds (default: 1) -SURREAL_COMMANDS_RETRY_WAIT_MIN=1 - -# Maximum wait time between retries in seconds (default: 30) -SURREAL_COMMANDS_RETRY_WAIT_MAX=30 - -# Worker concurrency (affects likelihood of DB conflicts) -# Higher concurrency = more conflicts but faster processing -# Lower concurrency = fewer conflicts but slower processing -SURREAL_COMMANDS_MAX_TASKS=5 -``` - -### Tuning Global Defaults - -**For resource-constrained deployments** (low CPU/memory): -```bash -SURREAL_COMMANDS_MAX_TASKS=2 -SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=3 -SURREAL_COMMANDS_RETRY_WAIT_MAX=20 -``` -- Fewer concurrent tasks reduce conflict likelihood -- Lower max wait since conflicts are rare - -**For high-performance deployments** (powerful servers): -```bash -SURREAL_COMMANDS_MAX_TASKS=10 -SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=5 -SURREAL_COMMANDS_RETRY_WAIT_MAX=30 -``` -- More concurrent tasks for faster processing -- More retries to handle increased conflicts - -**For debugging** (disable retries to see immediate errors): -```bash -SURREAL_COMMANDS_RETRY_ENABLED=false -``` - -## Per-Command Configuration - -Individual commands can override global defaults. Open Notebook uses custom retry strategies for specific operations: - -### embed_chunk (Database Operations) - -Handles concurrent chunk embedding with retry for transaction conflicts: - -```python -@command( - "embed_chunk", - app="open_notebook", - retry={ - "max_attempts": 5, - "wait_strategy": "exponential_jitter", - "wait_min": 1, - "wait_max": 30, - "retry_on": [RuntimeError, ConnectionError, TimeoutError], - }, -) -``` - -**What it retries**: -- SurrealDB transaction conflicts (`RuntimeError`) -- Network failures to embedding provider (`ConnectionError`) -- Request timeouts (`TimeoutError`) - -**What it doesn't retry**: -- Invalid input (`ValueError`) -- Authentication errors -- Missing embedding model - -**Why 5 attempts?** -Database conflicts are cheap to retry (local operation), so we retry more aggressively. 
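To see what these settings translate to in practice, here is a small illustrative sketch of an exponential-jitter schedule. It mirrors the strategy described above, but the actual timing logic lives inside surreal-commands, so treat the formula as an approximation rather than the library's implementation:

```python
import random

def wait_schedule(max_attempts: int = 5, wait_min: float = 1, wait_max: float = 30) -> list[float]:
    """Approximate exponential-jitter waits: exponential growth, clamped, randomized."""
    waits = []
    for attempt in range(1, max_attempts):  # no wait needed after the final attempt
        ceiling = min(wait_max, wait_min * 2 ** (attempt - 1))
        waits.append(random.uniform(wait_min, max(wait_min, ceiling)))
    return waits

print(wait_schedule())  # e.g. [1.0, 1.6, 3.1, 7.4], randomized on every run
```

The randomization is what prevents a batch of workers that failed together from all retrying at the same instant.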
- -### vectorize_source & rebuild_embeddings (Orchestration) - -Orchestration commands that coordinate other jobs **disable retries** to fail fast: - -```python -@command("vectorize_source", app="open_notebook", retry=None) -``` - -**Why no retries?** -- Job submission failures should be immediately visible -- Allows quick debugging of orchestration issues -- Individual child jobs (`embed_chunk`) have their own retry logic - -## Common Scenarios - -### Issue: Vectorization fails with "transaction conflict" errors - -**Symptoms**: -``` -RuntimeError: Failed to commit transaction due to a read or write conflict -``` - -**Solution 1 - Reduce concurrency** (fewer conflicts): -```bash -SURREAL_COMMANDS_MAX_TASKS=3 -``` - -**Solution 2 - Increase retry attempts**: -```bash -SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=7 -``` - -**Solution 3 - Longer backoff** (give more time between retries): -```bash -SURREAL_COMMANDS_RETRY_WAIT_MAX=60 -``` - -### Issue: Embedding provider rate limits (429 errors) - -**Symptoms**: -``` -HTTP 429: Rate limit exceeded -``` - -**Solution - Configure longer waits**: -```bash -SURREAL_COMMANDS_RETRY_WAIT_MIN=5 -SURREAL_COMMANDS_RETRY_WAIT_MAX=120 -SURREAL_COMMANDS_RETRY_WAIT_STRATEGY=exponential -``` - -This gives the API quota time to reset between retries. - -### Issue: Slow/unstable network to embedding provider - -**Symptoms**: -``` -TimeoutError: Request timed out -ConnectionError: Failed to establish connection -``` - -**Solution - More retries with longer waits**: -```bash -SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=5 -SURREAL_COMMANDS_RETRY_WAIT_MAX=60 -``` - -### Issue: Want to see errors immediately (debugging) - -**Solution - Disable retries temporarily**: -```bash -SURREAL_COMMANDS_RETRY_ENABLED=false -``` - -Remember to re-enable after debugging! - -## Monitoring Retry Behavior - -### Check Worker Logs - -Retry attempts are logged automatically: - -``` -Transaction conflict for chunk 42 - will be retried by retry mechanism -[Retry] Attempt 2/5 for embed_chunk, waiting 2.3s -[Retry] Attempt 3/5 for embed_chunk, waiting 5.1s -Successfully embedded chunk 42 -``` - -### Look for Retry Patterns - -**High retry rate** (many retries happening): -- Consider reducing `SURREAL_COMMANDS_MAX_TASKS` -- Check if external services are slow/unstable -- May need to increase `SURREAL_COMMANDS_RETRY_WAIT_MAX` - -**Retries exhausted** (commands failing after all retries): -- Check if issue is actually permanent (auth error, invalid config) -- May need to increase `SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS` -- Check external service status - -**No retries** (operations always succeed first try): -- Your retry configuration is working well! 
-- Could potentially increase `SURREAL_COMMANDS_MAX_TASKS` for better performance
-
-## Best Practices
-
-### ✅ Do
-
-- **Use exponential_jitter for concurrent operations** (prevents thundering herd)
-- **Set reasonable max_attempts** (3-5 for most operations)
-- **Monitor retry rates** to tune configuration
-- **Test retry behavior** with large documents after config changes
-- **Document custom retry strategies** in your deployment notes
-
-### ❌ Don't
-
-- **Don't set max_attempts too high** (>10) - may mask real issues
-- **Don't use the fixed strategy for concurrent operations** - causes thundering herd
-- **Don't disable retries in production** unless debugging
-- **Don't set wait_max too low** (<5s) - may exhaust retries too quickly
-- **Don't forget to re-enable retries** after debugging
-
-## Advanced: Custom Retry Logic
-
-If you're developing custom commands, you can configure retry behavior:
-
-```python
-from surreal_commands import command
-
-@command(
-    "my_custom_command",
-    app="my_app",
-    retry={
-        "max_attempts": 3,
-        "wait_strategy": "exponential_jitter",
-        "wait_min": 1,
-        "wait_max": 30,
-        "retry_on": [RuntimeError, ConnectionError, TimeoutError],
-    },
-)
-async def my_custom_command(input_data):
-    try:
-        # Your command logic
-        result = await some_operation()
-        return result
-
-    except RuntimeError:
-        # Re-raise to trigger retry
-        raise
-
-    except ValueError as e:
-        # Don't retry - permanent error
-        return {"success": False, "error": str(e)}
-```
-
-**Key points**:
-- Exceptions in `retry_on` must be **re-raised** to trigger retries
-- Other exceptions should be caught and returned as failures
-- Transient errors: RuntimeError, ConnectionError, TimeoutError
-- Permanent errors: ValueError, AuthenticationError, etc.
-
-## Troubleshooting
-
-### Retries not working
-
-**Check 1**: Is retry enabled?
-```bash
-grep SURREAL_COMMANDS_RETRY_ENABLED .env
-# Should show: SURREAL_COMMANDS_RETRY_ENABLED=true
-```
-
-**Check 2**: Is the exception being re-raised?
-Check your command code - exceptions must be re-raised to trigger retries.
-
-**Check 3**: Is the exception in the `retry_on` list?
-Only exceptions listed in `retry_on` are retried.
-
-### Worker crashing on errors
-
-**Issue**: Worker crashes instead of retrying
-
-**Cause**: The exception is not being caught by the retry mechanism
-
-**Solution**: Check that the exception type is in the `retry_on` list and is being re-raised in the command.
-
-### Retries taking too long
-
-**Issue**: Commands keep retrying for a long time before failing
-
-**Cause**: `wait_max` is too high or `max_attempts` is too high
-
-**Solution**: Reduce the retry parameters:
-```bash
-SURREAL_COMMANDS_RETRY_MAX_ATTEMPTS=3
-SURREAL_COMMANDS_RETRY_WAIT_MAX=30
-```
-
-## References
-
-- [surreal-commands v1.2.0 Release](https://github.com/lfnovo/surreal-commands/releases/tag/v1.2.0)
-- [surreal-commands Retry Documentation](https://github.com/lfnovo/surreal-commands#retry-configuration)
-- [Issue #229: Batch Vectorization Transaction Conflicts](https://github.com/lfnovo/open-notebook/issues/229)
-- [Exponential Backoff Best Practices](https://en.wikipedia.org/wiki/Exponential_backoff)
diff --git a/docs/deployment/reverse-proxy.md b/docs/deployment/reverse-proxy.md
deleted file mode 100644
index a9685650..00000000
--- a/docs/deployment/reverse-proxy.md
+++ /dev/null
@@ -1,456 +0,0 @@
-# Reverse Proxy Configuration
-
-This guide helps you deploy Open Notebook behind a reverse proxy (nginx, Caddy, Traefik, etc.) or with a custom domain.
- -## ⭐ Simplified Configuration (v1.1+) - -Starting with v1.1, Open Notebook uses Next.js rewrites to dramatically simplify reverse proxy configuration. **You now only need to proxy to port 8502** - Next.js handles internal API routing automatically. - -### How It Works - -``` -Browser → Reverse Proxy → Port 8502 (Next.js) - ↓ (internal proxy) - Port 5055 (FastAPI) -``` - -Next.js rewrites automatically forward `/api/*` requests to the FastAPI backend on port 5055, so your reverse proxy only needs to know about one port! - -### Simple Configuration Examples - -#### Nginx (Recommended) - -```nginx -server { - listen 443 ssl http2; - server_name notebook.example.com; - - ssl_certificate /etc/nginx/ssl/fullchain.pem; - ssl_certificate_key /etc/nginx/ssl/privkey.pem; - - # Single location block - that's it! - location / { - proxy_pass http://open-notebook:8502; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_cache_bypass $http_upgrade; - } -} -``` - -#### Traefik - -```yaml -services: - open-notebook: - image: lfnovo/open_notebook:v1-latest-single - environment: - - API_URL=https://notebook.example.com - labels: - - "traefik.enable=true" - - "traefik.http.routers.notebook.rule=Host(`notebook.example.com`)" - - "traefik.http.routers.notebook.entrypoints=websecure" - - "traefik.http.routers.notebook.tls.certresolver=myresolver" - - "traefik.http.services.notebook.loadbalancer.server.port=8502" - networks: - - traefik-network -``` - -#### Caddy - -```caddy -notebook.example.com { - reverse_proxy open-notebook:8502 -} -``` - -#### Coolify - -1. Create a new service pointing to `lfnovo/open_notebook:v1-latest-single` -2. Set port to **8502** (not 5055!) -3. Add environment variable: `API_URL=https://your-domain.com` -4. Enable HTTPS in Coolify settings -5. Done! Coolify handles the reverse proxy automatically. - -### Environment Variables - -With the simplified approach, you typically only need: - -```bash -# Required for reverse proxy setups -API_URL=https://your-domain.com - -# Optional: Only needed for multi-container deployments -# Default is http://localhost:5055 (single-container) -# INTERNAL_API_URL=http://api-service:5055 -``` - -### Optional: Direct API Access for External Integrations - -If you have external scripts or integrations that need direct API access, you can still route `/api/*` directly to port 5055: - -```nginx -# Optional: Direct API access (for external integrations only) -location /api/ { - proxy_pass http://open-notebook:5055/api/; - # ... same headers as above -} - -# Primary route (handles browser traffic) -location / { - proxy_pass http://open-notebook:8502; - # ... same headers as above -} -``` - -**Note**: The simplified single-port approach (port 8502 only) works for 95% of use cases. Only add direct API routing if you specifically need it. - ---- - -## Legacy Configuration (Pre-v1.1) - -> **Note**: The configurations below are still supported but no longer necessary with v1.1+. New deployments should use the simplified configuration above. - -## The API_URL Environment Variable - -Starting with v1.0+, Open Notebook supports runtime configuration of the API URL through the `API_URL` environment variable. This means you can use the same Docker image in different deployment scenarios without rebuilding. 
- -### How It Works - -The frontend uses a three-tier priority system to determine the API URL: - -1. **Runtime Configuration** (Highest Priority): `API_URL` environment variable set at container runtime -2. **Build-time Configuration**: `NEXT_PUBLIC_API_URL` baked into the Docker image -3. **Auto-detection** (Fallback): Infers from the incoming HTTP request headers - -**Auto-detection details:** -- The Next.js frontend analyzes the incoming HTTP request -- Extracts the hostname from the `host` header -- Respects the `X-Forwarded-Proto` header (for HTTPS behind reverse proxies) -- Constructs the API URL as `{protocol}://{hostname}:5055` -- Example: Request to `http://10.20.30.20:8502` → API URL becomes `http://10.20.30.20:5055` - -## Common Scenarios - -### Scenario 1: Docker on Localhost (Default) - -No configuration needed! The system auto-detects. - -```bash -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - lfnovo/open_notebook:v1-latest-single -``` - -### Scenario 2: Docker on Remote Server (LAN/VPS) - -Access via IP address - auto-detection works, but you can be explicit: - -```bash -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -e API_URL=http://192.168.1.100:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - lfnovo/open_notebook:v1-latest-single -``` - -> **Note**: Don't include `/api` at the end - the system adds this automatically! - -### Scenario 3: Behind Reverse Proxy with Custom Domain - -This is where `API_URL` is **essential**. Your reverse proxy handles HTTPS and routing. - -> **Important**: If your reverse proxy forwards `/api` requests to the backend, set `API_URL` to just the domain (without `/api` suffix). The frontend will append `/api` automatically. 
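To make the priority order above concrete, here is an illustrative sketch of the resolution logic. The real implementation lives in the Next.js frontend; this is a simplified model for reasoning about your setup, not the actual code:

```python
def resolve_api_url(runtime_api_url, build_time_api_url, host_header, forwarded_proto):
    """Simplified model of the frontend's three-tier API URL resolution."""
    if runtime_api_url:              # 1. API_URL set at container runtime
        return runtime_api_url
    if build_time_api_url:           # 2. NEXT_PUBLIC_API_URL baked into the image
        return build_time_api_url
    protocol = forwarded_proto or "http"    # 3. Auto-detect from the request
    hostname = host_header.split(":")[0]    # drop the frontend port
    return f"{protocol}://{hostname}:5055"

# Request to http://10.20.30.20:8502 with nothing configured:
print(resolve_api_url(None, None, "10.20.30.20:8502", None))
# -> http://10.20.30.20:5055
```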
- -#### Example: nginx + Docker Compose - -**docker-compose.yml:** -```yaml -version: '3.8' - -services: - open-notebook: - image: lfnovo/open_notebook:v1-latest-single - container_name: open-notebook - environment: - - API_URL=https://notebook.example.com - - OPENAI_API_KEY=${OPENAI_API_KEY} - volumes: - - ./notebook_data:/app/data - - ./surreal_data:/mydata - ports: - - "8502:8502" # Frontend - - "5055:5055" # API - restart: unless-stopped - - nginx: - image: nginx:alpine - container_name: nginx-proxy - ports: - - "80:80" - - "443:443" - volumes: - - ./nginx.conf:/etc/nginx/nginx.conf:ro - - ./ssl:/etc/nginx/ssl:ro - depends_on: - - open-notebook - restart: unless-stopped -``` - -**nginx.conf:** -```nginx -http { - upstream frontend { - server open-notebook:8502; - } - - upstream api { - server open-notebook:5055; - } - - server { - listen 80; - server_name notebook.example.com; - return 301 https://$server_name$request_uri; - } - - server { - listen 443 ssl http2; - server_name notebook.example.com; - - ssl_certificate /etc/nginx/ssl/fullchain.pem; - ssl_certificate_key /etc/nginx/ssl/privkey.pem; - - # API - location /api/ { - proxy_pass http://api/api/; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } - - # Frontend (catch-all - handles /config automatically) - location / { - proxy_pass http://frontend; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection 'upgrade'; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_cache_bypass $http_upgrade; - } - } -} -``` - -### Scenario 4: Behind Reverse Proxy with Subdomain - -If you want API on a separate subdomain: - -**docker-compose.yml:** -```yaml -services: - open-notebook: - image: lfnovo/open_notebook:v1-latest-single - environment: - - API_URL=https://api.notebook.example.com - # ... other env vars -``` - -**nginx.conf:** -```nginx -# Frontend server -server { - listen 443 ssl http2; - server_name notebook.example.com; - - location / { - proxy_pass http://open-notebook:8502; - # ... proxy headers - } -} - -# API server -server { - listen 443 ssl http2; - server_name api.notebook.example.com; - - location / { - proxy_pass http://open-notebook:5055; - # ... 
proxy headers
-    }
-}
-```
-
-### Scenario 5: Traefik
-
-**docker-compose.yml:**
-```yaml
-version: '3.8'
-
-services:
-  open-notebook:
-    image: lfnovo/open_notebook:v1-latest-single
-    environment:
-      - API_URL=https://notebook.example.com
-    labels:
-      # Frontend
-      - "traefik.enable=true"
-      - "traefik.http.routers.notebook-frontend.rule=Host(`notebook.example.com`)"
-      - "traefik.http.routers.notebook-frontend.entrypoints=websecure"
-      - "traefik.http.routers.notebook-frontend.tls.certresolver=myresolver"
-      - "traefik.http.services.notebook-frontend.loadbalancer.server.port=8502"
-
-      # API (higher priority to match first)
-      - "traefik.http.routers.notebook-api.rule=Host(`notebook.example.com`) && PathPrefix(`/api`)"
-      - "traefik.http.routers.notebook-api.entrypoints=websecure"
-      - "traefik.http.routers.notebook-api.tls.certresolver=myresolver"
-      - "traefik.http.routers.notebook-api.priority=100"
-      - "traefik.http.services.notebook-api.loadbalancer.server.port=5055"
-    networks:
-      - traefik-network
-
-networks:
-  traefik-network:
-    external: true
-```
-
-### Scenario 6: Caddy
-
-**Caddyfile:**
-```caddy
-notebook.example.com {
-    # API
-    reverse_proxy /api/* open-notebook:5055
-
-    # Frontend (catch-all - handles /config automatically; note there is
-    # no path matcher, because in Caddy a bare `/` matches only the root path)
-    reverse_proxy open-notebook:8502
-}
-```
-
-**docker-compose.yml:**
-```yaml
-services:
-  open-notebook:
-    image: lfnovo/open_notebook:v1-latest-single
-    environment:
-      - API_URL=https://notebook.example.com
-    # No need to expose ports if using Caddy in the same network
-```
-
-## Troubleshooting
-
-### Connection Error: Unable to connect to server
-
-**Symptoms**: Frontend displays "Unable to connect to server. Please check if the API is running."
-
-**Possible Causes**:
-
-1. **API_URL not set correctly** for your reverse proxy setup
-   - Check the browser console (F12) for connection errors
-   - Look for logs showing what URL the frontend is trying to reach
-
-2. **Reverse proxy not forwarding to the correct port**
-   - The API should be accessible at the URL specified in `API_URL`
-   - Test: `curl https://your-domain.com/api/config` should return JSON
-
-3. **CORS issues**
-   - Ensure the `X-Forwarded-Proto` and `X-Forwarded-For` headers are set in your proxy config
-   - Check the API logs for CORS errors
-
-4. **SSL/TLS certificate issues**
-   - Ensure your reverse proxy has valid SSL certificates
-   - Watch for mixed content errors (an HTTPS frontend trying to reach an HTTP API)
-
-### Frontend adds `:5055` to URL when using reverse proxy (versions ≤ 1.0.10)
-
-**Symptoms** (only in versions 1.0.10 and earlier):
-- You set `API_URL=https://your-domain.com`
-- Browser console shows: "Attempted URL: https://your-domain.com:5055/api/config"
-- CORS errors with "Status code: (null)"
-
-**Root Cause**:
-In versions ≤ 1.0.10, the frontend's config endpoint was at `/api/runtime-config`, which gets intercepted by reverse proxies routing all `/api/*` requests to the backend. This prevented the frontend from reading the `API_URL` environment variable.
-
-**Solution**:
-Upgrade to version 1.0.11 or later. The config endpoint has been moved to `/config`, which avoids the `/api/*` routing conflict.
-
-**Note**: Most reverse proxy configurations with a catch-all rule like `location / { proxy_pass http://frontend; }` will automatically route `/config` to the frontend without any additional configuration needed.
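A quick way to verify the routing end to end is to request both endpoints and compare what comes back; a minimal sketch (assumes no password is set, otherwise add an `Authorization: Bearer ...` header):

```python
import requests

BASE = "https://notebook.example.com"  # your domain

# Frontend route: serves the runtime configuration to the browser
print("/config:", requests.get(f"{BASE}/config", timeout=10).status_code)

# API route: should return JSON from FastAPI, not HTML from the frontend
r = requests.get(f"{BASE}/api/config", timeout=10)
print("/api/config:", r.status_code, r.headers.get("content-type"))
```

If `/api/config` comes back as HTML, your proxy is sending API traffic to the frontend; if `/config` returns 404, the catch-all rule is missing.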
- -**Only if you have issues**, explicitly configure the `/config` route: - -```nginx -# Only needed if your reverse proxy doesn't have a catch-all rule -location = /config { - proxy_pass http://open-notebook:8502; - proxy_http_version 1.1; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; -} -``` - -**Verification**: -Check browser console (F12) - should see: `✅ [Config] Runtime API URL from server: https://your-domain.com` - -### How to Debug - -1. **Check browser console** (F12 → Console tab): - - Look for messages starting with `🔧 [Config]` - - These show the configuration detection process - - You'll see which API URL is being used - -2. **Test API directly**: - ```bash - # Should return JSON config - curl https://your-domain.com/api/config - ``` - -3. **Check Docker logs**: - ```bash - docker logs open-notebook - ``` - - Look for frontend and API startup messages - - Check for connection errors - -4. **Verify environment variable**: - ```bash - docker exec open-notebook env | grep API_URL - ``` - -### Missing Authorization Header - -**Symptoms**: API returns `{"detail": "Missing authorization header"}` - -This happens when: -- You have set `OPEN_NOTEBOOK_PASSWORD` for authentication -- You're trying to access `/api/config` directly without logging in first - -**Solution**: This is expected behavior! The frontend handles this automatically. Just access the frontend URL and log in through the UI. - -## Best Practices - -1. **Always use HTTPS** in production with reverse proxies -2. **Set `API_URL` explicitly** when using reverse proxies to avoid auto-detection issues -3. **Use environment files** (`.env` or `docker.env`) to manage configuration -4. **Test your setup** by accessing the frontend and checking browser console logs -5. **Keep ports 5055 and 8502 accessible** from your reverse proxy container - -## Additional Resources - -- [Docker Deployment Guide](./docker.md) -- [Security Guide](./security.md) -- [Troubleshooting](../troubleshooting/common-issues.md) diff --git a/docs/deployment/security.md b/docs/deployment/security.md deleted file mode 100644 index 09f22c80..00000000 --- a/docs/deployment/security.md +++ /dev/null @@ -1,481 +0,0 @@ -# Security Configuration - -Open Notebook includes optional password protection and security features for users who need to deploy their instances publicly or in shared environments. - -## 🔒 Password Protection - -### When to Use Password Protection - -Password protection is recommended for: - -- **Public cloud deployments** - PikaPods, Railway, DigitalOcean, AWS, etc. 
-- **Shared network environments** - Corporate networks, shared servers -- **Team deployments** - Multiple users accessing the same instance -- **Production environments** - Any deployment accessible beyond localhost - -### When NOT to Use Password Protection - -Skip password protection for: - -- **Local development** - Running on your personal machine -- **Private networks** - Secure, isolated network environments -- **Single-user setups** - Only you have access to the machine -- **Testing environments** - Temporary or development instances - -## 🚀 Quick Setup - -### Docker Deployment - -For Docker deployments, add the password to your environment: - -```yaml -# docker-compose.yml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" - environment: - - OPENAI_API_KEY=your_openai_key - - OPEN_NOTEBOOK_PASSWORD=your_secure_password - volumes: - - ./notebook_data:/app/data - restart: always -``` - -Or using a `.env` file: - -```env -# .env file -OPENAI_API_KEY=your_openai_key -OPEN_NOTEBOOK_PASSWORD=your_secure_password -``` - -### Development Setup - -For development installations, add to your `.env` file: - -```env -# .env file in project root -OPEN_NOTEBOOK_PASSWORD=your_secure_password -``` - -## 🔐 Password Requirements - -### Choosing a Secure Password - -- **Length**: Minimum 12 characters, preferably 20+ -- **Complexity**: Mix of uppercase, lowercase, numbers, and symbols -- **Uniqueness**: Don't reuse passwords from other services -- **Avoid**: Common words, personal information, predictable patterns - -### Password Examples - -```bash -# Good passwords -OPEN_NOTEBOOK_PASSWORD=MySecure2024!Research#Tool -OPEN_NOTEBOOK_PASSWORD=Notebook$Dev$2024$Strong! - -# Bad passwords (don't use these) -OPEN_NOTEBOOK_PASSWORD=password123 -OPEN_NOTEBOOK_PASSWORD=opennotebook -OPEN_NOTEBOOK_PASSWORD=admin -``` - -### Password Management - -- **Use a password manager** to generate and store the password -- **Document the password** in your team's secure password vault -- **Rotate passwords** regularly for production deployments -- **Share securely** - Never send passwords via email or chat - -## 🛡️ How Security Works - -### React frontend Protection - -When password protection is enabled: - -1. **Login form** appears on first visit -2. **Session storage** - Password stored in browser session -3. **Persistent login** - Users stay logged in until browser closure -4. 
**No logout button** - Clear browser data to log out - -### API Protection - -All API endpoints require authentication: - -```bash -# API calls require Authorization header -curl -H "Authorization: Bearer your_password" \ - http://localhost:5055/api/notebooks -``` - -### Excluded Endpoints - -These endpoints work without authentication: - -- **Health check**: `/health` - System status -- **API documentation**: `/docs` - OpenAPI documentation -- **OpenAPI spec**: `/openapi.json` - API schema - -## 🔧 Configuration Examples - -### Single Container with Security - -```yaml -# docker-compose.single.yml -services: - open_notebook_single: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" - - "5055:5055" - environment: - - OPENAI_API_KEY=sk-your-openai-key - - OPEN_NOTEBOOK_PASSWORD=your_secure_password - - ANTHROPIC_API_KEY=sk-ant-your-anthropic-key - volumes: - - ./notebook_data:/app/data - - ./surreal_single_data:/mydata - restart: always -``` - -### Multi-Container with Security - -```yaml -# docker-compose.yml -services: - surrealdb: - image: surrealdb/surrealdb:v2 - ports: - - "127.0.0.1:8000:8000" # Bind to localhost only - command: start --log warn --user root --pass root file:///mydata/database.db - volumes: - - ./surreal_data:/mydata - restart: always - - open_notebook: - image: lfnovo/open_notebook:v1-latest - ports: - - "8502:8502" - - "5055:5055" - environment: - - OPENAI_API_KEY=sk-your-openai-key - - OPEN_NOTEBOOK_PASSWORD=your_secure_password - - SURREAL_URL=ws://surrealdb:8000/rpc - - SURREAL_USER=root - - SURREAL_PASSWORD=root - volumes: - - ./notebook_data:/app/data - depends_on: - - surrealdb - restart: always -``` - -### Development with Security - -```env -# .env file for development -OPEN_NOTEBOOK_PASSWORD=dev_password_2024 - -# Database -SURREAL_URL=ws://localhost:8000/rpc -SURREAL_USER=root -SURREAL_PASSWORD=root -SURREAL_NAMESPACE=open_notebook -SURREAL_DATABASE=development - -# AI Providers -OPENAI_API_KEY=sk-your-openai-key -ANTHROPIC_API_KEY=sk-ant-your-anthropic-key -``` - -## 🌐 Production Security - -### HTTPS/TLS Configuration - -**Always use HTTPS** in production. 
Here's an nginx reverse proxy example: - -```nginx -# /etc/nginx/sites-available/open-notebook -server { - listen 80; - server_name your-domain.com; - return 301 https://$server_name$request_uri; -} - -server { - listen 443 ssl http2; - server_name your-domain.com; - - ssl_certificate /etc/letsencrypt/live/your-domain.com/fullchain.pem; - ssl_certificate_key /etc/letsencrypt/live/your-domain.com/privkey.pem; - - # Security headers - add_header X-Frame-Options DENY; - add_header X-Content-Type-Options nosniff; - add_header X-XSS-Protection "1; mode=block"; - add_header Strict-Transport-Security "max-age=31536000; includeSubDomains"; - - # React frontend - location / { - proxy_pass http://127.0.0.1:8502; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - # WebSocket support - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - } - - # API endpoints - location /api/ { - proxy_pass http://127.0.0.1:5055; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - } -} -``` - -### Firewall Configuration - -Configure your firewall to restrict access: - -```bash -# UFW (Ubuntu) -sudo ufw allow ssh -sudo ufw allow 80/tcp -sudo ufw allow 443/tcp -sudo ufw deny 8502/tcp # Block direct access to Next.js -sudo ufw deny 5055/tcp # Block direct access to API -sudo ufw enable - -# iptables -iptables -A INPUT -p tcp --dport 22 -j ACCEPT -iptables -A INPUT -p tcp --dport 80 -j ACCEPT -iptables -A INPUT -p tcp --dport 443 -j ACCEPT -iptables -A INPUT -p tcp --dport 8502 -j DROP -iptables -A INPUT -p tcp --dport 5055 -j DROP -``` - -### Docker Security - -```yaml -# Production docker-compose.yml with security -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest - ports: - - "127.0.0.1:8502:8502" # Bind to localhost only - - "127.0.0.1:5055:5055" - environment: - - OPEN_NOTEBOOK_PASSWORD=your_secure_password - volumes: - - ./notebook_data:/app/data - restart: always - security_opt: - - no-new-privileges:true - read_only: true - tmpfs: - - /tmp - - /var/tmp - deploy: - resources: - limits: - memory: 2G - cpus: "1.0" -``` - -## 🔍 API Authentication - -### Making Authenticated API Calls - -```bash -# Get all notebooks -curl -H "Authorization: Bearer your_password" \ - http://localhost:5055/api/notebooks - -# Create a new notebook -curl -X POST \ - -H "Authorization: Bearer your_password" \ - -H "Content-Type: application/json" \ - -d '{"name": "My Notebook", "description": "Research notes"}' \ - http://localhost:5055/api/notebooks - -# Upload a file -curl -X POST \ - -H "Authorization: Bearer your_password" \ - -F "file=@document.pdf" \ - http://localhost:5055/api/sources/upload -``` - -### Python API Client - -```python -import requests - -class OpenNotebookClient: - def __init__(self, base_url: str, password: str): - self.base_url = base_url - self.headers = {"Authorization": f"Bearer {password}"} - - def get_notebooks(self): - response = requests.get( - f"{self.base_url}/api/notebooks", - headers=self.headers - ) - return response.json() - - def create_notebook(self, name: str, description: str = None): - data = {"name": name, "description": description} - response = requests.post( - f"{self.base_url}/api/notebooks", - headers=self.headers, - json=data - ) - return response.json() - 
-# Usage -client = OpenNotebookClient("http://localhost:5055", "your_password") -notebooks = client.get_notebooks() -``` - -## 🚨 Security Considerations - -### Important Limitations - -Open Notebook's password protection provides **basic access control**, not enterprise-grade security: - -- **Plain text transmission** - Passwords sent over HTTP (use HTTPS) -- **No password hashing** - Passwords stored in memory as plain text -- **No user management** - Single password for all users -- **No session timeout** - Sessions persist until browser closure -- **No rate limiting** - No protection against brute force attacks -- **No audit logging** - No security event logging - -### Risk Mitigation - -1. **Use HTTPS** - Always encrypt traffic with TLS -2. **Strong passwords** - Use complex, unique passwords -3. **Network security** - Implement proper firewall rules -4. **Regular updates** - Keep containers and dependencies updated -5. **Monitoring** - Monitor access logs and system resources -6. **Backup strategy** - Regular backups of data and configurations - -### Enterprise Considerations - -For enterprise deployments requiring advanced security: - -- **SSO integration** - Consider implementing OAuth2/SAML -- **Role-based access** - Implement user roles and permissions -- **Audit logging** - Track all user actions and API calls -- **Rate limiting** - Implement API rate limiting -- **Database encryption** - Encrypt data at rest -- **Network segmentation** - Isolate services in secure networks - -## 🔧 Troubleshooting - -### Common Security Issues - -#### Password Not Working - -```bash -# Check environment variable is set -docker compose exec open_notebook env | grep OPEN_NOTEBOOK_PASSWORD - -# Check container logs -docker compose logs open_notebook | grep -i auth - -# Test API directly -curl -H "Authorization: Bearer your_password" \ - http://localhost:5055/health -``` - -#### UI Shows Login Form but API Doesn't - -```bash -# Environment variable might not be set for API -docker compose exec open_notebook env | grep OPEN_NOTEBOOK_PASSWORD - -# Restart services -docker compose restart - -# Check both services are using the same password -docker compose logs | grep -i password -``` - -#### 401 Unauthorized Errors - -```bash -# Check authorization header format -curl -v -H "Authorization: Bearer your_password" \ - http://localhost:5055/api/notebooks - -# Verify password matches environment variable -echo $OPEN_NOTEBOOK_PASSWORD - -# Check for special characters in password -echo "Password contains: $(echo $OPEN_NOTEBOOK_PASSWORD | wc -c) characters" -``` - -#### Cannot Access After Setting Password - -```bash -# Clear browser cache and cookies -# Try incognito/private mode -# Check browser console for errors -# Verify password is correct -``` - -### Security Testing - -```bash -# Test without password (should fail) -curl http://localhost:5055/api/notebooks - -# Test with correct password (should succeed) -curl -H "Authorization: Bearer your_password" \ - http://localhost:5055/api/notebooks - -# Test health endpoint (should work without password) -curl http://localhost:5055/health - -# Test documentation (should work without password) -curl http://localhost:5055/docs -``` - -## 📞 Getting Help - -### Security Issues - -If you discover security vulnerabilities: - -1. **Do not open public issues** for security problems -2. **Contact the maintainer** directly via email -3. **Provide detailed information** about the vulnerability -4. 
**Allow time for fixes** before public disclosure - -### Community Support - -For security configuration help: - -- **[Discord Server](https://discord.gg/37XJPXfz2w)** - Real-time help -- **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Configuration problems -- **[Documentation](../index.md)** - Additional guides - -### Best Practices - -1. **Test security** thoroughly before production deployment -2. **Monitor logs** regularly for suspicious activity -3. **Keep updated** with security patches and updates -4. **Follow principle of least privilege** in network configuration -5. **Regular security reviews** of your deployment - ---- - -**Ready to secure your deployment?** Start with the Quick Setup section above and always use HTTPS in production! \ No newline at end of file diff --git a/docs/deployment/single-container.md b/docs/deployment/single-container.md deleted file mode 100644 index 5e79e419..00000000 --- a/docs/deployment/single-container.md +++ /dev/null @@ -1,351 +0,0 @@ -# Single Container Deployment - -For users who prefer an all-in-one container solution (e.g., PikaPods, Railway, simple cloud deployments), Open Notebook provides a single-container image that includes all services: SurrealDB, API backend, background worker, and React frontend. - -## 📋 Overview - -The single-container deployment packages everything you need: -- **SurrealDB**: Database service -- **FastAPI**: REST API backend -- **Background Worker**: For podcast generation and transformations -- **Next.js**: Web UI interface - -All services are managed by supervisord with proper startup ordering, making it perfect for platforms that prefer single-container deployments. - -## 🚀 Quick Start - -### Option 1: Docker Compose (Recommended) - -This is the easiest way to get started with persistent data. - -1. **Create a project directory**: - ```bash - mkdir open-notebook - cd open-notebook - ``` - -2. **Create a `docker-compose.single.yml` file**: - ```yaml - services: - open_notebook_single: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" # React frontend - - "5055:5055" # REST API - environment: - # Required: Add your API keys here - - OPENAI_API_KEY=your_openai_key - - ANTHROPIC_API_KEY=your_anthropic_key - - # Optional: Additional providers - - GOOGLE_API_KEY=your_google_key - - GROQ_API_KEY=your_groq_key - - # Optional: Password protection for public deployments - - OPEN_NOTEBOOK_PASSWORD=your_secure_password - volumes: - - ./notebook_data:/app/data # Application data - - ./surreal_single_data:/mydata # SurrealDB data - restart: always - ``` - -3. **Start the container**: - ```bash - docker compose -f docker-compose.single.yml up -d - ``` - -4. **Access the application**: - - React frontend: http://localhost:8502 - - REST API: http://localhost:5055 - - API Documentation: http://localhost:5055/docs - -### Option 2: Direct Docker Run - -For quick testing without docker-compose: - -```bash -docker run -d \ - --name open-notebook-single \ - -p 8502:8502 \ - -p 5055:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_single_data:/mydata \ - -e OPENAI_API_KEY=your_openai_key \ - -e ANTHROPIC_API_KEY=your_anthropic_key \ - -e OPEN_NOTEBOOK_PASSWORD=your_secure_password \ - lfnovo/open_notebook:v1-latest-single -``` - -## 🌐 Platform-Specific Deployments - -### PikaPods - -Perfect for PikaPods one-click deployment: - -1. **Use this configuration**: - ``` - Image: lfnovo/open_notebook:v1-latest-single - Port: 8502 - ``` - -2. 
**Set environment variables in PikaPods**: - ``` - OPENAI_API_KEY=your_openai_key - OPEN_NOTEBOOK_PASSWORD=your_secure_password - ``` - -3. **Mount volumes**: - - `/app/data` for application data - - `/mydata` for database files - -### Railway - -For Railway deployment: - -1. **Connect your GitHub repository** or use the template -2. **Set environment variables**: - ``` - OPENAI_API_KEY=your_openai_key - OPEN_NOTEBOOK_PASSWORD=your_secure_password - ``` -3. **Configure volumes** in Railway dashboard for data persistence - -### DigitalOcean App Platform - -1. **Create a new app** from Docker Hub -2. **Use image**: `lfnovo/open_notebook:v1-latest-single` -3. **Set environment variables** in the app settings -4. **Configure persistent storage** for `/app/data` and `/mydata` - -## 🔧 Configuration - -### Environment Variables - -The single-container deployment uses the same environment variables as the multi-container setup, but with SurrealDB configured for localhost connection: - -```bash -# Database connection (automatically configured) -SURREAL_URL="ws://localhost:8000/rpc" -SURREAL_USER="root" -SURREAL_PASSWORD="root" -SURREAL_NAMESPACE="open_notebook" -SURREAL_DATABASE="staging" - -# Required: At least one AI provider -OPENAI_API_KEY=your_openai_key - -# Optional: Additional AI providers -ANTHROPIC_API_KEY=your_anthropic_key -GOOGLE_API_KEY=your_google_key -GROQ_API_KEY=your_groq_key -OLLAMA_API_BASE=http://your-ollama-host:11434 - -# Optional: Security for public deployments -OPEN_NOTEBOOK_PASSWORD=your_secure_password - -# Optional: Advanced TTS -ELEVENLABS_API_KEY=your_elevenlabs_key -``` - -### Data Persistence - -**Critical**: Always mount these volumes to persist data between container restarts: - -1. **`/app/data`** - Application data (notebooks, sources, uploads) -2. **`/mydata`** - SurrealDB database files - -**Example with proper volumes**: -```yaml -volumes: - - ./notebook_data:/app/data # Your notebooks and sources - - ./surreal_single_data:/mydata # Database files -``` - -## 🔒 Security - -### Password Protection - -For public deployments, **always set a password**: - -```bash -OPEN_NOTEBOOK_PASSWORD=your_secure_password -``` - -This protects both the React frontend and REST API with password authentication. - -### Security Best Practices - -1. **Use HTTPS**: Deploy behind a reverse proxy with SSL -2. **Strong Password**: Use a complex, unique password -3. **Regular Updates**: Keep the container image updated -4. **Network Security**: Configure firewall rules appropriately -5. **Monitor Access**: Check logs for suspicious activity - -## 🏗️ Building from Source - -To build the single-container image yourself: - -```bash -# Clone the repository -git clone https://github.com/lfnovo/open-notebook -cd open-notebook - -# Build the single-container image -make docker-build-single-dev - -# Or build with multi-platform support -make docker-build-single -``` - -## 📊 Service Management - -### Startup Order - -Services start in this order with proper delays: -1. **SurrealDB** (5 seconds startup time) -2. **API Backend** (3 seconds startup time) -3. **Background Worker** (3 seconds startup time) -4. **React frontend** (5 seconds startup time) - -### Service Monitoring - -All services are managed by supervisord. 
Check service status: - -```bash -# View all services -docker exec -it open-notebook-single supervisorctl status - -# View specific service logs -docker exec -it open-notebook-single supervisorctl tail -f api -docker exec -it open-notebook-single supervisorctl tail -f streamlit -``` - -## 💻 Resource Requirements - -### Minimum Requirements -- **Memory**: 1GB RAM -- **CPU**: 1 core -- **Storage**: 10GB (for data persistence) -- **Network**: Internet connection for AI providers - -### Recommended for Production -- **Memory**: 2GB+ RAM -- **CPU**: 2+ cores -- **Storage**: 50GB+ (for larger datasets) -- **Network**: Stable high-speed internet - -## 🔍 Troubleshooting - -### Container Won't Start - -**Check the logs**: -```bash -docker logs open-notebook-single -``` - -**Common issues**: -- Insufficient memory (increase to 2GB+) -- Port conflicts (change port mapping) -- Missing API keys (check environment variables) - -### Database Connection Issues - -**Symptoms**: API errors, empty notebooks, connection timeouts - -**Solutions**: -1. **Check memory**: SurrealDB needs at least 512MB -2. **Verify volumes**: Ensure `/mydata` is mounted and writable -3. **Check startup order**: Wait for full startup (30-60 seconds) -4. **Restart container**: Sometimes a fresh start helps - -### Service Startup Problems - -**Check individual services**: -```bash -# Enter the container -docker exec -it open-notebook-single bash - -# Check service status -supervisorctl status - -# Restart specific service -supervisorctl restart api -supervisorctl restart streamlit -``` - -### Performance Issues - -**Symptoms**: Slow response times, timeouts - -**Solutions**: -1. **Increase memory**: Allocate 2GB+ RAM -2. **Check CPU**: Ensure adequate CPU resources -3. **Monitor logs**: Look for performance bottlenecks -4. 
**Optimize models**: Use faster models for real-time tasks - -## 📊 Comparison: Single vs Multi-Container - -| Feature | Single-Container | Multi-Container | -|---------|------------------|-----------------| -| **Deployment** | One container | Multiple containers | -| **Complexity** | Simple | More complex | -| **Scaling** | All services together | Independent scaling | -| **Resource Control** | Shared resources | Fine-grained control | -| **Debugging** | All logs in one place | Separate service logs | -| **Platform Support** | Excellent for PaaS | Better for Kubernetes | -| **Memory Usage** | More efficient | More flexible | -| **Startup Time** | Faster | Slower | - -## 🎯 When to Use Single-Container - -### ✅ Use Single-Container When: -- **Platform requirements**: PikaPods, Railway, or similar platforms -- **Simple deployment**: You want the easiest possible setup -- **Resource constraints**: Limited memory/CPU resources -- **Quick testing**: Rapid deployment for testing -- **Single user**: Personal use without scaling needs - -### ❌ Use Multi-Container When: -- **Production scaling**: Need to scale services independently -- **Resource optimization**: Want fine-grained resource control -- **Development**: Building/debugging the application -- **High availability**: Need service-level redundancy -- **Complex networking**: Custom networking requirements - -## 🆘 Getting Help - -### Quick Diagnostics - -Before asking for help, gather this information: - -```bash -# Container status -docker ps - -# Container logs -docker logs open-notebook-single - -# Service status inside container -docker exec -it open-notebook-single supervisorctl status - -# Resource usage -docker stats open-notebook-single -``` - -### Community Support - -- **[Discord Server](https://discord.gg/37XJPXfz2w)** - Real-time help and discussion -- **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Bug reports and feature requests -- **[Documentation](../index.md)** - Complete documentation - -### Common Solutions - -1. **Port conflicts**: Change port mapping in docker-compose -2. **Memory issues**: Increase container memory allocation -3. **Volume permissions**: Ensure mounted volumes are writable -4. **API key errors**: Verify environment variables are set correctly -5. **Startup timeouts**: Wait 60+ seconds for full service startup - ---- - -**Ready to deploy?** Start with Option 1 (Docker Compose) above for the best experience! \ No newline at end of file diff --git a/docs/development/api-reference.md b/docs/development/api-reference.md deleted file mode 100644 index ea3d3026..00000000 --- a/docs/development/api-reference.md +++ /dev/null @@ -1,1497 +0,0 @@ -# API Reference - -Open Notebook provides a comprehensive REST API for programmatic access to all functionality. This document covers all available endpoints, authentication, request/response formats, and usage examples. - -## 🔗 Base Information - -- **Base URL**: `http://localhost:5055` (default development) -- **Content Type**: `application/json` -- **Authentication**: Optional password-based authentication -- **API Version**: v0.2.2 - -## 🔐 Authentication - -Open Notebook supports optional password-based authentication via the `APP_PASSWORD` environment variable. 
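For scripted access it is convenient to attach the header once on a session instead of repeating it per request; a minimal sketch using `requests` (assumes password authentication is enabled):

```python
import requests

session = requests.Session()
session.headers["Authorization"] = "Bearer YOUR_PASSWORD"

# Every call through the session now carries the header automatically
notebooks = session.get("http://localhost:5055/api/notebooks").json()
print(f"{len(notebooks)} notebooks")
```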
- -### Authentication Header - -```bash -# If APP_PASSWORD is set -curl -H "Authorization: Bearer YOUR_PASSWORD" \ - http://localhost:5055/api/notebooks -``` - -### Authentication Responses - -**401 Unauthorized**: -```json -{ - "detail": "Authentication required" -} -``` - -## 📚 Notebooks API - -Manage notebook collections and organization. - -### GET /api/notebooks - -Get all notebooks with optional filtering and ordering. - -**Query Parameters**: -- `archived` (boolean, optional): Filter by archived status -- `order_by` (string, optional): Order by field and direction (default: "updated desc") - -**Response**: -```json -[ - { - "id": "notebook:uuid", - "name": "My Research Notebook", - "description": "Research on AI applications", - "archived": false, - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -**Example**: -```bash -curl -X GET "http://localhost:5055/api/notebooks?archived=false&order_by=created desc" -``` - -### POST /api/notebooks - -Create a new notebook. - -**Request Body**: -```json -{ - "name": "My New Notebook", - "description": "Description of the notebook" -} -``` - -**Response**: Same as GET single notebook - -**Example**: -```bash -curl -X POST http://localhost:5055/api/notebooks \ - -H "Content-Type: application/json" \ - -d '{"name": "Research Project", "description": "AI research notebook"}' -``` - -### GET /api/notebooks/{notebook_id} - -Get a specific notebook by ID. - -**Path Parameters**: -- `notebook_id` (string): Notebook ID - -**Response**: Same as POST response - -### PUT /api/notebooks/{notebook_id} - -Update a notebook. - -**Path Parameters**: -- `notebook_id` (string): Notebook ID - -**Request Body** (all fields optional): -```json -{ - "name": "Updated Name", - "description": "Updated description", - "archived": true -} -``` - -**Response**: Same as GET single notebook - -### DELETE /api/notebooks/{notebook_id} - -Delete a notebook. - -**Path Parameters**: -- `notebook_id` (string): Notebook ID - -**Response**: -```json -{ - "message": "Notebook deleted successfully" -} -``` - -## 📄 Sources API - -Manage content sources within notebooks. - -### POST /api/sources - -Create a new source. - -**Request Body**: -```json -{ - "notebook_id": "notebook:uuid", - "type": "link", - "url": "https://example.com/article", - "title": "Optional title", - "transformations": ["transformation:uuid"], - "embed": true, - "delete_source": false -} -``` - -**Source Types**: -- `link`: Web URL -- `upload`: File upload -- `text`: Direct text content - -**Response**: -```json -{ - "id": "source:uuid", - "title": "Article Title", - "topics": ["AI", "Machine Learning"], - "asset": { - "url": "https://example.com/article" - }, - "full_text": "Article content...", - "embedded_chunks": 15, - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" -} -``` - -### GET /api/sources - -Get all sources with optional filtering. - -**Query Parameters**: -- `notebook_id` (string, optional): Filter by notebook -- `limit` (integer, optional): Maximum results (default: 100) -- `offset` (integer, optional): Pagination offset - -**Response**: -```json -[ - { - "id": "source:uuid", - "title": "Article Title", - "topics": ["AI"], - "asset": { - "url": "https://example.com/article" - }, - "embedded_chunks": 15, - "insights_count": 3, - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### GET /api/sources/{source_id} - -Get a specific source by ID. 
- -**Path Parameters**: -- `source_id` (string): Source ID - -**Response**: Same as POST response - -### PUT /api/sources/{source_id} - -Update a source. - -**Path Parameters**: -- `source_id` (string): Source ID - -**Request Body** (all fields optional): -```json -{ - "title": "Updated Title", - "topics": ["Updated", "Topics"] -} -``` - -**Response**: Same as GET single source - -### DELETE /api/sources/{source_id} - -Delete a source. - -**Path Parameters**: -- `source_id` (string): Source ID - -**Response**: -```json -{ - "message": "Source deleted successfully" -} -``` - -## 📝 Notes API - -Manage notes within notebooks. - -### POST /api/notes - -Create a new note. - -**Request Body**: -```json -{ - "title": "Note Title", - "content": "Note content", - "note_type": "human", - "notebook_id": "notebook:uuid" -} -``` - -**Note Types**: -- `human`: Manual note -- `ai`: AI-generated note - -**Response**: -```json -{ - "id": "note:uuid", - "title": "Note Title", - "content": "Note content", - "note_type": "human", - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" -} -``` - -### GET /api/notes - -Get all notes with optional filtering. - -**Query Parameters**: -- `notebook_id` (string, optional): Filter by notebook -- `note_type` (string, optional): Filter by note type -- `limit` (integer, optional): Maximum results - -**Response**: Array of note objects - -### GET /api/notes/{note_id} - -Get a specific note by ID. - -**Path Parameters**: -- `note_id` (string): Note ID - -**Response**: Same as POST response - -### PUT /api/notes/{note_id} - -Update a note. - -**Path Parameters**: -- `note_id` (string): Note ID - -**Request Body** (all fields optional): -```json -{ - "title": "Updated Title", - "content": "Updated content", - "note_type": "ai" -} -``` - -**Response**: Same as GET single note - -### DELETE /api/notes/{note_id} - -Delete a note. - -**Path Parameters**: -- `note_id` (string): Note ID - -**Response**: -```json -{ - "message": "Note deleted successfully" -} -``` - -## 🔍 Search API - -Perform full-text and vector search across content. - -### POST /api/search - -Search the knowledge base. - -**Request Body**: -```json -{ - "query": "artificial intelligence", - "type": "vector", - "limit": 10, - "search_sources": true, - "search_notes": true, - "minimum_score": 0.2 -} -``` - -**Search Types**: -- `text`: Full-text search -- `vector`: Semantic search (requires embedding model) - -**Response**: -```json -{ - "results": [ - { - "id": "source:uuid", - "title": "AI Research Paper", - "content": "Relevant content excerpt...", - "score": 0.85, - "type": "source", - "metadata": { - "topics": ["AI", "Machine Learning"] - } - } - ], - "total_count": 1, - "search_type": "vector" -} -``` - -### POST /api/search/ask - -Ask questions using AI models (streaming response). - -**Request Body**: -```json -{ - "question": "What are the key benefits of AI?", - "strategy_model": "model:gpt-5-mini", - "answer_model": "model:gpt-5-mini", - "final_answer_model": "model:gpt-5-mini" -} -``` - -**Response**: Server-Sent Events (SSE) stream - -**Stream Events**: -```json -// Strategy phase -data: {"type": "strategy", "reasoning": "...", "searches": [...]} - -// Individual answers -data: {"type": "answer", "content": "Answer content..."} - -// Final answer -data: {"type": "final_answer", "content": "Final synthesized answer..."} - -// Completion -data: {"type": "complete", "final_answer": "Final answer..."} -``` - -### POST /api/search/ask/simple - -Ask questions (non-streaming response). 
- -**Request Body**: Same as streaming version - -**Response**: -```json -{ - "answer": "The key benefits of AI include...", - "question": "What are the key benefits of AI?" -} -``` - -## 🤖 Models API - -Manage AI models and configurations. - -### GET /api/models - -Get all configured models. - -**Response**: -```json -[ - { - "id": "model:uuid", - "name": "gpt-5-mini", - "provider": "openai", - "type": "language", - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### POST /api/models - -Create a new model configuration. - -**Request Body**: -```json -{ - "name": "gpt-5-mini", - "provider": "openai", - "type": "language" -} -``` - -**Model Types**: -- `language`: Text generation models -- `embedding`: Vector embedding models -- `text_to_speech`: TTS models -- `speech_to_text`: STT models - -**Response**: Same as GET single model - -### GET /api/models/{model_id} - -Get a specific model by ID. - -**Path Parameters**: -- `model_id` (string): Model ID - -**Response**: Same as POST response - -### DELETE /api/models/{model_id} - -Delete a model configuration. - -**Path Parameters**: -- `model_id` (string): Model ID - -**Response**: -```json -{ - "message": "Model deleted successfully" -} -``` - -### GET /api/models/defaults - -Get default model configurations. - -**Response**: -```json -{ - "default_chat_model": "model:gpt-5-mini", - "default_transformation_model": "model:gpt-5-mini", - "large_context_model": "model:gpt-5-mini", - "default_text_to_speech_model": "model:gpt-4o-mini-tts", - "default_speech_to_text_model": "model:whisper-1", - "default_embedding_model": "model:text-embedding-3-small", - "default_tools_model": "model:gpt-5-mini" -} -``` - -## 🔧 Transformations API - -Manage content transformations and AI-powered analysis. - -### GET /api/transformations - -Get all transformations. - -**Response**: -```json -[ - { - "id": "transformation:uuid", - "name": "summarize", - "title": "Summarize Content", - "description": "Create a concise summary", - "prompt": "Summarize the following content...", - "apply_default": true, - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### POST /api/transformations - -Create a new transformation. - -**Request Body**: -```json -{ - "name": "custom_analysis", - "title": "Custom Analysis", - "description": "Perform custom content analysis", - "prompt": "Analyze the following content for key themes...", - "apply_default": false -} -``` - -**Response**: Same as GET single transformation - -### GET /api/transformations/{transformation_id} - -Get a specific transformation by ID. - -**Path Parameters**: -- `transformation_id` (string): Transformation ID - -**Response**: Same as POST response - -### PUT /api/transformations/{transformation_id} - -Update a transformation. - -**Path Parameters**: -- `transformation_id` (string): Transformation ID - -**Request Body** (all fields optional): -```json -{ - "name": "updated_name", - "title": "Updated Title", - "description": "Updated description", - "prompt": "Updated prompt...", - "apply_default": true -} -``` - -**Response**: Same as GET single transformation - -### DELETE /api/transformations/{transformation_id} - -Delete a transformation. - -**Path Parameters**: -- `transformation_id` (string): Transformation ID - -**Response**: -```json -{ - "message": "Transformation deleted successfully" -} -``` - -### POST /api/transformations/execute - -Execute a transformation on content. 
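-
-**Example** (IDs are placeholders for your own transformation and model):
-```bash
-curl -X POST http://localhost:5055/api/transformations/execute \
-  -H "Content-Type: application/json" \
-  -d '{"transformation_id": "transformation:uuid", "input_text": "Content to transform...", "model_id": "model:gpt-5-mini"}'
-```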
- -**Request Body**: -```json -{ - "transformation_id": "transformation:uuid", - "input_text": "Content to transform...", - "model_id": "model:gpt-5-mini" -} -``` - -**Response**: -```json -{ - "output": "Transformed content...", - "transformation_id": "transformation:uuid", - "model_id": "model:gpt-5-mini" -} -``` - -## 📊 Insights API - -Manage AI-generated insights for sources. - -### GET /api/sources/{source_id}/insights - -Get insights for a specific source. - -**Path Parameters**: -- `source_id` (string): Source ID - -**Response**: -```json -[ - { - "id": "insight:uuid", - "source_id": "source:uuid", - "insight_type": "summary", - "content": "This source discusses...", - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### POST /api/sources/{source_id}/insights - -Create a new insight for a source. - -**Path Parameters**: -- `source_id` (string): Source ID - -**Request Body**: -```json -{ - "transformation_id": "transformation:uuid", - "model_id": "model:gpt-5-mini" -} -``` - -**Response**: Same as GET insight - -### POST /api/insights/{insight_id}/save-as-note - -Save an insight as a note. - -**Path Parameters**: -- `insight_id` (string): Insight ID - -**Request Body**: -```json -{ - "notebook_id": "notebook:uuid" -} -``` - -**Response**: -```json -{ - "note_id": "note:uuid", - "message": "Insight saved as note successfully" -} -``` - -## 🎙️ Podcasts API - -Generate professional multi-speaker podcasts. - -### GET /api/episode-profiles - -Get all episode profiles. - -**Response**: -```json -[ - { - "id": "episode_profile:uuid", - "name": "tech_discussion", - "description": "Technical discussion between 2 experts", - "speaker_config": "tech_experts", - "outline_provider": "openai", - "outline_model": "gpt-5-mini", - "transcript_provider": "openai", - "transcript_model": "gpt-5-mini", - "default_briefing": "Create an engaging technical discussion...", - "num_segments": 5, - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### GET /api/speaker-profiles - -Get all speaker profiles. - -**Response**: -```json -[ - { - "id": "speaker_profile:uuid", - "name": "tech_experts", - "description": "Two technical experts for tech discussions", - "tts_provider": "openai", - "tts_model": "gpt-4o-mini-tts", - "speakers": [ - { - "name": "Dr. Alex Chen", - "voice_id": "nova", - "backstory": "Senior AI researcher...", - "personality": "Analytical, clear communicator..." - } - ], - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### POST /api/podcasts - -Create a new podcast episode. - -**Request Body**: -```json -{ - "name": "AI Discussion Episode", - "briefing": "Discuss the latest AI developments...", - "episode_profile_id": "episode_profile:uuid", - "source_ids": ["source:uuid1", "source:uuid2"], - "note_ids": ["note:uuid1"] -} -``` - -**Response**: -```json -{ - "id": "episode:uuid", - "name": "AI Discussion Episode", - "briefing": "Discuss the latest AI developments...", - "episode_profile": {...}, - "speaker_profile": {...}, - "command": "command:uuid", - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" -} -``` - -### GET /api/podcasts/{episode_id} - -Get a specific podcast episode. - -**Path Parameters**: -- `episode_id` (string): Episode ID - -**Response**: Same as POST response - -### GET /api/podcasts/{episode_id}/audio - -Download the generated audio file. 
- -**Path Parameters**: -- `episode_id` (string): Episode ID - -**Response**: Audio file download (MP3 format) - -## 🎛️ Settings API - -Manage application settings and configuration. - -### GET /api/settings - -Get current application settings. - -**Response**: -```json -{ - "default_content_processing_engine_doc": "docling", - "default_content_processing_engine_url": "firecrawl", - "default_embedding_option": "auto", - "auto_delete_files": "false", - "youtube_preferred_languages": ["en", "es"] -} -``` - -### PUT /api/settings - -Update application settings. - -**Request Body** (all fields optional): -```json -{ - "default_content_processing_engine_doc": "docling", - "default_content_processing_engine_url": "firecrawl", - "default_embedding_option": "auto", - "auto_delete_files": "true", - "youtube_preferred_languages": ["en", "fr", "de"] -} -``` - -**Response**: Same as GET response - -## 💬 Chat API - -Manage chat sessions and conversational AI interactions within notebooks. - -### GET /api/chat/sessions - -Get all chat sessions for a notebook. - -**Query Parameters**: -- `notebook_id` (string, required): Notebook ID to get sessions for - -**Response**: -```json -[ - { - "id": "chat_session:uuid", - "title": "Chat Session Title", - "notebook_id": "notebook:uuid", - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z", - "message_count": 5 - } -] -``` - -**Example**: -```bash -curl -X GET "http://localhost:5055/api/chat/sessions?notebook_id=notebook:uuid" -``` - -### POST /api/chat/sessions - -Create a new chat session for a notebook. - -**Request Body**: -```json -{ - "notebook_id": "notebook:uuid", - "title": "Optional session title" -} -``` - -**Response**: Same as GET single session - -**Example**: -```bash -curl -X POST http://localhost:5055/api/chat/sessions \ - -H "Content-Type: application/json" \ - -d '{"notebook_id": "notebook:uuid", "title": "New Chat Session"}' -``` - -### GET /api/chat/sessions/{session_id} - -Get a specific chat session with its message history. - -**Path Parameters**: -- `session_id` (string): Chat session ID - -**Response**: -```json -{ - "id": "chat_session:uuid", - "title": "Chat Session Title", - "notebook_id": "notebook:uuid", - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z", - "message_count": 3, - "messages": [ - { - "id": "msg_1", - "type": "human", - "content": "Hello, what can you tell me about AI?", - "timestamp": null - }, - { - "id": "msg_2", - "type": "ai", - "content": "AI, or Artificial Intelligence, refers to...", - "timestamp": null - } - ] -} -``` - -### PUT /api/chat/sessions/{session_id} - -Update a chat session (currently supports title updates). - -**Path Parameters**: -- `session_id` (string): Chat session ID - -**Request Body**: -```json -{ - "title": "Updated Session Title" -} -``` - -**Response**: Same as GET single session (without messages) - -### DELETE /api/chat/sessions/{session_id} - -Delete a chat session and all its messages. - -**Path Parameters**: -- `session_id` (string): Chat session ID - -**Response**: -```json -{ - "success": true, - "message": "Session deleted successfully" -} -``` - -### POST /api/chat/execute - -Execute a chat message and get AI response. - -**Request Body**: -```json -{ - "session_id": "chat_session:uuid", - "message": "What are the key benefits of machine learning?", - "context": { - "sources": [ - { - "id": "source:uuid", - "title": "ML Research Paper", - "content": "Machine learning content..." 
- } - ], - "notes": [ - { - "id": "note:uuid", - "title": "ML Notes", - "content": "My notes on ML..." - } - ] - } -} -``` - -**Response**: -```json -{ - "session_id": "chat_session:uuid", - "messages": [ - { - "id": "msg_1", - "type": "human", - "content": "What are the key benefits of machine learning?", - "timestamp": null - }, - { - "id": "msg_2", - "type": "ai", - "content": "Based on the provided context, machine learning offers several key benefits...", - "timestamp": null - } - ] -} -``` - -**Example**: -```bash -curl -X POST http://localhost:5055/api/chat/execute \ - -H "Content-Type: application/json" \ - -d '{ - "session_id": "chat_session:uuid", - "message": "Summarize the main points", - "context": {"sources": [], "notes": []} - }' -``` - -### POST /api/chat/context - -Build context for chat based on notebook content and configuration. - -**Request Body**: -```json -{ - "notebook_id": "notebook:uuid", - "context_config": { - "sources": { - "source:uuid1": "full content", - "source:uuid2": "insights only" - }, - "notes": { - "note:uuid1": "full content" - } - } -} -``` - -**Context Configuration Values**: -- `"full content"`: Include complete source/note content -- `"insights only"`: Include source insights/summary only -- `"not in context"`: Exclude from context - -**Response**: -```json -{ - "context": { - "sources": [ - { - "id": "source:uuid", - "title": "Source Title", - "content": "Source content or insights...", - "type": "source" - } - ], - "notes": [ - { - "id": "note:uuid", - "title": "Note Title", - "content": "Note content...", - "type": "note" - } - ] - }, - "token_count": 1250, - "char_count": 5000 -} -``` - -## 📐 Context API - -Manage context configuration for AI operations. - -### POST /api/context - -Get context information for a notebook. - -**Request Body**: -```json -{ - "notebook_id": "notebook:uuid", - "context_config": { - "sources": { - "source:uuid1": "full", - "source:uuid2": "summary" - }, - "notes": { - "note:uuid1": "full" - } - } -} -``` - -**Context Levels**: -- `full`: Include complete content -- `summary`: Include summary only -- `exclude`: Exclude from context - -**Response**: -```json -{ - "notebook_id": "notebook:uuid", - "sources": [ - { - "id": "source:uuid", - "title": "Source Title", - "content": "Source content...", - "inclusion_level": "full" - } - ], - "notes": [ - { - "id": "note:uuid", - "title": "Note Title", - "content": "Note content...", - "inclusion_level": "full" - } - ], - "total_tokens": 1500 -} -``` - -## 🔨 Commands API - -Monitor and manage background jobs. - -### GET /api/commands - -Get all commands (background jobs). - -**Query Parameters**: -- `status` (string, optional): Filter by status -- `limit` (integer, optional): Maximum results - -**Response**: -```json -[ - { - "id": "command:uuid", - "name": "podcast_generation", - "status": "completed", - "progress": 100, - "result": {...}, - "error": null, - "created": "2024-01-01T00:00:00Z", - "updated": "2024-01-01T00:00:00Z" - } -] -``` - -### GET /api/commands/{command_id} - -Get a specific command by ID. - -**Path Parameters**: -- `command_id` (string): Command ID - -**Response**: Same as array item above - -### DELETE /api/commands/{command_id} - -Cancel/delete a command. - -**Path Parameters**: -- `command_id` (string): Command ID - -**Response**: -```json -{ - "message": "Command deleted successfully" -} -``` - -## 🏷️ Embedding API - -Manage vector embeddings for content. 
The embedding system supports both synchronous and asynchronous processing, as well as bulk rebuild operations for upgrading embeddings when switching models. - -### POST /api/embed - -Generate embeddings for an item (source, note, or insight). - -**Request Body**: -```json -{ - "item_id": "source:uuid", - "item_type": "source", - "async_processing": false -} -``` - -**Parameters**: -- `item_id` (string, required): ID of the item to embed -- `item_type` (string, required): Type of item - `source`, `note`, or `insight` -- `async_processing` (boolean, optional): Process in background (default: false) - -**Behavior**: -- Embedding operations are **idempotent** - calling multiple times safely replaces existing embeddings -- For sources: Deletes existing chunks and creates new embeddings -- For notes: Updates the note's embedding vector -- For insights: Regenerates the insight's embedding vector - -**Response (Synchronous)**: -```json -{ - "success": true, - "message": "Source embedded successfully", - "item_id": "source:uuid", - "item_type": "source" -} -``` - -**Response (Asynchronous)**: -```json -{ - "success": true, - "message": "Embedding queued for background processing", - "item_id": "source:uuid", - "item_type": "source", - "command_id": "command:uuid" -} -``` - -**Example (Synchronous)**: -```bash -curl -X POST http://localhost:5055/api/embed \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer YOUR_PASSWORD" \ - -d '{ - "item_id": "source:abc123", - "item_type": "source", - "async_processing": false - }' -``` - -**Example (Asynchronous)**: -```bash -# Submit for background processing -COMMAND_ID=$(curl -X POST http://localhost:5055/api/embed \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer YOUR_PASSWORD" \ - -d '{ - "item_id": "source:abc123", - "item_type": "source", - "async_processing": true - }' | jq -r '.command_id') - -# Check status -curl -X GET http://localhost:5055/api/commands/$COMMAND_ID -``` - -### POST /api/embeddings/rebuild - -Rebuild embeddings for multiple items in bulk. Useful when switching embedding models or fixing corrupted embeddings. 
- -**Request Body**: -```json -{ - "mode": "existing", - "include_sources": true, - "include_notes": true, - "include_insights": true -} -``` - -**Parameters**: -- `mode` (string, required): Rebuild mode - - `"existing"`: Re-embed only items that already have embeddings - - `"all"`: Re-embed existing items + create embeddings for items without any -- `include_sources` (boolean, optional): Include sources in rebuild (default: true) -- `include_notes` (boolean, optional): Include notes in rebuild (default: true) -- `include_insights` (boolean, optional): Include insights in rebuild (default: true) - -**Response**: -```json -{ - "command_id": "command:uuid", - "message": "Rebuild started successfully", - "estimated_items": 165 -} -``` - -**Example**: -```bash -# Rebuild all existing embeddings -curl -X POST http://localhost:5055/api/embeddings/rebuild \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer YOUR_PASSWORD" \ - -d '{ - "mode": "existing", - "include_sources": true, - "include_notes": true, - "include_insights": true - }' - -# Rebuild and create new embeddings for everything -curl -X POST http://localhost:5055/api/embeddings/rebuild \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer YOUR_PASSWORD" \ - -d '{ - "mode": "all", - "include_sources": true, - "include_notes": false, - "include_insights": false - }' -``` - -### GET /api/embeddings/rebuild/{command_id}/status - -Get the status and progress of a rebuild operation. - -**Path Parameters**: -- `command_id` (string): Command ID returned from rebuild endpoint - -**Response (Running)**: -```json -{ - "command_id": "command:uuid", - "status": "running", - "progress": null, - "stats": null, - "started_at": "2024-01-01T12:00:00Z", - "completed_at": null, - "error_message": null -} -``` - -**Response (Completed)**: -```json -{ - "command_id": "command:uuid", - "status": "completed", - "progress": { - "total_items": 165, - "processed_items": 165, - "failed_items": 0 - }, - "stats": { - "sources_processed": 115, - "notes_processed": 25, - "insights_processed": 25, - "processing_time": 125.5 - }, - "started_at": "2024-01-01T12:00:00Z", - "completed_at": "2024-01-01T12:02:05Z", - "error_message": null -} -``` - -**Response (Failed)**: -```json -{ - "command_id": "command:uuid", - "status": "failed", - "progress": { - "total_items": 165, - "processed_items": 50, - "failed_items": 1 - }, - "stats": null, - "started_at": "2024-01-01T12:00:00Z", - "completed_at": "2024-01-01T12:01:00Z", - "error_message": "No embedding model configured" -} -``` - -**Example**: -```bash -# Start rebuild -COMMAND_ID=$(curl -X POST http://localhost:5055/api/embeddings/rebuild \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer YOUR_PASSWORD" \ - -d '{"mode": "existing", "include_sources": true}' \ - | jq -r '.command_id') - -# Poll for status -while true; do - STATUS=$(curl -s -X GET \ - "http://localhost:5055/api/embeddings/rebuild/$COMMAND_ID/status" \ - -H "Authorization: Bearer YOUR_PASSWORD" \ - | jq -r '.status') - - echo "Status: $STATUS" - - if [ "$STATUS" = "completed" ] || [ "$STATUS" = "failed" ]; then - break - fi - - sleep 5 -done - -# Get final results -curl -X GET "http://localhost:5055/api/embeddings/rebuild/$COMMAND_ID/status" \ - -H "Authorization: Bearer YOUR_PASSWORD" | jq . 
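-
-# If the rebuild failed, the reason is surfaced in error_message
-# (field names as in the status responses shown above)
-curl -s "http://localhost:5055/api/embeddings/rebuild/$COMMAND_ID/status" \
-  -H "Authorization: Bearer YOUR_PASSWORD" | jq -r '.error_message // "no error"'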
-``` - -**Status Values**: -- `queued`: Rebuild job queued for processing -- `running`: Rebuild in progress -- `completed`: Rebuild finished successfully -- `failed`: Rebuild failed with error - -## 🚨 Error Responses - -### Common Error Codes - -**400 Bad Request**: -```json -{ - "detail": "Invalid input data" -} -``` - -**401 Unauthorized**: -```json -{ - "detail": "Authentication required" -} -``` - -**404 Not Found**: -```json -{ - "detail": "Resource not found" -} -``` - -**422 Validation Error**: -```json -{ - "detail": [ - { - "loc": ["body", "name"], - "msg": "field required", - "type": "value_error.missing" - } - ] -} -``` - -**500 Internal Server Error**: -```json -{ - "detail": "Internal server error occurred" -} -``` - -## 📋 Usage Examples - -### Complete Workflow Example - -```bash -# 1. Create a notebook -NOTEBOOK_ID=$(curl -X POST http://localhost:5055/api/notebooks \ - -H "Content-Type: application/json" \ - -d '{"name": "AI Research", "description": "Research on AI applications"}' \ - | jq -r '.id') - -# 2. Add a source -SOURCE_ID=$(curl -X POST http://localhost:5055/api/sources \ - -H "Content-Type: application/json" \ - -d "{\"notebook_id\": \"$NOTEBOOK_ID\", \"type\": \"link\", \"url\": \"https://example.com/ai-article\", \"embed\": true}" \ - | jq -r '.id') - -# 3. Create a model -MODEL_ID=$(curl -X POST http://localhost:5055/api/models \ - -H "Content-Type: application/json" \ - -d '{"name": "gpt-5-mini", "provider": "openai", "type": "language"}' \ - | jq -r '.id') - -# 4. Search for content -curl -X POST http://localhost:5055/api/search \ - -H "Content-Type: application/json" \ - -d '{"query": "artificial intelligence", "type": "vector", "limit": 5}' - -# 5. Ask a question -curl -X POST http://localhost:5055/api/search/ask/simple \ - -H "Content-Type: application/json" \ - -d "{\"question\": \"What are the main AI applications?\", \"strategy_model\": \"$MODEL_ID\", \"answer_model\": \"$MODEL_ID\", \"final_answer_model\": \"$MODEL_ID\"}" -``` - -### Podcast Generation Example - -```bash -# 1. Get episode profiles -curl -X GET http://localhost:5055/api/episode-profiles - -# 2. Create a podcast -EPISODE_ID=$(curl -X POST http://localhost:5055/api/podcasts \ - -H "Content-Type: application/json" \ - -d "{\"name\": \"AI Discussion\", \"briefing\": \"Discuss AI trends\", \"episode_profile_id\": \"episode_profile:tech_discussion\", \"source_ids\": [\"$SOURCE_ID\"]}" \ - | jq -r '.id') - -# 3. Check command status -curl -X GET http://localhost:5055/api/commands - -# 4. Download audio when ready -curl -X GET http://localhost:5055/api/podcasts/$EPISODE_ID/audio -o podcast.mp3 -``` - -### Chat Conversation Example - -```bash -# 1. Create a chat session -SESSION_ID=$(curl -X POST http://localhost:5055/api/chat/sessions \ - -H "Content-Type: application/json" \ - -d "{\"notebook_id\": \"$NOTEBOOK_ID\", \"title\": \"Research Discussion\"}" \ - | jq -r '.id') - -# 2. Build context for the chat -CONTEXT=$(curl -X POST http://localhost:5055/api/chat/context \ - -H "Content-Type: application/json" \ - -d "{\"notebook_id\": \"$NOTEBOOK_ID\", \"context_config\": {\"sources\": {\"$SOURCE_ID\": \"full content\"}}}") - -# 3. Send a chat message -curl -X POST http://localhost:5055/api/chat/execute \ - -H "Content-Type: application/json" \ - -d "{\"session_id\": \"$SESSION_ID\", \"message\": \"What are the key insights from this research?\", \"context\": $CONTEXT}" - -# 4. Get chat history -curl -X GET http://localhost:5055/api/chat/sessions/$SESSION_ID - -# 5. 
List all sessions for the notebook -curl -X GET "http://localhost:5055/api/chat/sessions?notebook_id=$NOTEBOOK_ID" -``` - -## 📡 WebSocket Support - -Currently, Open Notebook uses Server-Sent Events (SSE) for real-time updates in the Ask endpoint. WebSocket support may be added in future versions for more interactive features. - -## 📈 Rate Limiting - -The API currently doesn't enforce rate limiting, but it's recommended to implement rate limiting in production deployments to prevent abuse. - -## 🔄 Versioning - -The API uses semantic versioning. Breaking changes will increment the major version number. The current API version is included in the OpenAPI documentation at `/docs`. - ---- - -This API reference provides comprehensive coverage of Open Notebook's REST API. For additional examples and integration patterns, check the [GitHub repository](https://github.com/lfnovo/open-notebook) and join our [Discord community](https://discord.gg/37XJPXfz2w). \ No newline at end of file diff --git a/docs/development/architecture.md b/docs/development/architecture.md deleted file mode 100644 index 50e11bfe..00000000 --- a/docs/development/architecture.md +++ /dev/null @@ -1,498 +0,0 @@ -# System Architecture - -This document provides a comprehensive overview of Open Notebook's architecture, including system design, component relationships, database schema, and service communication patterns. - -## 🏗️ High-Level Architecture - -Open Notebook follows a modern layered architecture with clear separation of concerns: - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Frontend Layer │ -├─────────────────────────────────────────────────────────────┤ -│ React frontend (pages/) │ REST API Clients (external) │ -└─────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ API Layer │ -├─────────────────────────────────────────────────────────────┤ -│ FastAPI Routers (api/routers/) │ Models (api/models.py) │ -│ Middleware (auth, CORS) │ Service Layer │ -└─────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ Domain Layer │ -├─────────────────────────────────────────────────────────────┤ -│ Business Logic (open_notebook/domain/) │ -│ Entity Models │ Validation │ Domain Services │ -└─────────────────────────────────────────────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────────────────────────┐ -│ Infrastructure Layer │ -├─────────────────────────────────────────────────────────────┤ -│ Database (SurrealDB) │ AI Services (Esperanto) │ -│ File Storage │ External APIs │ -└─────────────────────────────────────────────────────────────┘ -``` - -## 🧩 Core Components - -### 1. 
API Layer (`api/`) - -**Purpose**: HTTP interface for all application functionality - -**Key Components**: -- **FastAPI Application** (`api/main.py`): Main application with middleware and routing -- **Routers** (`api/routers/`): Endpoint definitions organized by domain -- **Models** (`api/models.py`): Pydantic models for request/response validation -- **Services** (`api/*_service.py`): Business logic orchestration -- **Authentication** (`api/auth.py`): Password-based authentication middleware - -**Architecture Pattern**: Clean API architecture with service layer abstraction - -```python -# Example API structure -@router.post("/notebooks", response_model=NotebookResponse) -async def create_notebook(notebook: NotebookCreate): - """Create a new notebook with validation and error handling.""" - new_notebook = Notebook(name=notebook.name, description=notebook.description) - await new_notebook.save() - return NotebookResponse.from_domain(new_notebook) -``` - -### 2. Domain Layer (`open_notebook/domain/`) - -**Purpose**: Core business logic and domain models - -**Key Components**: -- **Base Models** (`base.py`): Abstract base classes with common functionality -- **Entities**: `Notebook`, `Source`, `Note`, `Model`, `Transformation` -- **Services**: Domain-specific business logic -- **Validation**: Data integrity and business rules - -**Architecture Pattern**: Domain-Driven Design (DDD) with rich domain models - -```python -# Example domain model -class Notebook(BaseModel): - name: str - description: str - archived: bool = False - - @classmethod - async def get_all(cls, order_by: str = "updated desc") -> List["Notebook"]: - """Retrieve all notebooks with ordering.""" - # Business logic implementation - - async def save(self) -> None: - """Save notebook with validation.""" - # Domain validation and persistence -``` - -### 3. Database Layer (`open_notebook/database/`) - -**Purpose**: Data persistence and query abstraction - -**Key Components**: -- **Repository Pattern** (`repository.py`): CRUD operations abstraction -- **Connection Management**: Async SurrealDB connection handling -- **Migrations**: Database schema evolution (`migrations/`) -- **Query Builder**: SurrealQL query construction helpers - -**Architecture Pattern**: Repository pattern with async/await - -```python -# Repository functions -async def repo_create(table: str, data: Dict[str, Any]) -> Dict[str, Any] -async def repo_query(query_str: str, vars: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]] -async def repo_update(table: str, id: str, data: Dict[str, Any]) -> List[Dict[str, Any]] -async def repo_delete(record_id: Union[str, RecordID]) -``` - -### 4. AI Processing Layer (`open_notebook/graphs/`) - -**Purpose**: AI workflows and content processing - -**Key Components**: -- **LangChain Graphs**: Multi-step AI workflows -- **Ask System** (`ask.py`): Question-answering pipeline -- **Chat System** (`chat.py`): Conversational AI -- **Transformations** (`transformation.py`): Content analysis workflows -- **Source Processing** (`source.py`): Content ingestion and embedding - -**Architecture Pattern**: LangGraph for workflow orchestration - -```python -# Example AI workflow -@create_graph -async def ask_graph(state: AskState): - """Multi-step question answering workflow.""" - # 1. Strategy generation - # 2. Search execution - # 3. Answer synthesis - # 4. Final response generation -``` - -### 5. 
Background Processing (`commands/`)
-
-**Purpose**: Asynchronous job processing
-
-**Key Components**:
-- **Command System**: Background job definitions
-- **Job Queue**: SurrealDB-backed job scheduling
-- **Status Tracking**: Real-time job progress monitoring
-- **Error Handling**: Comprehensive error recovery
-
-**Architecture Pattern**: Command pattern with async job queue
-
-## 🗃️ Database Schema
-
-Open Notebook uses SurrealDB with a flexible document-oriented schema:
-
-### Core Tables
-
-#### `notebook`
-```surrealql
-DEFINE TABLE notebook SCHEMAFULL;
-DEFINE FIELD name ON TABLE notebook TYPE string;
-DEFINE FIELD description ON TABLE notebook TYPE string;
-DEFINE FIELD archived ON TABLE notebook TYPE bool DEFAULT false;
-DEFINE FIELD created ON TABLE notebook TYPE datetime DEFAULT time::now();
-DEFINE FIELD updated ON TABLE notebook TYPE datetime DEFAULT time::now();
-```
-
-#### `source`
-```surrealql
-DEFINE TABLE source SCHEMAFULL;
-DEFINE FIELD title ON TABLE source TYPE option<string>;
-DEFINE FIELD topics ON TABLE source TYPE option<array<string>>;
-DEFINE FIELD asset ON TABLE source TYPE option<object>;
-DEFINE FIELD full_text ON TABLE source TYPE option<string>;
-DEFINE FIELD notebook_id ON TABLE source TYPE record<notebook>;
-DEFINE FIELD embedding ON TABLE source TYPE option<array<float>>;
-DEFINE FIELD created ON TABLE source TYPE datetime DEFAULT time::now();
-DEFINE FIELD updated ON TABLE source TYPE datetime DEFAULT time::now();
-```
-
-#### `note`
-```surrealql
-DEFINE TABLE note SCHEMAFULL;
-DEFINE FIELD title ON TABLE note TYPE option<string>;
-DEFINE FIELD content ON TABLE note TYPE option<string>;
-DEFINE FIELD note_type ON TABLE note TYPE option<string>;
-DEFINE FIELD notebook_id ON TABLE note TYPE record<notebook>;
-DEFINE FIELD embedding ON TABLE note TYPE option<array<float>>;
-DEFINE FIELD created ON TABLE note TYPE datetime DEFAULT time::now();
-DEFINE FIELD updated ON TABLE note TYPE datetime DEFAULT time::now();
-```
-
-#### `model`
-```surrealql
-DEFINE TABLE model SCHEMAFULL;
-DEFINE FIELD name ON TABLE model TYPE string;
-DEFINE FIELD provider ON TABLE model TYPE string;
-DEFINE FIELD type ON TABLE model TYPE string;
-DEFINE FIELD created ON TABLE model TYPE datetime DEFAULT time::now();
-DEFINE FIELD updated ON TABLE model TYPE datetime DEFAULT time::now();
-```
-
-### Specialized Tables
-
-#### `transformation`
-```surrealql
-DEFINE TABLE transformation SCHEMAFULL;
-DEFINE FIELD name ON TABLE transformation TYPE string;
-DEFINE FIELD title ON TABLE transformation TYPE string;
-DEFINE FIELD description ON TABLE transformation TYPE string;
-DEFINE FIELD prompt ON TABLE transformation TYPE string;
-DEFINE FIELD apply_default ON TABLE transformation TYPE bool DEFAULT false;
-```
-
-#### `episode_profile` (Podcast Generation)
-```surrealql
-DEFINE TABLE episode_profile SCHEMAFULL;
-DEFINE FIELD name ON TABLE episode_profile TYPE string;
-DEFINE FIELD description ON TABLE episode_profile TYPE option<string>;
-DEFINE FIELD speaker_config ON TABLE episode_profile TYPE string;
-DEFINE FIELD outline_provider ON TABLE episode_profile TYPE string;
-DEFINE FIELD outline_model ON TABLE episode_profile TYPE string;
-DEFINE FIELD transcript_provider ON TABLE episode_profile TYPE string;
-DEFINE FIELD transcript_model ON TABLE episode_profile TYPE string;
-DEFINE FIELD default_briefing ON TABLE episode_profile TYPE string;
-DEFINE FIELD num_segments ON TABLE episode_profile TYPE int DEFAULT 5;
-```
-
-#### `speaker_profile` (Podcast Generation)
-```surrealql
-DEFINE TABLE speaker_profile SCHEMAFULL;
-DEFINE FIELD name ON TABLE speaker_profile TYPE string;
-DEFINE FIELD description ON TABLE speaker_profile TYPE option<string>;
-DEFINE FIELD tts_provider ON TABLE speaker_profile TYPE string;
-DEFINE FIELD tts_model ON TABLE speaker_profile TYPE string;
-DEFINE FIELD speakers ON TABLE speaker_profile TYPE array;
-DEFINE FIELD speakers.*.name ON TABLE speaker_profile TYPE string;
-DEFINE FIELD speakers.*.voice_id ON TABLE speaker_profile TYPE option<string>;
-DEFINE FIELD speakers.*.backstory ON TABLE speaker_profile TYPE option<string>;
-DEFINE FIELD speakers.*.personality ON TABLE speaker_profile TYPE option<string>;
-```
-
-### Relationships
-
-**Record Links** (SurrealDB native relationships):
-- `source.notebook_id` → `notebook` records
-- `note.notebook_id` → `notebook` records
-- `episode.command` → `command` records
-
-**Embedding Relationships**:
-- Sources and notes can have vector embeddings for semantic search
-- Embeddings are stored as arrays of numbers in the same record
-
-## 🔄 Service Communication
-
-### API Communication Flow
-
-```mermaid
-graph TB
-    A[Client Request] --> B[FastAPI Router]
-    B --> C[Service Layer]
-    C --> D[Domain Model]
-    D --> E[Repository]
-    E --> F[SurrealDB]
-    F --> E
-    E --> D
-    D --> C
-    C --> B
-    B --> A
-```
-
-### AI Processing Flow
-
-```mermaid
-graph TB
-    A[Content Input] --> B[Source Processing]
-    B --> C[Content Extraction]
-    C --> D[Embedding Generation]
-    D --> E[Database Storage]
-    E --> F[Search Index]
-
-    G[User Query] --> H[Vector Search]
-    H --> I[Context Retrieval]
-    I --> J[AI Model Processing]
-    J --> K[Response Generation]
-```
-
-### Background Job Processing
-
-```mermaid
-graph TB
-    A[API Request] --> B[Command Creation]
-    B --> C[Job Queue]
-    C --> D[Background Worker]
-    D --> E[Job Execution]
-    E --> F[Status Updates]
-    F --> G[Result Storage]
-    G --> H[Client Notification]
-```
-
-## 🔧 Configuration Management
-
-### Environment Variables
-
-**Database Configuration**:
-```bash
-SURREAL_URL=ws://localhost:8000/rpc
-SURREAL_USER=root
-SURREAL_PASSWORD=password
-SURREAL_NAMESPACE=open_notebook
-SURREAL_DATABASE=main
-```
-
-**AI Provider Configuration**:
-```bash
-OPENAI_API_KEY=sk-...
-ANTHROPIC_API_KEY=sk-ant-...
-GOOGLE_API_KEY=AI...
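-
-# Local providers use a base URL instead of an API key, e.g.:
-# OLLAMA_API_BASE=http://localhost:11434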
-``` - -**Application Configuration**: -```bash -APP_PASSWORD=optional_password -DEBUG=false -LOG_LEVEL=INFO -``` - -### Configuration Loading - -Configuration is managed through the `open_notebook/config.py` module: - -```python -class Config: - """Application configuration with environment variable support.""" - - # Database settings - database_url: str = os.getenv("SURREAL_URL", "ws://localhost:8000/rpc") - database_user: str = os.getenv("SURREAL_USER", "root") - database_password: str = os.getenv("SURREAL_PASSWORD", "password") - - # AI provider settings - openai_api_key: Optional[str] = os.getenv("OPENAI_API_KEY") - anthropic_api_key: Optional[str] = os.getenv("ANTHROPIC_API_KEY") - - # Application settings - app_password: Optional[str] = os.getenv("APP_PASSWORD") - debug: bool = os.getenv("DEBUG", "false").lower() == "true" -``` - -## 🔍 Search Architecture - -### Multi-Modal Search System - -Open Notebook implements both full-text and vector search: - -**Full-Text Search**: -- SurrealDB native text search capabilities -- Keyword-based matching across content -- Fast and lightweight for exact matches - -**Vector Search**: -- Semantic similarity using embeddings -- Cosine similarity scoring -- Context-aware result ranking - -### Search Implementation - -```python -async def vector_search( - keyword: str, - results: int = 10, - source: bool = True, - note: bool = True, - minimum_score: float = 0.2 -) -> List[Dict[str, Any]]: - """Perform vector search across sources and notes.""" - # 1. Generate query embedding - # 2. Calculate similarity scores - # 3. Filter by minimum score - # 4. Rank and return results -``` - -## 🎙️ Podcast Generation Architecture - -### Multi-Speaker Podcast System - -The podcast generation feature uses a sophisticated multi-step process: - -**Episode Profiles**: Define the structure and style of podcasts -- Speaker configuration -- Content outline generation -- Transcript creation -- Audio synthesis - -**Speaker Profiles**: Define individual speaker characteristics -- Voice selection (TTS models) -- Personality traits -- Background information -- Speaking patterns - -### Podcast Generation Flow - -```mermaid -graph TB - A[Content Input] --> B[Episode Profile Selection] - B --> C[Outline Generation] - C --> D[Transcript Creation] - D --> E[Speaker Assignment] - E --> F[Audio Synthesis] - F --> G[Audio Post-Processing] - G --> H[Final Podcast] -``` - -## 📊 Performance Considerations - -### Async/Await Patterns - -Open Notebook uses async/await throughout for optimal performance: - -```python -async def process_content(content: str) -> ProcessedContent: - """Process content asynchronously.""" - # Concurrent processing of multiple steps - embedding_task = asyncio.create_task(generate_embedding(content)) - extraction_task = asyncio.create_task(extract_metadata(content)) - - embedding, metadata = await asyncio.gather(embedding_task, extraction_task) - return ProcessedContent(embedding=embedding, metadata=metadata) -``` - -### Database Optimization - -**Connection Pooling**: Efficient database connection management -**Query Optimization**: Indexed queries and optimized SurrealQL -**Batch Operations**: Bulk insert/update operations where possible - -### Caching Strategy - -- **In-Memory Caching**: Model instances and configuration -- **Result Caching**: Expensive AI operations -- **Content Caching**: Processed documents and embeddings - -## 🔒 Security Architecture - -### Authentication - -**Password-Based Authentication**: -- Optional application-level password protection 
-- Middleware-based authentication
-- Session management
-
-### Data Security
-
-**Privacy-First Design**:
-- Local data storage by default
-- No external data transmission (except to chosen AI providers)
-- Configurable AI provider selection
-
-**Input Validation**:
-- Pydantic model validation
-- SQL injection prevention
-- File upload security
-
-## 🚀 Deployment Architecture
-
-### Container Architecture
-
-```dockerfile
-# Multi-stage build for optimal size
-FROM python:3.11-slim as builder
-# Build dependencies
-
-FROM python:3.11-slim as runtime
-# Runtime environment
-```
-
-### Service Orchestration
-
-**Docker Compose Configuration**:
-- Application container
-- SurrealDB container
-- Shared volume for data persistence
-- Environment variable management
-
-### Scaling Considerations
-
-**Horizontal Scaling**:
-- Stateless API design
-- Shared database backend
-- Load balancer compatibility
-
-**Vertical Scaling**:
-- Async processing for CPU-intensive tasks
-- Memory optimization for large documents
-- Efficient embedding storage
-
----
-
-This architecture provides a solid foundation for Open Notebook's current capabilities while supporting future enhancements and scaling requirements. The modular design allows for easy extension and modification of individual components without affecting the overall system.
\ No newline at end of file
diff --git a/docs/development/contributing.md b/docs/development/contributing.md
deleted file mode 100644
index 680a3824..00000000
--- a/docs/development/contributing.md
+++ /dev/null
@@ -1,709 +0,0 @@
-# Contributing to Open Notebook
-
-Thank you for your interest in contributing to Open Notebook! We welcome contributions from developers of all skill levels. This guide will help you get started and understand our development workflow.
-
-## 🎯 Quick Start for Contributors
-
-### 1. Fork and Clone
-
-```bash
-# Fork the repository on GitHub, then clone your fork
-git clone https://github.com/YOUR_USERNAME/open-notebook.git
-cd open-notebook
-
-# Add the original repository as upstream
-git remote add upstream https://github.com/lfnovo/open-notebook.git
-```
-
-### 2. Set Up Development Environment
-
-```bash
-# Install dependencies using uv (recommended)
-uv sync
-
-# Or using pip
-pip install -e .
-
-# Start the development environment
-make start-all
-```
-
-### 3. Verify Setup
-
-```bash
-# Check that the API is running
-curl http://localhost:5055/health
-
-# Check that the Next.js frontend is accessible
-open http://localhost:3000
-```
-
-## 🏗️ Development Workflow
-
-### Branch Strategy
-
-We use a **feature branch workflow**:
-
-1. **Main Branch**: `main` - production-ready code
-2. **Feature Branches**: `feature/description` - new features
-3. **Bug Fixes**: `fix/description` - bug fixes
-4. **Documentation**: `docs/description` - documentation updates
-
-### Making Changes
-
-1. **Create a feature branch**:
-```bash
-git checkout -b feature/amazing-new-feature
-```
-
-2. **Make your changes** following our coding standards
-
-3. **Test your changes**:
-```bash
-# Run tests
-uv run pytest

-# Run linting
-uv run ruff check .
-
-# Run formatting
-uv run ruff format .
-```
-
-4. **Commit your changes**:
-```bash
-git add .
-git commit -m "feat: add amazing new feature"
-```
-
-5. **Push and create PR**:
-```bash
-git push origin feature/amazing-new-feature
-# Then create a Pull Request on GitHub
-```
-
-### Keeping Your Fork Updated
-
-```bash
-# Fetch upstream changes
-git fetch upstream
-
-# Switch to main and merge
-git checkout main
-git merge upstream/main
-
-# Push to your fork
-git push origin main
-```
-
-## 📏 Code Standards
-
-### Python Style Guide
-
-We follow **PEP 8** with some specific guidelines:
-
-#### Code Formatting
-- Use **Ruff** for linting and formatting
-- Maximum line length: **88 characters**
-- Use **double quotes** for strings
-- Use **trailing commas** in multi-line structures
-
-#### Type Hints
-Always use type hints for function parameters and return values:
-
-```python
-from typing import List, Optional, Dict, Any
-from pydantic import BaseModel
-
-async def process_content(
-    content: str,
-    options: Optional[Dict[str, Any]] = None
-) -> ProcessedContent:
-    """Process content with optional configuration."""
-    # Implementation
-```
-
-#### Async/Await Patterns
-Use async/await consistently:
-
-```python
-# Good
-async def fetch_data(url: str) -> Dict[str, Any]:
-    async with aiohttp.ClientSession() as session:
-        async with session.get(url) as response:
-            return await response.json()
-
-# Bad - mixing sync and async
-def fetch_data(url: str) -> Dict[str, Any]:
-    loop = asyncio.get_event_loop()
-    return loop.run_until_complete(async_fetch(url))
-```
-
-#### Error Handling
-Use structured error handling with custom exceptions:
-
-```python
-from open_notebook.exceptions import DatabaseOperationError, InvalidInputError
-
-async def create_notebook(name: str, description: str) -> Notebook:
-    """Create a new notebook with validation."""
-    if not name.strip():
-        raise InvalidInputError("Notebook name cannot be empty")
-
-    try:
-        notebook = Notebook(name=name, description=description)
-        await notebook.save()
-        return notebook
-    except Exception as e:
-        raise DatabaseOperationError(f"Failed to create notebook: {str(e)}")
-```
-
-#### Documentation
-Use **Google-style docstrings**:
-
-```python
-async def vector_search(
-    query: str,
-    limit: int = 10,
-    minimum_score: float = 0.2
-) -> List[SearchResult]:
-    """Perform vector search across embedded content.
- - Args: - query: Search query string - limit: Maximum number of results to return - minimum_score: Minimum similarity score for results - - Returns: - List of search results sorted by relevance score - - Raises: - InvalidInputError: If query is empty or limit is invalid - DatabaseOperationError: If search operation fails - """ -``` - -### FastAPI Standards - -#### Router Organization -Organize endpoints by domain: - -```python -# api/routers/notebooks.py -from fastapi import APIRouter, HTTPException, Query -from typing import List, Optional - -router = APIRouter() - -@router.get("/notebooks", response_model=List[NotebookResponse]) -async def get_notebooks( - archived: Optional[bool] = Query(None, description="Filter by archived status"), - order_by: str = Query("updated desc", description="Order by field and direction"), -): - """Get all notebooks with optional filtering and ordering.""" -``` - -#### Request/Response Models -Use Pydantic models for validation: - -```python -from pydantic import BaseModel, Field -from typing import Optional - -class NotebookCreate(BaseModel): - name: str = Field(..., description="Name of the notebook", min_length=1) - description: str = Field(default="", description="Description of the notebook") - -class NotebookResponse(BaseModel): - id: str - name: str - description: str - archived: bool - created: str - updated: str -``` - -#### Error Handling -Use consistent error responses: - -```python -from fastapi import HTTPException -from loguru import logger - -try: - result = await some_operation() - return result -except InvalidInputError as e: - raise HTTPException(status_code=400, detail=str(e)) -except DatabaseOperationError as e: - logger.error(f"Database error: {str(e)}") - raise HTTPException(status_code=500, detail="Internal server error") -``` - -### Database Standards - -#### SurrealDB Patterns -Use the repository pattern consistently: - -```python -from open_notebook.database.repository import repo_create, repo_query, repo_update - -# Create records -async def create_notebook(data: Dict[str, Any]) -> Dict[str, Any]: - """Create a new notebook record.""" - return await repo_create("notebook", data) - -# Query with parameters -async def find_notebooks_by_user(user_id: str) -> List[Dict[str, Any]]: - """Find notebooks for a specific user.""" - return await repo_query( - "SELECT * FROM notebook WHERE user_id = $user_id", - {"user_id": user_id} - ) - -# Update records -async def update_notebook(notebook_id: str, data: Dict[str, Any]) -> Dict[str, Any]: - """Update a notebook record.""" - return await repo_update("notebook", notebook_id, data) -``` - -#### Schema Management -Use migrations for schema changes: - -```surrealql --- migrations/8.surrealql -DEFINE TABLE IF NOT EXISTS new_feature SCHEMAFULL; -DEFINE FIELD IF NOT EXISTS name ON TABLE new_feature TYPE string; -DEFINE FIELD IF NOT EXISTS description ON TABLE new_feature TYPE option; -DEFINE FIELD IF NOT EXISTS created ON TABLE new_feature TYPE datetime DEFAULT time::now(); -DEFINE FIELD IF NOT EXISTS updated ON TABLE new_feature TYPE datetime DEFAULT time::now(); -``` - -## 🧪 Testing Guidelines - -### Test Structure - -We use **pytest** with async support: - -```python -import pytest -from httpx import AsyncClient -from open_notebook.domain.notebook import Notebook - -@pytest.mark.asyncio -async def test_create_notebook(): - """Test notebook creation.""" - notebook = Notebook(name="Test Notebook", description="Test description") - await notebook.save() - - assert notebook.id is not None - assert 
notebook.name == "Test Notebook" - assert notebook.created is not None - -@pytest.mark.asyncio -async def test_api_create_notebook(): - """Test notebook creation via API.""" - async with AsyncClient(app=app, base_url="http://test") as client: - response = await client.post( - "/api/notebooks", - json={"name": "Test Notebook", "description": "Test description"} - ) - assert response.status_code == 200 - data = response.json() - assert data["name"] == "Test Notebook" -``` - -### Test Categories - -1. **Unit Tests**: Test individual functions and methods -2. **Integration Tests**: Test component interactions -3. **API Tests**: Test HTTP endpoints -4. **Database Tests**: Test data persistence and queries - -### Running Tests - -```bash -# Run all tests -uv run pytest - -# Run specific test file -uv run pytest tests/test_notebooks.py - -# Run with coverage -uv run pytest --cov=open_notebook - -# Run only unit tests -uv run pytest tests/unit/ - -# Run only integration tests -uv run pytest tests/integration/ -``` - -### Test Fixtures - -Use pytest fixtures for common setup: - -```python -@pytest.fixture -async def test_notebook(): - """Create a test notebook.""" - notebook = Notebook(name="Test Notebook", description="Test description") - await notebook.save() - yield notebook - await notebook.delete() - -@pytest.fixture -async def api_client(): - """Create an API test client.""" - async with AsyncClient(app=app, base_url="http://test") as client: - yield client -``` - -## 📚 Documentation Standards - -### Code Documentation - -#### Module Docstrings -```python -""" -Notebook domain model and operations. - -This module contains the core Notebook class and related operations for -managing research notebooks within the Open Notebook system. -""" -``` - -#### Class Docstrings -```python -class Notebook(BaseModel): - """A research notebook containing sources, notes, and chat sessions. - - Notebooks are the primary organizational unit in Open Notebook, allowing - users to group related research materials and maintain separate contexts - for different projects. - - Attributes: - name: The notebook's display name - description: Optional description of the notebook's purpose - archived: Whether the notebook is archived (default: False) - created: Timestamp of creation - updated: Timestamp of last update - """ -``` - -#### Function Docstrings -```python -async def create_notebook( - name: str, - description: str = "", - user_id: Optional[str] = None -) -> Notebook: - """Create a new notebook with validation. 
- - Args: - name: The notebook name (required, non-empty) - description: Optional notebook description - user_id: Optional user ID for multi-user deployments - - Returns: - The created notebook instance - - Raises: - InvalidInputError: If name is empty or invalid - DatabaseOperationError: If creation fails - - Example: - ```python - notebook = await create_notebook( - name="AI Research", - description="Research on AI applications" - ) - ``` - """ -``` - -### API Documentation - -Use FastAPI's automatic documentation features: - -```python -@router.post( - "/notebooks", - response_model=NotebookResponse, - summary="Create a new notebook", - description="Create a new notebook with the specified name and description.", - responses={ - 201: {"description": "Notebook created successfully"}, - 400: {"description": "Invalid input data"}, - 500: {"description": "Internal server error"} - } -) -async def create_notebook(notebook: NotebookCreate): - """Create a new notebook.""" -``` - -### README Updates - -When adding new features, update relevant documentation: - -- **Feature documentation** in `docs/features/` -- **API documentation** in `docs/development/api-reference.md` -- **Architecture documentation** if adding new components -- **User guide** if adding user-facing features - -## 🚀 Development Environment - -### Prerequisites - -- **Python 3.11+** -- **uv** (recommended) or **pip** -- **SurrealDB** (via Docker or binary) -- **Docker** (optional, for containerized development) - -### Environment Variables - -Create a `.env` file in the project root: - -```bash -# Database -SURREAL_URL=ws://localhost:8000/rpc -SURREAL_USER=root -SURREAL_PASSWORD=password -SURREAL_NAMESPACE=open_notebook -SURREAL_DATABASE=development - -# AI Providers (add your API keys) -OPENAI_API_KEY=sk-... -ANTHROPIC_API_KEY=sk-ant-... -GOOGLE_API_KEY=AI... - -# Application -APP_PASSWORD= # Optional password protection -DEBUG=true -LOG_LEVEL=DEBUG -``` - -### Local Development Setup - -```bash -# Start SurrealDB -docker run -d --name surrealdb -p 8000:8000 \ - surrealdb/surrealdb:v2 start \ - --user root --pass password \ - --bind 0.0.0.0:8000 memory - -# Install dependencies -uv sync - -# Run database migrations -uv run python -m open_notebook.database.async_migrate - -# Start the API server -uv run python run_api.py - -# Start the Next.js frontend (in another terminal) -cd frontend && npm run dev -``` - -### Development Tools - -We use these tools for development: - -- **Ruff**: Linting and formatting -- **Pytest**: Testing framework -- **MyPy**: Type checking -- **Pre-commit**: Git hooks for code quality - -Install pre-commit hooks: - -```bash -uv run pre-commit install -``` - -## 🔧 Common Development Tasks - -### Adding a New API Endpoint - -1. **Create the endpoint** in the appropriate router: -```python -# api/routers/notebooks.py -@router.post("/notebooks/{notebook_id}/archive") -async def archive_notebook(notebook_id: str): - """Archive a notebook.""" - # Implementation -``` - -2. **Add request/response models** if needed: -```python -# api/models.py -class ArchiveRequest(BaseModel): - reason: Optional[str] = Field(None, description="Reason for archiving") -``` - -3. **Update the domain model** if needed: -```python -# open_notebook/domain/notebook.py -async def archive(self, reason: Optional[str] = None) -> None: - """Archive this notebook.""" - # Implementation -``` - -4. 
**Write tests**: -```python -# tests/test_notebooks.py -@pytest.mark.asyncio -async def test_archive_notebook(): - """Test notebook archiving.""" - # Test implementation -``` - -5. **Update documentation** in `docs/development/api-reference.md` - -### Adding a New Domain Model - -1. **Create the model**: -```python -# open_notebook/domain/new_model.py -from open_notebook.domain.base import BaseModel - -class NewModel(BaseModel): - """New domain model.""" - - # Fields and methods -``` - -2. **Create database migration**: -```surrealql --- migrations/N.surrealql -DEFINE TABLE IF NOT EXISTS new_model SCHEMAFULL; --- Field definitions -``` - -3. **Add API endpoints**: -```python -# api/routers/new_model.py -# Router implementation -``` - -4. **Write comprehensive tests** - -### Adding AI Processing Features - -1. **Create the graph**: -```python -# open_notebook/graphs/new_feature.py -from langgraph import create_graph - -@create_graph -async def new_feature_graph(state: NewFeatureState): - """New AI processing feature.""" - # Implementation -``` - -2. **Add service layer**: -```python -# api/new_feature_service.py -# Service implementation -``` - -3. **Create API endpoints**: -```python -# api/routers/new_feature.py -# Router implementation -``` - -4. **Test with multiple AI providers** - -## 🌟 Feature Contribution Guidelines - -### Current Priority Areas - -We're actively looking for contributions in these areas: - -1. **Frontend Enhancement**: Help improve the Next.js/React UI with real-time updates and better UX -2. **Testing**: Expand test coverage across all components -3. **Performance**: Async processing improvements and caching -4. **Documentation**: API examples and user guides -5. **Integrations**: New content sources and AI providers - -### Feature Proposal Process - -1. **Check existing issues** to avoid duplicates -2. **Open a discussion** on GitHub for large features -3. **Create an issue** with detailed requirements -4. **Get approval** from maintainers before starting work -5. **Implement in phases** for large features - -### Code Review Process - -All contributions go through code review: - -1. **Automated checks** must pass (linting, tests) -2. **Manual review** by maintainers -3. **Documentation review** for user-facing changes -4. **Integration testing** for complex features - -## 🐛 Bug Reports and Issues - -### Reporting Bugs - -When reporting bugs, please include: - -1. **Clear description** of the issue -2. **Steps to reproduce** the problem -3. **Expected vs actual behavior** -4. **Environment details** (OS, Python version, etc.) -5. **Relevant logs** and error messages - -### Bug Fix Process - -1. **Reproduce the issue** locally -2. **Write a failing test** that demonstrates the bug -3. **Fix the issue** with minimal changes -4. **Verify the fix** passes all tests -5. **Update documentation** if needed - -## 📞 Getting Help - -### Community Support - -- **Discord**: [Join our Discord server](https://discord.gg/37XJPXfz2w) for real-time help -- **GitHub Discussions**: For longer-form questions and ideas -- **GitHub Issues**: For bug reports and feature requests - -### Mentorship - -New contributors are welcome! 
We offer: - -- **First-time contributor** guidance -- **Code review** and feedback -- **Architecture discussions** -- **Career development** advice - -## 🏆 Recognition - -We recognize contributions through: - -- **GitHub credits** on releases -- **Community recognition** in Discord -- **Contribution statistics** in project analytics -- **Maintainer consideration** for active contributors - -## 📜 Code of Conduct - -We follow the [Contributor Covenant](https://www.contributor-covenant.org/). Please: - -- **Be respectful** and inclusive -- **Help others** learn and grow -- **Give constructive feedback** -- **Focus on the code**, not the person - -## 🎉 Thank You! - -Thank you for contributing to Open Notebook! Your contributions help make research more accessible and private for everyone. Whether you're fixing a typo, adding a feature, or helping with documentation, every contribution matters. - -Join our community and let's build something amazing together! 🚀 - ---- - -For questions about this guide or contributing in general, please reach out on [Discord](https://discord.gg/37XJPXfz2w) or open a GitHub Discussion. \ No newline at end of file diff --git a/docs/development/index.md b/docs/development/index.md deleted file mode 100644 index c0fdb328..00000000 --- a/docs/development/index.md +++ /dev/null @@ -1,141 +0,0 @@ -# Development Documentation - -Welcome to the Open Notebook development documentation! This section provides comprehensive technical information for developers and contributors. - -## 📋 Quick Navigation - -### Getting Started -- **[Architecture Overview](architecture.md)** - Understanding the system design and components -- **[API Reference](api-reference.md)** - Complete REST API documentation -- **[Contributing Guide](contributing.md)** - Development workflow and standards - -### Development Setup -Before diving into the documentation below, make sure you have Open Notebook set up locally: - -```bash -# Clone the repository -git clone https://github.com/lfnovo/open-notebook -cd open-notebook - -# Install dependencies with uv -uv sync - -# Start the development environment -make start-all -``` - -For detailed setup instructions, see the [Installation Guide](../getting-started/installation.md). 
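-
-Once the stack is up, a quick smoke test (assuming the default ports) confirms both services respond:
-
-```bash
-# API health check
-curl http://localhost:5055/health
-
-# Next.js frontend (dev server)
-open http://localhost:3000
-```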
- -## 🏗️ System Architecture - -Open Notebook is built with a modern Python stack using: - -- **Backend**: FastAPI with async/await patterns -- **Database**: SurrealDB for flexible document storage -- **Frontend**: Next.js for rapid UI development -- **AI Integration**: Multi-provider support via Esperanto library -- **Processing**: LangChain for AI workflows and content processing - -### Key Components - -| Component | Description | Location | -|-----------|-------------|----------| -| **API Layer** | FastAPI REST endpoints | `api/` | -| **Domain Models** | Core business logic | `open_notebook/domain/` | -| **Database** | SurrealDB repository pattern | `open_notebook/database/` | -| **AI Graphs** | LangChain processing workflows | `open_notebook/graphs/` | -| **Next.js Frontend** | Modern React-based web interface | `frontend/` | -| **Commands** | Background job processing | `commands/` | - -## 🔧 Development Workflow - -### Code Standards -- **Python**: PEP 8 compliance with type hints -- **Async/Await**: Consistent async patterns throughout -- **Error Handling**: Comprehensive exception handling -- **Logging**: Structured logging with Loguru -- **Testing**: Unit and integration tests with pytest - -### Database Patterns -Open Notebook uses SurrealDB with a custom repository pattern: - -```python -# Create records -await repo_create("table", data) - -# Query with SurrealQL (parameters are bound via $placeholders, never interpolated) -await repo_query("SELECT * FROM table WHERE field = $value", {"value": "example"}) - -# Update records -await repo_update("table", record_id, data) -``` - -### AI Integration -Multi-provider AI support via the Esperanto library: - -```python -from esperanto import AIFactory - -# Create language model -model = AIFactory.create_language("openai", "gpt-4") - -# Build an OpenAI-style message list, then generate a completion -messages = [{"role": "user", "content": "Summarize this source"}] -response = model.chat_complete(messages) -``` - -## 🚀 Key Features to Understand - -### 1. Multi-Notebook Organization -- Notebooks contain sources, notes, and chat sessions -- Each notebook maintains isolated context -- Sources can be shared across notebooks (roadmap) - -### 2. Content Processing Pipeline -- **Ingestion**: Documents, URLs, text → structured content -- **Embedding**: Vector representations for semantic search -- **Transformations**: AI-powered content analysis -- **Indexing**: Both full-text and vector search - -### 3. AI Workflows -- **Chat**: Context-aware conversations -- **Ask**: Multi-step question answering -- **Transformations**: Content summarization and analysis -- **Podcast Generation**: Advanced multi-speaker content - -### 4. Background Processing -- Commands system for long-running tasks -- Async job queue with SurrealDB -- Status tracking and error handling - -## 📝 Contributing - -We welcome contributions! Here's how to get started: - -1. **Read the [Contributing Guide](contributing.md)** for detailed workflow -2. **Check the [Architecture Overview](architecture.md)** to understand the system -3. **Browse the [API Reference](api-reference.md)** for endpoint details -4. 
**Join our [Discord](https://discord.gg/37XJPXfz2w)** for community support - -### Current Development Priorities - -- **Frontend Enhancement**: Improving the Next.js/React UI with real-time updates -- **Performance**: Async processing and caching improvements -- **Testing**: Expanded test coverage -- **Documentation**: API documentation and examples - -## 📖 Additional Resources - -### External Documentation -- [SurrealDB Documentation](https://surrealdb.com/docs) -- [FastAPI Documentation](https://fastapi.tiangolo.com/) -- [LangChain Documentation](https://python.langchain.com/) -- [Esperanto Library](https://github.com/lfnovo/esperanto) - -### Community -- [Discord Server](https://discord.gg/37XJPXfz2w) - Development discussions -- [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) - Bug reports and features -- [GitHub Discussions](https://github.com/lfnovo/open-notebook/discussions) - Ideas and questions - ---- - -Ready to contribute? Start with the [Contributing Guide](contributing.md) and join our vibrant developer community! \ No newline at end of file diff --git a/docs/features/ai-models.md b/docs/features/ai-models.md deleted file mode 100644 index 8b4c1af6..00000000 --- a/docs/features/ai-models.md +++ /dev/null @@ -1,934 +0,0 @@ -# AI Models & Providers - -Open Notebook supports 16+ AI providers, giving you complete flexibility in choosing the AI models that best fit your needs, budget, and privacy requirements. This comprehensive guide covers everything you need to know about selecting, configuring, and optimizing your AI models. - -## Quick Start - -For immediate setup, use one of these configurations: - -### OpenAI Only (Simplest) -```bash -# Set environment variable -export OPENAI_API_KEY=your_key_here - -# Configure these models in Settings: -# Chat: gpt-5-mini -# Tools: gpt-5 -# Transformations: gpt-5-mini -# Embedding: text-embedding-3-small -# Speech-to-Text: whisper-1 -# Text-to-Speech: tts-1 -``` - -### Mixed Providers (Best Value) -```bash -# Environment variables -export OPENAI_API_KEY=your_key -export GEMINI_API_KEY=your_key -export OLLAMA_API_BASE=http://localhost:11434 - -# Recommended configuration in settings covered below -``` - -## Understanding Model Types - -Open Notebook uses four distinct types of AI models, each optimized for specific tasks: - -### 🧠 Language Models -- **Purpose**: Chat conversations, text generation, summaries, and tool calling -- **Key Features**: Reasoning, instruction following, context understanding -- **Usage**: Primary interface for AI interactions - -### 🔍 Embedding Models -- **Purpose**: Semantic search and content similarity matching -- **Key Features**: Convert text to numerical vectors for similarity comparison -- **Usage**: Power the search functionality across your content - -### 🎙️ Text-to-Speech (TTS) -- **Purpose**: Generate podcasts and audio content -- **Key Features**: Natural-sounding voice synthesis -- **Usage**: Convert your notes and research into professional podcasts - -### 🎧 Speech-to-Text (STT) -- **Purpose**: Transcribe audio and video files -- **Key Features**: Accurate transcription with speaker identification -- **Usage**: Convert audio/video sources into searchable text - -## Provider Support Matrix - -| Provider | Language | Embedding | STT | TTS | -|--------------|----------|-----------|-----|-----| -| **OpenAI** | ✅ | ✅ | ✅ | ✅ | -| **Anthropic** | ✅ | ❌ | ❌ | ❌ | -| **Google (Gemini)** | ✅ | ✅ | ❌ | ✅ | -| **Ollama** | ✅ | ✅ | ❌ | ❌ | -| **ElevenLabs** | ❌ | ❌ | ✅ | ✅ | -| **Mistral** | ✅ | 
✅ | ❌ | ❌ | -| **DeepSeek** | ✅ | ❌ | ❌ | ❌ | -| **xAI (Grok)** | ✅ | ❌ | ❌ | ❌ | -| **Voyage AI** | ❌ | ✅ | ❌ | ❌ | -| **Groq** | ✅ | ❌ | ✅ | ❌ | -| **Vertex AI** | ✅ | ✅ | ❌ | ✅ | -| **Azure OpenAI** | ✅ | ✅ | ✅ | ✅ | -| **OpenRouter** | ✅ | ❌ | ❌ | ❌ | -| **Perplexity** | ✅ | ❌ | ❌ | ❌ | -| **OpenAI Compatible** | ✅ | ✅ | ✅ | ✅ | - -## Model Selection Guide - -### 🎯 Selection Criteria - -**💰 Cost Considerations** -- **Free**: Ollama models (run locally) -- **Budget**: OpenAI gpt-5-mini, Gemini Flash models -- **Premium**: Claude 3.5 Sonnet, gpt-5, Grok-3 - -**🎯 Quality Factors** -- **Reasoning**: Claude 3.5 Sonnet, Grok-3, DeepSeek-R1 -- **Tool Calling**: gpt-5, Claude 3.5 Sonnet, Grok-3 -- **Large Context**: Gemini models (up to 2M tokens) -- **Speed**: Groq models, Ollama local models - -**🔧 Special Features** -- **Reasoning Models**: Show transparent thinking process -- **Multilingual**: Gemini, Claude, GPT-4 -- **Code Generation**: Claude 3.5 Sonnet, gpt-5 -- **Creative Writing**: Claude, gpt-5, Grok - -## Provider Deep Dive - -### 🟦 Google (Gemini) -**Best for**: Large context processing, cost-effective high-quality models - -**Environment Setup** -```bash -export GEMINI_API_KEY=your_api_key_here - -# Optional: Override the default Gemini API endpoint -# Use this for Vertex AI, custom proxies, or alternative endpoints -# export GEMINI_API_BASE_URL=https://your-custom-endpoint.com -``` - -**Recommended Models** -- **Language**: `gemini-2.0-flash`, `gemini-2.5-pro-preview-06-05` -- **TTS**: `gemini-2.5-flash-preview-tts`, `gemini-2.5-pro-preview-tts` -- **Embedding**: `text-embedding-004` - -**Strengths** -- Massive context windows (up to 2M tokens) -- Excellent price-to-performance ratio -- Strong multilingual capabilities -- Integrated TTS with good quality - -**Considerations** -- No STT support -- Newer models may have limited availability - ---- - -### 🟢 OpenAI -**Best for**: Reliable performance, excellent tool calling, comprehensive ecosystem - -**Environment Setup** -```bash -export OPENAI_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **Language**: `gpt-5-mini`, `gpt-5` -- **TTS**: `tts-1`, `gpt-4o-mini-tts` -- **STT**: `whisper-1` -- **Embedding**: `text-embedding-3-small` - -**Strengths** -- Most mature ecosystem -- Excellent tool calling capabilities -- Industry-standard STT with Whisper -- Consistent performance across models - -**Considerations** -- Higher costs for premium models -- Data privacy concerns for sensitive content - ---- - -### 🟣 Anthropic (Claude) -**Best for**: High-quality reasoning, safety, and nuanced understanding - -**Environment Setup** -```bash -export ANTHROPIC_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **Language**: `claude-3-5-sonnet-latest` - -**Strengths** -- Exceptional reasoning capabilities -- Strong safety and alignment -- Excellent for complex analysis -- Superior code generation - -**Considerations** -- Only language models available -- Higher cost per token -- Need additional providers for other model types - ---- - -### 🦙 Ollama (Local/Free) -**Best for**: Privacy, offline use, zero ongoing costs - -**Environment Setup** -```bash -# Install Ollama locally -curl -fsSL https://ollama.ai/install.sh | sh - -# Set API base (if running remotely) -export OLLAMA_API_BASE=http://localhost:11434 -``` - -**Recommended Models** -- **Language**: `qwen3`, `gemma3`, `phi4`, `deepseek-r1`, `llama4` -- **Embedding**: `mxbai-embed-large` - -**Strengths** -- Completely free after setup -- Full data privacy (local processing) 
-- No internet dependency -- Support for reasoning models - -**Considerations** -- Requires local hardware resources -- Limited model variety compared to cloud providers -- No TTS/STT capabilities - -> **📖 Need detailed Ollama setup help?** Check our comprehensive [Ollama Setup Guide](ollama.md) for network configuration, Docker deployment, troubleshooting, and optimization tips. - ---- - -### 🎤 ElevenLabs -**Best for**: Premium voice synthesis and transcription - -**Environment Setup** -```bash -export ELEVENLABS_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **TTS**: `eleven_turbo_v2_5`, `eleven-monolingual-v1` -- **STT**: `scribe_v1`, `eleven-stt-v1` - -**Strengths** -- Highest quality voice synthesis -- Excellent transcription accuracy -- Multiple voice options -- Good pricing for audio services - -**Considerations** -- Audio-only provider -- Requires separate language/embedding providers - ---- - -### 🔵 DeepSeek -**Best for**: Cost-effective language models with advanced reasoning - -**Environment Setup** -```bash -export DEEPSEEK_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **Language**: `deepseek-chat`, `deepseek-reasoner` - -**Strengths** -- Excellent quality-to-price ratio -- Advanced reasoning capabilities -- Large context windows (64k+) -- Strong performance on technical tasks - -**Considerations** -- Limited to language models only -- Relatively new provider - ---- - -### 🟡 Mistral -**Best for**: European alternative with competitive pricing - -**Environment Setup** -```bash -export MISTRAL_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **Language**: `mistral-medium-latest`, `ministral-8b-latest`, `magistral` -- **Embedding**: `mistral-embed` - -**Strengths** -- European data governance -- Competitive pricing -- Good reasoning capabilities -- Strong multilingual support - -**Considerations** -- Limited model variety -- No TTS/STT capabilities - ---- - -### ⚡ xAI (Grok) -**Best for**: Cutting-edge intelligence and unrestricted responses - -**Environment Setup** -```bash -export XAI_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **Language**: `grok-3`, `grok-3-mini` - -**Strengths** -- State-of-the-art reasoning -- Less restrictive than other providers -- Excellent for creative and analytical tasks -- Real-time information access - -**Considerations** -- Premium pricing -- Limited to language models -- Relatively new provider - ---- - -### 🚢 Voyage AI -**Best for**: Specialized high-performance embeddings - -**Environment Setup** -```bash -export VOYAGE_API_KEY=your_api_key_here -``` - -**Recommended Models** -- **Embedding**: `voyage-3.5-lite` - -**Strengths** -- Specialized in embeddings -- Competitive performance -- Good pricing for embeddings - -**Considerations** -- Embedding-only provider -- Requires other providers for language models - ---- - -### 🔧 OpenAI Compatible (LM Studio & Others) -**Best for**: Using any OpenAI-compatible API endpoint for all AI modalities, including LM Studio - -**Environment Setup** -```bash -# Generic configuration (applies to all modalities) -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 -# Optional - only if your endpoint requires authentication -export OPENAI_COMPATIBLE_API_KEY=your_key_here - -# Mode-specific configuration (for different endpoints per modality) -export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 -export OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8080/v1 -export OPENAI_COMPATIBLE_BASE_URL_STT=http://localhost:9000/v1 -export 
OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:9000/v1 -``` - -**Common Use Cases** -- **LM Studio**: Run models locally with a familiar UI -- **Text Generation WebUI**: Alternative local inference -- **vLLM**: High-performance inference server -- **Custom Endpoints**: Any OpenAI-compatible API - -**Strengths** -- Use any OpenAI-compatible endpoint -- **NEW**: Full support for all 4 modalities (language, embeddings, STT, TTS) -- Configure different endpoints for different capabilities -- Perfect for LM Studio users -- Flexibility in model deployment -- Works with local and remote endpoints - -**Considerations** -- Performance depends on your hardware (for local) -- Model availability varies by endpoint -- Some endpoints may not support all features - -> **📖 Need detailed setup help?** Check our comprehensive [OpenAI-Compatible Setup Guide](openai-compatible.md) for LM Studio, Text Generation WebUI, vLLM, and other configurations. - ---- - -### ☁️ Azure OpenAI -**Best for**: Enterprise deployments with Microsoft Azure infrastructure - -**Environment Setup** -```bash -# Generic configuration (applies to all modalities) -export AZURE_OPENAI_API_KEY=your_key -export AZURE_OPENAI_ENDPOINT=https://your-resource.openai.azure.com/ -export AZURE_OPENAI_API_VERSION=2024-12-01-preview - -# Mode-specific configuration (for different deployments per modality) -# Use these when you have separate Azure deployments for different capabilities -export AZURE_OPENAI_API_KEY_LLM=your_llm_key -export AZURE_OPENAI_ENDPOINT_LLM=https://llm-resource.openai.azure.com/ -export AZURE_OPENAI_API_VERSION_LLM=2024-12-01-preview - -export AZURE_OPENAI_API_KEY_EMBEDDING=your_embedding_key -export AZURE_OPENAI_ENDPOINT_EMBEDDING=https://embedding-resource.openai.azure.com/ -export AZURE_OPENAI_API_VERSION_EMBEDDING=2024-12-01-preview - -# STT and TTS also supported with _STT and _TTS suffixes -``` - -**Recommended Models** -- **Language**: `gpt-4o`, `gpt-4o-mini`, `gpt-35-turbo` -- **Embedding**: `text-embedding-3-small`, `text-embedding-ada-002` -- **STT**: `whisper` (deployment name for Whisper model) -- **TTS**: `tts`, `tts-hd` (deployment names for TTS models) - -**Strengths** -- Enterprise-grade security and compliance -- **NEW**: Full support for modality-specific deployments -- Configure different Azure resources for different capabilities -- Integration with Azure ecosystem -- SLA guarantees and dedicated support -- Regional deployment options - -**Use Cases** -- **Single Deployment**: Use generic configuration when all models are in one Azure resource -- **Multi-Deployment**: Use mode-specific configuration for separate resources (e.g., production LLM in one region, embeddings in another) -- **Cost Optimization**: Different Azure subscriptions or resources for different workloads -- **Compliance**: Separate deployments for different data residency requirements - -**Considerations** -- Requires Azure subscription and resource setup -- More complex configuration than standard OpenAI -- Limited to Azure OpenAI service capabilities -- Deployment-based model access (not all models available) - -## 🧠 Reasoning Models - -Open Notebook fully supports **reasoning models** that show their transparent thinking process. These models output their internal reasoning within `<think>` tags, which Open Notebook automatically handles. 
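To make "automatically handles" concrete, the sketch below shows the kind of filtering this implies - an illustration of the technique, not Open Notebook's actual implementation. Reasoning wrapped in `<think>` tags is split off so chat can show it in a collapsible section while transformations and search keep only the clean answer.

```python
import re

THINK_RE = re.compile(r"<think>(.*?)</think>", re.DOTALL)

def split_reasoning(raw: str) -> tuple[str, str]:
    """Separate <think>...</think> reasoning from the final answer."""
    reasoning = "\n".join(part.strip() for part in THINK_RE.findall(raw))
    answer = THINK_RE.sub("", raw).strip()
    return reasoning, answer

reasoning, answer = split_reasoning(
    "<think>The user wants only the key finding.</think>The study's key finding is X."
)
print(answer)     # The study's key finding is X.
print(reasoning)  # The user wants only the key finding.
```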
- -### How Reasoning Models Work - -**In Chat Interface** -- Reasoning content appears in a collapsible "🤔 AI Reasoning" section -- Clean final answers are displayed prominently -- Users can explore the AI's thought process - -**In Transformations** -- Clean output is stored in your notes -- Reasoning is filtered out automatically -- Professional results without internal monologue - -**In Search** -- Final answers remain clean and focused -- Reasoning helps improve answer quality - -### Supported Reasoning Models - -| Model | Provider | Access | Quality | -|-------|----------|---------|---------| -| **deepseek-r1** | Ollama | Free | Exceptional | -| **qwen3** | Ollama | Free | Very Good | -| **magistral** | Mistral | Paid | Good | -| **deepseek-reasoner** | DeepSeek | Paid | Excellent | - -### Benefits of Reasoning Models - -- **Transparency**: See exactly how AI reached conclusions -- **Trust**: Understand the logic behind responses -- **Learning**: Gain insights into AI problem-solving -- **Debugging**: Identify where AI reasoning went wrong -- **Quality**: Better answers through explicit reasoning - -## Recommended Configurations - -### 🌟 Best Value (Mixed Providers) -*Perfect balance of cost and performance* - -```bash -# Environment Variables -export OPENAI_API_KEY=your_key -export GEMINI_API_KEY=your_key -export MISTRAL_API_KEY=your_key # For the transformations model below -export OLLAMA_API_BASE=http://localhost:11434 -``` - -| Model Default | Recommended Model | Provider | -|---------------|-------------------|----------| -| Chat Model | `gpt-5-mini` | OpenAI | -| Tools Model | `gpt-5` | OpenAI | -| Transformations | `ministral-8b-latest` | Mistral | -| Large Context | `gemini-2.0-flash` | Google | -| Embedding | `text-embedding-3-small` | OpenAI | -| Text-to-Speech | `gemini-2.5-flash-preview-tts` | Google | -| Speech-to-Text | `whisper-1` | OpenAI | - -**Monthly Cost Estimate**: $20-50 for moderate usage - ---- - -### 💰 Budget-Friendly (Mostly Free) -*Great for getting started or keeping costs low* - -```bash -# Environment Variables -export OPENAI_API_KEY=your_key # For STT/TTS only -export OLLAMA_API_BASE=http://localhost:11434 -``` - -| Model Default | Recommended Model | Provider | -|---------------|-------------------|----------| -| Chat Model | `qwen3` | Ollama | -| Tools Model | `qwen3` | Ollama | -| Transformations | `gemma3` | Ollama | -| Large Context | `qwen3` | Ollama | -| Embedding | `mxbai-embed-large` | Ollama | -| Text-to-Speech | `gpt-4o-mini-tts` | OpenAI | -| Speech-to-Text | `whisper-1` | OpenAI | - -**Monthly Cost Estimate**: $5-15 (only for audio services) - ---- - -### 🚀 High Performance (Premium) -*When quality is your top priority* - -```bash -# Environment Variables -export ANTHROPIC_API_KEY=your_key -export XAI_API_KEY=your_key -export GEMINI_API_KEY=your_key -export VOYAGE_API_KEY=your_key -export ELEVENLABS_API_KEY=your_key -export OPENAI_API_KEY=your_key -``` - -| Model Default | Recommended Model | Provider | -|---------------|-------------------|----------| -| Chat Model | `claude-3-5-sonnet-latest` | Anthropic | -| Tools Model | `grok-3` | xAI | -| Transformations | `grok-3-mini` | xAI | -| Large Context | `gemini-2.5-pro-preview-06-05` | Google | -| Embedding | `voyage-3.5-lite` | Voyage | -| Text-to-Speech | `eleven_turbo_v2_5` | ElevenLabs | -| Speech-to-Text | `whisper-1` | OpenAI | - -**Monthly Cost Estimate**: $100-300 for moderate usage - ---- - -### 🏢 Single Provider (OpenAI) -*Simplify billing and setup* - -```bash -# Environment Variables -export OPENAI_API_KEY=your_key -``` - -| Model Default | 
Recommended Model | Provider | -|---------------|-------------------|----------| -| Chat Model | `gpt-5-mini` | OpenAI | -| Tools Model | `gpt-5` | OpenAI | -| Transformations | `gpt-5-mini` | OpenAI | -| Large Context | `gpt-5` | OpenAI | -| Embedding | `text-embedding-3-small` | OpenAI | -| Text-to-Speech | `gpt-4o-mini-tts` | OpenAI | -| Speech-to-Text | `whisper-1` | OpenAI | - -**Monthly Cost Estimate**: $30-80 for moderate usage - -## Setup Instructions - -### 1. Environment Variables - -Set up your API keys using environment variables. Here's the complete list: - -```bash -# Core Providers -export OPENAI_API_KEY=your_key -export ANTHROPIC_API_KEY=your_key -export GEMINI_API_KEY=your_key -export GEMINI_API_BASE_URL=https://custom-endpoint.com # Optional - -# Additional Language Providers -export MISTRAL_API_KEY=your_key -export DEEPSEEK_API_KEY=your_key -export XAI_API_KEY=your_key -export GROQ_API_KEY=your_key -export OPENROUTER_API_KEY=your_key - -# Audio Providers -export ELEVENLABS_API_KEY=your_key - -# Embedding Providers -export VOYAGE_API_KEY=your_key - -# Local/Cloud Infrastructure -export OLLAMA_API_BASE=http://localhost:11434 - -# Azure OpenAI -# Generic configuration (applies to all modalities) -export AZURE_OPENAI_API_KEY=your_key -export AZURE_OPENAI_ENDPOINT=your_endpoint -export AZURE_OPENAI_API_VERSION=2024-12-01-preview - -# Mode-specific configuration (for different deployments per modality) -export AZURE_OPENAI_API_KEY_LLM=your_llm_key -export AZURE_OPENAI_ENDPOINT_LLM=your_llm_endpoint -export AZURE_OPENAI_API_VERSION_LLM=2024-12-01-preview - -export AZURE_OPENAI_API_KEY_EMBEDDING=your_embedding_key -export AZURE_OPENAI_ENDPOINT_EMBEDDING=your_embedding_endpoint -export AZURE_OPENAI_API_VERSION_EMBEDDING=2024-12-01-preview -# Similarly for _STT and _TTS - -# Vertex AI -export VERTEX_PROJECT=your_project -export GOOGLE_APPLICATION_CREDENTIALS=./google-credentials.json -export VERTEX_LOCATION=us-east5 - -# OpenAI Compatible (LM Studio, etc.) -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 -export OPENAI_COMPATIBLE_API_KEY=your_key # Optional -``` - -### 2. Using Docker - -For Docker deployments, pass environment variables: - -```bash -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_single_data:/mydata \ - -e OPENAI_API_KEY=your_key \ - -e GEMINI_API_KEY=your_key \ - -e ANTHROPIC_API_KEY=your_key \ - lfnovo/open_notebook:v1-latest-single -``` - -### 3. Model Configuration - -After setting environment variables: - -1. **Access Settings**: Go to the Settings page in Open Notebook -2. **Create Models**: Add your models for each provider -3. **Set Defaults**: Configure default models for each task type -4. **Test Models**: Use the Playground to test model performance - -### 4. Provider-Specific Setup - -#### OpenAI -```bash -export OPENAI_API_KEY=sk-your-key-here -``` -- Get your API key from [OpenAI Platform](https://platform.openai.com/api-keys) -- Supports all model types -- Immediate activation - -#### Anthropic -```bash -export ANTHROPIC_API_KEY=sk-ant-your-key-here -``` -- Get your API key from [Anthropic Console](https://console.anthropic.com/) -- Only language models available -- Requires separate providers for other types - -#### Google (Gemini) -```bash -export GEMINI_API_KEY=your-key-here - -# Optional: Custom API endpoint (for Vertex AI, proxies, etc.) 
-# export GEMINI_API_BASE_URL=https://your-custom-endpoint.com -``` -- Get your API key from [Google AI Studio](https://makersuite.google.com/app/apikey) -- Excellent for large context and TTS -- Cost-effective option -- Supports custom endpoints via `GEMINI_API_BASE_URL` for advanced deployments - -#### Ollama (Local) -```bash -# Install Ollama -curl -fsSL https://ollama.ai/install.sh | sh - -# Pull models -ollama pull qwen3 -ollama pull mxbai-embed-large - -# Set API base if remote -export OLLAMA_API_BASE=http://your-server:11434 -``` - -#### ElevenLabs -```bash -export ELEVENLABS_API_KEY=your-key-here -``` -- Get your API key from [ElevenLabs](https://elevenlabs.io/) -- Premium voice synthesis -- Excellent for podcast generation - -## Advanced Configuration - -### Model Switching - -You can switch models at runtime: - -**In Chat** -- Use the model selector dropdown -- Changes apply to current conversation - -**In Transformations** -- Configure per-transformation defaults -- Override on individual operations - -**In Settings** -- Change global defaults -- Affects all new operations - -### Performance Optimization - -**For Speed** -- Use smaller models for simple tasks -- Groq for fast inference -- Local Ollama models for instant response - -**For Quality** -- Use premium models for complex reasoning -- Claude 3.5 Sonnet for analysis -- GPT-4o for tool calling - -**For Cost** -- Use cheaper models for transformations -- Ollama for free processing -- OpenAI mini models for everyday use - -### Context Management - -**Small Context (< 32k tokens)** -- Any modern language model -- Faster processing -- Lower costs - -**Medium Context (32k-128k tokens)** -- GPT-4o, Claude 3.5 Sonnet -- Good balance of speed and capacity - -**Large Context (> 128k tokens)** -- Gemini models (up to 2M tokens) -- Essential for large document processing -- Higher costs but necessary for big content - -## Cost Optimization Strategies - -### 1. Tiered Model Strategy - -Use different models for different complexity levels: - -``` -Simple Tasks (70% of usage): -- Chat: gpt-5-mini or qwen3 (Ollama) -- Transformations: ministral-8b-latest - -Complex Tasks (25% of usage): -- Analysis: claude-3-5-sonnet-latest -- Tool calling: gpt-5 - -Specialized Tasks (5% of usage): -- Large context: gemini-2.0-flash -- Premium TTS: eleven_turbo_v2_5 -``` - -### 2. Smart Model Selection - -**For Transformations** -- Use smaller, cheaper models -- Batch multiple operations -- Cache results when possible - -**For Chat** -- Start with mini models -- Escalate to premium for complex queries -- Use reasoning models for transparency - -**For Embeddings** -- Use free Ollama models when possible -- OpenAI for balanced performance -- Voyage for specialized needs - -### 3. Usage Monitoring - -Track your usage patterns: - -```bash -# Monitor API usage through provider dashboards -# Set up billing alerts -# Review monthly costs by model -# Optimize based on actual usage patterns -``` - -### 4. Free Tier Maximization - -**Ollama (Completely Free)** -- Language models for most tasks -- Embeddings for search -- No usage limits after setup - -**Free Tiers** -- OpenAI: $5 monthly credit for new users -- Anthropic: Limited free tier -- Google: Generous free tier for Gemini - -### 5. 
Batch Processing - -Process multiple items together: -- Combine similar transformations -- Use larger context windows efficiently -- Reduce API call overhead - -## Troubleshooting - -### Common Issues - -**API Key Problems** -```bash -# Check environment variables -echo $OPENAI_API_KEY - -# Verify key format -# OpenAI: sk-... -# Anthropic: sk-ant-... -# Google: starts with alphanumeric -``` - -**Model Not Found** -- Verify model name spelling -- Check provider availability -- Ensure API key has access to model - -**Rate Limiting** -- Implement retry logic -- Use different models for different tasks -- Monitor API quotas - -**High Costs** -- Review model usage patterns -- Switch to cheaper models for simple tasks -- Use free Ollama models where possible - -### Provider-Specific Issues - -**OpenAI** -- Rate limits: Upgrade to paid tier -- Model access: Check account tier -- Usage limits: Monitor dashboard - -**Anthropic** -- Beta access: Some models require approval -- Rate limits: Request increase if needed -- Region restrictions: Check availability - -**Google (Gemini)** -- Quota limits: Monitor usage -- Model availability: Some models are preview -- API key restrictions: Check project settings - -**Ollama** -- Model download: Ensure sufficient disk space -- Performance: Check hardware requirements -- Network: Verify base URL configuration - -### Performance Issues - -**Slow Responses** -- Use smaller models -- Reduce context size -- Consider local Ollama models - -**Poor Quality** -- Upgrade to premium models -- Improve prompting -- Use reasoning models for complex tasks - -**High Latency** -- Check network connectivity -- Use geographically closer providers -- Consider local Ollama deployment - -## Best Practices - -### 1. Model Selection - -**Match Models to Tasks** -- Simple chat: Mini models -- Complex analysis: Premium models -- Transformations: Efficient models -- Large documents: High-context models - -**Consider Cost vs. Quality** -- Use premium models only when necessary -- Free models for development and testing -- Monitor and optimize usage patterns - -### 2. Security & Privacy - -**Sensitive Data** -- Use local Ollama models -- Avoid sending sensitive content to cloud providers -- Consider on-premises deployment - -**API Key Management** -- Use environment variables -- Rotate keys regularly -- Monitor usage for anomalies - -### 3. Reliability - -**Fallback Strategies** -- Configure multiple providers -- Have backup models ready -- Implement retry logic - -**Testing** -- Test new models in playground -- Validate performance before deployment -- Monitor quality metrics - -### 4. 
Optimization - -**Performance Tuning** -- Profile model performance -- Optimize context size -- Use appropriate model for each task - -**Cost Management** -- Set up billing alerts -- Regular usage reviews -- Optimize model selection - -## Getting Help - -**Community Support** -- [Discord Server](https://discord.gg/37XJPXfz2w) - Get help from the community -- [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) - Report bugs and request features - -**Documentation** -- [User Guide](../user-guide/index.md) - Learn how to use Open Notebook -- [Getting Started](../getting-started/index.md) - Quick setup guide -- [Troubleshooting](../troubleshooting/index.md) - Solve common issues - -**Testing Your Setup** -- Use the Playground in Settings to test models -- Try different model combinations -- Monitor performance and costs - -This comprehensive guide should help you make informed decisions about AI models for your Open Notebook deployment. Start with a simple configuration and gradually optimize based on your specific needs and usage patterns. \ No newline at end of file diff --git a/docs/features/citations.md b/docs/features/citations.md deleted file mode 100644 index 7794f87e..00000000 --- a/docs/features/citations.md +++ /dev/null @@ -1,483 +0,0 @@ -# Citations & References - -Open Notebook's citation system ensures research integrity and transparency by providing accurate source attribution for all AI-generated insights. This comprehensive guide covers how citations work throughout the platform and how to leverage them for academic and research workflows. - -## Overview - -The citation system in Open Notebook is built around the principle of **transparent and verifiable research**. Every AI-generated response includes proper source attribution, allowing you to trace claims back to their original sources. This system supports various academic and professional workflows while maintaining the highest standards of research integrity. - -### Key Features - -- **Automatic Source Attribution**: AI responses automatically include citations to source materials -- **Clickable Citation Links**: Direct access to original source content -- **Context-Aware Citations**: Citations adapt based on the content included in AI context -- **Multiple Citation Formats**: Support for various citation styles and formats -- **Cross-Platform Integration**: Citations work across chat, search, and note-taking features -- **Export Compatibility**: Citations are preserved in exported content - -## How Citations Work in Open Notebook - -### Automatic Citation Generation - -Open Notebook automatically generates citations whenever AI models reference your source materials. The system: - -1. **Tracks Source Usage**: Monitors which sources are referenced in AI responses -2. **Extracts Specific References**: Identifies exact quotes, paraphrases, and concept references -3. **Generates Attribution**: Creates proper citations with source identification -4. 
**Maintains Context**: Preserves the relationship between claims and sources - -### Citation Components - -Each citation in Open Notebook includes: - -- **Source Identification**: Clear identification of the referenced document -- **Content Location**: Specific sections, pages, or chunks referenced -- **Link to Original**: Direct access to the full source material -- **Attribution Context**: How the source was used in the AI response - -### Citation Accuracy - -The system ensures citation accuracy through: - -- **Source Verification**: Cross-referencing claims against original content -- **Context Matching**: Ensuring citations match the actual content used -- **Quote Precision**: Accurate representation of direct quotes and paraphrases -- **Relationship Tracking**: Maintaining the connection between insights and sources - -## Using Citations in Chat - -### Understanding Chat Citations - -When you engage with the AI assistant in chat, citations appear automatically when sources are referenced: - -``` -AI: According to the research presented in "Machine Learning Fundamentals" [1], -neural networks require careful hyperparameter tuning for optimal performance. - -[1] Machine Learning Fundamentals - Section 3.2: Neural Network Training -``` - -### Requesting Better Citations - -You can improve citation quality by: - -**Asking for Specific References**: -- "Please provide the exact quote that supports this point" -- "Which page in the document contains this information?" -- "Can you cite the specific study mentioned?" - -**Requesting Citation Formats**: -- "Please include page numbers for all references" -- "Can you provide APA-style citations for these sources?" -- "Include direct quotes with proper attribution" - -### Verifying Citation Accuracy - -Always verify citations by: - -1. **Clicking Citation Links**: Access the original source content -2. **Cross-Checking Claims**: Compare AI statements with source material -3. **Context Verification**: Ensure citations are used appropriately -4. **Completeness Check**: Confirm important sources aren't missing - -### Best Practices for Chat Citations - -**For Research Conversations**: -- Ask for specific citations when making claims -- Request page numbers and section references -- Verify controversial or critical statements -- Save well-cited responses as notes - -**For Academic Work**: -- Request formal citation formats -- Ask for multiple supporting sources -- Verify quote accuracy and context -- Maintain bibliography tracking - -## Ask Feature and Citations - -### How Ask Feature Citations Work - -The Ask feature provides comprehensive citations through a multi-step process: - -1. **Query Strategy**: AI determines what information to search for -2. **Source Search**: Vector search identifies relevant content -3. **Individual Analysis**: Each source is analyzed separately -4. **Citation Generation**: Proper attribution is created for each source -5. 
**Final Synthesis**: All citations are compiled in the final answer - -### Citation Quality in Ask Responses - -Ask feature citations include: - -- **Comprehensive Source Coverage**: References to all relevant sources -- **Specific Content Attribution**: Page numbers and section references -- **Direct Quote Integration**: Properly attributed quotes and paraphrases -- **Source Link Access**: Direct links to original documents - -### Best Practices for Ask Citations - -**Question Formulation**: -- Ask specific questions that require citation -- Request evidence-based responses -- Specify citation format requirements -- Ask for supporting documentation - -**Result Verification**: -- Review all cited sources -- Verify quote accuracy -- Check for missing important sources -- Confirm citation relevance - -## Citation Formatting and Display - -### Display Formats - -Citations appear in various formats throughout Open Notebook: - -**Inline Citations**: -``` -The study demonstrates significant improvement [1] in performance metrics. -``` - -**Reference Lists**: -``` -References: -[1] Smith, J. (2023). "Performance Optimization in Machine Learning." - Journal of AI Research, 45(3), 123-145. -``` - -**Contextual Citations**: -``` -Source: "Machine Learning Fundamentals" - Chapter 3, Page 47 -"Neural networks require careful hyperparameter tuning..." -``` - -### Citation Styles - -Open Notebook supports multiple citation approaches: - -- **Numbered References**: [1], [2], [3] format -- **Author-Date**: (Smith, 2023) format -- **Footnote Style**: Superscript references -- **Custom Formats**: Configurable citation styles - -### Interactive Citation Features - -**Clickable Links**: -- Click any citation to view the original source -- Hover for quick preview of referenced content -- Direct navigation to specific sections - -**Citation Tooltips**: -- Hover over citations for source information -- Quick access to document metadata -- Preview of referenced content - -## Source Attribution and Accuracy - -### Source Tracking - -Open Notebook maintains detailed source attribution through: - -**Metadata Preservation**: -- Document titles and authors -- Creation and modification dates -- Source URLs and file locations -- Document type and format information - -**Content Mapping**: -- Specific sections and pages referenced -- Embedded chunk identification -- Context window tracking -- Quote and paraphrase location - -### Accuracy Verification - -The system ensures citation accuracy through: - -**Content Verification**: -- Cross-referencing AI claims with source material -- Verifying quote accuracy and context -- Checking for misattribution or misrepresentation -- Ensuring complete source coverage - -**Quality Assurance**: -- Regular citation accuracy checks -- Source link verification -- Content freshness monitoring -- Attribution completeness review - -### Attribution Standards - -Open Notebook follows research integrity standards: - -- **Complete Attribution**: All sources properly credited -- **Accurate Representation**: Faithful reproduction of source claims -- **Context Preservation**: Maintaining original meaning and intent -- **Transparency**: Clear indication of AI-generated vs. 
source content - -## Integration with Notes and Search - -### Citations in Notes - -When saving AI responses as notes, citations are preserved: - -**Note Citation Features**: -- Automatic citation preservation -- Source link maintenance -- Reference list generation -- Bibliography compilation - -**Citation Management in Notes**: -- Edit and format citations -- Add additional source information -- Organize citations by topic -- Create reference collections - -### Search Result Citations - -Search results include proper attribution: - -**Search Citation Elements**: -- Source identification -- Content location indicators -- Relevance scoring -- Direct source access - -**Citation in Search Results**: -- Highlighted relevant passages -- Source metadata display -- Link to full document -- Context preservation - -### Cross-Platform Citation Consistency - -Citations remain consistent across: - -- **Chat Conversations**: Proper attribution in AI responses -- **Search Results**: Source identification and linking -- **Note Collections**: Preserved citations in saved content -- **Export Formats**: Citation maintenance in output - -## Advanced Citation Features - -### Custom Citation Formats - -Create custom citation styles for specific needs: - -**Academic Formats**: -- APA, MLA, Chicago, Harvard styles -- Journal-specific formats -- Institution requirements -- Custom academic standards - -**Professional Formats**: -- Industry-specific citation styles -- Report and documentation formats -- Legal citation standards -- Technical documentation styles - -### Citation Analytics - -Track citation usage across your research: - -**Citation Metrics**: -- Most frequently cited sources -- Citation patterns and trends -- Source utilization analysis -- Research coverage gaps - -**Source Performance**: -- Citation frequency per source -- Content utilization rates -- Source effectiveness metrics -- Research impact assessment - -### Bulk Citation Management - -Manage citations across multiple documents: - -**Citation Operations**: -- Bulk citation format updates -- Source information synchronization -- Citation style consistency -- Reference list generation - -**Bibliography Management**: -- Automatic bibliography creation -- Citation deduplication -- Source organization -- Reference verification - -## Best Practices for Research Integrity - -### Academic Research Standards - -**Citation Requirements**: -- Cite all sources used in AI conversations -- Verify quote accuracy and context -- Include page numbers and specific references -- Maintain complete bibliography records - -**Plagiarism Prevention**: -- Always attribute AI-generated insights -- Distinguish between source content and AI analysis -- Verify all claims against original sources -- Maintain transparent research practices - -### Professional Research Practices - -**Documentation Standards**: -- Maintain detailed citation records -- Document AI assistance in research -- Preserve source accessibility -- Ensure citation completeness - -**Quality Assurance**: -- Regular citation accuracy checks -- Source verification procedures -- Peer review of citation practices -- Continuous improvement processes - -### Collaboration and Sharing - -**Team Research**: -- Share citation standards across teams -- Maintain consistent citation practices -- Collaborate on source verification -- Establish citation quality protocols - -**Knowledge Sharing**: -- Document citation best practices -- Share effective citation strategies -- Contribute to citation improvement -- Maintain 
community standards - -## Export Options with Citations - -### Citation Preservation in Exports - -All export formats maintain citation integrity: - -**Export Formats**: -- Markdown with citation links -- PDF with clickable references -- HTML with interactive citations -- Plain text with reference lists - -**Citation Elements Preserved**: -- Source attribution -- Reference links -- Bibliography information -- Citation formatting - -### Export Best Practices - -**Before Exporting**: -- Verify all citations are accurate -- Check source link functionality -- Ensure bibliography completeness -- Review citation formatting - -**Export Configuration**: -- Choose appropriate citation format -- Configure link behavior -- Set bibliography preferences -- Optimize for target audience - -### Integration with External Tools - -**Citation Managers**: -- Export citations to Zotero, Mendeley -- BibTeX and EndNote compatibility -- Reference manager integration -- Citation synchronization - -**Document Platforms**: -- Word processor integration -- LaTeX citation support -- Academic publishing formats -- Collaboration tool compatibility - -## Troubleshooting Citation Issues - -### Common Citation Problems - -**Missing Citations**: -- Check source context configuration -- Verify AI model has access to sources -- Ensure sources are properly processed -- Review context inclusion settings - -**Incorrect Citations**: -- Verify source content accuracy -- Check for processing errors -- Review AI model interpretation -- Validate citation formatting - -**Broken Citation Links**: -- Verify source accessibility -- Check for moved or deleted files -- Update source URLs -- Refresh source processing - -### Citation Quality Improvement - -**Enhancing Citation Accuracy**: -- Provide specific context to AI -- Request detailed source references -- Verify claims against sources -- Ask for supporting evidence - -**Improving Citation Completeness**: -- Include all relevant sources in context -- Request comprehensive source coverage -- Ask for missing source identification -- Verify citation thoroughness - -## Future Enhancements - -### Planned Citation Features - -**Enhanced Citation Formats**: -- Additional academic citation styles -- Custom format creation tools -- Citation style templates -- Format validation tools - -**Advanced Attribution**: -- Granular content attribution -- Multi-source synthesis tracking -- Citation relationship mapping -- Source influence analysis - -**Integration Improvements**: -- Enhanced export capabilities -- Better citation manager integration -- Improved collaboration features -- Advanced citation analytics - -### Community Contributions - -**User Feedback**: -- Citation accuracy reporting -- Format suggestion system -- Best practice sharing -- Feature request channels - -**Collaborative Development**: -- Citation standard contributions -- Format template sharing -- Quality improvement initiatives -- Community citation guidelines - -## Conclusion - -Open Notebook's citation system provides a robust foundation for maintaining research integrity across all your knowledge work. By automatically generating accurate citations, providing transparent source attribution, and maintaining citation quality across all features, the system supports both academic and professional research workflows. - -The key to effective citation use in Open Notebook is understanding how citations flow through the system - from source processing through AI analysis to final export. 
By following best practices for citation verification, maintaining source quality, and leveraging the system's advanced features, you can ensure that your research maintains the highest standards of academic and professional integrity. - -Remember that citations in Open Notebook are not just reference links - they are the foundation of transparent, verifiable, and trustworthy research. Use them wisely to build upon existing knowledge while maintaining complete attribution to original sources. - -Whether you're conducting academic research, professional analysis, or collaborative knowledge building, Open Notebook's citation system provides the tools you need to maintain research integrity while leveraging the power of AI-assisted analysis. \ No newline at end of file diff --git a/docs/features/context-management.md b/docs/features/context-management.md deleted file mode 100644 index 957117cd..00000000 --- a/docs/features/context-management.md +++ /dev/null @@ -1,419 +0,0 @@ -# Context Management: Your Data, Your Control - -Open Notebook's context management system is a revolutionary feature that gives you **granular control** over what information gets shared with AI models. Unlike traditional research tools that send all your data to AI providers, Open Notebook empowers you to make precise decisions about context sharing, balancing functionality with privacy and cost control. - -## Table of Contents - -1. [Understanding Context Levels](#understanding-context-levels) -2. [Context Configuration Strategies](#context-configuration-strategies) -3. [Privacy and Data Control](#privacy-and-data-control) -4. [Performance Optimization](#performance-optimization) -5. [AI Model Integration and Cost Management](#ai-model-integration-and-cost-management) -6. [Advanced Context Features](#advanced-context-features) -7. [Best Practices](#best-practices) - -## Understanding Context Levels - -Open Notebook provides three distinct context levels, each designed for different use cases and privacy requirements: - -### 🚫 Not in Context -**"⛔ not in context"** - -- **What it does**: Completely excludes the source or note from AI interactions -- **Data sharing**: Zero information sent to AI providers -- **Use cases**: - - Highly sensitive or confidential documents - - Personal notes you don't want AI to access - - Reference materials that don't need AI analysis - - Large files that would consume excessive tokens - -**Example scenario**: You've uploaded a confidential contract for reference but don't want any AI model to process its contents. - -### 🟡 Summary Only (Sources) -**"🟡 insights" - Available for sources only** - -- **What it does**: Shares only AI-generated insights and summaries, never the full document text -- **Data sharing**: Processed summaries, key points, and transformations -- **Use cases**: - - Balancing functionality with privacy - - Reducing token consumption while maintaining usefulness - - Large documents where full text isn't necessary - - Cost-effective AI interactions - -**Example scenario**: You have a 50-page research paper where you only need the AI to understand the key findings and conclusions, not every detail. 
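In code terms, each level simply controls what (if anything) a source contributes to the AI prompt - including the 🟢 Full Content level described next. A minimal sketch, with illustrative field names like `summary` and `full_text` that are assumptions rather than Open Notebook's actual schema:

```python
from typing import Literal, Optional

ContextLevel = Literal["not_in_context", "insights", "full_content"]

def context_payload(source: dict, level: ContextLevel) -> Optional[str]:
    """What a given context level contributes to the AI prompt (sketch)."""
    if level == "not_in_context":
        return None                      # nothing is sent to the AI provider
    if level == "insights":
        return source.get("summary")     # AI-generated insights only
    return source.get("full_text")       # the complete document text
```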
- -### 🟢 Full Content -**"🟢 full content"** - -- **What it does**: Provides complete access to the source text or note content -- **Data sharing**: Entire document or note content sent to AI models -- **Use cases**: - - Documents requiring detailed analysis - - Short documents where full context is needed - - Sources requiring precise citation and quotation - - Interactive research where AI needs complete information - -**Example scenario**: You're analyzing a specific methodology section and need the AI to reference exact procedures and technical details. - -## Context Configuration Strategies - -### Research-Focused Strategy - -**Best for**: Academic research, detailed analysis, comprehensive understanding - -``` -Sources: -- Primary research papers: 🟢 Full Content -- Background materials: 🟡 Summary Only -- Reference documents: 🚫 Not in Context -- Personal notes: 🟢 Full Content -``` - -**Benefits**: -- Deep AI understanding of key materials -- Cost-effective use of background information -- Protection of sensitive reference materials -- Complete access to personal insights - -### Privacy-First Strategy - -**Best for**: Sensitive research, confidential documents, personal projects - -``` -Sources: -- Sensitive documents: 🚫 Not in Context -- Public materials: 🟡 Summary Only -- Specific analysis targets: 🟢 Full Content (selectively) -- Personal notes: 🚫 Not in Context -``` - -**Benefits**: -- Maximum privacy protection -- Selective AI engagement -- Reduced data exposure -- Control over sensitive information - -### Cost-Optimization Strategy - -**Best for**: Budget-conscious users, large document collections, token management - -``` -Sources: -- Large documents: 🟡 Summary Only -- Critical materials: 🟢 Full Content (limited) -- Reference materials: 🚫 Not in Context -- Generated insights: 🟢 Full Content -``` - -**Benefits**: -- Minimized token consumption -- Focused AI spending -- Efficient resource utilization -- Strategic information sharing - -### Collaborative Strategy - -**Best for**: Team research, shared projects, knowledge management - -``` -Sources: -- Shared documents: 🟡 Summary Only -- Team notes: 🟢 Full Content -- External references: 🚫 Not in Context -- Project materials: 🟢 Full Content -``` - -**Benefits**: -- Balanced privacy and collaboration -- Standardized information sharing -- Controlled team access -- Efficient knowledge transfer - -## Privacy and Data Control - -### Data Sovereignty - -Open Notebook's context management ensures **complete data sovereignty**: - -- **Local Processing**: All context filtering happens on your infrastructure -- **Selective Sharing**: Only specifically authorized content reaches AI providers -- **Audit Trail**: Full transparency about what information is shared -- **Reversible Decisions**: Context levels can be changed at any time - -### Privacy Compliance - -The system supports various privacy frameworks: - -**GDPR Compliance**: -- Data minimization through context level selection -- User consent for each information sharing decision -- Right to be forgotten through context exclusion -- Transparent data processing practices - -**HIPAA Considerations**: -- Medical documents can be excluded from AI processing -- Summary-only access for research purposes -- Full control over patient information sharing -- Audit trails for compliance reporting - -**Corporate Security**: -- Proprietary information protection -- Selective competitive intelligence sharing -- Confidential document isolation -- Controlled IP exposure - -### Dynamic Privacy Controls - 
-Context levels can be adjusted in real-time: - -1. **Per-Conversation**: Change context for specific AI interactions -2. **Per-Source**: Individual control over each document or note -3. **Per-Project**: Notebook-level privacy settings -4. **Per-Provider**: Different context levels for different AI models - -## Performance Optimization - -### Token Management - -Context levels directly impact token consumption: - -**Token Usage by Context Level**: -- **Not in Context**: 0 tokens consumed -- **Summary Only**: 10-20% of full document tokens -- **Full Content**: 100% of document tokens - -**Optimization Strategies**: -- Use summary context for background materials -- Reserve full content for critical analysis -- Exclude large reference documents -- Monitor token usage through built-in counters - -### Processing Speed - -Context management affects response times: - -**Performance Characteristics**: -- **Summary Context**: Faster processing, smaller payloads -- **Full Content**: Slower processing, larger payloads -- **Mixed Strategy**: Balanced performance and functionality - -**Speed Optimization Tips**: -- Start with summary context for exploration -- Switch to full content for detailed analysis -- Use context exclusion for irrelevant materials -- Cache frequently accessed summaries - -### Memory Management - -Context levels help manage system resources: - -**Memory Usage**: -- **Context Exclusion**: Reduces memory footprint -- **Summary Processing**: Efficient memory utilization -- **Full Content**: Higher memory requirements - -**Resource Optimization**: -- Use selective context for large document collections -- Implement context rotation for different research phases -- Monitor system performance metrics -- Archive unused context materials - -## AI Model Integration and Cost Management - -### Provider-Specific Considerations - -Different AI providers have varying cost structures: - -**OpenAI GPT Models**: -- Input tokens: $0.01-$0.06 per 1K tokens -- Output tokens: $0.03-$0.12 per 1K tokens -- **Strategy**: Use summary context for exploration, full content for analysis - -**Anthropic Claude**: -- Input tokens: $0.003-$0.015 per 1K tokens -- Output tokens: $0.015-$0.075 per 1K tokens -- **Strategy**: Leverage higher context windows with selective full content - -**Google Gemini**: -- Input tokens: $0.001-$0.0075 per 1K tokens -- Output tokens: $0.002-$0.03 per 1K tokens -- **Strategy**: Cost-effective for larger context, good for mixed strategies - -**Local Models (Ollama)**: -- No per-token costs -- **Strategy**: Use full content freely, optimize for quality - -### Cost Calculation Tools - -Open Notebook provides built-in cost estimation: - -```python -# Example cost calculation (pseudocode; a runnable sketch follows below) -total_tokens = context_response.total_tokens -estimated_cost = calculate_cost(total_tokens, model_provider, model_name) -``` 
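A self-contained variant of that helper might look like the sketch below. It splits input and output tokens, since providers price them differently, and the per-1K rates simply mirror the example figures listed above - treat both the numbers and the function as illustrative, not live pricing or Open Notebook's actual helper.

```python
# Illustrative token-cost estimator; rates are examples, not current prices
PRICING_PER_1K = {
    ("openai", "gpt-4o"): (0.01, 0.03),          # (input USD, output USD) per 1K tokens
    ("anthropic", "claude-3-5-sonnet"): (0.003, 0.015),
    ("google", "gemini-2.0-flash"): (0.001, 0.002),
    ("ollama", "qwen3"): (0.0, 0.0),             # local models: no per-token cost
}

def calculate_cost(input_tokens: int, output_tokens: int, provider: str, model: str) -> float:
    in_rate, out_rate = PRICING_PER_1K[(provider, model)]
    return (input_tokens / 1000) * in_rate + (output_tokens / 1000) * out_rate

print(f"${calculate_cost(12_000, 1_500, 'openai', 'gpt-4o'):.4f}")  # $0.1650
```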
**Cost Monitoring Features**: -- Real-time token counting -- Per-conversation cost tracking -- Model comparison tools -- Budget alerts and limits - -### Multi-Model Strategies - -Leverage different models for different context levels: - -**Tiered Approach**: -- **Summary Generation**: Use cost-effective models (Gemini, local) -- **Analysis**: Use high-quality models (Claude, GPT-4) -- **Citations**: Use precise models (GPT-4, Claude) -- **Exploration**: Use free local models (Ollama) - -## Advanced Context Features - -### Contextual Transformations - -Apply different transformations based on context level: - -**Summary-Level Transformations**: -- Automated summaries -- Key point extraction -- Topic identification -- Sentiment analysis - -**Full-Content Transformations**: -- Detailed analysis -- Citation generation -- Methodology extraction -- Critical evaluation - -### Dynamic Context Adjustment - -Context levels can be modified during conversations: - -1. **Progressive Disclosure**: Start with summaries, expand to full content -2. **Focus Shifting**: Change context based on conversation direction -3. **Privacy Escalation**: Reduce context when discussing sensitive topics -4. **Performance Tuning**: Adjust context based on response quality - -### Context Inheritance - -New sources can inherit context settings: - -**Inheritance Patterns**: -- **Notebook Defaults**: New sources adopt notebook-level settings -- **Source Type**: Different defaults for PDFs, web links, notes -- **User Preferences**: Personal default context strategies -- **Project Templates**: Standardized context configurations - -### Context Metadata - -Each context decision includes metadata: - -**Tracking Information**: -- Context level selection timestamp -- Reasoning for context decision -- Token consumption estimates -- Privacy impact assessment - -## Best Practices - -### Getting Started - -**Initial Configuration**: -1. **Start Conservative**: Begin with summary-only context -2. **Test Gradually**: Experiment with full content on small documents -3. **Monitor Costs**: Track token usage and adjust accordingly -4. **Establish Patterns**: Develop consistent context strategies - -### Ongoing Management - -**Regular Review**: -- **Weekly**: Review context decisions for active projects -- **Monthly**: Analyze token usage and cost effectiveness -- **Quarterly**: Evaluate privacy and security practices -- **Annually**: Update context strategies based on workflow changes - -### Workflow Integration - -**Research Phases**: -1. **Discovery**: Use summary context for broad exploration -2. **Analysis**: Switch to full content for detailed examination -3. **Synthesis**: Mix context levels based on importance -4. **Communication**: Use full content for accurate citations - -### Team Collaboration - -**Shared Standards**: -- **Naming Conventions**: Clear context level indicators -- **Documentation**: Explain context decisions to team members -- **Templates**: Standardized context configurations -- **Training**: Educate team on context management benefits - -### Security Considerations - -**Regular Audits**: -- Review context sharing decisions -- Verify privacy compliance -- Monitor unauthorized access -- Update security policies - -**Incident Response**: -- Procedures for context exposure -- Rollback strategies for privacy breaches -- Communication protocols for data incidents -- Recovery procedures for compromised context - -### Performance Monitoring - -**Key Metrics**: -- **Token Usage**: Track consumption by context level -- **Response Quality**: Measure AI performance by context type -- **Cost Efficiency**: Calculate cost per insight generated -- **User Satisfaction**: Monitor workflow effectiveness - -**Optimization Cycles**: -1. **Measure**: Collect performance data -2. **Analyze**: Identify optimization opportunities -3. **Adjust**: Modify context strategies -4. 
**Validate**: Confirm improvement results - -### Troubleshooting Common Issues - -**Poor AI Responses**: -- **Problem**: AI lacks necessary context -- **Solution**: Increase context level for key sources -- **Prevention**: Review context decisions before important queries - -**High Token Costs**: -- **Problem**: Excessive full content usage -- **Solution**: Switch to summary context for background materials -- **Prevention**: Implement cost monitoring and alerts - -**Privacy Concerns**: -- **Problem**: Too much information shared with AI -- **Solution**: Reduce context levels for sensitive materials -- **Prevention**: Regular privacy audits and policy updates - -**Performance Issues**: -- **Problem**: Slow AI responses -- **Solution**: Optimize context selection and document sizes -- **Prevention**: Monitor system resources and adjust context accordingly - -## Conclusion - -Open Notebook's context management system represents a paradigm shift in AI-powered research tools. By providing granular control over information sharing, it empowers users to: - -- **Maintain Privacy**: Share only what's necessary with AI providers -- **Control Costs**: Optimize token usage and AI spending -- **Enhance Security**: Protect sensitive information from exposure -- **Improve Performance**: Balance functionality with system resources -- **Enable Compliance**: Meet organizational and regulatory requirements - -The key to success with context management is understanding that it's not just a feature—it's a fundamental approach to responsible AI integration. By thoughtfully configuring context levels, monitoring their impact, and continuously optimizing your strategy, you can achieve the perfect balance between AI-powered insights and data protection. - -**Remember**: With great power comes great responsibility. Use Open Notebook's context management system to build research workflows that are not only powerful and efficient but also secure and compliant with your privacy requirements. - ---- - -*For more information about Open Notebook's features, visit our [documentation](../user-guide/index.md) or join our [community](https://discord.gg/37XJPXfz2w).* \ No newline at end of file diff --git a/docs/features/index.md b/docs/features/index.md deleted file mode 100644 index 79f4479e..00000000 --- a/docs/features/index.md +++ /dev/null @@ -1,132 +0,0 @@ -# Features - -Open Notebook offers powerful features that set it apart from other AI research tools. This section provides deep dives into each capability, helping you master the advanced functionality that makes Open Notebook unique. - -## 🤖 AI & Model Integration - -### 🧠 **[AI Models](ai-models.md)** -Complete guide to Open Notebook's multi-model AI support. -- 15+ supported providers (OpenAI, Anthropic, Google, Ollama, and more) -- Model selection strategies for cost and performance -- Provider-specific setup and configuration -- Advanced model switching and management -- Cost optimization techniques - -### 🎛️ **[Context Management](context-management.md)** -Master Open Notebook's granular context control system. -- Three context levels: Not in Context, Summary Only, Full Content -- Privacy-first configuration strategies -- Performance optimization through context management -- Integration with AI models for cost control -- Advanced context features and automation - -### 🔧 **[OpenAI-Compatible Providers](openai-compatible.md)** -Use any OpenAI-compatible endpoint with Open Notebook. 
-- LM Studio, Text Generation WebUI, vLLM support -- Mode-specific configuration for different capabilities -- Docker networking and remote server setup -- Comprehensive troubleshooting and best practices -- Works with local and cloud endpoints - -### 🎙️ **[Local Text-to-Speech](local_tts.md)** -Run text-to-speech completely locally using OpenAI-compatible TTS servers. -- Zero ongoing costs after setup -- Complete privacy - audio never leaves your machine -- Multiple voice options and models -- Perfect for podcast generation -- Various local TTS solutions available - -### 🦙 **[Ollama Setup](ollama.md)** -Configure local language models and embeddings with Ollama. -- Free, privacy-focused AI models -- Network configuration and Docker integration -- Model recommendations and optimization -- Troubleshooting and best practices - -## 🔧 Content Processing - -### ⚡ **[Transformations](transformations.md)** -Leverage Open Notebook's powerful content transformation system. -- Built-in transformation types and examples -- Custom transformation creation guide -- Batch processing capabilities -- Integration with notebooks and sources -- Performance considerations and optimization - -### 📝 **[Citations](citations.md)** -Maintain research integrity with comprehensive citation support. -- Automatic citation generation and formatting -- Source attribution and accuracy verification -- Integration with chat and notes -- Export options with citation preservation -- Best practices for academic research - -## 🎵 Advanced Features - -### 🎙️ **[Podcasts](podcasts.md)** -Create professional multi-speaker podcasts from your research. -- Advanced 1-4 speaker system (vs Google Notebook LM's 2-speaker limit) -- Episode profiles and speaker configuration -- Background processing and queue management -- Audio quality settings and customization -- Export and sharing capabilities - ---- - -## Feature Comparison - -| Feature | Open Notebook | Google Notebook LM | Advantage | -|---------|---------------|-------------------|-----------| -| **AI Providers** | 15+ providers | Google only | Complete flexibility | -| **Context Control** | 3 granular levels | All-or-nothing | Privacy & performance | -| **Podcast Speakers** | 1-4 speakers | 2 speakers only | Professional quality | -| **Transformations** | Custom & built-in | Limited | Unlimited processing | -| **Citations** | Comprehensive | Basic | Research integrity | -| **Privacy** | Self-hosted | Cloud-only | Complete control | - -## Integration Patterns - -### Research Workflow -**Sources** → **Transformations** → **Context Management** → **AI Models** → **Citations** - -### Content Creation -**Sources** → **AI Models** → **Transformations** → **Podcasts** → **Export** - -### Team Collaboration -**Context Management** → **Citations** → **Transformations** → **Sharing** - -## Best Practices - -### Getting Started -1. **Start with AI Models** - Configure your preferred providers -2. **Master Context Management** - Understand privacy and performance trade-offs -3. **Explore Transformations** - Automate common research tasks -4. 
**Try Podcasts** - Convert research into accessible audio content - -### Advanced Usage -- **Combine transformations** for complex processing workflows -- **Use context management** strategically for different research phases -- **Leverage citations** for academic and professional credibility -- **Create custom episode profiles** for consistent podcast quality - -### Performance Optimization -- **Context management** reduces token usage and costs -- **Batch transformations** for efficiency -- **Model selection** based on task requirements -- **Background processing** for time-intensive tasks - -## Next Steps - -- **[User Guide](../user-guide/index.md)** - Learn the basics of using these features -- **[Deployment](../deployment/index.md)** - Set up these features in production -- **[Development](../development/index.md)** - Customize and extend functionality - -## Need Help? - -- 💬 **[Discord Community](https://discord.gg/37XJPXfz2w)** - Get help with advanced features -- 🐛 **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Report feature requests -- 📖 **[Troubleshooting](../troubleshooting/index.md)** - Common feature issues - ---- - -*These features represent Open Notebook's core differentiators. Each one is designed to give you more control, better performance, and superior results compared to other AI research tools.* \ No newline at end of file diff --git a/docs/features/local_tts.md b/docs/features/local_tts.md deleted file mode 100644 index 62c5e541..00000000 --- a/docs/features/local_tts.md +++ /dev/null @@ -1,668 +0,0 @@ -# Local Text-to-Speech Setup - -Learn how to run text-to-speech models completely locally using OpenAI-compatible TTS servers, giving you full privacy control and zero ongoing costs for podcast and audio generation. - -This guide uses **Speaches** as an example implementation, but the principles apply to any OpenAI-compatible TTS server. - -## Why Local Text-to-Speech? - -Running text-to-speech locally offers significant advantages: - -- **🔒 Complete Privacy**: Your content never leaves your machine -- **💰 Zero Ongoing Costs**: No per-character or per-minute charges -- **⚡ No Rate Limits**: Generate unlimited audio without restrictions -- **🌐 Offline Capability**: Works without internet connection -- **🎯 Full Control**: Choose and customize your voice models -- **📈 Predictable Costs**: One-time setup, no surprises - -## Available Local TTS Solutions - -Open Notebook supports any OpenAI-compatible text-to-speech server. This guide uses **Speaches** as an example because it's: - -- Open-source and actively maintained -- Easy to set up with Docker -- Compatible with OpenAI's TTS API specification -- Supports multiple high-quality voice models - -### About Speaches - -[Speaches](https://github.com/speaches-ai/speaches) is an open-source, OpenAI-compatible text-to-speech server that runs locally on your machine. It provides: - -- **OpenAI API Compatibility**: Works seamlessly with Open Notebook's OpenAI-compatible provider -- **High-Quality Voices**: Support for multiple neural TTS models -- **Easy Model Management**: Simple CLI for downloading and managing voice models -- **Docker Support**: Run in containers for easy deployment -- **Multiple Voice Options**: Various voices and languages available -- **Customizable Speed**: Adjust speech rate to your preference - -> **Note**: If you're using a different OpenAI-compatible TTS server, the configuration steps will be similar - just adjust the endpoints and model names accordingly. 
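-
-To make the compatibility requirement concrete: the only TTS surface Open Notebook needs is the `/v1/audio/speech` endpoint. The sketch below shows the request shape in Python; it assumes the `requests` package is installed, and the port and model id are the Speaches defaults used later in this guide, so substitute your own server's values as needed:
-
-```python
-import requests
-
-# POST to the OpenAI-style speech endpoint; adjust the host/port for your server
-response = requests.post(
-    "http://localhost:8969/v1/audio/speech",
-    json={
-        "input": "Hello from a local text-to-speech server.",
-        "model": "speaches-ai/Kokoro-82M-v1.0-ONNX",  # your server's model id
-        "voice": "af_bella",  # any voice your server exposes
-        "speed": 1.0,
-    },
-    timeout=60,
-)
-response.raise_for_status()
-
-# The endpoint returns raw audio bytes (MP3 by default)
-with open("hello.mp3", "wb") as f:
-    f.write(response.content)
-```
-
-If this request succeeds against your server, Open Notebook's `openai_compatible` provider should work with it as well.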
- -## Quick Start with Speaches - -This section demonstrates setup using Speaches as an example. If you're using a different local TTS solution, adapt the steps accordingly. - -### Prerequisites - -- **Docker** installed on your system -- At least **2GB RAM** available -- **5GB disk space** for models - -### Basic Setup - -The fastest way to get started is using our example setup: - -**1. Create a project directory:** -```bash -mkdir speaches-setup -cd speaches-setup -``` - -**2. Create a `docker-compose.yml` file:** -```yaml -services: - speaches: - image: ghcr.io/speaches-ai/speaches:latest-cpu - container_name: speaches - ports: - - "8969:8000" - volumes: - - hf-hub-cache:/home/ubuntu/.cache/huggingface/hub - restart: unless-stopped - -volumes: - hf-hub-cache: -``` - -**3. Start the Speaches server:** -```bash -docker compose up -d -``` - -**4. Download a TTS model:** -```bash -# Wait a few seconds for the container to start -sleep 10 - -# Download the recommended Kokoro model -docker compose exec speaches uv tool run speaches-cli model download speaches-ai/Kokoro-82M-v1.0-ONNX -``` - -**5. Test the setup:** -```bash -curl "http://localhost:8969/v1/audio/speech" -s -H "Content-Type: application/json" \ - --output test.mp3 \ - --data '{ - "input": "Hello! This is a test of local text to speech.", - "model": "speaches-ai/Kokoro-82M-v1.0-ONNX", - "voice": "af_bella", - "speed": 1.0 - }' -``` - -If successful, you'll have a `test.mp3` file with the generated speech! - -### Configure Open Notebook - -Now that Speaches is running, configure Open Notebook to use it: - -**1. Set the environment variable:** - -For Docker deployments: -```bash -docker run -d \ - --name open-notebook \ - -p 8502:8502 -p 5055:5055 \ - -v ./notebook_data:/app/data \ - -v ./surreal_data:/mydata \ - -e OPENAI_COMPATIBLE_BASE_URL_TTS=http://host.docker.internal:8969/v1 \ - lfnovo/open_notebook:v1-latest-single -``` - -For local development: -```bash -export OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8969/v1 -``` - -**2. Add the model in Open Notebook:** - -1. Go to **Settings** → **Models** page -2. Click **Add Model** in the Text-to-Speech section -3. Configure the model: - - **Provider**: `openai_compatible` - - **Model Name**: `speaches-ai/Kokoro-82M-v1.0-ONNX` - - **Display Name**: `Kokoro Local TTS` (or your preference) -4. Click **Save** - -**3. Set as default (optional):** -- In Settings, set this model as your default Text-to-Speech model -- Now all podcast generation will use your local TTS - -## Available Voice Models - -Speaches supports various TTS models from Hugging Face. Here are some recommended options: - -### Kokoro (Recommended) -- **Model ID**: `speaches-ai/Kokoro-82M-v1.0-ONNX` -- **Size**: ~500MB -- **Quality**: High -- **Speed**: Fast -- **Languages**: English -- **Voices**: `af_bella`, `af_sarah`, `am_adam`, `am_michael`, and more - -### Other Models -You can use any compatible ONNX TTS model from Hugging Face. Check the [Speaches documentation](https://github.com/speaches-ai/speaches) for a complete list. 
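-
-Because the server implements the OpenAI API, you can also drive it from the official `openai` Python SDK instead of curl. This is a minimal sketch assuming the `openai` package (v1 or later) is installed; the base URL and model id match the setup above, and the API key is a dummy value, since Speaches ignores it but the SDK requires one:
-
-```python
-from openai import OpenAI
-
-# Point the SDK at the local Speaches server instead of api.openai.com
-client = OpenAI(base_url="http://localhost:8969/v1", api_key="not-needed")
-
-speech = client.audio.speech.create(
-    model="speaches-ai/Kokoro-82M-v1.0-ONNX",
-    voice="af_bella",
-    input="Testing local text to speech through the OpenAI SDK.",
-)
-
-# The response wraps raw MP3 bytes; write them to disk
-with open("sdk-test.mp3", "wb") as f:
-    f.write(speech.content)
-```
-
-If this script produces audio, the endpoint is ready for the configuration steps above.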
- -## Available Voices - -The Kokoro model includes multiple voices with different characteristics: - -**Female Voices:** -- `af_bella` - Clear, professional -- `af_sarah` - Warm, friendly -- `af_nicole` - Energetic, expressive -- `bf_emma` - British accent, professional - -**Male Voices:** -- `am_adam` - Deep, authoritative -- `am_michael` - Friendly, conversational -- `bm_george` - British accent, formal - -**Testing Voices:** -```bash -# Try different voices to find your favorite -for voice in af_bella af_sarah am_adam am_michael; do - curl "http://localhost:8969/v1/audio/speech" -s \ - -H "Content-Type: application/json" \ - --output "test_${voice}.mp3" \ - --data "{ - \"input\": \"Hello! This is a test of the ${voice} voice.\", - \"model\": \"speaches-ai/Kokoro-82M-v1.0-ONNX\", - \"voice\": \"${voice}\" - }" -done -``` - -## Advanced Configuration - -### GPU Acceleration - -For faster processing with NVIDIA GPUs: - -```yaml -services: - speaches: - image: ghcr.io/speaches-ai/speaches:latest-cuda # GPU-enabled image - container_name: speaches - ports: - - "8969:8000" - volumes: - - hf-hub-cache:/home/ubuntu/.cache/huggingface/hub - restart: unless-stopped - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [gpu] - -volumes: - hf-hub-cache: -``` - -### Custom Port - -If port 8969 is already in use, change it in docker-compose.yml: - -```yaml -ports: - - "9000:8000" # Use port 9000 instead -``` - -Then update your environment variable: -```bash -export OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:9000/v1 -``` - -### Multiple Models - -Download and use multiple models for different purposes: - -```bash -# Download additional models -docker compose exec speaches uv tool run speaches-cli model download model-name-1 -docker compose exec speaches uv tool run speaches-cli model download model-name-2 - -# List downloaded models -docker compose exec speaches uv tool run speaches-cli model list -``` - -In Open Notebook, add each model separately and choose which to use for different podcasts. - -## Network Configuration - -### Docker Networking - -When Open Notebook runs in Docker and needs to reach Speaches: - -**On macOS/Windows:** -```bash -export OPENAI_COMPATIBLE_BASE_URL_TTS=http://host.docker.internal:8969/v1 -``` - -**On Linux:** -```bash -# Option 1: Use Docker bridge IP -export OPENAI_COMPATIBLE_BASE_URL_TTS=http://172.17.0.1:8969/v1 - -# Option 2: Use host networking -docker run --network host ... -``` - -### Remote Speaches Server - -Run Speaches on a different machine for distributed processing: - -```bash -# On the server machine -docker compose up -d - -# Allow external connections (be careful with firewall settings) -# Update docker-compose.yml to bind to 0.0.0.0:8969 -``` - -Then configure Open Notebook: -```bash -export OPENAI_COMPATIBLE_BASE_URL_TTS=http://server-ip:8969/v1 -``` - -**Security Warning:** Only expose Speaches on trusted networks or use proper authentication/firewall rules. - -## Podcast Generation - -### Creating Podcasts with Local TTS - -Once configured, use Speaches for podcast generation: - -1. **Go to Podcasts page** in Open Notebook -2. **Create or edit an Episode Profile** -3. **Configure speakers:** - - For each speaker, select your Speaches model - - Choose different voices (e.g., `af_bella` for host, `am_adam` for guest) -4. **Generate podcast** -5. 
**Audio is generated locally** using your Speaches server - -### Multi-Speaker Setup - -Create natural-sounding conversations with different voices: - -``` -Speaker 1 (Host): -- Model: speaches-ai/Kokoro-82M-v1.0-ONNX -- Voice: af_bella - -Speaker 2 (Guest): -- Model: speaches-ai/Kokoro-82M-v1.0-ONNX -- Voice: am_adam - -Speaker 3 (Narrator): -- Model: speaches-ai/Kokoro-82M-v1.0-ONNX -- Voice: bf_emma -``` - -## Performance Optimization - -### CPU Performance - -**Recommended Specs:** -- 4+ CPU cores -- 4GB+ RAM -- SSD storage - -**Tips:** -- Close unnecessary applications -- Use quantized models when available -- Adjust speech speed for faster generation - -### Memory Management - -Monitor Docker memory usage: -```bash -docker stats speaches -``` - -Allocate more memory if needed: -```yaml -services: - speaches: - # ... other config ... - mem_limit: 4g # Adjust based on your system -``` - -### Batch Processing - -For generating multiple audio files, Speaches handles concurrent requests efficiently. Open Notebook automatically manages this during podcast generation. - -## Troubleshooting - -### Service Won't Start - -**Symptom:** Container exits immediately - -**Solutions:** -```bash -# Check logs -docker compose logs speaches - -# Verify Docker is running -docker ps - -# Check port availability -lsof -i :8969 # macOS/Linux -netstat -ano | findstr :8969 # Windows -``` - ---- - -### Connection Refused - -**Symptom:** Open Notebook can't reach Speaches - -**Solutions:** -1. **Verify Speaches is running:** - ```bash - curl http://localhost:8969/v1/models - ``` - -2. **Check Docker networking:** - - Use `host.docker.internal` instead of `localhost` when Open Notebook is in Docker - - Verify firewall settings - -3. **Test from inside Open Notebook container:** - ```bash - docker exec -it open-notebook curl http://host.docker.internal:8969/v1/models - ``` - ---- - -### Model Not Found - -**Symptom:** Error about missing model during generation - -**Solutions:** -1. **Verify model is downloaded:** - ```bash - docker compose exec speaches uv tool run speaches-cli model list - ``` - -2. **Download the model:** - ```bash - docker compose exec speaches uv tool run speaches-cli model download speaches-ai/Kokoro-82M-v1.0-ONNX - ``` - -3. **Check model name matches** what you configured in Open Notebook - ---- - -### Poor Audio Quality - -**Symptom:** Generated speech sounds robotic or unclear - -**Solutions:** -- Try different voices -- Adjust speech speed (1.0 is normal, try 0.9-1.2) -- Use higher-quality models if available -- Check that model downloaded completely - ---- - -### Slow Generation - -**Symptom:** Audio generation takes a long time - -**Solutions:** -- **Enable GPU acceleration** if you have an NVIDIA GPU -- **Use faster models** (smaller models = faster generation) -- **Adjust speech speed** to 1.5-2.0 for quicker output -- **Allocate more CPU cores** in Docker settings -- **Use SSD storage** instead of HDD - ---- - -### Out of Memory - -**Symptom:** Container crashes or system freezes - -**Solutions:** -1. **Increase Docker memory limit:** - ```yaml - services: - speaches: - mem_limit: 4g # Increase this value - ``` - -2. **Use smaller models** -3. **Close other applications** -4. **Monitor with** `docker stats` - ---- - -### Voice Not Available - -**Symptom:** Requested voice doesn't work - -**Solutions:** -- Check available voices for your model -- Use one of the documented voices (af_bella, am_adam, etc.) 
-- Verify voice name spelling (case-sensitive) - -## Comparison: Local vs Cloud TTS - -| Aspect | Local (Speaches) | Cloud (OpenAI/ElevenLabs) | -|--------|------------------|---------------------------| -| **Cost** | Free after setup | $15-50 per 1M characters | -| **Privacy** | Complete | Data sent to provider | -| **Speed** | Depends on hardware | Usually faster | -| **Quality** | Good (improving) | Excellent | -| **Setup** | Moderate complexity | Simple API key | -| **Offline** | Yes | No | -| **Rate Limits** | None | Yes | -| **Voices** | Limited selection | Many options | -| **Languages** | Limited | 50+ languages | - -**Recommendation:** -- **Use Local** for: Privacy-sensitive content, high-volume generation, development -- **Use Cloud** for: Production podcasts, multiple languages, premium quality needs - -## Best Practices - -### 1. Model Management - -**Download Models Ahead of Time:** -```bash -# Don't wait until generation time -docker compose exec speaches uv tool run speaches-cli model download speaches-ai/Kokoro-82M-v1.0-ONNX -``` - -**Keep Models Updated:** -```bash -# Periodically check for model updates -# Remove old models to save space -docker compose exec speaches uv tool run speaches-cli model list -``` - -### 2. Voice Selection - -**Test Before Production:** -- Generate test audio with different voices -- Choose voices that match your podcast style -- Use consistent voices for recurring speakers - -**Voice Characteristics:** -- Clear pronunciation for educational content -- Expressive voices for storytelling -- Professional voices for business content - -### 3. Resource Management - -**Monitor System Resources:** -```bash -# Check Docker resource usage -docker stats speaches - -# Monitor disk space for models -docker compose exec speaches df -h -``` - -**Optimize Docker:** -```yaml -# Set appropriate limits -services: - speaches: - mem_limit: 4g - cpus: 2 -``` - -### 4. Backup Strategy - -**Persist Model Cache:** -The `hf-hub-cache` volume stores downloaded models. To backup: -```bash -# List volumes -docker volume ls - -# Backup volume -docker run --rm -v hf-hub-cache:/data -v $(pwd):/backup ubuntu tar czf /backup/speaches-models-backup.tar.gz /data -``` - -**Restore if needed:** -```bash -docker run --rm -v hf-hub-cache:/data -v $(pwd):/backup ubuntu tar xzf /backup/speaches-models-backup.tar.gz -C / -``` - -### 5. Testing - -**Always Test First:** -```bash -# Test with short text before generating long podcasts -curl "http://localhost:8969/v1/audio/speech" -s \ - -H "Content-Type: application/json" \ - --output test.mp3 \ - --data '{ - "input": "Test", - "model": "speaches-ai/Kokoro-82M-v1.0-ONNX", - "voice": "af_bella" - }' -``` - -## Complete Setup Script - -For quick setup, save this as `setup-speaches.sh`: - -```bash -#!/bin/bash -set -e - -echo "Creating Speaches setup directory..." -mkdir -p speaches-setup -cd speaches-setup - -echo "Creating docker-compose.yml..." -cat > docker-compose.yml << 'EOF' -services: - speaches: - image: ghcr.io/speaches-ai/speaches:latest-cpu - container_name: speaches - ports: - - "8969:8000" - volumes: - - hf-hub-cache:/home/ubuntu/.cache/huggingface/hub - restart: unless-stopped - -volumes: - hf-hub-cache: -EOF - -echo "Starting Speaches container..." -docker compose up -d - -echo "Waiting for service to be ready..." -sleep 10 - -echo "Downloading TTS model..." -docker compose exec speaches uv tool run speaches-cli model download speaches-ai/Kokoro-82M-v1.0-ONNX - -echo "Testing speech generation..." 
-curl "http://localhost:8969/v1/audio/speech" -s -H "Content-Type: application/json" \ - --output test-audio.mp3 \ - --data '{ - "input": "Hello! Speaches is now configured and ready to use with Open Notebook.", - "model": "speaches-ai/Kokoro-82M-v1.0-ONNX", - "voice": "af_bella", - "speed": 1.0 - }' - -echo "" -echo "✅ Setup complete!" -echo "" -echo "Next steps:" -echo "1. Test the audio file: test-audio.mp3" -echo "2. Set environment variable: export OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8969/v1" -echo "3. Configure in Open Notebook Settings → Models" -echo "" -echo "To stop Speaches: docker compose down" -echo "To restart: docker compose up -d" -``` - -Make it executable and run: -```bash -chmod +x setup-speaches.sh -./setup-speaches.sh -``` - -## Using Other Local TTS Servers - -The principles in this guide apply to any OpenAI-compatible TTS server. When using a different solution: - -1. **Start your TTS server** following its documentation -2. **Set the environment variable** to point to your server: - ```bash - export OPENAI_COMPATIBLE_BASE_URL_TTS=http://your-server-url:port/v1 - ``` -3. **Add the model in Open Notebook** using provider `openai_compatible` -4. **Use the model name** as specified by your TTS server - -The key requirement is OpenAI API compatibility - specifically, the `/v1/audio/speech` endpoint. - -## Getting Help - -**Resources:** -- **Open Notebook Discord**: [https://discord.gg/37XJPXfz2w](https://discord.gg/37XJPXfz2w) - Get help with Open Notebook integration -- **Open Notebook Issues**: Report integration issues to Open Notebook -- **Speaches GitHub**: [https://github.com/speaches-ai/speaches](https://github.com/speaches-ai/speaches) - For Speaches-specific questions -- **Your TTS Server Documentation**: Consult the docs for your chosen TTS solution - -**Common Questions:** - -**Q: Can I use Speaches with multiple Open Notebook instances?** -A: Yes! Just point each instance to the same Speaches server URL. - -**Q: How much disk space do I need?** -A: Each model is 300-800MB. Start with 5GB and add more as you download models. - -**Q: Can I use this for commercial podcasts?** -A: Check the model's license on Hugging Face. Most open models allow commercial use. - -**Q: How does quality compare to ElevenLabs or OpenAI?** -A: Local models are improving rapidly. For most use cases, quality is very good. Premium services still have an edge for the highest quality needs. - -## Related Documentation - -- **[OpenAI-Compatible Setup](openai-compatible.md)** - General OpenAI-compatible provider configuration -- **[AI Models Guide](ai-models.md)** - Complete AI model configuration -- **[Podcast Generation](podcasts.md)** - Learn about creating podcasts -- **[Ollama Setup](ollama.md)** - Another local AI option for language models - ---- - -This guide should get you up and running with local text-to-speech in Open Notebook. Enjoy complete privacy and unlimited audio generation! 🎙️ diff --git a/docs/features/openai-compatible.md b/docs/features/openai-compatible.md deleted file mode 100644 index 460dd8c2..00000000 --- a/docs/features/openai-compatible.md +++ /dev/null @@ -1,617 +0,0 @@ -# OpenAI-Compatible Providers Setup Guide - -Open Notebook supports OpenAI-compatible API endpoints across all AI modalities (language models, embeddings, speech-to-text, and text-to-speech), giving you the flexibility to use popular tools like LM Studio, Text Generation WebUI, vLLM, and custom inference servers. - -## Why Choose OpenAI-Compatible Providers? 
- -- **🆓 Cost Flexibility**: Use free local inference or choose cost-effective cloud providers -- **🔒 Privacy Control**: Run models locally or choose privacy-focused hosted services -- **🎯 Model Selection**: Access to thousands of open-source models -- **⚡ Performance Tuning**: Optimize inference for your specific hardware -- **🔧 Full Control**: Deploy on your infrastructure with your configurations -- **🌐 Universal Standard**: Works with any service that implements the OpenAI API specification - -## Quick Start - -### Basic Setup (All Modalities) - -**For LM Studio** (simplest): -```bash -# Start LM Studio and enable server mode on port 1234 -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 - -# Most LM Studio endpoints don't require an API key -# export OPENAI_COMPATIBLE_API_KEY=not_needed -``` - -**For Text Generation WebUI**: -```bash -# Start with --api flag -# python server.py --api --listen - -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:5000/v1 -``` - -**For vLLM**: -```bash -# Start vLLM server -# vllm serve MODEL_NAME --port 8000 - -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:8000/v1 -``` - -### Advanced Setup (Mode-Specific Endpoints) - -Use different endpoints for different capabilities: - -```bash -# Language models on LM Studio -export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 - -# Embeddings on a dedicated embedding server -export OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8080/v1 - -# Speech services on a different server -export OPENAI_COMPATIBLE_BASE_URL_STT=http://localhost:9000/v1 -export OPENAI_COMPATIBLE_BASE_URL_TTS=http://localhost:8969/v1 -``` - -> **🎙️ Want free, local text-to-speech?** Check our [Local TTS Setup Guide](local_tts.md) for completely private, zero-cost podcast generation! 
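-
-Before adding models in Open Notebook, it is worth smoke-testing the endpoint directly. Below is a minimal sketch using the `openai` Python SDK (an assumption; any HTTP client works). It reads the same environment variables documented in the next section, and `your-model-name` is a placeholder for whatever model your server has loaded:
-
-```python
-import os
-
-from openai import OpenAI
-
-client = OpenAI(
-    base_url=os.environ["OPENAI_COMPATIBLE_BASE_URL"],
-    # Many local servers ignore the key, but the SDK requires some value
-    api_key=os.environ.get("OPENAI_COMPATIBLE_API_KEY", "not-needed"),
-)
-
-# List the models the server exposes
-for model in client.models.list().data:
-    print(model.id)
-
-# One-message chat completion as a smoke test
-reply = client.chat.completions.create(
-    model="your-model-name",  # replace with a model id printed above
-    messages=[{"role": "user", "content": "Say hello in one sentence."}],
-)
-print(reply.choices[0].message.content)
-```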
- -## Environment Variable Reference - -### Generic Configuration - -Use these when you want the same endpoint for all modalities: - -| Variable | Purpose | Required | -|----------|---------|----------| -| `OPENAI_COMPATIBLE_BASE_URL` | Base URL for all AI services | Yes (unless using mode-specific) | -| `OPENAI_COMPATIBLE_API_KEY` | API key if endpoint requires auth | Optional | - -**Example:** -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 -export OPENAI_COMPATIBLE_API_KEY=your_key_here # If needed -``` - -### Mode-Specific Configuration - -Use these when you want different endpoints for different capabilities: - -| Variable | Purpose | Modality | -|----------|---------|----------| -| `OPENAI_COMPATIBLE_BASE_URL_LLM` | Language model endpoint | Language models | -| `OPENAI_COMPATIBLE_API_KEY_LLM` | API key for LLM endpoint | Language models | -| `OPENAI_COMPATIBLE_BASE_URL_EMBEDDING` | Embedding model endpoint | Embeddings | -| `OPENAI_COMPATIBLE_API_KEY_EMBEDDING` | API key for embedding endpoint | Embeddings | -| `OPENAI_COMPATIBLE_BASE_URL_STT` | Speech-to-text endpoint | Speech-to-Text | -| `OPENAI_COMPATIBLE_API_KEY_STT` | API key for STT endpoint | Speech-to-Text | -| `OPENAI_COMPATIBLE_BASE_URL_TTS` | Text-to-speech endpoint | Text-to-Speech | -| `OPENAI_COMPATIBLE_API_KEY_TTS` | API key for TTS endpoint | Text-to-Speech | - -**Precedence**: Mode-specific variables override the generic `OPENAI_COMPATIBLE_BASE_URL` - -**Example:** -```bash -# LLM on LM Studio -export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 - -# Embeddings on dedicated server -export OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8080/v1 -export OPENAI_COMPATIBLE_API_KEY_EMBEDDING=secret_key_here -``` - -## Common Use Cases - -### LM Studio - -**What is LM Studio?** -LM Studio is a desktop application for running large language models locally with a user-friendly interface. - -**Setup Steps:** -1. **Download and install** LM Studio from [lmstudio.ai](https://lmstudio.ai/) -2. **Download a model** (e.g., Llama 3, Qwen, Mistral) -3. **Start the local server**: - - Go to the "Local Server" tab - - Click "Start Server" - - Note the port (default: 1234) - -4. **Configure Open Notebook**: -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 -``` - -**What works:** -- ✅ Language models (chat, completions) -- ✅ Embeddings (with embedding models) -- ❌ Speech-to-text (not supported) -- ❌ Text-to-speech (not supported) - -**Tips:** -- LM Studio doesn't require an API key -- Choose quantized models (Q4, Q5) for better performance -- Monitor RAM usage - larger models need more memory - ---- - -### Text Generation WebUI (Oobabooga) - -**What is Text Generation WebUI?** -A powerful Gradio-based web interface for running Large Language Models. - -**Setup Steps:** -1. **Install** following [official instructions](https://github.com/oobabooga/text-generation-webui) -2. **Download a model** using the UI or manually -3. **Start with API mode**: -```bash -python server.py --api --listen -``` - -4. 
**Configure Open Notebook**: -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:5000/v1 -``` - -**What works:** -- ✅ Language models (excellent support) -- ✅ Embeddings (with compatible models) -- ❌ Speech services (not supported) - -**Tips:** -- Use `--listen` to accept connections from Docker -- Supports more model formats than LM Studio -- Great for fine-tuned models - ---- - -### vLLM - -**What is vLLM?** -High-performance inference server optimized for serving large language models at scale. - -**Setup Steps:** -1. **Install vLLM**: -```bash -pip install vllm -``` - -2. **Start the server**: -```bash -vllm serve meta-llama/Llama-3-8B-Instruct --port 8000 -``` - -3. **Configure Open Notebook**: -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:8000/v1 -``` - -**What works:** -- ✅ Language models (optimized inference) -- ✅ Embeddings (with embedding models) -- ❌ Speech services (not supported) - -**Tips:** -- Best performance for production deployments -- Supports tensor parallelism for large models -- Excellent for high-throughput scenarios - ---- - -### Custom OpenAI-Compatible Services - -Many services implement the OpenAI API specification: - -**Examples:** -- **Together AI**: Cloud-hosted models -- **Anyscale Endpoints**: Ray-based inference -- **Replicate**: Cloud model hosting -- **LocalAI**: Self-hosted alternative to OpenAI -- **FastChat**: Multi-model serving - -**Configuration:** -```bash -# Generic setup -export OPENAI_COMPATIBLE_BASE_URL=https://api.your-service.com/v1 -export OPENAI_COMPATIBLE_API_KEY=your_api_key_here -``` - -## Configuration Scenarios - -### Scenario 1: Single Local Endpoint (Simplest) - -**Use Case**: Running LM Studio for language models only - -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 -``` - -**Result**: -- ✅ Language models available -- ✅ Embeddings available (if model supports) -- ✅ Speech services available (if endpoint supports) -- All use the same endpoint - ---- - -### Scenario 2: Separate Endpoints per Modality - -**Use Case**: Language models on LM Studio, embeddings on dedicated server - -```bash -# Language models on LM Studio -export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 - -# Embeddings on specialized server -export OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=http://localhost:8080/v1 -export OPENAI_COMPATIBLE_API_KEY_EMBEDDING=embedding_key_here -``` - -**Result**: -- ✅ Language models use LM Studio (port 1234) -- ✅ Embeddings use specialized server (port 8080) -- ❌ Speech services not available (not configured) - ---- - -### Scenario 3: Mixed Local and Cloud - -**Use Case**: Local models for privacy, cloud for specialized tasks - -```bash -# Local LLM (privacy-sensitive work) -export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 - -# Cloud embeddings (better quality) -export OPENAI_COMPATIBLE_BASE_URL_EMBEDDING=https://api.cloud-provider.com/v1 -export OPENAI_COMPATIBLE_API_KEY_EMBEDDING=cloud_key_here - -# Cloud speech services -export OPENAI_COMPATIBLE_BASE_URL_TTS=https://api.cloud-provider.com/v1 -export OPENAI_COMPATIBLE_API_KEY_TTS=cloud_key_here -``` - -**Result**: -- ✅ Sensitive chat stays local -- ✅ High-quality embeddings from cloud -- ✅ Professional TTS from cloud -- 🔒 Privacy for conversations, cloud for non-sensitive features - ---- - -### Scenario 4: Docker Deployment - -**Use Case**: Open Notebook in Docker, LM Studio on host machine - -**On macOS/Windows**: -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://host.docker.internal:1234/v1 -``` - -**On 
Linux**: -```bash -# Use host networking or find host IP -export OPENAI_COMPATIBLE_BASE_URL=http://172.17.0.1:1234/v1 -# or use --network host in docker run -``` - -**Important**: -- LM Studio must be set to listen on `0.0.0.0`, not just `localhost` -- In LM Studio settings, enable "Allow network connections" - -## Network Configuration - -### Docker Networking - -**Problem**: Docker containers can't reach `localhost` on the host - -**Solutions:** - -**Option 1: Use `host.docker.internal` (Mac/Windows)** -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://host.docker.internal:1234/v1 -``` - -**Option 2: Use host IP address (Linux)** -```bash -# Find host IP -ip addr show docker0 | grep inet - -# Use in environment -export OPENAI_COMPATIBLE_BASE_URL=http://172.17.0.1:1234/v1 -``` - -**Option 3: Host networking (Linux only)** -```bash -docker run --network host \ - -v ./notebook_data:/app/data \ - -e OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 \ - lfnovo/open_notebook:v1-latest-single -``` - -### Remote Servers - -**Use Case**: OpenAI-compatible service on a different machine - -```bash -# Replace with your server's IP or hostname -export OPENAI_COMPATIBLE_BASE_URL=http://192.168.1.100:1234/v1 -``` - -**Security Notes:** -- Only use on trusted networks -- Consider using HTTPS for production -- Implement API key authentication if possible -- Use firewall rules to restrict access - -### SSL Configuration (Self-Signed Certificates) - -If you're running your OpenAI-compatible service behind a reverse proxy with self-signed SSL certificates (e.g., Caddy, nginx with custom certs), you may encounter SSL verification errors: - -``` -[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate -Connection error. -``` - -**Solutions:** - -**Option 1: Use a custom CA bundle (recommended)** -```bash -# Point to your CA certificate file -export ESPERANTO_SSL_CA_BUNDLE=/path/to/your/ca-bundle.pem -``` - -**Option 2: Disable SSL verification (development only)** -```bash -# WARNING: Only use in trusted development environments -export ESPERANTO_SSL_VERIFY=false -``` - -**Docker Compose example with SSL configuration:** -```yaml -services: - open-notebook: - image: lfnovo/open_notebook:v1-latest-single - environment: - - OPENAI_COMPATIBLE_BASE_URL=https://lmstudio.local:1234/v1 - # Option 1: Custom CA bundle - - ESPERANTO_SSL_CA_BUNDLE=/certs/ca-bundle.pem - # Option 2: Disable verification (dev only) - # - ESPERANTO_SSL_VERIFY=false - volumes: - - /path/to/your/ca-bundle.pem:/certs/ca-bundle.pem:ro -``` - -> **Security Note:** Disabling SSL verification exposes you to man-in-the-middle attacks. Always prefer using a custom CA bundle in production environments. - -### Port Conflicts - -**Problem**: Default port (1234) is already in use - -**Solution**: Change the port in your inference server - -**LM Studio:** -- Settings → Local Server → Port → Change to different port - -**Then update environment:** -```bash -export OPENAI_COMPATIBLE_BASE_URL=http://localhost:8888/v1 -``` - -## Troubleshooting - -### Connection Refused - -**Symptom**: "Connection refused" or "Could not connect to endpoint" - -**Solutions:** -1. **Verify server is running**: - ```bash - curl http://localhost:1234/v1/models - ``` - -2. **Check firewall settings**: Ensure the port is not blocked - -3. **For Docker**: Use `host.docker.internal` instead of `localhost` - -4. 
**Check server binding**: Server must listen on `0.0.0.0`, not just `127.0.0.1` - ---- - -### Models Not Found - -**Symptom**: "Model not found" or "No models available" - -**Solutions:** -1. **Verify model is loaded** in your inference server -2. **Check model name** matches what Open Notebook expects -3. **For LM Studio**: Ensure model is loaded in the local server tab -4. **Test endpoint**: - ```bash - curl http://localhost:1234/v1/models - ``` - ---- - -### Slow Performance - -**Symptom**: Responses take a long time - -**Solutions:** -1. **Use quantized models** (Q4, Q5 instead of full precision) -2. **Check RAM usage**: Model might be swapping to disk -3. **Reduce context length**: Smaller context = faster inference -4. **Enable GPU acceleration**: If available -5. **For vLLM**: Enable tensor parallelism for large models - ---- - -### Authentication Errors - -**Symptom**: "Unauthorized" or "Invalid API key" - -**Solutions:** -1. **Set API key** if your endpoint requires it: - ```bash - export OPENAI_COMPATIBLE_API_KEY=your_key_here - ``` - -2. **Check key validity**: Test with curl: - ```bash - curl -H "Authorization: Bearer YOUR_KEY" \ - http://localhost:1234/v1/models - ``` - -3. **For mode-specific**: Use the correct key variable: - ```bash - export OPENAI_COMPATIBLE_API_KEY_LLM=llm_key - export OPENAI_COMPATIBLE_API_KEY_EMBEDDING=embedding_key - ``` - ---- - -### Docker Can't Reach Host - -**Symptom**: Connection works locally but not from Docker - -**Solutions:** -1. **Use `host.docker.internal`** (Mac/Windows): - ```bash - export OPENAI_COMPATIBLE_BASE_URL=http://host.docker.internal:1234/v1 - ``` - -2. **On Linux**: Use host IP or `--network host` - -3. **Check server listening**: Must listen on `0.0.0.0:1234`, not `127.0.0.1:1234` - -4. **Test from inside container**: - ```bash - docker exec -it open-notebook curl http://host.docker.internal:1234/v1/models - ``` - ---- - -### Embeddings Not Working - -**Symptom**: Search or embeddings fail - -**Solutions:** -1. **Verify embedding model is loaded**: Many inference servers need explicit embedding model setup -2. **Use dedicated embedding endpoint**: If available -3. **Check model compatibility**: Not all models support embeddings -4. **For LM Studio**: Load an embedding model separately - ---- - -### Mixed Results (Some Modes Work, Others Don't) - -**Symptom**: Language models work, but embeddings or speech don't - -**Solution**: Use mode-specific configuration: -```bash -# What works -export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 - -# For embeddings, use a different provider -export OPENAI_API_KEY=your_openai_key # Fallback to OpenAI for embeddings -``` - -## Best Practices - -### Security - -1. **API Keys**: - - Use environment variables, never hardcode - - Rotate keys regularly for cloud services - - Use different keys for different services - -2. **Network**: - - Only expose on trusted networks - - Use HTTPS in production - - Implement firewall rules - -3. **Data Privacy**: - - Use local models for sensitive data - - Check service privacy policies - - Understand data retention policies - -### Performance - -1. **Model Selection**: - - Quantized models (Q4, Q5) for better speed/memory trade-off - - Smaller models for simple tasks - - Larger models only when needed - -2. **Resource Management**: - - Monitor RAM and GPU usage - - Use appropriate batch sizes - - Consider model caching strategies - -3. 
**Network**: - - Use local endpoints when possible for lower latency - - For cloud: Choose geographically close servers - -### Reliability - -1. **Fallback Strategy**: - ```bash - # Primary: Local LLM - export OPENAI_COMPATIBLE_BASE_URL_LLM=http://localhost:1234/v1 - - # Fallback: Use OpenAI if local is unavailable - export OPENAI_API_KEY=your_backup_key - ``` - -2. **Health Checks**: - - Periodically test endpoints - - Monitor server status - - Set up alerts for downtime - -3. **Testing**: - - Test configuration before production - - Validate all required modalities work - - Check error handling - -## Related Guides - -**OpenAI-Compatible Setups:** -- **[Local TTS Setup](local_tts.md)** - Free, private text-to-speech for podcasts -- **[Ollama Setup](ollama.md)** - Local language models and embeddings -- **[AI Models Guide](ai-models.md)** - Complete model configuration overview - -## Getting Help - -**Community Resources:** -- [Open Notebook Discord](https://discord.gg/37XJPXfz2w) - Get help with Open Notebook integration -- [LM Studio Discord](https://discord.gg/lmstudio) - LM Studio-specific support -- [Text Generation WebUI GitHub](https://github.com/oobabooga/text-generation-webui) - Issues and discussions - -**Debugging Steps:** -1. **Test endpoint directly** with curl before configuring Open Notebook -2. **Check Open Notebook logs** for detailed error messages -3. **Verify environment variables** are set correctly -4. **Test with simple requests** first (list models, simple completion) - -**Common curl tests:** -```bash -# List models -curl http://localhost:1234/v1/models - -# Test completion -curl http://localhost:1234/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "your-model", - "messages": [{"role": "user", "content": "Hello!"}] - }' - -# Test embeddings -curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -d '{ - "model": "embedding-model", - "input": "Test text" - }' -``` - -This guide should help you successfully configure OpenAI-compatible providers with Open Notebook. For general AI model configuration, see the [AI Models Guide](ai-models.md). \ No newline at end of file diff --git a/docs/features/podcasts.md b/docs/features/podcasts.md deleted file mode 100644 index f3664425..00000000 --- a/docs/features/podcasts.md +++ /dev/null @@ -1,358 +0,0 @@ -# Podcast Generation System - -Open Notebook's Podcast Generator transforms your research content into professional, multi-speaker podcasts with advanced customization capabilities. Our system delivers superior flexibility compared to Google Notebook LM's 2-speaker limitation, supporting 1-4 speakers with complete personality and voice customization. 
- -## 🎯 Core Capabilities - -### Multi-Speaker Advantage -- **1-4 Speakers**: Unlike Google Notebook LM's fixed 2-host format -- **Dynamic Configurations**: Solo experts, dual discussions, panel formats, interview styles -- **Personality Customization**: Rich character development with backstories and speaking styles -- **Voice Diversity**: Multiple TTS providers and voice options per speaker - -### Professional Quality -- **High-Quality Audio**: Professional TTS with natural speech patterns -- **Conversation Flow**: Optimized dialogue structures for engagement -- **Content Integration**: Seamless incorporation of research materials -- **Consistent Pacing**: Optimized for comprehension and accessibility - -## 🎬 Episode Profiles System - -### Pre-Configured Templates -Episode Profiles eliminate complex configuration with battle-tested combinations: - -#### **Tech Discussion** (2 Speakers) -- Technical experts with complementary perspectives -- Deep-dive analysis of complex topics -- Optimized for developer and technical audiences -- Natural debate and knowledge sharing format - -#### **Solo Expert** (1 Speaker) -- Single authority explaining concepts clearly -- Accessible presentation style -- Perfect for educational content -- Rich personality with engaging delivery - -#### **Business Analysis** (3-4 Speakers) -- Business-focused panel discussion -- Strategic viewpoints and market analysis -- Executive-level conversation style -- Diverse perspectives on business topics - -#### **Interview Style** (2 Speakers) -- Host interviewing subject matter expert -- Question-driven exploration -- Broad topic coverage -- Engaging conversational format - -### Custom Profile Creation -Build your own Episode Profiles by combining: -- Speaker count and role definitions -- AI model preferences (OpenAI, Anthropic, Google, Groq, Ollama) -- TTS provider selection (OpenAI, Google TTS, ElevenLabs) -- Briefing templates and conversation structures -- Segment organization and timing - -## 🔧 Speaker Configuration System - -### Individual Speaker Setup -Each speaker profile includes: - -#### **Voice Selection** -- Multiple TTS provider options -- Voice characteristics and tone -- Speech rate and emphasis settings -- Language and accent preferences - -#### **Personality Development** -- **Backstory**: Rich character development and expertise areas -- **Speaking Style**: Formal, conversational, enthusiastic, analytical -- **Role Definition**: Expert positioning and authority areas -- **Interaction Patterns**: How they engage with other speakers - -#### **Content Adaptation** -- **Expertise Focus**: Technical, business, creative, educational -- **Audience Awareness**: Beginner, intermediate, advanced -- **Presentation Style**: Explanatory, provocative, supportive, challenging - -### Multi-Speaker Dynamics -- **Conversation Flow**: Natural turn-taking and interruption patterns -- **Perspective Balance**: Ensuring diverse viewpoints are represented -- **Conflict Resolution**: Healthy debate without confrontation -- **Synthesis**: Bringing together different expert perspectives - -## 🎚️ Audio Quality & Customization - -### Quality Settings -- **Sample Rate**: 44.1kHz professional audio standard -- **Bit Depth**: 16-bit for optimal quality/size balance -- **Compression**: Optimized MP3 encoding for streaming and download -- **Normalization**: Consistent volume levels across speakers - -### Voice Enhancement -- **Natural Speech**: Advanced TTS with human-like inflection -- **Clarity Optimization**: Enhanced pronunciation 
and diction -- **Pacing Control**: Optimal speech rate for comprehension -- **Emotional Range**: Appropriate enthusiasm and engagement - -### Provider Options -#### **OpenAI TTS** -- High-quality voices with natural speech patterns -- Multiple voice options (Alloy, Echo, Fable, Onyx, Nova, Shimmer) -- Consistent quality and reliability -- Integrated with OpenAI ecosystem - -#### **Google Text-to-Speech** -- Wide language support -- Neural voice models -- Cost-effective option -- Reliable performance - -#### **ElevenLabs** -- Premium voice quality -- Custom voice cloning capabilities -- Emotional expression control -- Professional-grade output - -#### **Local TTS (OpenAI-Compatible)** -- 🆕 **Completely Free**: Zero ongoing costs after setup -- 🔒 **Full Privacy**: Audio generation never leaves your machine -- 🚀 **No Rate Limits**: Generate unlimited podcasts -- 🎙️ **Multiple Voices**: Various high-quality voice options -- ⚡ **Fast Processing**: Local generation without network latency -- 🔧 **Multiple Options**: Various local TTS servers available - -> **💡 Want to run TTS locally?** Check our comprehensive [Local TTS Setup Guide](local_tts.md) for step-by-step setup instructions, voice selection tips, and troubleshooting help. Perfect for privacy-focused users or high-volume podcast generation! - -## 🔄 Background Processing & Queue Management - -### Non-Blocking Experience -- **Async Processing**: Podcasts generate while you continue research -- **Queue System**: Multiple podcasts can be processed sequentially -- **Status Tracking**: Real-time updates without interface blocking -- **Notification System**: Desktop alerts when generation completes - -### Processing Pipeline -1. **Content Analysis**: Extracting and structuring research material -2. **Outline Generation**: Creating conversation framework -3. **Transcript Creation**: Generating natural dialogue -4. **Audio Synthesis**: Converting text to speech -5. 
**Post-Processing**: Audio optimization and formatting - -### Job Management -#### **Status Tracking** -- **Pending**: Job queued for processing -- **Running**: Active generation with progress indicators -- **Completed**: Ready for playback and download -- **Failed**: Error details and retry options - -#### **Error Recovery** -- **Automatic Retry**: Transient failures handled automatically -- **Detailed Logging**: Comprehensive error reporting -- **Graceful Degradation**: Partial success handling -- **Manual Intervention**: User control for complex issues - -## 🎧 Export Options & Sharing - -### Download Formats -- **MP3 Export**: High-quality audio for offline listening -- **Metadata Inclusion**: Episode information and generation details -- **Batch Download**: Multiple episodes at once -- **Mobile Optimization**: Compressed versions for mobile devices - -### Sharing Capabilities -- **Direct Links**: Share episodes with team members -- **Embed Options**: Integration with other platforms -- **Export Integration**: Compatible with podcast platforms -- **Version Control**: Track different generations of same content - -### Library Management -- **Episode Organization**: Grouped by notebook and topic -- **Search Functionality**: Find episodes by content or metadata -- **Playlist Creation**: Organize episodes into learning sequences -- **Archive System**: Long-term storage and retrieval - -## 🔗 Integration with Notes & Sources - -### Content Pipeline -- **Seamless Integration**: Direct generation from notebook content -- **Source Attribution**: Automatic citation and reference tracking -- **Context Preservation**: Maintains relationship to original research -- **Dynamic Updates**: Regenerate when source content changes - -### Research Workflow -- **Active Research**: Generate podcasts during research process -- **Review Sessions**: Create summaries of completed research -- **Learning Paths**: Series generation with consistent profiles -- **Knowledge Sharing**: Export for team collaboration - -### Source Material Optimization -- **Rich Content**: Text, links, documents, and media integration -- **Topic Focus**: Clear subject matter creates better discussions -- **Depth Analysis**: Comprehensive material yields engaging conversations -- **Fact Integration**: Seamless incorporation of research findings - -## 🚀 Advanced Features & Customization - -### Multi-Provider Architecture -- **Language Models**: OpenAI, Anthropic, Google, Groq, Ollama -- **Local Processing**: Full Ollama support for privacy-conscious users -- **Provider Mixing**: Different models for different speakers -- **Performance Optimization**: Automatic load balancing - -### Custom Development -- **API Access**: Full programmatic control via REST API -- **Plugin System**: Extensible architecture for custom features -- **Webhook Integration**: External system notifications -- **Batch Processing**: Automated generation workflows - -### Advanced Configurations - -#### **Performance Tuning** -- **Segment Structure**: Custom conversation organization -- **Timing Control**: Precise episode length management -- **Topic Weighting**: Emphasis on specific content areas -- **Personality Mixing**: Complex speaker interaction patterns - -#### **TTS Concurrency Control** -Configure parallel audio generation to optimize performance and avoid provider rate limits: - -```bash -# Environment variable configuration -export TTS_BATCH_SIZE=3 # Number of concurrent TTS requests (default: 5) -``` - -**Recommended Settings by Provider:** -- **OpenAI TTS**: 
`TTS_BATCH_SIZE=5` (default, handles high concurrency well) -- **ElevenLabs**: `TTS_BATCH_SIZE=2` (strict rate limits, reduce for stability) -- **Google TTS**: `TTS_BATCH_SIZE=4` (moderate concurrency tolerance) -- **Custom/Local TTS**: `TTS_BATCH_SIZE=1` (depends on hardware/setup) - -**Performance Trade-offs:** -- **Higher values (4-5)**: Faster podcast generation, higher provider load -- **Lower values (1-2)**: Slower generation, more reliable for rate-limited providers -- **Optimal setting**: Balance between speed and provider stability - -## 🛠️ Troubleshooting Common Issues - -### Generation Failures -#### **Insufficient Content** -- **Problem**: Episode generation fails with sparse source material -- **Solution**: Ensure notebook contains substantial research content -- **Prevention**: Aim for 1000+ words of source material - -#### **API Quota Limits** -- **Problem**: TTS or LLM API limits exceeded -- **Solution**: Check API quotas and upgrade plans if needed -- **Prevention**: Monitor usage and set up billing alerts - -#### **TTS Concurrency Issues** -- **Problem**: TTS provider rate limiting or concurrent request failures -- **Solution**: Configure TTS batch size to reduce parallel audio generation -- **Environment Variable**: `TTS_BATCH_SIZE=2` (default: 5) -- **Usage**: Lower values reduce provider load but increase generation time -```bash -# Reduce concurrent TTS requests for providers with strict limits -export TTS_BATCH_SIZE=2 -# or -export TTS_BATCH_SIZE=1 # Most conservative, slowest -``` - -#### **Voice Configuration Errors** -- **Problem**: Specific voice not available or misconfigured -- **Solution**: Verify TTS provider settings and voice availability -- **Prevention**: Test voice configurations before full generation - -### Audio Quality Issues -#### **Poor Audio Quality** -- **Problem**: Distorted or low-quality audio output -- **Solution**: Check TTS provider settings and audio format configuration -- **Prevention**: Use recommended providers and quality settings - -#### **Inconsistent Volume** -- **Problem**: Speakers at different volume levels -- **Solution**: Enable audio normalization in settings -- **Prevention**: Use consistent TTS provider for all speakers - -#### **Unnatural Speech** -- **Problem**: Robotic or awkward speech patterns -- **Solution**: Adjust personality settings and try different TTS providers -- **Prevention**: Test speaker configurations with sample content - -### Performance Issues -#### **Slow Generation** -- **Problem**: Podcast generation takes excessive time -- **Solution**: Check API response times and consider provider switching -- **Prevention**: Monitor system resources and API performance - -#### **Memory Issues** -- **Problem**: High memory usage during generation -- **Solution**: Reduce concurrent podcast generations -- **Prevention**: Monitor system resources and optimize content size - -### Content Issues -#### **Repetitive Content** -- **Problem**: Speakers repeating same information -- **Solution**: Improve source material diversity and speaker role definitions -- **Prevention**: Ensure varied source content and clear speaker differentiation - -#### **Off-Topic Discussions** -- **Problem**: Podcast content straying from research material -- **Solution**: Refine briefing templates and topic focus -- **Prevention**: Use clear, focused research content as source material - -## 📱 Mobile & Accessibility Features - -### Audio-First Design -Perfect for various consumption scenarios: -- **Commuting**: Hands-free learning during 
travel -- **Exercise**: Background education during workouts -- **Multitasking**: Information consumption while working -- **Accessibility**: Support for visually impaired users - -### Responsive Interface -- **Mobile Optimization**: Full functionality on mobile devices -- **Touch Controls**: Intuitive playback and navigation -- **Offline Support**: Download for offline listening -- **Sync Capability**: Progress tracking across devices - -## 🎯 Competitive Advantages - -### vs. Google Notebook LM -- **Speaker Flexibility**: 1-4 speakers vs. fixed 2-host format -- **Voice Customization**: Multiple TTS providers vs. limited options -- **Content Control**: Full customization vs. fixed templates -- **Privacy Options**: Local processing available vs. cloud-only -- **Integration**: Seamless notebook workflow vs. separate tool - -### vs. Traditional Podcast Tools -- **Automated Generation**: AI-driven vs. manual production -- **Research Integration**: Direct content pipeline vs. separate workflow -- **Quality Consistency**: Professional output vs. variable quality -- **Speed**: Minutes vs. hours of production time -- **Accessibility**: No audio expertise required vs. technical barriers - -## 🚀 Getting Started - -### Initial Setup -1. **API Configuration**: Set up keys for preferred AI and TTS providers -2. **Profile Initialization**: Click "Initialize Default Profiles" on first use -3. **Content Preparation**: Ensure notebook contains substantial research material -4. **Test Generation**: Start with a simple episode to verify configuration - -### First Podcast Generation -1. **Select Content**: Choose notebook with rich research content -2. **Pick Profile**: Select appropriate Episode Profile for your content -3. **Name Episode**: Provide descriptive name reflecting content -4. **Generate**: Click "Generate Podcast" and continue working -5. **Review**: Listen to completed episode and refine for future generations - -### Optimization Tips -- **Content Quality**: More diverse source material creates better discussions -- **Profile Matching**: Align Episode Profile with content type and audience -- **Iterative Improvement**: Refine profiles based on output quality -- **Workflow Integration**: Generate podcasts as part of research process - ---- - -*Open Notebook's Podcast Generator establishes a new standard for AI-powered content transformation, offering unprecedented flexibility and quality compared to existing solutions like Google Notebook LM.* \ No newline at end of file diff --git a/docs/features/transformations.md b/docs/features/transformations.md deleted file mode 100644 index cefaac58..00000000 --- a/docs/features/transformations.md +++ /dev/null @@ -1,362 +0,0 @@ -# Transformations - -Transformations are a core feature of Open Notebook that provide a flexible and powerful way to generate new insights by applying customizable processing steps to your content. Inspired by the [Fabric framework](https://github.com/danielmiessler/fabric), transformations enable you to automatically distill, summarize, and enrich your research materials in meaningful ways. - -## What are Transformations? - -A **Transformation** is a customizable AI-powered process that modifies text input to produce structured, meaningful output. Whether you're summarizing articles, extracting key insights, generating reflective questions, or creating content outlines, transformations automate the processing of your research materials according to your specific needs. 
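
For example, a minimal summarization transformation could be defined with values like these (illustrative only — each field is explained under Core Components below):

```
Name: quick-summary
Title: Summary
Description: Condense a source into its five most important points
Prompt: Summarize the following content in exactly 5 bullet points.
        Preserve key figures, names, and dates from the original text.
Apply Default: yes
```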
- -Transformations work by: -- Taking your source content as input -- Applying a custom prompt template that defines the processing logic -- Using AI models to generate structured output -- Automatically creating new cards in your notebook with the results - -## Core Components - -### Transformation Elements - -Each transformation consists of several key components: - -- **Name**: Internal identifier for your reference -- **Title**: Displayed as the title of all cards created by this transformation -- **Description**: Helpful hint shown in the UI to explain the transformation's purpose -- **Prompt**: The actual AI prompt template that defines how content should be processed -- **Apply Default**: Whether this transformation should be suggested for all new sources - -### Default Transformation Prompt - -The system includes a configurable default transformation prompt that gets prepended to all transformations. This allows you to: -- Set consistent tone and style across all transformations -- Define global requirements or constraints -- Include instructions that prevent AI models from refusing certain tasks due to content policies - -## Built-in Transformation Types - -Open Notebook comes with several common transformation patterns that you can use immediately or customize: - -### Content Analysis -- **Summarization**: Extract key points and main ideas from lengthy content -- **Insight Extraction**: Identify important insights, conclusions, and implications -- **Question Generation**: Create thoughtful questions for deeper reflection -- **Key Concepts**: Extract and define important terms and concepts - -### Research Support -- **Literature Review**: Analyze academic papers and research content -- **Citation Extraction**: Pull out important quotes and references -- **Methodology Analysis**: Break down research methods and approaches -- **Data Insights**: Extract statistical findings and data points - -### Creative Processing -- **Content Outlines**: Create structured outlines from unorganized content -- **Action Items**: Extract actionable tasks and next steps -- **Comparative Analysis**: Compare and contrast different perspectives -- **Trend Identification**: Spot patterns and emerging themes - -## Custom Transformation Creation - -### Creating Your Own Transformations - -1. **Navigate to Transformations**: Go to the Transformations page in the UI -2. **Create New**: Click the "New Transformation" button -3. **Configure Settings**: - - Enter a descriptive name for internal reference - - Set a title that will appear on generated cards - - Write a clear description explaining the transformation's purpose - - Define your custom prompt template - - Choose whether to apply by default to new sources - -![New Transformation](/assets/new_transformation.png) - -### Prompt Design Best Practices - -When creating custom prompts, consider these guidelines: - -**Structure Your Prompts**: -``` -# ROLE -You are an expert researcher analyzing academic content. - -# TASK -Extract the 5 most important insights from the following text. 
- -# FORMAT -Present each insight as: -- **Insight**: [Brief description] -- **Evidence**: [Supporting details from text] -- **Implications**: [Why this matters] - -# CONSTRAINTS -- Focus on actionable insights -- Avoid redundancy -- Cite specific examples from the text -``` - -**Use Template Variables**: -- Access source metadata with `{{ source.title }}`, `{{ source.url }}` -- Reference the current timestamp with `{{ current_time }}` -- Include custom data passed to the transformation - -**Consider Output Format**: -- Use markdown for structured output -- Include headings for better organization -- Format lists and tables for readability - -## Batch Processing Capabilities - -### Applying Transformations at Scale - -Transformations can be applied to multiple sources simultaneously: - -1. **Source Selection**: Select multiple sources from your notebook -2. **Transformation Choice**: Choose which transformation to apply -3. **Batch Execution**: Process all selected sources with the same transformation -4. **Progress Tracking**: Monitor the processing status of each source - -### Performance Considerations - -- **Model Selection**: Choose appropriate models for your content type and complexity -- **Content Length**: Longer content may require more processing time and tokens -- **Concurrent Processing**: The system processes multiple transformations efficiently -- **Resource Management**: Monitor token usage and processing costs - -## Transformation Management and Organization - -### Organizing Your Transformations - -**Categories and Tags**: -- Group related transformations by purpose -- Use descriptive names and clear descriptions -- Maintain a logical ordering for frequently used transformations - -**Version Control**: -- Keep track of prompt changes over time -- Test modifications before applying to important content -- Maintain backup copies of successful transformation configurations - -**Sharing and Collaboration**: -- Export transformation configurations for sharing -- Create standardized transformations for team use -- Document transformation purposes and best practices - -## Integration with Other Features - -### Notebook Integration - -Transformations seamlessly integrate with your notebook workflow: - -- **Automatic Card Creation**: Results appear as new cards in your notebook -- **Source Linking**: Transformed content maintains connections to original sources -- **Search Integration**: Transformation results are fully searchable -- **Note Connections**: Link transformation outputs to your personal notes - -### Model Compatibility - -Transformations work with various AI models: - -- **OpenAI Models**: GPT-3.5, GPT-4, and other OpenAI offerings -- **Anthropic Models**: Claude variants with different capabilities -- **Local Models**: Self-hosted models for privacy and control -- **Specialized Models**: Domain-specific models for particular content types - -### Workflow Integration - -**Research Workflows**: -- Apply transformations as part of your research process -- Chain multiple transformations for complex analysis -- Use transformation results to guide further research - -**Content Creation**: -- Transform research into actionable content -- Generate outlines and summaries for writing projects -- Extract quotes and citations for academic work - -## Performance Considerations - -### Optimization Strategies - -**Model Selection**: -- Choose faster models for simple transformations -- Use more capable models for complex analysis -- Consider cost vs. 
quality trade-offs - -**Prompt Optimization**: -- Write clear, specific prompts to reduce processing time -- Avoid overly complex instructions that may confuse models -- Test prompts with sample content before full deployment - -**Content Preparation**: -- Pre-process content to remove unnecessary elements -- Break large documents into manageable chunks -- Ensure content is well-formatted for optimal results - -### Monitoring and Troubleshooting - -**Performance Metrics**: -- Track processing time for different transformation types -- Monitor token usage and associated costs -- Identify bottlenecks in your transformation pipeline - -**Error Handling**: -- Implement retry mechanisms for failed transformations -- Log errors for debugging and improvement -- Provide fallback options for problematic content - -## Best Practices and Use Cases - -### Academic Research - -**Literature Reviews**: -- Extract key findings from research papers -- Identify methodology patterns across studies -- Generate comparative analyses of different approaches - -**Note-Taking Enhancement**: -- Transform raw notes into structured insights -- Generate questions for further investigation -- Create study guides from course materials - -### Content Creation - -**Blog Writing**: -- Transform research into blog post outlines -- Extract quotable insights and statistics -- Generate social media content from longer pieces - -**Documentation**: -- Convert technical content into user-friendly guides -- Extract key procedures and best practices -- Create FAQ sections from support content - -### Business Intelligence - -**Market Research**: -- Analyze competitor content and strategies -- Extract trends and insights from industry reports -- Generate executive summaries from detailed analyses - -**Process Improvement**: -- Transform feedback into actionable insights -- Identify patterns in customer communications -- Generate improvement recommendations from data - -### Personal Knowledge Management - -**Learning Enhancement**: -- Create study materials from educational content -- Generate practice questions from textbooks -- Extract key concepts for memorization - -**Reflection and Planning**: -- Transform journal entries into insights -- Generate action items from meeting notes -- Create goal-setting materials from personal reflections - -## Experimenting with Transformations - -### Playground Environment - -Use the Playground page to: -- Test different transformation prompts with sample content -- Compare results across different AI models -- Refine your transformations before applying to important content -- Experiment with new transformation ideas safely - -### Iterative Improvement - -**Testing Cycle**: -1. Create initial transformation prompt -2. Test with representative content samples -3. Analyze results and identify improvements -4. Refine prompt and test again -5. 
Deploy to production use - -**Feedback Integration**: -- Collect feedback on transformation quality -- Iterate based on user needs and preferences -- Track transformation effectiveness over time - -## Advanced Features - -### Template Customization - -**Dynamic Content**: -- Use conditional logic in prompt templates -- Adapt transformations based on source type -- Include context-sensitive instructions - -**Variable Integration**: -- Access source metadata in transformations -- Include user preferences and settings -- Utilize historical transformation results - -### Automation Workflows - -**Scheduled Transformations**: -- Set up automatic processing for new content -- Create transformation pipelines for regular tasks -- Integrate with external content sources - -**Conditional Processing**: -- Apply different transformations based on content type -- Use content analysis to guide transformation selection -- Implement quality checks and validation - -## Troubleshooting Common Issues - -### Transformation Failures - -**Common Causes**: -- Malformed prompt templates -- Insufficient model capabilities -- Content formatting issues -- Token limit exceeded - -**Solutions**: -- Validate prompt syntax before deployment -- Choose appropriate models for complexity -- Pre-process content for consistency -- Break large content into smaller chunks - -### Quality Issues - -**Poor Results**: -- Refine prompt specificity and clarity -- Provide more context and examples -- Adjust model selection for task complexity -- Test with different content types - -**Inconsistent Output**: -- Standardize prompt formatting -- Include explicit output format requirements -- Use consistent terminology across prompts -- Implement validation checks - -## Future Enhancements - -The transformation system continues to evolve with planned features including: - -- **Note Transformations**: Apply transformations to personal notes and annotations -- **Transformation Chains**: Link multiple transformations for complex workflows -- **Template Marketplace**: Share and discover transformation templates -- **Advanced Analytics**: Detailed metrics on transformation performance and usage -- **Integration APIs**: Connect transformations with external tools and services - -## Conclusion - -Transformations represent the heart of Open Notebook's intelligent content processing capabilities. By providing a flexible, customizable system for applying AI-powered analysis to your research materials, transformations enable you to extract maximum value from your content while maintaining control over the processing logic. - -Whether you're conducting academic research, creating content, or managing personal knowledge, transformations can significantly enhance your productivity and insight generation. Start with the built-in transformation types, experiment with custom prompts in the playground, and gradually build a library of transformations tailored to your specific needs and workflows. - -The sky truly is the limit when it comes to creating personalized, powerful workflows that bring out the most meaningful insights from your content. - - \ No newline at end of file diff --git a/docs/getting-started/5-minute-setup.md b/docs/getting-started/5-minute-setup.md deleted file mode 100644 index 22a9ac7f..00000000 --- a/docs/getting-started/5-minute-setup.md +++ /dev/null @@ -1,149 +0,0 @@ -# 5-Minute Setup Guide - -**Goal:** Get Open Notebook running as fast as possible. 
- -## Step 1: Know Your Setup (10 seconds) - -Answer one question: **Where will you ACCESS Open Notebook from?** - -- ✅ **Same computer where Docker runs** → Use `localhost` setup below -- ✅ **Different computer** (accessing a server, Raspberry Pi, NAS, etc.) → Use `remote` setup below - -## Step 2: Install Docker (if needed) - -Already have Docker? Skip to Step 3. - -- **Mac/Windows:** Download [Docker Desktop](https://www.docker.com/products/docker-desktop/) -- **Linux:** `sudo apt install docker.io docker-compose-plugin` - -## Step 3: Get an API Key - -You need at least one AI provider. OpenAI is recommended for beginners: - -1. Go to https://platform.openai.com/api-keys -2. Create account → "Create new secret key" -3. Add $5 in credits -4. Copy the key (starts with `sk-`) - -## Step 4: Run Open Notebook - -### 🏠 For Localhost (Same Computer): - -```bash -mkdir open-notebook && cd open-notebook - -cat > docker-compose.yml << 'EOF' -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" # Web UI - - "5055:5055" # API - environment: - - OPENAI_API_KEY=REPLACE_WITH_YOUR_KEY - # Database connection (required) - - SURREAL_URL=ws://localhost:8000/rpc - - SURREAL_USER=root - - SURREAL_PASSWORD=root - - SURREAL_NAMESPACE=open_notebook - - SURREAL_DATABASE=production - volumes: - - ./notebook_data:/app/data - - ./surreal_data:/mydata - restart: always -EOF - -# Edit the file and replace REPLACE_WITH_YOUR_KEY with your actual key -nano docker-compose.yml # or use your preferred editor - -docker compose up -d -``` - -**Access:** http://localhost:8502 - -### 🌐 For Remote Server: - -```bash -mkdir open-notebook && cd open-notebook - -cat > docker-compose.yml << 'EOF' -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" # Web UI - - "5055:5055" # API - environment: - - OPENAI_API_KEY=REPLACE_WITH_YOUR_KEY - - API_URL=http://REPLACE_WITH_SERVER_IP:5055 - # Database connection (required) - - SURREAL_URL=ws://localhost:8000/rpc - - SURREAL_USER=root - - SURREAL_PASSWORD=root - - SURREAL_NAMESPACE=open_notebook - - SURREAL_DATABASE=production - volumes: - - ./notebook_data:/app/data - - ./surreal_data:/mydata - restart: always -EOF - -# Edit the file and replace both placeholders -nano docker-compose.yml # or use your preferred editor - -docker compose up -d -``` - -**Find your server IP:** -```bash -# On the server where Docker is running: -hostname -I # Linux -ipconfig # Windows -ifconfig | grep inet # Mac -``` - -**Replace in the file:** -- `REPLACE_WITH_YOUR_KEY` → Your actual OpenAI key -- `REPLACE_WITH_SERVER_IP` → Your server's IP (e.g., `192.168.1.100`) - -**Access:** http://YOUR_SERVER_IP:8502 - -## Step 5: Verify Setup - -1. **Open the URL** in your browser -2. If you see "Unable to connect to server": - - **Remote setup?** Make sure you set `API_URL` with your actual server IP - - **Both ports exposed?** Run `docker ps` and verify you see both 8502 and 5055 - - **Using localhost for remote?** That won't work! Use the actual IP address - -3. If you see the Open Notebook interface: - - Click **Settings** → **Models** - - Configure your default models - - Start creating notebooks! 
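
You can also sanity-check both services from the command line. These are the same health endpoints referenced in the installation guide; for a remote setup, replace `localhost` with your server's IP:

```bash
# Web UI should respond on port 8502
curl -fsS http://localhost:8502/healthz && echo "frontend OK"

# API should respond on port 5055
curl -fsS http://localhost:5055/health && echo "API OK"
```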
- -**Working?** → Proceed to [Your First Notebook](first-notebook.md) - -**Not working?** → [Quick Troubleshooting Guide](../troubleshooting/quick-fixes.md) - -## Common Mistakes to Avoid - -| ❌ Wrong | ✅ Correct | -|----------|-----------| -| Only exposing port 8502 | Expose BOTH ports: 8502 and 5055 | -| Using `localhost` in API_URL for remote access | Use the actual server IP: `192.168.1.100` | -| Adding `/api` to API_URL | Just use `http://server-ip:5055` | -| Forgetting to restart after config changes | Always run `docker compose down && docker compose up -d` | - -## Next Steps - -Once Open Notebook is running: - -1. **Configure Models** - Settings → Models -2. **Create Your First Notebook** - [Follow this guide](first-notebook.md) -3. **Add Sources** - PDFs, web links, documents -4. **Start Chatting** - Ask questions about your content -5. **Generate Podcasts** - Turn your research into audio - ---- - -**Need help?** Join our [Discord community](https://discord.gg/37XJPXfz2w) for fast support! diff --git a/docs/getting-started/first-notebook.md b/docs/getting-started/first-notebook.md deleted file mode 100644 index 9cd6cd1b..00000000 --- a/docs/getting-started/first-notebook.md +++ /dev/null @@ -1,213 +0,0 @@ -# Your First Notebook - A Complete Walkthrough - -Welcome to Open Notebook! This guide will walk you through creating your first notebook and experiencing all the core features that make Open Notebook a powerful research and note-taking tool. By the end of this walkthrough, you'll have created a notebook, added sources, generated AI insights, and learned how to effectively use the chat assistant. - -## Understanding the Interface - -Open Notebook uses a clean three-column layout designed to optimize your research workflow: - -- **Left Column**: Your sources and notes - all the content you've added to your notebook -- **Middle Column**: The main workspace where you'll interact with transformations and view detailed content -- **Right Column**: The AI chat assistant and context management - -This layout keeps your sources visible while you work, making it easy to reference materials and manage what information the AI can access. - -## Step 1: Creating Your First Notebook - -Let's start by creating a notebook for a sample research project. - -1. **Click "New Notebook"** on the main dashboard -2. **Choose a descriptive name** - for this example, let's use "Climate Change Research" -3. **Write a detailed description** - this is crucial as it helps the AI understand your research context - - Example description: - ``` - Research notebook focused on climate change impacts, solutions, and policy. - Collecting information from scientific papers, news articles, and expert interviews - to understand current trends and potential mitigation strategies. - ``` - -![New Notebook](/assets/new_notebook.png) - -4. **Click "Create Notebook"** - -**💡 Pro Tip**: The more detailed your description, the better the AI will understand your research goals and provide relevant insights. - -## Step 2: Adding Your First Sources - -Now let's add different types of content to your notebook. We'll add three different source types to demonstrate the flexibility. - -### Adding a Web Article - -1. **Click "Add Source"** in the left column -2. **Select "Link"** as your source type -3. **Paste a URL** - try this example: `https://www.ipcc.ch/report/ar6/wg1/` -4. **Add a title** like "IPCC Climate Report" -5. **Click "Add Source"** - -The system will automatically scrape the content and make it searchable. 
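
Prefer to script this step? The same operation is available through the REST API. The payload below is a sketch — only the `/api/sources` route itself is mentioned elsewhere in these docs, so the field names here are assumptions; check the interactive API docs at `http://localhost:5055/docs` for the exact schema:

```bash
# Hypothetical payload fields, for illustration only
curl -X POST http://localhost:5055/api/sources \
  -H "Content-Type: application/json" \
  -d '{
        "notebook_id": "YOUR_NOTEBOOK_ID",
        "type": "link",
        "url": "https://www.ipcc.ch/report/ar6/wg1/"
      }'
```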
- -![Add Source](/assets/add_source.png) - -### Adding a Text Note - -1. **Click "Add Source"** again -2. **Select "Text"** as your source type -3. **Paste or type content** - you can add research notes, quotes, or any text content -4. **Give it a descriptive title** like "Key Climate Statistics" -5. **Click "Add Source"** - -### Adding a File - -1. **Click "Add Source"** one more time -2. **Select "File"** as your source type -3. **Upload a PDF, document, or other supported file** -4. **The system will automatically extract the text content** - -You'll now see all your sources listed in the left column: - -![Asset List](/assets/asset_list.png) - -## Step 3: Generating Your First AI Insights - -Now that you have content, let's generate some AI insights using transformations. - -1. **Click on one of your sources** in the left column -2. **Look for the "Transformations" section** in the middle column -3. **Try a pre-built transformation** like "Summarize" or "Key Points" -4. **Click "Generate"** to create your first AI insight - -![Transformations](/assets/transformations.png) - -The AI will analyze your content and provide insights based on the transformation you selected. These insights can be saved as notes for future reference. - -**💡 Pro Tip**: Transformations are customizable prompts. You can create your own transformations for specific research needs, like "Extract methodology" or "Identify key arguments." - -## Step 4: Understanding Context Settings - -Before chatting with the AI, it's important to understand how context works. This is one of Open Notebook's most powerful features. - -![Context Settings](/assets/context.png) - -For each source, you can set: - -- **Not in Context**: The AI won't see this content (saves on API costs) -- **Summary**: The AI gets a summary and can request full content if needed (balanced approach) -- **Full Content**: The AI gets the complete text (most comprehensive but uses more tokens) - -### Setting Up Context for Your First Chat - -1. **Click on the context toggle** next to each source -2. **For your first try, set one source to "Full Content"** -3. **Set others to "Summary"** to balance cost and performance -4. **Leave any sensitive sources as "Not in Context"** - -## Step 5: Your First Chat with the AI - -Now let's have a conversation with the AI assistant about your research. - -1. **Look at the right column** for the chat interface -2. **Type your first question** - try something like: - ``` - What are the main causes of climate change discussed in my sources? - ``` -3. **Press Enter** to send your question - -The AI will analyze your sources (based on your context settings) and provide a comprehensive answer. Notice how it references specific sources in its response. - -### Try These Follow-up Questions: - -- "What solutions are mentioned for addressing climate change?" -- "Can you compare the different perspectives in my sources?" -- "What are the key statistics I should remember?" - -## Step 6: Saving Important Information as Notes - -When the AI provides a particularly useful response, you can save it as a note: - -1. **Look for the "Save as Note" button** under any AI response -2. **Click it** to convert the response into a permanent note -3. **Edit the title** if needed -4. **The note will appear in your left column** for easy reference - -![AI Notes](/assets/ai_note.png) - -You can also create manual notes: - -1. **Click "Add Note"** in the left column -2. **Write your own observations** or insights -3. 
**Save** to keep them with your research - -![Human Notes](/assets/human_note.png) - -## Step 7: Working with Multiple Chat Threads - -For complex research, you might want separate conversations: - -1. **Look for the "New Chat" option** in the chat interface -2. **Create topic-specific chats** like: - - "Policy Analysis" - - "Technical Details" - - "Literature Review" - -Each chat maintains its own context and history, helping you stay organized. - -## Step 8: Using Search Across Your Research - -As your notebook grows, use the search feature to find information quickly: - -1. **Go to the Search page** (if available in your interface) -2. **Search by keywords** or use semantic search -3. **Find relevant notes and sources** across all your notebooks - -![Search](/assets/search.png) - -## Next Steps for Deeper Exploration - -Congratulations! You've successfully created your first notebook and experienced the core features. Here are some next steps to explore: - -### Advanced Features to Try: - -1. **Custom Transformations**: Create your own analysis prompts for specific research needs -2. **Podcast Generation**: Convert your research into audio summaries -3. **Advanced Context Management**: Experiment with different context settings for optimal AI interactions -4. **Source Organization**: Develop a system for categorizing and managing your sources - -### Best Practices to Develop: - -1. **Regular Note-Taking**: Save important AI insights as notes for future reference -2. **Context Optimization**: Use the minimum context needed for each conversation to save costs -3. **Descriptive Naming**: Give your notebooks and sources clear, searchable names -4. **Source Diversity**: Mix different types of content (articles, documents, personal notes) for richer insights - -### Getting Help: - -- **Documentation**: Explore the full documentation for advanced features -- **Community**: Join GitHub discussions for tips and feature requests -- **Support**: Check the troubleshooting section for common issues - -## Troubleshooting Common First-Time Issues - -**Sources not processing**: Wait a few moments for content extraction, especially for large files. - -**AI not responding**: Check that you have at least one source set to "Summary" or "Full Content" in your context settings. - -**Poor AI responses**: Try providing more context or asking more specific questions. - -**Missing features**: Ensure your deployment includes all necessary AI model configurations. - -## Summary - -You've now experienced the complete Open Notebook workflow: - -✅ Created a notebook with a detailed description -✅ Added multiple types of sources (web, text, file) -✅ Generated AI insights using transformations -✅ Configured context settings for privacy and cost control -✅ Chatted with the AI assistant about your research -✅ Saved important insights as notes -✅ Learned about advanced features and best practices - -Open Notebook is designed to grow with your research needs. As you add more sources and develop your workflow, you'll discover even more powerful ways to organize, analyze, and understand your research materials. - -Happy researching! 🎉 \ No newline at end of file diff --git a/docs/getting-started/index.md b/docs/getting-started/index.md deleted file mode 100644 index ea5a8dbb..00000000 --- a/docs/getting-started/index.md +++ /dev/null @@ -1,92 +0,0 @@ -# Getting Started with Open Notebook - -Welcome to Open Notebook! Choose your path below based on what you need. - ---- - -## 🚀 **Just Want It Running? 
(Recommended)** - -**[5-Minute Setup Guide](5-minute-setup.md)** ← **Start here!** - -The fastest path from zero to running Open Notebook. Clear steps, no fluff. - -**Perfect for:** First-time users, quick testing, simple deployments - ---- - -## 📚 **Want to Learn More First?** - -### **[Introduction](introduction.md)** -What is Open Notebook and why should you care? -- Key features and benefits -- Comparison with Google Notebook LM -- Use cases and who it's for -- System requirements - -### **[Quick Start Tutorial](quick-start.md)** -Detailed walkthrough with examples and verification steps. -- Docker setup with explanations -- Localhost vs remote configuration -- Model configuration -- First notebook creation -- Common issues and fixes - -### **[Complete Installation Guide](installation.md)** -Comprehensive guide for all deployment scenarios. -- Multiple installation methods -- All AI provider configurations -- Service architecture deep dive -- Production deployment options -- Advanced troubleshooting - -### **[Your First Notebook](first-notebook.md)** -Step-by-step tutorial once Open Notebook is running. -- Interface overview -- Adding different types of sources -- Generating AI-powered notes -- Chat interactions -- Best practices - ---- - -## 🆘 **Having Issues?** - -**Most common problem:** "Unable to connect to server" - -→ **[5-Minute Troubleshooting](../troubleshooting/quick-fixes.md)** - Fast fixes for setup issues - -→ **[Common Issues Guide](../troubleshooting/common-issues.md)** - Complete troubleshooting reference - -→ **[Discord Community](https://discord.gg/37XJPXfz2w)** - Get help from the community - ---- - -## 📍 Recommended Path - -**New to Open Notebook?** Follow this sequence: - -1. **[5-Minute Setup](5-minute-setup.md)** - Get it running first (5 min) -2. **[Your First Notebook](first-notebook.md)** - Learn by doing (10 min) -3. **[Introduction](introduction.md)** - Understand the full potential (5 min) - -**Already know what you want?** Jump directly to: -- **[Installation Guide](installation.md)** - For production deployments -- **[User Guide](../user-guide/index.md)** - Deep dive into features -- **[Features](../features/index.md)** - Advanced capabilities (podcasts, transformations) - ---- - -## 🎯 Quick Links by Use Case - -| I want to... | Start here | -|-------------|------------| -| **Try Open Notebook now** | [5-Minute Setup](5-minute-setup.md) | -| **Deploy on a remote server** | [5-Minute Setup](5-minute-setup.md) → Remote section | -| **Understand before installing** | [Introduction](introduction.md) | -| **Deploy for production use** | [Installation Guide](installation.md) | -| **Fix installation problems** | [Quick Fixes](../troubleshooting/quick-fixes.md) | -| **Learn all features** | [User Guide](../user-guide/index.md) | - ---- - -*Open Notebook is designed to be simple to start, yet powerful when you need it. Start with the 5-minute setup and explore from there!* \ No newline at end of file diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md deleted file mode 100644 index a4a5be46..00000000 --- a/docs/getting-started/installation.md +++ /dev/null @@ -1,841 +0,0 @@ -# Open Notebook Installation Guide - -This comprehensive guide will help you install and configure Open Notebook, an open-source, privacy-focused alternative to Google's Notebook LM. Whether you're a beginner or advanced user, this guide covers all installation methods and configuration options. - -## Table of Contents - -1. [Quick Start](#quick-start) -2. 
[System Requirements](#system-requirements) -3. [Installation Methods](#installation-methods) -4. [Service Architecture](#service-architecture) -5. [Environment Configuration](#environment-configuration) -6. [Manual Installation](#manual-installation) -7. [Docker Installation](#docker-installation) -8. [AI Model Configuration](#ai-model-configuration) -9. [Verification and Testing](#verification-and-testing) -10. [Security Configuration](#security-configuration) -11. [Troubleshooting](#troubleshooting) - ---- - -## Quick Start - -For users who want to get started immediately: - -### Docker (Recommended for Beginners) -```bash -# Create project directory -mkdir open-notebook && cd open-notebook - -# Download configuration files -curl -O https://raw.githubusercontent.com/lfnovo/open-notebook/main/docker-compose.yml -curl -O https://raw.githubusercontent.com/lfnovo/open-notebook/main/.env.example - -# Rename and configure environment -mv .env.example docker.env -# Edit docker.env with your API keys - -# Start Open Notebook -docker compose up -d -``` - -### From Source (Developers) -```bash -# Clone and setup -git clone https://github.com/lfnovo/open-notebook -cd open-notebook -cp .env.example .env -# Edit .env with your API keys - -# Install dependencies and start -uv sync -make start-all -``` - -Access Open Notebook at `http://localhost:8502` - ---- - -## System Requirements - -### Hardware Requirements -- **CPU**: 2+ cores recommended (4+ cores for better performance) -- **RAM**: Minimum 4GB (8GB+ recommended) -- **Storage**: 10GB+ available space -- **Network**: Stable internet connection for AI model access - -### Operating System Support -- **macOS**: 10.15 (Catalina) or later -- **Linux**: Ubuntu 18.04+, Debian 9+, CentOS 7+, Fedora 30+ -- **Windows**: Windows 10 or later (WSL2 recommended) - -### Software Prerequisites -- **Python**: 3.9 or later (for source installation) -- **Docker**: Latest version (for Docker installation) -- **uv**: Python package manager (for source installation) - ---- - -## Installation Methods - -Open Notebook supports multiple installation methods. Choose the one that best fits your needs: - -| Method | Best For | Difficulty | Pros | Cons | -|--------|----------|------------|------|------| -| **Docker Single-Container** | Beginners, simple deployments | Easy | One-click setup, isolated environment | Less control, harder to debug | -| **Docker Multi-Container** | Production deployments | Medium | Scalable, professional setup | More complex configuration | -| **Source Installation** | Developers, customization | Advanced | Full control, easy debugging | Requires Python knowledge | - ---- - -## Service Architecture - -Open Notebook consists of four main services that work together: - -### 1. **SurrealDB Database** (Port 8000) -- **Purpose**: Stores notebooks, sources, notes, and metadata -- **Technology**: SurrealDB - a modern, multi-model database -- **Configuration**: Runs in Docker container with persistent storage - -### 2. **FastAPI Backend** (Port 5055) -- **Purpose**: REST API for all application functionality -- **Features**: Interactive API documentation, authentication, data validation -- **Endpoints**: `/api/notebooks`, `/api/sources`, `/api/notes`, `/api/chat` - -### 3. **Background Worker** -- **Purpose**: Processes long-running tasks asynchronously -- **Tasks**: Podcast generation, content transformations, embeddings -- **Technology**: Surreal Commands worker system - -### 4. 
**React frontend** (Port 8502) -- **Purpose**: Web-based user interface -- **Features**: Notebooks, chat, sources, notes, search -- **Technology**: Next.js framework - -### Service Communication Flow -``` -User Browser → React frontend → FastAPI Backend → SurrealDB Database - ↓ - Background Worker ← Job Queue -``` - ---- - -## Environment Configuration - -Open Notebook uses environment variables for configuration. Create a `.env` file (or `docker.env` for Docker) based on the template below: - -### Core Configuration -```env -# Security (Optional - for public deployments) -OPEN_NOTEBOOK_PASSWORD=your_secure_password_here - -# Database Configuration -SURREAL_URL="ws://localhost:8000/rpc" -SURREAL_USER="root" -SURREAL_PASSWORD="root" -SURREAL_NAMESPACE="open_notebook" -SURREAL_DATABASE="production" -``` - -### AI Provider Configuration - -#### OpenAI (Recommended for beginners) -```env -# Provides: Language models, embeddings, TTS, STT -OPENAI_API_KEY=sk-your-openai-key-here -``` - -#### Anthropic (Claude models) -```env -# Provides: High-quality language models -ANTHROPIC_API_KEY=sk-ant-your-anthropic-key-here -``` - -#### Google (Gemini) -```env -# Provides: Large context models, embeddings, TTS -GEMINI_API_KEY=your-gemini-key-here -``` - -#### Vertex AI (Google Cloud) -```env -# Provides: Enterprise-grade AI models -VERTEX_PROJECT=your-google-cloud-project-name -GOOGLE_APPLICATION_CREDENTIALS=./google-credentials.json -VERTEX_LOCATION=us-east5 -``` - -#### Additional Providers -```env -# DeepSeek - Cost-effective models -DEEPSEEK_API_KEY=your-deepseek-key-here - -# Mistral - European AI provider -MISTRAL_API_KEY=your-mistral-key-here - -# Groq - Fast inference -GROQ_API_KEY=your-groq-key-here - -# xAI (Grok) - Cutting-edge models -XAI_API_KEY=your-xai-key-here - -# ElevenLabs - High-quality voice synthesis -ELEVENLABS_API_KEY=your-elevenlabs-key-here - -# Ollama - Local AI models -OLLAMA_API_BASE="http://localhost:11434" - -# OpenRouter - Access to multiple models -OPENROUTER_BASE_URL="https://openrouter.ai/api/v1" -OPENROUTER_API_KEY=your-openrouter-key-here - -# Azure OpenAI -# Generic configuration (applies to all modalities) -AZURE_OPENAI_API_KEY=your-azure-key-here -AZURE_OPENAI_ENDPOINT=https://your-endpoint.openai.azure.com/ -AZURE_OPENAI_API_VERSION=2024-12-01-preview - -# Mode-specific configuration (for different deployments per modality) -# AZURE_OPENAI_API_KEY_LLM=your-llm-key -# AZURE_OPENAI_ENDPOINT_LLM=https://llm-endpoint.openai.azure.com/ -# AZURE_OPENAI_API_VERSION_LLM=2024-12-01-preview -# AZURE_OPENAI_API_KEY_EMBEDDING=your-embedding-key -# AZURE_OPENAI_ENDPOINT_EMBEDDING=https://embedding-endpoint.openai.azure.com/ -# AZURE_OPENAI_API_VERSION_EMBEDDING=2024-12-01-preview - -# OpenAI Compatible (LM Studio, etc.) 
-OPENAI_COMPATIBLE_BASE_URL=http://localhost:1234/v1 -# Optional - only if your endpoint requires authentication -OPENAI_COMPATIBLE_API_KEY=your-key-here -``` - -### Optional Services -```env -# Firecrawl - Enhanced web scraping -FIRECRAWL_API_KEY=your-firecrawl-key-here - -# Jina - Advanced embeddings -JINA_API_KEY=your-jina-key-here - -# Voyage AI - Specialized embeddings -VOYAGE_API_KEY=your-voyage-key-here - -# LangSmith - Debugging and monitoring -LANGCHAIN_TRACING_V2=true -LANGCHAIN_ENDPOINT="https://api.smith.langchain.com" -LANGCHAIN_API_KEY=your-langsmith-key-here -LANGCHAIN_PROJECT="Open Notebook" -``` - ---- - -## Manual Installation - -### Prerequisites Installation - -#### macOS -```bash -# Install Homebrew if not already installed -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - -# Install uv (Python package manager) -brew install uv - -# Install Docker Desktop -brew install --cask docker -``` - -#### Ubuntu/Debian -```bash -# Update package list -sudo apt update - - -# Install uv -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Install Docker -sudo apt install -y docker.io docker-compose-plugin -sudo systemctl start docker -sudo systemctl enable docker -sudo usermod -aG docker $USER -``` - -#### CentOS/RHEL/Fedora -```bash -# Install system dependencies -sudo dnf install -y file-devel python3-devel gcc - -# Install uv -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Install Docker -sudo dnf install -y docker docker-compose -sudo systemctl start docker -sudo systemctl enable docker -sudo usermod -aG docker $USER -``` - -### Source Installation Steps - -1. **Clone the Repository** -```bash -git clone https://github.com/lfnovo/open-notebook.git -cd open-notebook -``` - -2. **Configure Environment** -```bash -# Copy environment template -cp .env.example .env - -# Edit environment file with your API keys -nano .env # or use your preferred editor -``` - -3. **Install Python Dependencies** -```bash -# Install all required packages -uv sync - -# Install additional system-specific packages -uv pip install python-magic -``` - -4. **Initialize Database** -```bash -# Start SurrealDB -make database -# Wait for database to be ready (about 10 seconds) -``` - -5. **Start All Services** -```bash -# Start all services at once -make start-all -``` - -This will start: -- SurrealDB database on port 8000 -- FastAPI backend on port 5055 -- Background worker for processing -- React frontend on port 8502 - -### Alternative: Start Services Individually - -For development or debugging, you can start each service separately: - -```bash -# Terminal 1: Database -make database - -# Terminal 2: API Backend -make api - -# Terminal 3: Background Worker -make worker - -# Terminal 4: React frontend -make run -``` - ---- - -## Docker Installation - -### Single-Container Deployment (Recommended for Beginners) - -Perfect for personal use or platforms like PikaPods: - -1. **Create Project Directory** -```bash -mkdir open-notebook -cd open-notebook -``` - -2. **Create Docker Compose File** -```bash -# Create docker-compose.yml -cat > docker-compose.yml << 'EOF' -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" - - "5055:5055" - env_file: - - ./docker.env - pull_policy: always - volumes: - - ./notebook_data:/app/data - - ./surreal_single_data:/mydata - restart: always -EOF -``` - -3. 
**Create Environment File**
```bash
# Create docker.env with your API keys
cat > docker.env << 'EOF'
# REQUIRED: At least one AI provider
OPENAI_API_KEY=your-openai-key-here

# Database settings (don't change)
SURREAL_URL="ws://localhost:8000/rpc"
SURREAL_USER=root
SURREAL_PASSWORD=root
SURREAL_NAMESPACE=open_notebook
SURREAL_DATABASE=production

# Optional: Password protection
# OPEN_NOTEBOOK_PASSWORD=your_secure_password
EOF
```

4. **Start Open Notebook**
```bash
docker compose up -d
```

### Multi-Container Deployment (Production)

For scalable production deployments:

1. **Download Configuration**
```bash
# Download the main docker-compose.yml
curl -O https://raw.githubusercontent.com/lfnovo/open-notebook/main/docker-compose.yml

# Copy environment template
curl -o docker.env https://raw.githubusercontent.com/lfnovo/open-notebook/main/.env.example
```

2. **Configure Environment**
```bash
# Edit docker.env with your API keys
nano docker.env
```

3. **Start Services**
```bash
# Start with multi-container profile
docker compose --profile multi up -d
```

### Docker Service Management

```bash
# Check service status
docker compose ps

# View logs
docker compose logs -f

# Stop services
docker compose down

# Update to latest version
docker compose pull
docker compose up -d

# Restart specific service
docker compose restart open_notebook
```

---

## AI Model Configuration

After installation, configure your AI models for optimal performance:

### 1. Access Model Settings
- Navigate to **Settings** → **Models** in the web interface
- Or visit `http://localhost:8502` and click the settings icon

### 2. Configure Model Categories

#### Language Models (Chat & Generation)
**Budget-Friendly Options:**
- `gpt-5-mini` (OpenAI) - Great value for most tasks
- `deepseek-chat` (DeepSeek) - Excellent quality-to-price ratio
- `gemini-2.0-flash` (Google) - Large context window

**Premium Options:**
- `gpt-4o` (OpenAI) - Excellent tool calling
- `claude-3.5-sonnet` (Anthropic) - High-quality reasoning
- `grok-3` (xAI) - Cutting-edge intelligence

#### Embedding Models (Search & Similarity)
**Recommended:**
- `text-embedding-3-small` (OpenAI) - $0.02 per 1M tokens
- `text-embedding-004` (Google) - Generous free tier
- `mistral-embed` (Mistral) - European alternative

#### Text-to-Speech (Podcast Generation)
**High Quality:**
- `eleven_turbo_v2_5` (ElevenLabs) - Best voice quality
- `gpt-4o-mini-tts` (OpenAI) - Good quality, reliable

**Budget Options:**
- `gemini-2.5-flash-preview-tts` (Google) - $10 per 1M tokens

#### Speech-to-Text (Audio Transcription)
**Recommended:**
- `whisper-1` (OpenAI) - Industry standard
- `scribe_v1` (ElevenLabs) - High-quality transcription

### 3. Provider-Specific Setup

#### OpenAI Setup
1. Visit https://platform.openai.com/
2. Create account and navigate to **API Keys**
3. Click **"Create new secret key"**
4. Add at least $5 in billing credits
5. Copy key to your `.env` file

#### Anthropic Setup
1. Visit https://console.anthropic.com/
2. Create account and navigate to **API Keys**
3. Generate new key
4. Add to environment variables

#### Google (Gemini) Setup
1. Visit https://makersuite.google.com/app/apikey
2. Create new API key
3. Add to environment variables

### 4.
Model Recommendations by Use Case - -#### Personal Research -```env -# Language: gpt-5-mini (OpenAI) -# Embedding: text-embedding-3-small (OpenAI) -# TTS: gpt-4o-mini-tts (OpenAI) -# STT: whisper-1 (OpenAI) -``` - -#### Professional Use -```env -# Language: claude-3.5-sonnet (Anthropic) -# Embedding: text-embedding-004 (Google) -# TTS: eleven_turbo_v2_5 (ElevenLabs) -# STT: whisper-1 (OpenAI) -``` - -#### Budget-Conscious -```env -# Language: deepseek-chat (DeepSeek) -# Embedding: text-embedding-004 (Google) -# TTS: gemini-2.5-flash-preview-tts (Google) -# STT: whisper-1 (OpenAI) -``` - ---- - -## Verification and Testing - -### 1. Service Health Checks - -#### Check All Services -```bash -# For source installation -make status - -# For Docker -docker compose ps -``` - -#### Individual Service Tests -```bash -# Test database connection -curl http://localhost:8000/health - -# Test API backend -curl http://localhost:5055/health - -# Test React frontend -curl http://localhost:8502/healthz -``` - -### 2. Create Test Notebook - -1. **Access Web Interface** - - Open `http://localhost:8502` - - You should see the Open Notebook home page - -2. **Create First Notebook** - - Click "Create New Notebook" - - Name: "Test Notebook" - - Description: "Testing installation" - - Click "Create" - -3. **Add Test Source** - - Click "Add Source" - - Select "Text" tab - - Paste: "This is a test document for Open Notebook installation." - - Click "Add Source" - -4. **Test Chat Function** - - Go to Chat tab - - Ask: "What is this document about?" - - You should receive a response about the test document - -### 3. Feature Testing - -#### Test Search Functionality -1. Add multiple sources to your notebook -2. Use the search bar to find specific content -3. Verify both full-text and semantic search work - -#### Test Transformations -1. Select a source -2. Click "Transform" → "Summarize" -3. Verify transformation completes successfully - -#### Test Podcast Generation -1. Add substantial content to your notebook -2. Navigate to "Podcast" tab -3. Click "Generate Podcast" -4. Wait for background processing to complete - ---- - -## Security Configuration - -### Password Protection - -For public deployments, enable password protection: - -```env -# Add to your .env or docker.env file -OPEN_NOTEBOOK_PASSWORD=your_secure_password_here -``` - -**Features:** -- **React frontend**: Password prompt on first access -- **REST API**: Requires `Authorization: Bearer your_password` header -- **Local Usage**: Optional (can be left empty) - -### API Security - -When using the REST API programmatically: - -```bash -# Example API call with password -curl -H "Authorization: Bearer your_password" \ - http://localhost:5055/api/notebooks -``` - -### Network Security - -For production deployments: - -1. **Use HTTPS**: Configure reverse proxy (nginx, Cloudflare) -2. **Firewall Rules**: Restrict access to necessary ports only -3. **VPN Access**: Consider VPN for private networks -4. 
**Regular Updates**: Keep Docker images updated

```bash
# Update Docker images
docker compose pull
docker compose up -d
```

---

## Troubleshooting

### Common Installation Issues

#### Port Already in Use
```bash
# Error: Port 8502 is already in use
# Solution: Find and stop the conflicting process
lsof -i :8502
kill -9 <PID>   # use the PID reported by lsof

# Or run the frontend dev server on a different port
# (npm passes --port through to `next dev`)
cd frontend && npm run dev -- --port 8503
```

#### Permission Denied (Docker)
```bash
# Error: Permission denied accessing Docker
# Solution: Add user to docker group
sudo usermod -aG docker $USER
# Log out and log back in
```

#### Python/uv Installation Issues
```bash
# Error: uv command not found
# Solution: Install uv package manager
curl -LsSf https://astral.sh/uv/install.sh | sh
source ~/.bashrc

# Error: Python version conflict
# Solution: Use uv's Python management
uv python install 3.11
uv python pin 3.11
```

### API and Database Issues

#### Database Connection Failed
```bash
# Check if SurrealDB is running
docker compose ps surrealdb

# Check database logs
docker compose logs surrealdb

# Restart database
docker compose restart surrealdb
```

#### API Backend Not Responding
```bash
# Check API logs
docker compose logs api

# For source installation
# Check if API process is running
pgrep -f "run_api.py"

# Restart API
make api
```

#### Worker Not Processing Jobs
```bash
# Check worker status
pgrep -f "surreal-commands-worker"

# Restart worker
make worker-restart

# Check worker logs
docker compose logs worker
```

### AI Provider Issues

#### OpenAI API Key Errors
```bash
# Error: Invalid API key
# Solution: Verify key format and billing
# 1. Check key starts with "sk-"
# 2. Verify billing credits in OpenAI dashboard
# 3. Check API key permissions
```

#### Model Not Available
```bash
# Error: Model not found
# Solution: Check model availability
# 1. Verify model name in provider documentation
# 2. Check API key permissions
# 3. Try alternative model
```

#### Rate Limiting Issues
```bash
# Error: Rate limit exceeded
# Solution: Implement backoff strategy
# 1. Reduce concurrent requests
# 2. Upgrade provider plan
# 3. Use multiple providers
```

### Performance Issues

#### Slow Response Times
```bash
# Check system resources
top
docker stats

# Optimize database
# Consider increasing Docker memory limits
# Use faster storage (SSD)
```

#### Memory Issues
```bash
# Error: Out of memory
# Solution: Increase Docker memory
# 1. Docker Desktop → Settings → Resources
# 2. Increase memory limit to 4GB+
# 3. Consider model optimization
```

### Data and Storage Issues

#### Persistent Data Loss
```bash
# Ensure volumes are properly mounted
docker compose config

# Check volume permissions
ls -la ./notebook_data
ls -la ./surreal_data

# Fix permissions if needed
sudo chown -R $USER:$USER ./notebook_data
sudo chown -R $USER:$USER ./surreal_data
```

### Getting Help

#### Community Support
- **Discord**: https://discord.gg/37XJPXfz2w
- **GitHub Issues**: https://github.com/lfnovo/open-notebook/issues
- **Installation Assistant**: https://chatgpt.com/g/g-68776e2765b48191bd1bae3f30212631-open-notebook-installation-assistant

#### Bug Reports
When reporting issues, include:
1. Installation method (Docker/source)
2. Operating system and version
3. Error messages and logs
4. Steps to reproduce
5.
Environment configuration (without API keys) - -#### Log Collection -```bash -# Collect all logs -docker compose logs > open-notebook-logs.txt - -# For source installation -make status > status.txt -``` - ---- - -## Next Steps - -After successful installation: - -1. **Read the User Guide**: Learn about features and workflows -2. **Check Model Providers**: Explore different AI providers for your needs -3. **Configure Transformations**: Set up custom content processing -4. **Explore API**: Use the REST API for integrations -5. **Join Community**: Connect with other users for tips and support - -### Advanced Configuration - -For advanced users: -- **Custom Prompts**: Customize AI behavior with Jinja templates -- **API Integration**: Build custom applications using the REST API -- **Multi-User Setup**: Configure for team usage -- **Backup Strategy**: Set up automated backups - -### Performance Optimization - -- **Model Selection**: Choose optimal models for your use case -- **Caching**: Configure appropriate cache settings -- **Resource Limits**: Tune Docker resource allocation -- **Monitoring**: Set up logging and monitoring - -Welcome to Open Notebook! 🚀 \ No newline at end of file diff --git a/docs/getting-started/introduction.md b/docs/getting-started/introduction.md deleted file mode 100644 index e90f2d24..00000000 --- a/docs/getting-started/introduction.md +++ /dev/null @@ -1,153 +0,0 @@ -# Welcome to Open Notebook - -## What is Open Notebook? - -Open Notebook is a powerful, open-source AI-powered research and note-taking platform that puts privacy and user control at the heart of knowledge management. Designed as a privacy-focused alternative to Google's Notebook LM, Open Notebook empowers researchers, students, and professionals to organize, analyze, and interact with their research materials using cutting-edge AI technology—all while maintaining complete control over their data. - -At its core, Open Notebook serves as your personal **cognitive partner**, helping you process information, generate insights, and create compelling content from your research materials. Whether you're a student working on a thesis, a researcher analyzing complex documents, or a professional synthesizing industry reports, Open Notebook provides the tools to enhance your learning and knowledge creation workflows. - -## Key Features & Benefits - -### 🔒 Privacy-First Architecture -Unlike cloud-based alternatives, Open Notebook runs entirely on your infrastructure, ensuring your sensitive research data never leaves your control. You decide what information to share with AI models and when. - -### 🤖 Multi-Model AI Support -Choose from industry-leading AI providers including OpenAI, Anthropic (Claude), Google (Gemini), Mistral, DeepSeek, Ollama (free/local), and many more. This flexibility ensures you're never locked into a single provider and can optimize for cost, performance, or specific capabilities. - -### 🎙️ Advanced Podcast Generation -Transform your research into engaging podcasts with 1-4 customizable speakers—a significant improvement over Google Notebook LM's 2-speaker limitation. Create professional-quality audio content with Episode Profiles for consistent, branded output. 
- -### 📚 Comprehensive Content Integration -Process diverse content types including: -- Web links and articles -- PDFs, EPUB, and Office documents -- YouTube videos and audio files -- Markdown and plain text -- Direct text input and pasting - -### 🔍 Intelligent Search & Discovery -Built-in full-text and vector search capabilities help you quickly locate information across all your research materials, notes, and conversations. - -### ⚙️ Fine-Grained Context Control -Precisely manage what information gets shared with AI models through three context levels: -- **No Context**: AI operates without your documents -- **Summary Only**: AI receives condensed summaries -- **Full Content**: AI accesses complete document text - -### 🛠️ Powerful Content Transformations -Create custom prompts to extract insights, generate summaries, analyze themes, and transform your content in ways that match your specific research needs. - -### 📖 AI-Powered Note Creation -Generate notes manually or let AI assist in creating insights from your research materials, with seamless integration between human and AI-generated content. - -### 🔗 Citations & References -Get accurate answers to questions about your documents with proper citations, enabling transparent and verifiable research workflows. - -## Comparison with Google Notebook LM - -| Feature | Open Notebook | Google Notebook LM | -|---------|---------------|-------------------| -| **Data Privacy** | Complete control, self-hosted | Data processed by Google | -| **AI Model Choice** | 15+ providers, local options | Google's models only | -| **Podcast Speakers** | 1-4 customizable speakers | 2 speakers only | -| **Content Types** | 10+ formats including video | Limited format support | -| **Search Capabilities** | Full-text + vector search | Basic search | -| **Context Control** | Granular 3-level control | Limited control | -| **Customization** | Fully customizable prompts | Fixed functionality | -| **Cost Model** | Pay-per-use to your provider | Free (with data trade-off) | -| **Offline Capability** | Yes (with local models) | No | -| **API Access** | Full REST API | Limited API | - -### Why Choose Open Notebook? - -**🎯 For Privacy-Conscious Users**: Your sensitive research data remains under your complete control, with no third-party data collection or processing. - -**🔄 For Flexibility**: Choose the best AI models for your specific needs and budget, switching providers as technology evolves. - -**🎨 For Customization**: Create personalized workflows, prompts, and transformations that match your unique research methodology. - -**💰 For Cost Control**: Pay only for what you use, with transparent pricing from AI providers and the option to use free local models. - -**🚀 For Advanced Features**: Access professional podcast generation, comprehensive content support, and powerful search capabilities. 
- -## Use Cases & Target Audience - -### 🎓 Students & Academics -- **Research Papers**: Analyze academic literature, generate summaries, and create citations -- **Thesis Writing**: Organize sources, develop arguments, and maintain research notes -- **Literature Reviews**: Synthesize multiple sources and identify themes -- **Study Materials**: Transform research into podcasts for mobile learning - -### 🔬 Researchers & Scientists -- **Data Analysis**: Process reports, studies, and documentation -- **Grant Applications**: Organize supporting materials and generate insights -- **Literature Monitoring**: Track developments in your field -- **Collaboration**: Share insights while maintaining data privacy - -### 💼 Business Professionals -- **Market Research**: Analyze industry reports, competitor information, and trends -- **Content Creation**: Transform research into presentations, reports, and marketing materials -- **Knowledge Management**: Organize and access organizational knowledge -- **Training Materials**: Create podcasts and summaries for team education - -### 🔐 Privacy-Conscious Users -- **Sensitive Documents**: Process confidential materials without cloud exposure -- **Personal Research**: Maintain control over personal projects and interests -- **Compliance Requirements**: Meet organizational data protection standards -- **Intellectual Property**: Protect proprietary research and development - -## System Requirements - -### Minimum Requirements -- **Operating System**: macOS, Linux, or Windows with Docker support -- **Memory**: 4GB RAM (8GB recommended) -- **Storage**: 2GB available space -- **Network**: Internet connection for AI provider APIs (optional for local models) - -### Recommended Setup -- **Memory**: 8GB+ RAM for optimal performance -- **Storage**: 10GB+ for document storage and local models -- **Processor**: Multi-core CPU for faster processing -- **Network**: Stable broadband connection - -### Software Dependencies -- **Docker**: For containerized deployment (recommended) -- **Python 3.8+**: For source installation -- **UV Package Manager**: For dependency management -- **Modern Web Browser**: Chrome, Firefox, Safari, or Edge - -### AI Provider Requirements -- **API Keys**: For cloud-based AI providers (OpenAI, Anthropic, etc.) -- **Ollama**: For free local models (optional) -- **Hardware**: GPU recommended for local model performance (optional) - -## Getting Started - -Ready to transform your research workflow? Here's what's next: - -### 📋 Next Steps -1. **[Installation Guide](installation.md)** - Set up Open Notebook on your system -2. **[Model Selection](../models.md)** - Choose the right AI models for your needs -3. **[Basic Workflow](../basic-workflow.md)** - Learn core concepts and workflows -4. **[Features Overview](../features/)** - Explore all available features - -### 🚀 Quick Start Options -- **Docker**: Get running in minutes with our containerized setup -- **Source Installation**: Full control with manual installation -- **Cloud Deployment**: Deploy on your preferred cloud platform - -### 💬 Community & Support -- **[Discord Community](https://discord.gg/37XJPXfz2w)** - Get help and share workflows -- **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Report bugs and request features -- **[Website](https://www.open-notebook.ai)** - Latest updates and resources - -### 🤝 Contributing -Open Notebook is built by the community, for the community. 
We welcome contributions in: -- **Development**: Frontend, backend, and feature development -- **Documentation**: Improve guides and help others -- **Testing**: Find bugs and test new features -- **Community**: Share workflows and help newcomers - ---- - -*Open Notebook: Empowering knowledge workers with privacy-focused AI tools. Take control of your research, enhance your learning, and create amazing content—all on your terms.* diff --git a/docs/getting-started/quick-start.md b/docs/getting-started/quick-start.md deleted file mode 100644 index 5331c6d2..00000000 --- a/docs/getting-started/quick-start.md +++ /dev/null @@ -1,245 +0,0 @@ -# Quick Start Guide - Get Open Notebook Running in 5 Minutes - -Get up and running with Open Notebook in just a few minutes! This guide will get you from zero to your first AI-powered notebook quickly. - -## Prerequisites - -Before starting, ensure you have: - -1. **Docker Desktop** installed and running - - [Download for Windows/Mac](https://www.docker.com/products/docker-desktop/) - - Linux: `sudo apt install docker.io docker-compose` - -2. **OpenAI API Key** (recommended for beginners) - - Go to [OpenAI Platform](https://platform.openai.com/) - - Create account → API Keys → Create new secret key - - Add $5+ credits to your account for API usage - -## Single Command Setup - -### Step 1: Choose Your Setup Method - -Are you installing on: -- **🏠 The same computer** you'll use to access Open Notebook? → Use **Local Setup** -- **🌐 A remote server** (Raspberry Pi, NAS, cloud server, Proxmox)? → Use **Remote Setup** - -### Step 2: Create Your Configuration - -Create a new folder called `open-notebook` and add these files: - -#### For Local Machine (Same Computer): - -**docker-compose.yml**: -```yaml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" # Web UI - - "5055:5055" # API (required!) - env_file: - - ./docker.env - pull_policy: always - volumes: - - ./notebook_data:/app/data - - ./surreal_single_data:/mydata - restart: always -``` - -**docker.env**: -```env -# Replace YOUR_OPENAI_API_KEY_HERE with your actual API key -OPENAI_API_KEY=YOUR_OPENAI_API_KEY_HERE - -# Database connection (required for single-container) -SURREAL_URL="ws://localhost:8000/rpc" -SURREAL_USER="root" -SURREAL_PASSWORD="root" -SURREAL_NAMESPACE="open_notebook" -SURREAL_DATABASE="production" -``` - -#### For Remote Server: - -**docker-compose.yml**: -```yaml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - ports: - - "8502:8502" # Web UI - - "5055:5055" # API (required!) 
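-    # Keep both ports published: the browser loads the UI from 8502 and then
-    # calls the API on 5055 directly, so 5055 must be reachable from every
-    # machine that will open the web UI (see "Common Issues" below).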
- env_file: - - ./docker.env - pull_policy: always - volumes: - - ./notebook_data:/app/data - - ./surreal_single_data:/mydata - restart: always -``` - -**docker.env**: -```env -# Replace YOUR_OPENAI_API_KEY_HERE with your actual API key -OPENAI_API_KEY=YOUR_OPENAI_API_KEY_HERE - -# CRITICAL: Replace YOUR_SERVER_IP with your server's actual IP address -# Example: API_URL=http://192.168.1.100:5055 -API_URL=http://YOUR_SERVER_IP:5055 - -# Database connection (required for single-container) -SURREAL_URL="ws://localhost:8000/rpc" -SURREAL_USER="root" -SURREAL_PASSWORD="root" -SURREAL_NAMESPACE="open_notebook" -SURREAL_DATABASE="production" -``` - -> **⚠️ Finding Your Server IP:** -> On the server running Docker, use: -> - **Linux**: `hostname -I` or `ip addr show` -> - **Windows**: `ipconfig` (look for IPv4 Address) -> - **Mac**: `ifconfig | grep inet` - -### Step 3: Start Open Notebook - -Open terminal/command prompt in your `open-notebook` folder and run: - -```bash -docker compose up -d -``` - -**That's it!** Open Notebook is now running. - -**Access at:** -- **Local setup**: http://localhost:8502 -- **Remote setup**: http://YOUR_SERVER_IP:8502 (replace with your actual IP) - -## Basic Verification - -1. **Check Services**: Visit http://localhost:8502 - you should see the Open Notebook interface -2. **API Health**: Visit http://localhost:5055/docs - you should see the API documentation -3. **No Errors**: Run `docker-compose logs` to ensure no error messages - -## Simple Example Workflow - -### 1. Configure AI Models -- Click **Models** in the sidebar -- Set these recommended models: - - **Language Model**: `gpt-5-mini` - - **Embedding Model**: `text-embedding-3-small` - - **Text-to-Speech**: `gpt-4o-mini-tts` - - **Speech-to-Text**: `whisper-1` -- Click **Save** - -### 2. Create Your First Notebook -- Click **"Create New Notebook"** -- Name: "My Research" -- Description: "Getting started with Open Notebook" -- Click **"Create"** - -### 3. Add a Source -- Click **"Add Source"** -- Choose **"Link"** and paste: `https://en.wikipedia.org/wiki/Artificial_intelligence` -- Click **"Add Source"** -- Wait for processing to complete - -### 4. Generate Your First Note -- Go to the **Notes** column -- Click **"Create AI Note"** -- Enter prompt: "Summarize the key concepts of artificial intelligence" -- Click **"Generate Note"** -- Watch as AI creates a comprehensive summary! - -### 5. Chat with Your Content -- Go to the **Chat** column -- Ask: "What are the main applications of AI mentioned in the source?" -- Get instant answers with citations from your content - -## Next Steps - -Now that you have Open Notebook running: - -### Essential Features to Explore -- **📁 [Content Support](../content-support.md)** - Learn what file types you can add -- **🔍 [Search](../search.md)** - Master full-text and vector search -- **🎙️ [Podcast Generation](../features/podcasts.md)** - Create multi-speaker podcasts from your research -- **⚙️ [Transformations](../features/transformations.md)** - Extract insights and summaries - -### Advanced Setup -- **🔧 [Development Setup](../development/)** - Run from source code -- **☁️ [Deployment](../deployment/)** - Deploy to cloud services -- **🤖 [AI Models](../features/ai-models.md)** - Add more AI providers beyond OpenAI - -### Getting Help -- **💬 [Discord Community](https://discord.gg/37XJPXfz2w)** - Get help and share ideas -- **📖 [Full Documentation](../user-guide/)** - Complete feature guide -- **🐛 [Report Issues](https://github.com/lfnovo/open-notebook/issues)** - Found a bug? 
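-Before digging into the common issues below, you can also confirm both services from the terminal. A quick sketch using the service health endpoints (default ports assumed; swap in your server IP for a remote setup):
-
-```bash
-# API health check (FastAPI backend)
-curl -f http://localhost:5055/health && echo "API OK"
-
-# The web UI should answer on port 8502
-curl -sf -o /dev/null http://localhost:8502 && echo "UI OK"
-```
-
-Both commands should succeed before you move on.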
- -## Common Issues - -### ❌ "Unable to connect to server" Error - -**This is the #1 issue!** The frontend can't reach the API. - -**Quick Fix Checklist:** - -1. **Are you accessing from a different computer than where Docker runs?** - - ✅ Yes → You MUST set `API_URL` in your `docker.env` (see Remote Setup above) - - ❌ No → Skip to step 2 - -2. **Is port 5055 exposed?** - ```bash - docker ps - # Should show both: 0.0.0.0:8502->8502 AND 0.0.0.0:5055->5055 - ``` - - ❌ Missing 5055? Add it to your `docker-compose.yml` ports section - -3. **Restart after changes:** - ```bash - docker compose down - docker compose up -d - ``` - -**Still not working?** See the [complete troubleshooting guide](../troubleshooting/quick-fixes.md). - -### API Key Errors -- Double-check your API key in `docker.env` -- Ensure you have credits in your OpenAI account -- Verify no extra spaces around the key -- Key should start with `sk-` - -### Port Already in Use -```bash -docker compose down -docker compose up -d -``` - -### Container Won't Start -```bash -docker compose down -v -docker compose up -d -``` - -### Can't Access Interface -- Ensure Docker Desktop is running -- Check firewall isn't blocking ports 8502 and 5055 -- Try: `docker compose restart` - -## Stopping Open Notebook - -To stop: -```bash -docker-compose down -``` - -To start again: -```bash -docker-compose up -d -``` - ---- - -**Congratulations!** You now have Open Notebook running and ready for your research workflow. Start by adding your own documents and see how AI can enhance your note-taking and research process. - -**Next recommended read**: [Basic Workflow Guide](../basic-workflow.md) to learn effective research patterns. \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index bd6d1431..a31861ba 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,143 +1,289 @@ # Open Notebook Documentation -Welcome to the complete documentation for Open Notebook - your privacy-focused, AI-powered research companion. +Welcome to Open Notebook - a privacy-focused AI research assistant. This documentation is organized for different needs. -## 🚀 Getting Started +--- + +## 🎯 Choose Your Path + +### I'm brand new +→ Start here: **[0-START-HERE](0-START-HERE/index.md)** +- Learn what Open Notebook is +- Pick your setup path (OpenAI, cloud, local/Ollama) +- 5-minute quick start + +### I need to install/deploy +→ Go here: **[1-INSTALLATION](1-INSTALLATION/index.md)** +- Multiple installation routes +- Docker Compose (recommended) +- From source (developers) +- Single container (shared hosting) + +### I want to understand how it works +→ Read this: **[2-CORE-CONCEPTS](2-CORE-CONCEPTS/index.md)** +- Mental models and architecture +- How RAG (retrieval-augmented generation) works +- Notebooks, sources, and notes explained +- Chat vs. transformations vs. 
podcasts + +### I want to use it (tutorials) +→ Follow this: **[3-USER-GUIDE](3-USER-GUIDE/index.md)** +- How to add sources (PDFs, URLs, audio, video) +- Creating and organizing notes +- Chat effectively with your research +- Creating podcasts from research +- Search techniques + +### I need to configure it +→ Check this: **[5-CONFIGURATION](5-CONFIGURATION/index.md)** +- Choose and setup AI provider +- API configuration +- Database setup +- Advanced tuning + +### I need provider-specific help +→ Go here: **[4-AI-PROVIDERS](4-AI-PROVIDERS/index.md)** +- OpenAI, Anthropic, Google, Groq, Ollama, Azure +- Model comparisons +- Cost estimates +- Setup paths + +### Something's not working +→ Troubleshoot: **[6-TROUBLESHOOTING](6-TROUBLESHOOTING/index.md)** +- Quick fixes (top 10 issues) +- Installation problems +- Connection issues +- AI/chat problems +- Content processing issues +- Podcast problems + +### I want to contribute/develop +→ Read this: **[7-DEVELOPMENT](7-DEVELOPMENT/index.md)** +- Architecture and tech stack +- Contributing guidelines +- API reference +- Testing -New to Open Notebook? Start here to get up and running quickly. +--- -### 📚 **[Getting Started](getting-started/index.md)** -Everything you need to know to begin your Open Notebook journey. -- **[Introduction](getting-started/introduction.md)** - What is Open Notebook and why use it? -- **[Quick Start](getting-started/quick-start.md)** - Get running in 5 minutes -- **[Installation](getting-started/installation.md)** - Comprehensive setup guide -- **[Your First Notebook](getting-started/first-notebook.md)** - Step-by-step tutorial +## 📊 Documentation Overview + +### By Section + +**[0-START-HERE](0-START-HERE/index.md)** — Entry point +- What is Open Notebook? +- Quick start guides (3 routes) +- First 5 minutes + +**[1-INSTALLATION](1-INSTALLATION/index.md)** — Getting it running +- Multiple installation routes +- Docker, single-container, from-source +- Requirements and setup + +**[2-CORE-CONCEPTS](2-CORE-CONCEPTS/index.md)** — Understanding the system +- Notebooks, sources, notes hierarchy +- RAG (retrieval-augmented generation) +- Chat, transformations, podcasts +- Context management + +**[3-USER-GUIDE](3-USER-GUIDE/index.md)** — Using features +- Adding sources (all types) +- Working with notes +- Chat effectively +- Creating podcasts +- Searching (text and semantic) + +**[4-AI-PROVIDERS](4-AI-PROVIDERS/index.md)** — AI configuration +- Provider comparison +- Setup for each provider +- Model recommendations +- Cost estimates + +**[5-CONFIGURATION](5-CONFIGURATION/index.md)** — Complete reference +- AI provider setup (detailed) +- Database configuration +- Server/API settings +- Advanced tuning +- Environment variables (complete reference) + +**[6-TROUBLESHOOTING](6-TROUBLESHOOTING/index.md)** — Problem solving +- Quick fixes (top 10) +- Installation issues +- Connection problems +- AI/chat issues +- Content processing +- Podcast generation +- Getting help + +**[7-DEVELOPMENT](7-DEVELOPMENT/index.md)** — For contributors +- Architecture +- Contributing guidelines +- API reference +- Testing & development --- -## 📖 User Guide +## 🔍 Find What You Need + +### By Problem Type + +**Installation & Setup** +- Fresh install? → [0-START-HERE](0-START-HERE/index.md) +- Detailed installation routes? → [1-INSTALLATION](1-INSTALLATION/index.md) +- Configuration reference? → [5-CONFIGURATION](5-CONFIGURATION/index.md) +- Provider setup? 
→ [4-AI-PROVIDERS](4-AI-PROVIDERS/index.md) -Master Open Notebook's interface and core functionality. +**Using Open Notebook** +- How to use features? → [3-USER-GUIDE](3-USER-GUIDE/index.md) +- Understanding concepts? → [2-CORE-CONCEPTS](2-CORE-CONCEPTS/index.md) +- Chat not working? → [6-TROUBLESHOOTING - AI Issues](6-TROUBLESHOOTING/ai-chat-issues.md) +- Files won't upload? → [6-TROUBLESHOOTING - Quick Fixes](6-TROUBLESHOOTING/quick-fixes.md#4-cannot-process-file-or-unsupported-format) -### 🎯 **[User Guide](user-guide/index.md)** -Complete reference for using Open Notebook effectively. -- **[Interface Overview](user-guide/interface-overview.md)** - Understanding the layout -- **[Notebooks](user-guide/notebooks.md)** - Organizing your research -- **[Sources](user-guide/sources.md)** - Adding and managing content -- **[Notes](user-guide/notes.md)** - Creating and organizing notes -- **[Chat](user-guide/chat.md)** - Conversing with AI -- **[Search](user-guide/search.md)** - Finding information quickly +**Troubleshooting** +- Quick fix? → [6-TROUBLESHOOTING - Quick Fixes](6-TROUBLESHOOTING/quick-fixes.md) +- Can't connect? → [6-TROUBLESHOOTING - Connection](6-TROUBLESHOOTING/connection-issues.md) +- Chat issues? → [6-TROUBLESHOOTING - AI Issues](6-TROUBLESHOOTING/ai-chat-issues.md) +- Podcast problems? → [6-TROUBLESHOOTING - Quick Fixes](6-TROUBLESHOOTING/quick-fixes.md#8-podcast-generation-failed) + +**Development** +- Architecture? → [7-DEVELOPMENT - Architecture](7-DEVELOPMENT/architecture.md) +- Contributing? → [7-DEVELOPMENT - Contributing](7-DEVELOPMENT/contributing.md) +- API reference? → [7-DEVELOPMENT - API Reference](7-DEVELOPMENT/api-reference.md) --- -## ⚡ Features +## 📚 Reading Paths -Explore Open Notebook's advanced capabilities and unique features. +### Path 1: Complete Beginner (1-2 hours) +1. [0-START-HERE/index.md](0-START-HERE/index.md) — Understand what it is +2. [0-START-HERE Quick Start](0-START-HERE/index.md) — Set it up +3. [2-CORE-CONCEPTS/index.md](2-CORE-CONCEPTS/index.md) — Understand concepts +4. [3-USER-GUIDE/index.md](3-USER-GUIDE/index.md) — Learn features -### 🔧 **[Features](features/index.md)** -Deep dives into what makes Open Notebook special. -- **[AI Models](features/ai-models.md)** - Multi-provider AI support -- **[Context Management](features/context-management.md)** - Granular privacy control -- **[Transformations](features/transformations.md)** - Custom content processing -- **[Podcasts](features/podcasts.md)** - Multi-speaker podcast generation -- **[Citations](features/citations.md)** - Research integrity support -- **[Local TTS](features/local_tts.md)** - 🆕 Free, private local text-to-speech -- **[OpenAI-Compatible](features/openai-compatible.md)** - Use LM Studio and compatible endpoints -- **[Ollama](features/ollama.md)** - Local AI models setup -- **[REST API](development/api-reference.md)** - [![API Docs](https://img.shields.io/badge/API-Documentation-blue?style=flat-square)](http://localhost:5055/docs) Complete programmatic access +**Result:** Fully understand how to use Open Notebook ---- +### Path 2: Get Running Fast (15 minutes) +1. [0-START-HERE](0-START-HERE/index.md) — Pick your path +2. Follow quick-start guide for your setup +3. Start using! + +**Result:** Running in 15 minutes, learn details later + +### Path 3: DevOps/Deployment (1-2 hours) +1. [1-INSTALLATION](1-INSTALLATION/index.md) — Understand routes +2. [5-CONFIGURATION](5-CONFIGURATION/index.md) — Reference setup +3. 
[7-DEVELOPMENT - Architecture](7-DEVELOPMENT/architecture.md) — Understand system -## 🚀 Deployment +**Result:** Ready to deploy to production -Set up Open Notebook for different environments and use cases. +### Path 4: Troubleshooting (5-30 minutes) +1. [6-TROUBLESHOOTING/index.md](6-TROUBLESHOOTING/index.md) — Identify problem +2. Find specific guide +3. Follow solutions -### 🐳 **[Deployment](deployment/index.md)** -Complete deployment guides for all scenarios. -- **[Docker](deployment/docker.md)** - Multi-container setup -- **[Single Container](deployment/single-container.md)** - Simplified deployment -- **[Development](deployment/development.md)** - Source code setup -- **[Security](deployment/security.md)** - Production security -- **[Retry Configuration](deployment/retry-configuration.md)** - Background job reliability +**Result:** Problem solved! --- -## 🔧 Development +## ❓ Common Questions -Technical documentation for developers and contributors. +**Q: Where do I start?** +A: → [0-START-HERE](0-START-HERE/index.md) — Choose your setup path -### 👩‍💻 **[Development](development/index.md)** -Resources for extending and contributing to Open Notebook. -- **[Architecture](development/architecture.md)** - System design overview -- **[API Reference](development/api-reference.md)** - REST API documentation -- **[Contributing](development/contributing.md)** - How to contribute +**Q: How do I install it?** +A: → [1-INSTALLATION](1-INSTALLATION/index.md) — Multiple routes available --- +**Q: How do I use [feature]?** +A: → [3-USER-GUIDE](3-USER-GUIDE/index.md) — Step-by-step tutorials + +**Q: Why does [feature] work like that?** +A: → [2-CORE-CONCEPTS](2-CORE-CONCEPTS/index.md) — Understand the mental model + +**Q: How do I configure [provider]?** +A: → [4-AI-PROVIDERS](4-AI-PROVIDERS/index.md) or [5-CONFIGURATION](5-CONFIGURATION/index.md) + +**Q: Something's broken, what do I do?** +A: → [6-TROUBLESHOOTING](6-TROUBLESHOOTING/index.md) — Problem solver -## 🩺 Support +**Q: How does the system work?** +A: → [2-CORE-CONCEPTS](2-CORE-CONCEPTS/index.md) — Architecture and concepts -Get help when you need it. +**Q: Can I contribute?** +A: → [7-DEVELOPMENT](7-DEVELOPMENT/index.md) — Contributing guide -### 🛠️ **[Troubleshooting](troubleshooting/index.md)** -Solutions for common issues and problems. -- **[Common Issues](troubleshooting/common-issues.md)** - Frequent problems and fixes -- **[FAQ](troubleshooting/faq.md)** - Frequently asked questions -- **[Debugging](troubleshooting/debugging.md)** - Advanced troubleshooting +--- + +## 📖 How This Documentation is Organized + +### Principles +- **Progressive Disclosure**: Start simple, go deeper if needed +- **Multiple Entry Routes**: Different paths for different users +- **High Signal-to-Noise**: Focused content, no fluff +- **Step-by-Step**: Clear instructions you can follow +- **Decision Trees**: Help you pick the right path +- **Symptom-Based**: Troubleshooting by what's broken + +### Structure +- **0-START-HERE** — Entry point (everyone starts here) +- **1-INSTALLATION** — Multiple setup routes +- **2-CORE-CONCEPTS** — Mental models (understand why) +- **3-USER-GUIDE** — How to use (step-by-step) +- **4-AI-PROVIDERS** — Provider guides +- **5-CONFIGURATION** — Reference material +- **6-TROUBLESHOOTING** — Problem solving +- **7-DEVELOPMENT** — For contributors --- -## 🔄 Popular Workflows +## 🚀 Quick Navigation -Common ways to use Open Notebook effectively: +### First Time?
+→ **[START HERE](0-START-HERE/index.md)** -### 🔬 **Academic Research** -Sources → Transformations → Context Management → Citations → Notes +### Just Want to Use It? +→ **[QUICK START](0-START-HERE/index.md)** (5 minutes) -### 📝 **Content Creation** -Sources → AI Models → Transformations → Podcasts → Export +### Something Broken? +→ **[TROUBLESHOOTING](6-TROUBLESHOOTING/index.md)** -### 🧠 **Learning & Study** -Sources → Search → Notes → Chat → Transformations +### Full Reference? +→ **[CONFIGURATION](5-CONFIGURATION/index.md)** -### 👥 **Team Research** -Context Management → Citations → Transformations → Sharing +### Developer? +→ **[DEVELOPMENT](7-DEVELOPMENT/index.md)** --- -## 🌟 What Makes Open Notebook Special - -| Feature | Open Notebook | Google Notebook LM | Advantage | -|---------|---------------|--------------------|-----------| -| **Privacy & Control** | Self-hosted, your data | Google cloud only | Complete data sovereignty | -| **AI Provider Choice** | 16+ providers (OpenAI, Anthropic, Ollama, LM Studio, etc.) | Google models only | Flexibility and cost optimization | -| **Podcast Speakers** | 1-4 speakers with custom profiles | 2 speakers only | Extreme flexibility | -| **Context Control** | 3 granular levels | All-or-nothing | Privacy and performance tuning | -| **Content Transformations** | Custom and built-in | Limited options | Unlimited processing power | -| **API Access** | Full REST API | No API | Complete automation | -| **Deployment** | Docker, cloud, or local | Google hosted only | Deploy anywhere | -| **Citations** | Comprehensive with sources | Basic references | Research integrity | -| **Customization** | Open source, fully customizable | Closed system | Unlimited extensibility | -| **Cost** | Pay only for AI usage | Monthly subscription + usage | Transparent and controllable | +## 📞 Getting Help + +- **Discord Community** — https://discord.gg/37XJPXfz2w +- **GitHub Issues** — https://github.com/lfnovo/open-notebook/issues +- **Documentation** — You're reading it! --- -## 🆘 Need Help? +## 📈 Documentation Stats -- 💬 **[Discord Community](https://discord.gg/37XJPXfz2w)** - Get help and share ideas -- 🐛 **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Report bugs and request features -- 📧 **[Contact](mailto:luis@lfnovo.com)** - Direct support -- 🌐 **[Website](https://www.open-notebook.ai)** - Project homepage +- **8 major sections** +- **35+ focused guides** +- **~80,000 words** +- **Covers all features** +- **Multiple entry paths** +- **Progressive difficulty** --- -## 🤝 Contributing +## 🎯 Start Here + +**First time using Open Notebook?** +→ Go to **[0-START-HERE](0-START-HERE/index.md)** -Open Notebook is open source and welcomes contributions: +**Experienced, looking for specific help?** +→ Use the navigation above to find your section -- **[Contributing Guide](development/contributing.md)** - How to get involved -- **[GitHub Repository](https://github.com/lfnovo/open-notebook)** - Source code -- **[License](https://github.com/lfnovo/open-notebook/blob/main/LICENSE)** - MIT License +**Something not working?** +→ Go to **[TROUBLESHOOTING](6-TROUBLESHOOTING/index.md)** --- -*This documentation is constantly evolving. Found an issue or have a suggestion? 
Please [open an issue](https://github.com/lfnovo/open-notebook/issues) or contribute directly!* +Last updated: January 2026 | Open Notebook v1.2.4+ diff --git a/docs/migration/streamlit-to-nextjs.md b/docs/migration/streamlit-to-nextjs.md deleted file mode 100644 index dc7ef88d..00000000 --- a/docs/migration/streamlit-to-nextjs.md +++ /dev/null @@ -1,288 +0,0 @@ -# Migration Guide: Streamlit to Next.js Frontend - -**Complete guide for upgrading from the legacy Streamlit frontend to the new Next.js frontend.** - -## Overview - -Open Notebook has migrated from a Streamlit-based user interface to a modern Next.js/React frontend. This upgrade provides: - -- **Improved Performance**: Faster page loads and smoother interactions -- **Modern UI/UX**: Contemporary design with better responsiveness -- **Enhanced Features**: Better real-time updates and interactivity -- **Future-Ready**: Foundation for upcoming features like live updates - -## What's Changing - -### User Interface -- **Old**: Streamlit-based UI (Python/Streamlit) -- **New**: Next.js/React frontend (JavaScript/TypeScript) - -### What Stays the Same -- ✅ **Same Port**: Still runs on port 8502 -- ✅ **API Unchanged**: REST API remains on port 5055 -- ✅ **Data Intact**: All your notebooks, sources, and notes are preserved -- ✅ **Configuration**: Same environment variables and settings -- ✅ **Features**: All existing functionality works the same way - -## Upgrade Instructions - -### For Docker Users (Recommended) - -#### Single-Container Setup - -1. **Stop the current container**: - ```bash - docker compose down - ``` - -2. **Pull the latest image**: - ```bash - docker compose pull - ``` - -3. **Start with the new version**: - ```bash - docker compose up -d - ``` - -4. **Verify it's running**: - - Open http://localhost:8502 in your browser - - You should see the new Next.js interface - -#### Multi-Container Setup - -Same steps as above - the process is identical. - -### For Development Setup - -If you're running Open Notebook from source: - -1. **Pull the latest changes**: - ```bash - git pull origin main - ``` - -2. **Install frontend dependencies**: - ```bash - cd frontend - npm install - npm run build - cd .. - ``` - -3. **Start the application**: - ```bash - make start-all - ``` - -4. **Access the new interface**: - - Frontend: http://localhost:8502 - - API: http://localhost:5055 - -## Verification Steps - -After upgrading, verify everything works correctly: - -1. **Check the UI loads**: - - Navigate to http://localhost:8502 - - You should see a modern interface with a cleaner design - -2. **Test your notebooks**: - - Open an existing notebook - - Verify sources are visible - - Check notes are accessible - - Try the chat functionality - -3. **Test core features**: - - Create a new notebook - - Add a source (URL, file, or text) - - Generate a note - - Search your content - - Start a chat session - -4. **Check API access** (if you use it): - - Navigate to http://localhost:5055/docs - - API documentation should be accessible - - Test any custom integrations - -## Troubleshooting - -### UI Doesn't Load - -**Symptom**: Browser shows error or blank page at http://localhost:8502 - -**Solutions**: -1. Check container logs: - ```bash - docker compose logs -f open_notebook - ``` - -2. Verify container is running: - ```bash - docker compose ps - ``` - -3. Try restarting: - ```bash - docker compose restart open_notebook - ``` - -### Port Conflicts - -**Symptom**: Error about port 8502 already in use - -**Solutions**: -1.
Check what's using the port: - ```bash - # macOS/Linux - lsof -i :8502 - - # Windows - netstat -ano | findstr :8502 - ``` - -2. Stop the conflicting service or change Open Notebook's port: - ```yaml - # In docker-compose.yml - ports: - - "8503:8502" # Maps host port 8503 to container port 8502 - ``` - -### Data Not Showing - -**Symptom**: Notebooks or sources appear empty - -**Solutions**: -1. Verify volume mounts are correct: - ```bash - docker compose config - ``` - -2. Check database is running (multi-container): - ```bash - docker compose ps surrealdb - ``` - -3. Verify data directories exist: - ```bash - ls -la notebook_data/ - ls -la surreal_data/ - ``` - -### API Errors - -**Symptom**: Frontend shows "Cannot connect to API" or similar errors - -**Solutions**: -1. Verify API is running: - ```bash - curl http://localhost:5055/health - ``` - -2. Check API logs: - ```bash - docker compose logs -f open_notebook | grep api - ``` - -3. Ensure environment variables are set: - ```bash - docker compose exec open_notebook env | grep SURREAL - ``` - -## Rollback Instructions - -If you need to roll back to the previous Streamlit version: - -### Quick Rollback - -1. **Stop current containers**: - ```bash - docker compose down - ``` - -2. **Use a specific older version** (replace with your previous version): - ```yaml - # In docker-compose.yml, change: - image: lfnovo/open_notebook:0.1.45-single # or whatever version you had - ``` - -3. **Start the old version**: - ```bash - docker compose up -d - ``` - -### Finding Your Previous Version - -Check your Docker images: -```bash -docker images | grep open_notebook -``` - -Or check the [releases page](https://github.com/lfnovo/open-notebook/releases) for version numbers. - -## Frequently Asked Questions - -### Do I need to backup before upgrading? - -While the upgrade process doesn't modify your data, it's always a good practice to back up: - -```bash -# Backup your data -tar -czf backup-$(date +%Y%m%d).tar.gz notebook_data surreal_data -``` - -### Will my bookmarks still work? - -Yes! The new frontend still runs on port 8502, so all your bookmarks will continue to work. - -### Do I need to reconfigure AI models? - -No, all your model configurations are stored in the database and will work automatically with the new UI. - -### Will my API integrations break? - -No, the API is completely unchanged. All existing integrations will continue to work. - -### What if I prefer the old Streamlit frontend? - -You can roll back to any previous version using the instructions above. However, we recommend trying the new UI as it provides better performance and will receive all future updates. - -### How do I report issues with the new UI? - -Please report any issues on our [GitHub Issues page](https://github.com/lfnovo/open-notebook/issues) or join our [Discord server](https://discord.gg/37XJPXfz2w) for help. - -## New Features in Next.js UI - -While the migration maintains feature parity, the new frontend enables: - -- **Better Performance**: Faster loading and navigation -- **Improved Responsiveness**: Better mobile and tablet support -- **Modern Design**: Cleaner, more intuitive interface -- **Foundation for Future**: Enables upcoming features like real-time collaboration - -## Getting Help - -If you encounter any issues during migration: - -1. **Check the logs**: `docker compose logs -f` -2. **Review this guide**: Most issues are covered in Troubleshooting -3. **Join Discord**: [discord.gg/37XJPXfz2w](https://discord.gg/37XJPXfz2w) -4.
**Open an issue**: [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) - -## Post-Migration Checklist - -After successfully migrating, complete these steps: - -- [ ] Verify all notebooks load correctly -- [ ] Test source addition and viewing -- [ ] Verify notes are accessible -- [ ] Test chat functionality -- [ ] Check search works as expected -- [ ] Verify podcast generation (if used) -- [ ] Test any custom API integrations -- [ ] Update any deployment documentation you maintain -- [ ] Remove old Docker images to free space: `docker image prune` - ---- - -**Questions?** Join our [Discord community](https://discord.gg/37XJPXfz2w) or [open an issue](https://github.com/lfnovo/open-notebook/issues) on GitHub. diff --git a/docs/troubleshooting/common-issues.md b/docs/troubleshooting/common-issues.md deleted file mode 100644 index aaef1243..00000000 --- a/docs/troubleshooting/common-issues.md +++ /dev/null @@ -1,788 +0,0 @@ -# Common Issues and Solutions - -This document covers the most frequently encountered issues when installing, configuring, and using Open Notebook, along with their solutions. - -> **🆘 Quick Fixes for Setup Issues** -> -> Most problems are caused by incorrect API_URL configuration. Choose your scenario below for instant fixes. - -## Setup-Related Issues (START HERE!) - -### ❌ "Unable to connect to server" or "Connection Error" - -**This is the #1 issue for new users.** The frontend can't reach the API. - -#### Diagnostic Checklist: - -1. **Are both ports exposed?** - ```bash - docker ps - # Should show: 0.0.0.0:8502->8502 AND 0.0.0.0:5055->5055 - ``` - ✅ **Fix:** Add `-p 5055:5055` to your docker run command, or add it to docker-compose.yml: - ```yaml - ports: - - "8502:8502" - - "5055:5055" # Add this! - ``` - -2. **Are you accessing from a different machine than where Docker is running?** - - **Determine your server's IP:** - ```bash - # On the Docker host machine: - hostname -I # Linux - ipconfig # Windows - ifconfig | grep inet # Mac - ``` - - ✅ **Fix:** Set environment variable (replace with your actual server IP): - - **Docker Compose:** - ```yaml - environment: - - API_URL=http://192.168.1.100:5055 - ``` - - **Docker Run:** - ```bash - -e API_URL=http://192.168.1.100:5055 - ``` - -3. **Using localhost in API_URL but accessing remotely?** - - ❌ **Wrong:** - ``` - Access from browser: http://192.168.1.100:8502 - API_URL setting: http://localhost:5055 # This won't work! 
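-   # (fails because "localhost" resolves to the viewer's own machine, not the Docker host)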
- ``` - - ✅ **Correct:** - ``` - Access from browser: http://192.168.1.100:8502 - API_URL setting: http://192.168.1.100:5055 - ``` - -#### Common Scenarios: - -| Your Setup | Access URL | API_URL Value | -|------------|-----------|---------------| -| Docker on your laptop, accessed locally | `http://localhost:8502` | Not needed (or `http://localhost:5055`) | -| Docker on Proxmox VM at 192.168.1.50 | `http://192.168.1.50:8502` | `http://192.168.1.50:5055` | -| Docker on Raspberry Pi at 10.0.0.10 | `http://10.0.0.10:8502` | `http://10.0.0.10:5055` | -| Docker on NAS at nas.local | `http://nas.local:8502` | `http://nas.local:5055` | -| Behind reverse proxy at notebook.mydomain.com | `https://notebook.mydomain.com` | `https://notebook.mydomain.com/api` | - -#### After changing API_URL: - -**Always restart the container:** -```bash -# Docker Compose -docker compose down -docker compose up -d - -# Docker Run -docker stop open-notebook -docker rm open-notebook -# Then run your docker run command again -``` - ---- - -### ❌ Frontend trying to connect on port 8502 instead of 5055 - -**Symptom:** Frontend tries to access `http://10.10.10.107:8502/api/config` instead of using port 5055. - -**Cause:** API_URL is not set correctly or you're using an old version. - -✅ **Fix:** -1. Ensure you're using version 1.0.6+ which supports runtime API_URL -2. Set API_URL environment variable (not NEXT_PUBLIC_API_URL) -3. Restart container after setting the variable - ```bash - docker compose down && docker compose up -d - ``` - ---- - -### ❌ "API config endpoint returned status 404" - -**Cause:** You added `/api` to the end of API_URL. - -❌ **Wrong:** `API_URL=http://192.168.1.100:5055/api` - -✅ **Correct:** `API_URL=http://192.168.1.100:5055` - -The `/api` path is added automatically by the application. - ---- - -### ❌ "Missing authorization header" - -**Cause:** You have password authentication enabled but it's not configured correctly. - -✅ **Fix:** Set the password in your environment: -```yaml -environment: - - OPEN_NOTEBOOK_PASSWORD=your_secure_password -``` - -Or provide it when logging into the web interface. - ---- - -## Installation Problems - -### Port Already in Use - -**Problem**: Error message "Port 8502 is already in use" or similar port conflicts. - -**Symptoms**: -- Cannot start the Next.js frontend -- Error messages about address already in use -- Services failing to bind to ports - -**Solutions**: - -1. **Find and stop conflicting process**: - ```bash - # Check what's using port 8502 - lsof -i :8502 - - # Kill the process (replace PID with actual process ID) - kill -9 <PID> - ``` - -2. **Use different ports**: - ```bash - # For the Next.js frontend, run the dev server on another port - cd frontend && npm run dev -- --port 8503 - - # For Docker deployment, modify docker-compose.yml - ports: - - "8503:8502" # host:container - ``` - -3. **Common port conflicts**: - - Port 8502 (Next.js): Often used by other Next.js apps - - Port 5055 (API): May conflict with other web services - - Port 8000 (SurrealDB): May conflict with other databases - -### Permission Denied (Docker) - -**Problem**: Docker commands fail with permission denied errors. - -**Symptoms**: - -"permission denied while trying to connect to the Docker daemon socket" - Docker commands require sudo - -**Solutions**: - -1. **Add user to docker group (Linux)**: - ```bash - sudo usermod -aG docker $USER - - # Log out and log back in, or run: - newgrp docker - ``` - -2.
**Start Docker service (Linux)**: - ```bash - sudo systemctl start docker - sudo systemctl enable docker - ``` - -3. **Restart Docker Desktop (Windows/Mac)**: - - Close Docker Desktop completely - - Restart Docker Desktop - - Wait for it to fully start - -### Python/uv Installation Issues - -**Problem**: `uv` command not found or Python version conflicts. - -**Symptoms**: -- "uv: command not found" -- Python version mismatch errors -- Virtual environment issues - -**Solutions**: - -1. **Install uv package manager**: - ```bash - # macOS - brew install uv - - # Linux/WSL - curl -LsSf https://astral.sh/uv/install.sh | sh - source ~/.bashrc - - # Windows - powershell -c "irm https://astral.sh/uv/install.ps1 | iex" - ``` - -2. **Fix Python version issues**: - ```bash - # Install specific Python version - uv python install 3.11 - - # Pin Python version for project - uv python pin 3.11 - - # Recreate virtual environment - uv sync --reinstall - ``` - -3. **Clear uv cache**: - ```bash - uv cache clean - ``` - -### SurrealDB Connection Issues - -**Problem**: Cannot connect to SurrealDB database. - -**Symptoms**: -- "Connection refused" errors -- Database queries failing -- Timeout errors - -**Solutions**: - -1. **Check SurrealDB is running**: - ```bash - # For Docker - docker compose ps surrealdb - - # Check logs - docker compose logs surrealdb - ``` - -2. **Verify connection settings**: - ```bash - # Check environment variables - echo $SURREAL_URL - echo $SURREAL_USER - - # Test connection - curl http://localhost:8000/health - ``` - -3. **Restart SurrealDB**: - ```bash - docker compose restart surrealdb - # Wait 10 seconds for startup - sleep 10 - ``` - -4. **Check file permissions**: - ```bash - # Ensure data directory is writable - ls -la surreal_data/ - - # Fix permissions if needed - sudo chown -R $USER:$USER surreal_data/ - ``` - -## Runtime Errors - -### SSL Certificate Verification Errors - -**Problem**: SSL verification errors when connecting to local AI providers (Ollama, LM Studio) behind reverse proxies with self-signed certificates. - -**Symptoms**: -- `[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate` -- `Connection error` when using HTTPS endpoints -- Works with HTTP but fails with HTTPS - -**Cause**: Python's SSL verification uses the `certifi` package certificate store, not the system's certificate store. Self-signed certificates are not trusted by default. - -**Solutions**: - -1. **Use a custom CA bundle (recommended)**: - ```bash - # Add to your .env or docker-compose.yml - ESPERANTO_SSL_CA_BUNDLE=/path/to/your/ca-bundle.pem - ``` - - For Docker, mount the certificate: - ```yaml - services: - open-notebook: - environment: - - ESPERANTO_SSL_CA_BUNDLE=/certs/ca-bundle.pem - volumes: - - /path/to/your/ca-bundle.pem:/certs/ca-bundle.pem:ro - ``` - -2. **Disable SSL verification (development only)**: - ```bash - # WARNING: Only use in trusted development environments - ESPERANTO_SSL_VERIFY=false - ``` - -3. **Use HTTP instead of HTTPS**: - - If your services are on a trusted local network, using HTTP is acceptable - - Change your endpoint URL from `https://` to `http://` - -> **Security Note:** Disabling SSL verification exposes you to man-in-the-middle attacks. Always prefer using a custom CA bundle or HTTP on trusted networks. 
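-**Tip:** Before wiring `ESPERANTO_SSL_CA_BUNDLE` in, you can sanity-check that the bundle actually validates your endpoint from the shell. A minimal sketch, with placeholder bundle path and hostname:
-
-```bash
-# If the bundle is good, this returns response headers instead of a certificate error
-curl --cacert /path/to/your/ca-bundle.pem -I https://your-proxy.example.com
-
-# Inspect the chain the proxy presents (look for "Verify return code: 0 (ok)")
-openssl s_client -connect your-proxy.example.com:443 \
-  -CAfile /path/to/your/ca-bundle.pem </dev/null
-```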
- -**Related Documentation:** -- [Ollama SSL Configuration](../features/ollama.md#ssl-configuration-self-signed-certificates) -- [OpenAI-Compatible SSL Configuration](../features/openai-compatible.md#ssl-configuration-self-signed-certificates) - ---- - -### AI Provider API Errors - -**Problem**: Errors when using AI models (OpenAI, Anthropic, etc.). - -**Symptoms**: -- "Invalid API key" errors -- "Rate limit exceeded" messages -- Model not found errors - -**Solutions**: - -1. **Verify API keys**: - ```bash - # Check key format (don't expose full key) - echo $OPENAI_API_KEY | cut -c1-10 - - # Test OpenAI key - curl -H "Authorization: Bearer $OPENAI_API_KEY" \ - https://api.openai.com/v1/models - ``` - -2. **Check billing and usage**: - - OpenAI: Visit https://platform.openai.com/account/billing - - Anthropic: Visit https://console.anthropic.com/account/billing - - Ensure you have sufficient credits - -3. **Verify model availability**: - ```bash - # Check model names in settings - # Use gpt-5-mini instead of gpt-4-mini - # Use claude-3-haiku-20240307 instead of claude-3-haiku - ``` - -4. **Handle rate limits**: - - Wait before retrying - - Use lower-tier models for testing - - Check provider rate limits - -### API Timeout Errors During Transformations - -**Problem**: Timeout errors when running transformations or generating insights, even though the operation completes successfully. - -**Symptoms**: -- "timeout of 30000ms exceeded" in React frontend -- "Failed to connect to API: timed out" in Streamlit UI -- Transformation completes after a few minutes, but error appears after 30-60 seconds -- Common with local models (Ollama), remote LM Studio, or slow hardware - -**Solutions**: - -1. **Increase API client timeout** (recommended): - ```bash - # Add to your .env file - API_CLIENT_TIMEOUT=600 # 10 minutes (600 seconds) - ``` - - This controls how long the frontend/UI waits for API responses. Default is 300 seconds (5 minutes). - -2. **Adjust timeout based on your setup**: - ```bash - # Fast cloud APIs (OpenAI, Anthropic, Groq) - API_CLIENT_TIMEOUT=300 # 5 minutes (default) - - # Local Ollama on GPU - API_CLIENT_TIMEOUT=600 # 10 minutes - - # Local Ollama on CPU or slow hardware - API_CLIENT_TIMEOUT=1200 # 20 minutes - - # Remote LM Studio over slow network - API_CLIENT_TIMEOUT=900 # 15 minutes - ``` - -3. **Increase LLM provider timeout if needed**: - ```bash - # Add to your .env file if the model itself is timing out - ESPERANTO_LLM_TIMEOUT=180 # 3 minutes (default is 60s) - ``` - - Only increase this if you see errors during actual model inference, not just HTTP timeouts. - -4. **Use faster models for testing**: - - Test with cloud APIs first to verify setup - - Try smaller local models (e.g., `gemma2:2b` instead of `llama3:70b`) - - Preload models before running transformations: `ollama run model-name` - -5. **Restart services after configuration changes**: - ```bash - # For Docker - docker compose down - docker compose up -d - - # For source installation - make stop-all - make start-all - ``` - -**Important Notes**: -- `API_CLIENT_TIMEOUT` should be HIGHER than `ESPERANTO_LLM_TIMEOUT` for proper error handling -- If transformations complete successfully after refresh, you only need to increase `API_CLIENT_TIMEOUT` -- First time running a model may be slower due to model loading - -**Related GitHub Issue**: [#131](https://github.com/lfnovo/open-notebook/issues/131) - -### Memory and Performance Issues - -**Problem**: Application running slowly or crashing due to memory issues. 
- -**Symptoms**: -- Slow response times -- Out of memory errors -- Application crashes -- High CPU usage - -**Solutions**: - -1. **Increase Docker memory**: - ```bash - # Docker Desktop → Settings → Resources → Memory - # Increase to 4GB or more - ``` - -2. **Monitor resource usage**: - ```bash - # Check Docker stats - docker stats - - # Check system resources - htop - top - ``` - -3. **Optimize model usage**: - - Use smaller models (gpt-5-mini vs gpt-5) - - Reduce context window size - - Process fewer documents at once - -4. **Clear application cache**: - ```bash - # Clear Python cache - find . -name "__pycache__" -type d -exec rm -rf {} + - - # Clear Streamlit cache (legacy UI) - rm -rf ~/.streamlit/cache/ - ``` - -### Background Job Failures - -**Problem**: Background tasks (podcast generation, transformations) failing. - -**Symptoms**: -- Jobs stuck in "processing" state -- No podcast audio generated -- Transformations not completing - -**Solutions**: - -1. **Check worker status**: - ```bash - # Check if worker is running - pgrep -f "surreal-commands-worker" - - # Restart worker - make worker-restart - ``` - -2. **Check job logs**: - ```bash - # View worker logs - docker compose logs worker - - # Check command status in database - # (Access through UI or API) - ``` - -3. **Verify AI provider configuration**: - - Ensure TTS/STT models are configured - - Check API keys for required providers - - Test models individually - -4. **Clear stuck jobs**: - ```bash - # Restart all services - make stop-all - make start-all - ``` - -### File Upload Issues - -**Problem**: Cannot upload files or file processing fails. - -**Symptoms**: -- Upload button not working - File processing errors - Unsupported file type messages - -**Solutions**: - -1. **Check file size limits**: - ```bash - # Default Streamlit upload limit is 200MB (legacy UI) - # Large files may timeout - ``` - -2. **Verify file types**: - - PDF: Standard PDF files (not password protected) - - Images: PNG, JPG, GIF, WebP - - Audio: MP3, WAV, M4A - - Video: MP4, AVI, MOV (for transcript extraction) - - Documents: TXT, DOC, DOCX - -3. **Check file permissions**: - ```bash - # Ensure files are readable - ls -la /path/to/file - - # Fix permissions - chmod 644 /path/to/file - ``` - -4. **Test with smaller files**: - - Try with a simple text file first - - Gradually increase complexity - -## Performance Issues - -### Slow Search and Chat - -**Problem**: Search and chat responses are very slow. - -**Symptoms**: -- Long wait times for responses -- Timeout errors -- Poor user experience - -**Solutions**: - -1. **Optimize embedding model**: - - Use faster embedding models - - Reduce embedding dimensions - - Process fewer documents at once - -2. **Database optimization**: - ```bash - # Check database performance - docker compose logs surrealdb - - # Consider using RocksDB for better performance - # (Already configured in docker-compose.yml) - ``` - -3. **Reduce context size**: - - Limit search results - - Use shorter prompts - - Reduce notebook size - -4. **Use faster models**: - - gpt-5-mini instead of gpt-5 - - claude-3-haiku instead of claude-3-opus - - Local models for simple tasks - -### High Resource Usage - -**Problem**: Application consuming too much CPU or memory. - -**Symptoms**: -- High CPU usage in task manager -- System becoming unresponsive -- Docker containers using excessive resources - -**Solutions**: - -1. **Set resource limits**: - ```yaml - # In docker-compose.yml - services: - open_notebook: - deploy: - resources: - limits: - memory: 2G - cpus: "1.0" - ``` - -2.
**Monitor and identify bottlenecks**: - ```bash - # Check which service is consuming resources - docker stats - - # Check system processes - htop - ``` - -3. **Optimize processing**: - - Process documents in batches - - Use background jobs for heavy tasks - - Limit concurrent operations - -## Configuration Problems - -### Environment Variables Not Loading - -**Problem**: Environment variables are not being read correctly. - -**Symptoms**: -- Default values being used instead of configured values -- API keys not recognized -- Connection errors to external services - -**Solutions**: - -1. **Check file names**: - ```bash - # For source installation - ls -la .env - - # For Docker - ls -la docker.env - ``` - -2. **Verify file format**: - ```bash - # Check for invisible characters - cat -A .env - - # Ensure no spaces around equals - OPENAI_API_KEY=value # Correct - OPENAI_API_KEY = value # Incorrect - ``` - -3. **Check environment loading**: - ```bash - # Test environment variable - echo $OPENAI_API_KEY - - # For Docker - docker compose config - ``` - -4. **Restart services after changes**: - ```bash - # For Docker - docker compose down - docker compose up -d - - # For source installation - make stop-all - make start-all - ``` - -### Model Configuration Issues - -**Problem**: AI models not working or configured incorrectly. - -**Symptoms**: -- Model not found errors -- Incorrect responses -- Configuration not saving - -**Solutions**: - -1. **Check model names**: - ```bash - # Use exact model names from provider documentation - # OpenAI: gpt-5-mini, gpt-5, text-embedding-3-small - # Anthropic: claude-3-haiku-20240307, claude-3-sonnet-20240229 - ``` - -2. **Verify provider configuration**: - - Check API keys are valid - - Ensure models are available for your account - - Test with simple requests first - -3. **Reset model configuration**: - - Go to Models - - Clear all configurations - - Reconfigure with known working models - -4. **Check provider status**: - - Visit provider status pages - - Check for service outages - - Try alternative providers - -### Database Schema Issues - -**Problem**: Database schema conflicts or migration issues. - -**Symptoms**: -- Field validation errors -- Query failures -- Data not saving correctly - -**Solutions**: - -1. **Check database logs**: - ```bash - docker compose logs surrealdb - ``` - -2. **Reset database (WARNING: This deletes all data)**: - ```bash - # Stop services - make stop-all - - # Remove database files - rm -rf surreal_data/ - - # Restart services (will recreate database) - make start-all - ``` - -3. **Manual schema update**: - ```bash - # Run migrations - uv run python -m open_notebook.database.async_migrate - ``` - -4. **Check SurrealDB version**: - ```bash - # Ensure using compatible version - docker compose pull surrealdb - docker compose up -d - ``` - -## Getting Help - -If you've tried the solutions above and are still experiencing issues: - -1. **Collect diagnostic information**: - ```bash - # System information - uname -a - docker version - docker compose version - - # Service status - make status - - # Recent logs - docker compose logs --tail=100 > logs.txt - ``` - -2. **Create a minimal reproduction**: - - Start with a fresh installation - - Use minimal configuration - - Document exact steps to reproduce - -3. 
**Ask for help**: - - Discord: https://discord.gg/37XJPXfz2w - - GitHub Issues: https://github.com/lfnovo/open-notebook/issues - - Include all diagnostic information - -Remember to remove API keys and sensitive information before sharing logs or configuration files. \ No newline at end of file diff --git a/docs/troubleshooting/debugging.md b/docs/troubleshooting/debugging.md deleted file mode 100644 index cea48afc..00000000 --- a/docs/troubleshooting/debugging.md +++ /dev/null @@ -1,662 +0,0 @@ -# Debugging and Diagnostics - -This guide provides comprehensive debugging techniques, log analysis methods, and performance profiling tools for Open Notebook. - -## Log Analysis - -### Understanding Log Levels - -Open Notebook uses structured logging with the following levels: -- `DEBUG`: Detailed information for debugging -- `INFO`: General information about system operation -- `WARNING`: Potentially problematic situations -- `ERROR`: Error events that might still allow the application to continue -- `CRITICAL`: Serious errors that may cause the application to abort - -### Accessing Logs - -#### Docker Deployment -```bash -# View all service logs -docker compose logs - -# Follow logs in real-time -docker compose logs -f - -# View logs for specific service -docker compose logs surrealdb -docker compose logs open_notebook - -# View last 100 lines -docker compose logs --tail=100 - -# View logs with timestamps -docker compose logs -t -``` - -#### Source Installation -```bash -# API logs (if running in background) -tail -f api.log - -# Worker logs -tail -f worker.log - -# Database logs -docker compose logs surrealdb - -# Next.js logs (stdout) -# Run in foreground to see logs directly -``` - -### Log Configuration - -#### Enable Debug Logging -```bash -# Add to .env or docker.env -LOG_LEVEL=DEBUG - -# Restart services -docker compose restart -``` - -#### Custom Log Configuration -```python -# For development, add to your Python code -import logging -logging.basicConfig( - level=logging.DEBUG, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s' -) -``` - -### Common Log Messages - -#### Successful Operations -``` -INFO - Starting Open Notebook services -INFO - Database connection established -INFO - API server started on port 5055 -INFO - React frontend started on port 8502 -INFO - Background worker started -INFO - Model configuration loaded -INFO - Source processed successfully -``` - -#### Warning Messages -``` -WARNING - Rate limit approaching for provider: openai -WARNING - Large file upload detected: 50MB -WARNING - Model response truncated due to length -WARNING - Database connection retrying -WARNING - Cache miss for embedding -``` - -#### Error Messages -``` -ERROR - Failed to connect to database: Connection refused -ERROR - API key invalid for provider: openai -ERROR - Model not found: gpt-4-invalid -ERROR - File processing failed: Unsupported format -ERROR - Background job failed: Timeout -ERROR - Memory limit exceeded -``` - -## Error Diagnosis - -### Database Connection Errors - -#### Symptoms -``` -ERROR - Database connection failed -ERROR - Connection refused at localhost:8000 -ERROR - Authentication failed for SurrealDB -``` - -#### Diagnosis Steps -1. **Check SurrealDB status**: - ```bash - docker compose ps surrealdb - ``` - -2. **Verify connection settings**: - ```bash - # Check environment variables - echo $SURREAL_URL - echo $SURREAL_USER - echo $SURREAL_PASSWORD - ``` - -3. **Test direct connection**: - ```bash - curl http://localhost:8000/health - ``` - -4. 
**Check database logs**:
-   ```bash
-   docker compose logs surrealdb
-   ```
-
-#### Common Solutions
-- Restart the SurrealDB container
-- Check port availability
-- Verify credentials
-- Check file permissions for the data directory
-
-### AI Provider Errors
-
-#### API Key Issues
-```
-ERROR - Invalid API key for provider: openai
-ERROR - Authentication failed: API key not found
-ERROR - Insufficient credits for provider: anthropic
-```
-
-**Diagnosis**:
-```bash
-# Test OpenAI key
-curl -H "Authorization: Bearer $OPENAI_API_KEY" \
-  https://api.openai.com/v1/models
-
-# Test Anthropic key (Anthropic requires the anthropic-version header)
-curl -H "x-api-key: $ANTHROPIC_API_KEY" \
-  -H "anthropic-version: 2023-06-01" \
-  https://api.anthropic.com/v1/models
-```
-
-#### Model Not Found
-```
-ERROR - Model not found: gpt-4-invalid
-ERROR - Model not available for your account
-```
-
-**Diagnosis**:
-- Check model name spelling
-- Verify model availability for your account
-- Check provider documentation for exact model names
-
-#### Rate Limiting
-```
-ERROR - Rate limit exceeded for provider: openai
-ERROR - Too many requests, please retry later
-```
-
-**Diagnosis**:
-- Check rate limits in your provider dashboard
-- Monitor request frequency
-- Implement retry logic with exponential backoff
-
-### File Processing Errors
-
-#### Upload Issues
-```
-ERROR - File upload failed: File too large
-ERROR - Unsupported file type: .xyz
-ERROR - File processing timeout
-```
-
-**Diagnosis**:
-1. **Check file size**:
-   ```bash
-   ls -lh /path/to/file
-   ```
-
-2. **Verify file type**:
-   ```bash
-   file /path/to/file
-   ```
-
-3. **Test with a smaller file**:
-   - Use a minimal test file
-   - Gradually increase complexity
-
-#### Processing Failures
-```
-ERROR - PDF extraction failed: Encrypted file
-ERROR - Audio transcription failed: Unsupported codec
-ERROR - Image OCR failed: Invalid image format
-```
-
-**Diagnosis**:
-- Check file integrity
-- Verify file format compliance
-- Test with known good files
-
-### Memory and Performance Issues
-
-#### Out of Memory
-```
-ERROR - Out of memory: Cannot allocate
-ERROR - Process killed due to memory limit
-ERROR - Docker container OOMKilled
-```
-
-**Diagnosis**:
-```bash
-# Check memory usage
-docker stats
-
-# Check system memory
-free -h
-
-# Check Docker memory limits
-docker system info | grep Memory
-```
-
-#### Performance Degradation
-```
-WARNING - Response time exceeded threshold: 30s
-WARNING - High CPU usage detected: 95%
-WARNING - Database query slow: 5s
-```
-
-**Diagnosis**:
-```bash
-# Monitor resources
-htop
-iostat -x 1
-
-# Check database performance
-docker compose logs surrealdb | grep -i slow
-```
-
-## Performance Profiling
-
-### System Resource Monitoring
-
-#### Real-time Monitoring
-```bash
-# Docker container resources
-docker stats
-
-# System resources
-htop
-
-# Disk I/O
-iostat -x 1
-
-# Network usage
-nethogs
-```
-
-#### Historical Analysis
-```bash
-# Container resource history
-docker logs --since="1h" container_name | grep -i memory
-
-# System logs
-journalctl -u docker --since="1 hour ago"
-```
-
-### Application Performance
-
-#### Response Time Analysis
-```bash
-# Measure API response times
-time curl http://localhost:5055/api/notebooks
-
-# Measure with verbose output
-curl -w "@curl-format.txt" http://localhost:5055/api/notebooks
-```
-
-Create `curl-format.txt`:
-```
-    time_namelookup:  %{time_namelookup}\n
-       time_connect:  %{time_connect}\n
-    time_appconnect:  %{time_appconnect}\n
-   time_pretransfer:  %{time_pretransfer}\n
-      time_redirect:  %{time_redirect}\n
- time_starttransfer:  %{time_starttransfer}\n
-                    ----------\n
-         time_total:  %{time_total}\n
-```
-
-#### Database Performance
-```bash
-# Check database query performance
-docker compose logs surrealdb | grep -i "slow\|performance\|query"
-
-# Monitor database connections (requires ps inside the container;
-# minimal images may not include it)
-docker compose exec surrealdb ps aux
-```
-
-#### Memory Profiling
-```python
-# Add to Python code for memory profiling
-import tracemalloc
-tracemalloc.start()
-
-# Your code here
-
-current, peak = tracemalloc.get_traced_memory()
-print(f"Current memory usage: {current / 1024 / 1024:.1f} MB")
-print(f"Peak memory usage: {peak / 1024 / 1024:.1f} MB")
-tracemalloc.stop()
-```
-
-### AI Provider Performance
-
-#### Response Time Monitoring
-```bash
-# Monitor AI provider response times
-grep -r "provider.*response_time" logs/
-
-# Check for timeouts
-grep -r "timeout\|Timeout" logs/
-```
-
-#### Usage Analytics
-```python
-# Track AI usage patterns
-# Add to your monitoring code
-import time
-start_time = time.time()
-
-# AI API call here
-
-end_time = time.time()
-response_time = end_time - start_time
-print(f"AI response time: {response_time:.2f}s")
-```
-
-## Support Information Gathering
-
-### System Information Collection
-
-#### Basic System Info
-```bash
-# System details
-uname -a
-lsb_release -a # Linux
-sw_vers # macOS
-
-# Docker information
-docker version
-docker compose version
-docker system info
-```
-
-#### Open Notebook Information
-```bash
-# Version information
-grep version pyproject.toml
-
-# Service status
-make status
-
-# Environment check (without sensitive info)
-env | grep -E "(SURREAL|LOG|DEBUG)" | grep -v "PASSWORD\|KEY"
-```
-
-### Log Collection for Support
-
-#### Comprehensive Log Collection
-```bash
-#!/bin/bash
-# collect_logs.sh - run from the project root
-
-echo "Collecting Open Notebook diagnostic information..."
-
-# Compute the directory name once so repeated date calls
-# cannot produce different names
-OUT="diagnostic_$(date +%Y%m%d_%H%M%S)"
-mkdir -p "$OUT"
-
-# System information
-echo "Collecting system information..."
-uname -a > "$OUT/system_info.txt"
-docker version >> "$OUT/system_info.txt"
-docker compose version >> "$OUT/system_info.txt"
-
-# Service status (stay in the project root so make and
-# docker compose can find their files)
-echo "Collecting service status..."
-make status > "$OUT/service_status.txt"
-docker compose ps >> "$OUT/service_status.txt"
-
-# Logs
-echo "Collecting logs..."
-docker compose logs --tail=500 > "$OUT/docker_logs.txt"
-docker compose logs surrealdb --tail=200 > "$OUT/surrealdb_logs.txt"
-
-# Configuration (sanitized)
-echo "Collecting configuration..."
-env | grep -E "(SURREAL|LOG|DEBUG)" | grep -v "PASSWORD\|KEY" > "$OUT/environment.txt"
-
-# Resource usage
-echo "Collecting resource information..."
-docker stats --no-stream > "$OUT/resource_usage.txt"
-df -h > "$OUT/disk_usage.txt"
-free -h > "$OUT/memory_info.txt"
-
-echo "Diagnostic collection complete!"
-echo "Please compress and share the $OUT directory"
-```
-
-#### Sanitizing Logs
-```bash
-# Remove sensitive information from logs
-sed -i 's/sk-[a-zA-Z0-9]*/[REDACTED_API_KEY]/g' logs.txt
-sed -i 's/password=[^[:space:]]*/password=[REDACTED]/g' logs.txt
-```
-
-### Creating Reproduction Cases
-
-#### Minimal Reproduction
-1. **Start with a clean environment**:
-   ```bash
-   # Fresh installation
-   rm -rf surreal_data/ notebook_data/
-   docker compose down
-   docker compose up -d
-   ```
-
-2. **Document exact steps**:
-   - Each click or command
-   - Exact file used
-   - Configuration settings
-   - Expected vs actual behavior
-
-3. **Capture evidence**:
-   - Screenshots of errors
-   - Full error messages
-   - Log excerpts
-   - System state
-
-#### Test Case Template
-```markdown
-## Bug Report
-
-### Environment
-- OS: [e.g., Ubuntu 22.04]
-- Docker version: [e.g., 24.0.7]
-- Open Notebook version: [e.g., 1.0.0]
-- Installation method: [Docker/Source]
-
-### Steps to Reproduce
-1. Start Open Notebook
-2. Create new notebook named "Test"
-3. Add text source: "Hello world"
-4. Navigate to Chat
-5. Ask: "What is this about?"
-
-### Expected Behavior
-Should receive a response about the text content
-
-### Actual Behavior
-Error: "Model not found"
-
-### Logs
-
-    ERROR - Model not found: gpt-4-invalid
-
-### Additional Context
-- Using OpenAI provider
-- gpt-5-mini model configured
-- First time setup
-```
-
-## Advanced Debugging
-
-### Database Debugging
-
-#### Direct Database Access
-```bash
-# Connect to SurrealDB directly
-docker compose exec surrealdb /surreal sql \
-  --conn http://localhost:8000 \
-  --user root \
-  --pass root \
-  --ns open_notebook \
-  --db production
-```
-
-#### Query Analysis
-```sql
---- Check table contents
-SELECT * FROM notebook LIMIT 10;
-
---- Check relationships
-SELECT * FROM source WHERE notebook_id = notebook:abc123;
-
---- Performance analysis
-SELECT notebook_id, count() FROM source GROUP BY notebook_id;
-```
-
-### Network Debugging
-
-#### Service Communication
-```bash
-# Test internal Docker network
-docker compose exec open_notebook ping surrealdb
-
-# Test external connectivity
-docker compose exec open_notebook curl -I https://api.openai.com
-
-# Check port bindings
-netstat -tulpn | grep -E "(8000|5055|8502)"
-```
-
-#### DNS Resolution
-```bash
-# Check DNS from container
-docker compose exec open_notebook nslookup api.openai.com
-
-# Check /etc/hosts
-docker compose exec open_notebook cat /etc/hosts
-```
-
-### Performance Debugging
-
-#### CPU Profiling
-```python
-# Add to Python code
-import cProfile
-import pstats
-
-# Profile your function
-cProfile.run('your_function()', 'profile_stats')
-
-# Analyze results
-p = pstats.Stats('profile_stats')
-p.sort_stats('cumulative').print_stats(10)
-```
-
-#### Memory Leak Detection
-```python
-# Track memory usage over time
-import psutil
-import os
-
-def log_memory_usage():
-    process = psutil.Process(os.getpid())
-    memory_mb = process.memory_info().rss / 1024 / 1024
-    print(f"Memory usage: {memory_mb:.1f} MB")
-
-# Call periodically
-log_memory_usage()
-```
-
-## Monitoring and Alerting
-
-### Health Checks
-
-#### Service Health Endpoints
-```bash
-# Check all health endpoints
-curl -f http://localhost:8000/health   # SurrealDB
-curl -f http://localhost:5055/health   # API
-curl -f http://localhost:8502/healthz  # Next.js
-```
-
-#### Automated Health Monitoring
-```bash
-#!/bin/bash
-# health_check.sh
-
-# Each service has its own path: the UI uses /healthz, while the
-# API and database use /health
-endpoints="8000:/health 5055:/health 8502:/healthz"
-for entry in $endpoints; do
-  port="${entry%%:*}"
-  path="${entry#*:}"
-  if curl -fs "http://localhost:${port}${path}" >/dev/null 2>&1; then
-    echo "✅ Service on port $port is healthy"
-  else
-    echo "❌ Service on port $port is unhealthy"
-  fi
-done
-```
-
-### Log Monitoring
-
-#### Real-time Error Monitoring
-```bash
-# Monitor for errors in real-time
-docker compose logs -f | grep -i error
-
-# Monitor specific patterns
-docker compose logs -f | grep -E "(ERROR|CRITICAL|timeout)"
-```
-
-#### Log Analysis Scripts
-```bash
-#!/bin/bash
-# analyze_logs.sh
-
-echo "Error Summary:"
-docker compose logs --since="1h" | grep -c "ERROR"
-
-echo "Top Error Messages:"
-docker compose logs --since="1h" | grep "ERROR" | \
-  cut -d':' -f4- | sort | uniq -c |
sort -nr | head -10 - -echo "Provider Issues:" -docker compose logs --since="1h" | grep -i "provider.*error" -``` - -## Best Practices for Debugging - -### Systematic Approach -1. **Reproduce the issue** consistently -2. **Isolate the problem** to specific components -3. **Check recent changes** that might have caused issues -4. **Gather evidence** through logs and monitoring -5. **Test hypotheses** systematically -6. **Document findings** for future reference - -### Debugging Tools Checklist -- [ ] System resource monitoring (htop, docker stats) -- [ ] Log aggregation and analysis -- [ ] Network connectivity testing -- [ ] Database query analysis -- [ ] API response time measurement -- [ ] Memory usage tracking -- [ ] Error rate monitoring - -### When to Seek Help -- Issue persists after following troubleshooting guides -- Problem affects multiple users or systems -- Security-related concerns -- Performance degradation without clear cause -- Data integrity issues - ---- - -*This debugging guide is continuously updated based on real-world troubleshooting experiences. For additional support, join our Discord community or create a GitHub issue with your diagnostic information.* \ No newline at end of file diff --git a/docs/troubleshooting/faq.md b/docs/troubleshooting/faq.md deleted file mode 100644 index 1a7ae80e..00000000 --- a/docs/troubleshooting/faq.md +++ /dev/null @@ -1,407 +0,0 @@ -# Frequently Asked Questions - -This document addresses common questions about Open Notebook usage, configuration, and best practices. - -## General Usage - -### What is Open Notebook? - -Open Notebook is an open-source, privacy-focused alternative to Google's Notebook LM. It allows you to: -- Create and manage research notebooks -- Chat with your documents using AI -- Generate podcasts from your content -- Search across all your sources with semantic search -- Transform and analyze your content - -### How is Open Notebook different from Google Notebook LM? - -**Privacy**: Your data stays local by default. Only your chosen AI providers receive queries. -**Flexibility**: Support for 15+ AI providers (OpenAI, Anthropic, Google, local models, etc.) -**Customization**: Open source, so you can modify and extend functionality -**Control**: You control your data, models, and processing - -### Do I need technical skills to use Open Notebook? - -**Basic usage**: No technical skills required. The Docker installation is designed for non-technical users. -**Advanced features**: Some technical knowledge helpful for: -- Custom model configurations -- API integrations -- Source code modifications - -### Can I use Open Notebook offline? - -**Partially**: The application runs locally, but requires internet for: -- AI model API calls (unless using local models like Ollama) -- Web content scraping -- Some file processing features - -**Fully offline**: Possible with local models (Ollama) for basic functionality. - -### What file types does Open Notebook support? - -**Documents**: -- PDF (text extraction) -- Microsoft Word (DOC, DOCX) -- Plain text (TXT) -- Markdown (MD) - -**Web Content**: -- URLs (automatic web scraping) -- YouTube videos (transcript extraction) -- Web articles and blog posts - -**Media**: -- Images (PNG, JPG, GIF, WebP) with OCR -- Audio files (MP3, WAV, M4A) with transcription -- Video files (MP4, AVI, MOV) for transcript extraction - -**Other**: -- Direct text input -- CSV data -- Code files (with syntax highlighting) - -### How much does it cost to run Open Notebook? 
- -**Software**: Free (open source) -**AI API costs**: Pay-per-use to providers: -- OpenAI: ~$0.50-5 per 1M tokens depending on model -- Anthropic: ~$3-75 per 1M tokens depending on model -- Google: Often free tier available -- Local models: Free after initial setup - -**Typical monthly costs**: $5-50 for moderate usage, depending on chosen models. - -## AI Models and Providers - -### Which AI provider should I choose? - -**For beginners**: OpenAI (reliable, well-documented, good balance of cost/quality) -**For advanced users**: Mix of providers based on specific needs -**For privacy**: Local models (Ollama) or European providers (Mistral) -**For cost optimization**: DeepSeek, Google (free tier), or OpenRouter - -### Can I use multiple AI providers simultaneously? - -**Yes**: Open Notebook supports multiple providers. You can configure different providers for different tasks: -- OpenAI for chat -- Google for embeddings -- ElevenLabs for text-to-speech -- Anthropic for complex reasoning - -### What are the best model combinations? - -**Budget-friendly**: -- Language: `gpt-5-mini` (OpenAI) or `deepseek-chat` (DeepSeek) -- Embedding: `text-embedding-3-small` (OpenAI) -- TTS: `gpt-4o-mini-tts` (OpenAI) - -**High-quality**: -- Language: `claude-3-7-sonnet` (Anthropic) or `gpt-4o` (OpenAI) -- Embedding: `text-embedding-3-large` (OpenAI) -- TTS: `eleven_turbo_v2_5` (ElevenLabs) - -**Privacy-focused**: -- Language: Local Ollama models -- Embedding: Local embedding models -- TTS: Local TTS solutions - -### How do I set up local models with Ollama? - -1. **Install Ollama**: Download from https://ollama.ai -2. **Start Ollama**: `ollama serve` -3. **Download models**: `ollama pull llama2` -4. **Configure Open Notebook**: - ```env - OLLAMA_API_BASE=http://localhost:11434 - ``` -5. **Select models**: In Models, choose Ollama models - -### Why are my AI requests failing? - -**Common causes**: -- Invalid API keys -- Insufficient credits/billing -- Model not available for your account -- Rate limiting -- Network connectivity issues - -**Solutions**: -1. Verify API keys in provider dashboard -2. Check billing and usage limits -3. Try different models -4. Wait and retry for rate limits -5. Check internet connection - -### How do I optimize AI costs? - -**Model selection**: -- Use smaller models for simple tasks -- Use larger models only for complex reasoning -- Leverage free tiers when available - -**Usage optimization**: -- Process documents in batches -- Use shorter prompts -- Cache results when possible -- Use local models for frequent tasks - -**Provider diversity**: -- Use OpenRouter for expensive models -- Use free tier providers for testing -- Mix providers based on strength - -## Data Management - -### Where is my data stored? - -**Local storage**: By default, all data is stored locally: -- Database: SurrealDB files in `surreal_data/` -- Uploads: Files in `notebook_data/` -- No external data transmission (except to chosen AI providers) - -**Cloud storage**: Not implemented, but can be configured with external storage solutions. - -### How do I backup my data? - -**Manual backup**: -```bash -# Create backup -tar -czf backup-$(date +%Y%m%d).tar.gz notebook_data/ surreal_data/ - -# Restore backup -tar -xzf backup-20240101.tar.gz -``` - -**Automated backup**: Set up cron jobs or use your preferred backup solution to backup the data directories. - -### Can I sync data between devices? - -**Currently**: No built-in sync functionality. 
-**Workarounds**: -- Use shared network storage for data directories -- Manual backup/restore between devices -- Database replication (advanced) - -### How do I migrate data between installations? - -1. **Stop services**: `make stop-all` -2. **Copy data directories**: - ```bash - cp -r surreal_data/ new_installation/ - cp -r notebook_data/ new_installation/ - ``` -3. **Start new installation** -4. **Verify data integrity** - -### What happens to my data if I delete a notebook? - -**Soft deletion**: Notebooks are marked as archived, not permanently deleted. -**Hard deletion**: Currently not implemented in UI, but possible via API. -**Recovery**: Archived notebooks can be restored from the database. - -### How do I clean up old data? - -**Manual cleanup**: -- Delete unused notebooks through UI -- Remove old files from `notebook_data/` -- Clear browser cache - -**Database cleanup**: Advanced users can query the database directly to remove old records. - -## Best Practices - -### How should I organize my notebooks? - -**By topic**: Create separate notebooks for different research areas -**By project**: One notebook per project or course -**By source type**: Separate notebooks for different content types -**By time period**: Monthly or quarterly notebooks - -### What's the optimal notebook size? - -**Recommended**: 20-100 sources per notebook -**Performance**: Larger notebooks may have slower search -**Organization**: Better to have focused notebooks than everything in one - -### How do I get the best search results? - -**Use descriptive queries**: Instead of "data", use "data analysis methods" -**Combine keywords**: Use multiple related terms -**Use natural language**: Ask questions as you would to a human -**Refine iteratively**: Start broad, then get more specific - -### How can I improve chat responses? - -**Provide context**: Reference specific sources or topics -**Be specific**: Ask detailed questions rather than general ones -**Use follow-up questions**: Build on previous responses -**Include examples**: Show what kind of response you want - -### What's the best way to process large documents? - -**Break into sections**: Split large documents into smaller parts -**Use transformations**: Apply summarization before adding to notebook -**Batch processing**: Process multiple documents at once -**Use background jobs**: For heavy processing tasks - -### How do I handle multiple languages? - -**Model selection**: Choose models that support your languages -**Language-specific providers**: Some providers are better for certain languages -**Separate notebooks**: Consider separate notebooks for different languages -**Encoding**: Ensure proper text encoding for non-English content - -### What are the security best practices? - -**API keys**: Never share API keys publicly -**Password protection**: Use strong passwords for public deployments -**Network security**: Use HTTPS for production deployments -**Regular updates**: Keep Docker images updated -**Backup encryption**: Encrypt backups if they contain sensitive data - -### How do I optimize performance? - -**Hardware**: -- Use SSD storage for database -- Allocate sufficient RAM (4GB+ recommended) -- Use fast internet connection - -**Configuration**: -- Choose appropriate models for your needs -- Optimize embedding dimensions -- Use efficient file formats - -**Usage patterns**: -- Process documents in batches -- Use background jobs for heavy tasks -- Clear cache periodically - -## Technical Questions - -### Can I use Open Notebook programmatically? 
- -**Yes**: Open Notebook provides a comprehensive REST API: -- Full API documentation at `/docs` endpoint -- Support for all UI functionality -- Authentication via API keys -- Webhook support for notifications - -### How do I extend Open Notebook? - -**Plugin system**: Add custom transformations and processors -**API integration**: Build custom applications using the API -**Source code**: Modify the open-source codebase -**Custom models**: Add support for new AI providers - -### Can I run Open Notebook in production? - -**Yes**: Designed for production use with: -- Docker deployment -- Horizontal scaling capability -- Security features -- Monitoring and logging - -**Considerations**: -- Use production-grade database settings -- Implement proper backup strategy -- Configure monitoring and alerting -- Use HTTPS and security best practices - -### How do I contribute to Open Notebook? - -**Ways to contribute**: -- Report bugs and issues -- Suggest new features -- Contribute code improvements -- Improve documentation -- Help other users in the community - -**Getting started**: -- Join Discord community -- Check GitHub issues -- Read contribution guidelines -- Start with small improvements - -### What's the development roadmap? - -**Current focus**: -- Stability and performance improvements -- Additional AI provider support -- Enhanced podcast generation -- Better mobile experience - -**Future plans**: -- Multi-user support -- Advanced analytics -- Integration with external tools -- Cloud deployment options - -## Troubleshooting - -### Why do I get timeout errors even though transformations complete successfully? - -**Cause**: The default client timeout (5 minutes) may be too short for slow AI providers or hardware. - -**Quick fix**: -```bash -# Add to your .env file -API_CLIENT_TIMEOUT=600 # 10 minutes for slow hardware -``` - -**When this happens**: -- Using local Ollama models on CPU -- Using remote LM Studio over slow network -- First transformation after starting (model loading) -- Very large documents -- Slower hardware configurations - -**Detailed solutions**: See [Common Issues - API Timeout Errors](./common-issues.md#api-timeout-errors-during-transformations) - -**Note**: If transformations complete after you refresh the page, you only need to increase `API_CLIENT_TIMEOUT`, not `ESPERANTO_LLM_TIMEOUT`. - -### My question isn't answered here. What should I do? - -1. **Check the troubleshooting guide**: [Common Issues](./common-issues.md) -2. **Search existing issues**: GitHub repository issues -3. **Ask the community**: Discord server -4. **Create a GitHub issue**: For bugs or feature requests -5. **Check the documentation**: Other documentation sections - -### How do I report a bug? - -**Include**: -- Steps to reproduce -- Expected vs actual behavior -- Error messages and logs -- System information -- Configuration details (without API keys) - -**Submit to**: GitHub Issues with bug report template - -### How do I request a new feature? - -**Process**: -1. Check if feature already exists or is planned -2. Discuss in Discord to gauge interest -3. Create detailed GitHub issue -4. Consider contributing implementation - -### Where can I get help with installation? 
- -**Resources**: -- [Installation Guide](../getting-started/installation.md) -- [Docker Deployment Guide](../deployment/docker.md) -- [ChatGPT Installation Assistant](https://chatgpt.com/g/g-68776e2765b48191bd1bae3f30212631-open-notebook-installation-assistant) -- Discord community support - -### How do I stay updated with new releases? - -**Methods**: -- Watch GitHub repository -- Join Discord for announcements -- Follow release notes -- Enable automatic Docker updates - ---- - -*This FAQ is updated regularly based on community questions and feedback. If you have a question that's not covered here, please ask in our Discord community or create a GitHub issue.* \ No newline at end of file diff --git a/docs/troubleshooting/index.md b/docs/troubleshooting/index.md deleted file mode 100644 index 54d0fe76..00000000 --- a/docs/troubleshooting/index.md +++ /dev/null @@ -1,160 +0,0 @@ -# Troubleshooting Guide - -Welcome to the Open Notebook troubleshooting guide. This section provides comprehensive solutions for common issues, debugging techniques, and best practices for getting the most out of your Open Notebook installation. - -## 📋 Quick Navigation - -### 🔧 Common Issues -- [Installation Problems](./common-issues.md#installation-problems) -- [Runtime Errors](./common-issues.md#runtime-errors) -- [Performance Issues](./common-issues.md#performance-issues) -- [Configuration Problems](./common-issues.md#configuration-problems) - -### ❓ Frequently Asked Questions -- [General Usage](./faq.md#general-usage) -- [AI Models and Providers](./faq.md#ai-models-and-providers) -- [Data Management](./faq.md#data-management) -- [Best Practices](./faq.md#best-practices) - -### 🐛 Debugging and Analysis -- [Log Analysis](./debugging.md#log-analysis) -- [Error Diagnosis](./debugging.md#error-diagnosis) -- [Performance Profiling](./debugging.md#performance-profiling) -- [Support Information](./debugging.md#support-information) - -## 🚨 Emergency Quick Fixes - -### Service Not Starting -```bash -# Check all services -make status - -# Restart everything -make stop-all -make start-all -``` - -### Database Connection Issues -```bash -# Restart database -docker compose restart surrealdb - -# Check database logs -docker compose logs surrealdb -``` - -### API Errors -```bash -# Check API logs -docker compose logs open_notebook - -# Restart API only -pkill -f "run_api.py" -make api -``` - -### Memory Issues -```bash -# Check resource usage -docker stats - -# Increase Docker memory limit -# Docker Desktop → Settings → Resources → Memory -``` - -## 🔍 First Steps for Any Issue - -1. **Check Service Status** - ```bash - make status - ``` - -2. **Review Recent Logs** - ```bash - docker compose logs --tail=50 -f - ``` - -3. **Verify Configuration** - ```bash - # Check environment variables - cat .env | grep -v "API_KEY" - - # For Docker - cat docker.env | grep -v "API_KEY" - ``` - -4. **Test Basic Connectivity** - ```bash - # Database - curl http://localhost:8000/health - - # API - curl http://localhost:5055/health - - # UI - curl http://localhost:8502/healthz - ``` - -## 📞 Getting Help - -### Community Support -- **Discord**: [https://discord.gg/37XJPXfz2w](https://discord.gg/37XJPXfz2w) -- **GitHub Issues**: [https://github.com/lfnovo/open-notebook/issues](https://github.com/lfnovo/open-notebook/issues) -- **Installation Assistant**: [ChatGPT Assistant](https://chatgpt.com/g/g-68776e2765b48191bd1bae3f30212631-open-notebook-installation-assistant) - -### Before Asking for Help -1. Check this troubleshooting guide -2. 
Search existing GitHub issues -3. Collect relevant logs and error messages -4. Note your installation method and environment -5. Include steps to reproduce the issue - -### Information to Include -- Installation method (Docker/source) -- Operating system and version -- Error messages and logs -- Configuration (without API keys) -- Steps to reproduce the issue - -## 🛠️ Advanced Troubleshooting - -For complex issues that aren't covered in the basic guides: - -1. **Enable Debug Logging** - ```bash - # Add to .env or docker.env - LOG_LEVEL=DEBUG - ``` - -2. **Use Development Mode** - ```bash - # For more detailed error information - make dev - ``` - -3. **Check System Resources** - ```bash - # Monitor resource usage - htop - docker stats - ``` - -4. **Test Individual Components** - ```bash - # Test database connection - uv run python -c "from open_notebook.database.repository import repo_query; import asyncio; print(asyncio.run(repo_query('SELECT * FROM system')))" - - # Test AI providers - uv run python -c "from esperanto import AIFactory; model = AIFactory.create_language('openai', 'gpt-5-mini'); print(model.chat_complete([{'role': 'user', 'content': 'Hello'}]))" - ``` - -## 📚 Related Documentation - -- [Installation Guide](../getting-started/installation.md) -- [Docker Deployment](../deployment/docker.md) -- [Architecture Overview](../development/architecture.md) -- [API Reference](../development/api-reference.md) - ---- - -*This troubleshooting guide is continuously updated based on user feedback and common issues. If you encounter a problem not covered here, please contribute by opening an issue on GitHub.* \ No newline at end of file diff --git a/docs/troubleshooting/quick-fixes.md b/docs/troubleshooting/quick-fixes.md deleted file mode 100644 index f5ca5718..00000000 --- a/docs/troubleshooting/quick-fixes.md +++ /dev/null @@ -1,433 +0,0 @@ -# 5-Minute Troubleshooting - -**Problem:** Something isn't working? Let's fix it fast. - -## Start Here: What's Your Issue? - -Click your problem: - -### 1. ["Unable to connect to server" or blank page](#fix-connection-error) -### 2. [Container won't start or crashes](#fix-container-crash) -### 3. [Quotes in environment variables](#fix-quotes-in-env) -### 4. [SurrealDB configuration issues](#fix-surrealdb-config) -### 5. [Network timeouts (slow connections / China)](#fix-network-timeouts) -### 6. [Works on server but not from my computer](#fix-remote-access) -### 7. [API or authentication errors](#fix-api-errors) -### 8. [Slow or timeout errors](#fix-performance) - ---- - - -## Fix: "Unable to connect to server" - -This means your frontend can't reach the API. **99% of the time, this is an API_URL problem.** - -### Step-by-Step Fix: - -1. **Check if API is running:** - ```bash - curl http://localhost:5055/health - # Should return: {"status": "healthy"} or similar - ``` - - - ❌ **Connection refused?** → Port 5055 is not exposed. [Jump to port fix](#fix-missing-port) - - ✅ **Got response?** → API is running, continue below. - -2. **Are you accessing from a different machine?** - - - Your browser on **Computer A** - - Docker running on **Computer B** (server, Raspberry Pi, NAS, etc.) 
-
-   → **You MUST set API_URL**
-
-   Find your server's IP:
-   ```bash
-   # On the server running Docker:
-   hostname -I # Linux
-   ipconfig    # Windows
-   ifconfig    # Mac
-   ```
-
-   Set API_URL (replace 192.168.1.100 with YOUR server IP):
-
-   **Docker Compose** - Add to your `docker-compose.yml`:
-   ```yaml
-   environment:
-     - OPENAI_API_KEY=your_key
-     - API_URL=http://192.168.1.100:5055
-   ```
-
-   **Docker Run** - Add this flag:
-   ```bash
-   -e API_URL=http://192.168.1.100:5055
-   ```
-
-   Then restart:
-   ```bash
-   docker compose down && docker compose up -d
-   # or for docker run, stop and restart the container
-   ```
-
-3. **Still not working?**
-
-   Check what URL your browser is trying to access:
-   - Open browser DevTools (F12)
-   - Go to Network tab
-   - Refresh the page
-   - Look for failed requests to `/api/config`
-
-   The URL should match: `http://YOUR_SERVER_IP:5055/api/config`
-
-   If it shows `localhost:5055` or a wrong IP, your API_URL is not set correctly.
-
-
-### Fix: Port 5055 Not Exposed
-
-**Check currently exposed ports:**
-```bash
-docker ps
-# Look for: 0.0.0.0:5055->5055
-```
-
-**Not there?** Add it:
-
-**Docker Compose** - Update your `docker-compose.yml`:
-```yaml
-services:
-  open_notebook:
-    ports:
-      - "8502:8502"
-      - "5055:5055"  # Add this line!
-```
-
-**Docker Run** - Add `-p 5055:5055` (no inline comment after the flag; a comment
-following the trailing backslash would break the command):
-```bash
-docker run -d \
-  -p 8502:8502 \
-  -p 5055:5055 \
-  # ... rest of your command
-```
-
-Then restart the container.
-
----
-
-
-## Fix: Container Won't Start
-
-**Check the logs:**
-```bash
-docker logs open-notebook
-# or
-docker compose logs
-```
-
-### Common causes:
-
-| Error Message | Fix |
-|---------------|-----|
-| "Port already in use" | Change port: `-p 8503:8502` or stop the conflicting service |
-| "Permission denied" | Add user to docker group: `sudo usermod -aG docker $USER` (then log out/in) |
-| "Invalid API key" | Check OPENAI_API_KEY in environment variables |
-| "Out of memory" | Increase Docker memory limit to 2GB+ in Docker Desktop settings |
-| "No such file or directory" | Check volume paths exist and are accessible |
-| "'' is not a valid UrlScheme" | [Remove quotes from environment variables](#fix-quotes-in-env) |
-| "There was a problem with authentication" | [Check SurrealDB configuration](#fix-surrealdb-config) |
-| Worker/API crashes on startup | [Check network timeouts](#fix-network-timeouts) |
-
-**Quick reset:**
-```bash
-docker compose down -v
-docker compose up -d
-```
-
-
-### Fix: Quotes in Environment Variables
-
-**Symptom:** Error `'' is not a valid UrlScheme` or the database connection fails with an empty URL.
-
-**Cause:** Docker Compose interprets quotes literally. If you have quotes around values in your `docker-compose.yml` or `.env` file, they become part of the value.
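-
-One quick way to confirm this is to print the environment exactly as the container sees it (a simple sanity check; it assumes your compose service is named `open_notebook`, as in the examples in this guide):
-
-```bash
-# Literal quote characters in this output are part of the value
-docker compose exec open_notebook env | grep SURREAL
-```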
- -❌ **Wrong** (quotes become part of the value): -```yaml -environment: - - SURREAL_URL="ws://localhost:8000/rpc" - - SURREAL_USER="root" -``` - -❌ **Also wrong** in `.env` files: -```env -SURREAL_URL="ws://localhost:8000/rpc" -SURREAL_USER="root" -``` - -✅ **Correct** (no quotes): -```yaml -environment: - - SURREAL_URL=ws://localhost:8000/rpc - - SURREAL_USER=root - - SURREAL_PASSWORD=root - - SURREAL_NAMESPACE=open_notebook - - SURREAL_DATABASE=production -``` - -✅ **Correct** `.env` file: -```env -SURREAL_URL=ws://localhost:8000/rpc -SURREAL_USER=root -``` - -After fixing, restart: -```bash -docker compose down && docker compose up -d -``` - - -### Fix: SurrealDB Configuration Issues - -#### Single Container Already Has SurrealDB - -**Symptom:** Authentication errors or connection issues when using `v1-latest-single` with an external SurrealDB. - -**Cause:** The `-single` image already includes SurrealDB. You don't need to run a separate SurrealDB container. - -❌ **Wrong** - running separate SurrealDB with single container: -```yaml -services: - surrealdb: - image: surrealdb/surrealdb:latest # Not needed! - - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - environment: - - SURREAL_URL=ws://surrealdb:8000/rpc # Wrong! -``` - -✅ **Correct** - single container uses built-in SurrealDB: -```yaml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - environment: - - SURREAL_URL=ws://localhost:8000/rpc # Uses internal DB -``` - -**If you want a separate SurrealDB**, use the `v1-latest` image (without `-single`) instead. - -#### SurrealDB Version Compatibility - -**Symptom:** Various database errors, authentication failures, or unexpected behavior. - -**Cause:** Open Notebook currently supports **SurrealDB v2.x only**. SurrealDB v3 (alpha) is not yet supported. - -✅ **Supported versions:** -```yaml -# Use v2.x -image: surrealdb/surrealdb:v2.1.4 -image: surrealdb/surrealdb:v2 # Latest v2 -``` - -❌ **Not supported yet:** -```yaml -# Don't use v3 alpha -image: surrealdb/surrealdb:v3.0.0-alpha.17 -``` - - -### Fix: Network Timeouts (Slow Connections / China) - -**Symptom:** Container crashes on startup with `exit status 1`, worker enters FATAL state, or pip/uv dependency downloads fail. - -**Cause:** The container downloads Python dependencies on first startup. Slow networks or restricted access (especially in China) can cause timeouts. - -✅ **Fix:** Add timeout and mirror configuration: - -```yaml -services: - open_notebook: - image: lfnovo/open_notebook:v1-latest-single - environment: - # Increase download timeout to 10 minutes (default is 30s) - - UV_HTTP_TIMEOUT=600 - - # For users in China - use mirror - - UV_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple - - PIP_INDEX_URL=https://pypi.tuna.tsinghua.edu.cn/simple -``` - -**Alternative mirrors for China:** -- Tsinghua: `https://pypi.tuna.tsinghua.edu.cn/simple` -- Aliyun: `https://mirrors.aliyun.com/pypi/simple/` -- Huawei: `https://repo.huaweicloud.com/repository/pypi/simple` - -**Note:** First startup may take several minutes while dependencies are downloaded. Subsequent startups will be faster. - ---- - - -## Fix: Works on Server But Not From My Computer - -**Symptom:** Open Notebook works when accessed on the server itself (`localhost:8502`) but not from another computer. - -**This is 100% an API_URL problem.** - -✅ **The Fix:** - -Your API_URL must match the URL you use to access Open Notebook. 
- -| You access via | Set API_URL to | -|----------------|----------------| -| `http://192.168.1.50:8502` | `http://192.168.1.50:5055` | -| `http://myserver:8502` | `http://myserver:5055` | -| `http://10.0.0.5:8502` | `http://10.0.0.5:5055` | - -**Apply the fix:** - -1. Edit your `docker-compose.yml`: - ```yaml - environment: - - OPENAI_API_KEY=your_key - - API_URL=http://YOUR_SERVER_IP_OR_HOSTNAME:5055 - ``` - -2. Or edit your `docker.env` file: - ```env - API_URL=http://YOUR_SERVER_IP_OR_HOSTNAME:5055 - ``` - -3. Restart: - ```bash - docker compose down && docker compose up -d - ``` - -**Common mistakes:** -- ❌ Using `localhost` in API_URL when accessing remotely -- ❌ Using your client computer's IP instead of the server's IP -- ❌ Adding `/api` to the end (it's automatic) - ---- - - -## Fix: API or Authentication Errors - -### "Missing authorization header" - -You have password auth enabled. Make sure it's set correctly: - -```yaml -environment: - - OPEN_NOTEBOOK_PASSWORD=your_password -``` - -Or provide the password when logging into the web interface. - -### "API config endpoint returned status 404" - -You added `/api` to API_URL. Remove it: - -❌ **Wrong:** `API_URL=http://192.168.1.100:5055/api` - -✅ **Correct:** `API_URL=http://192.168.1.100:5055` - -The `/api` path is added automatically by the application. - -### "Invalid API key" or "Incorrect API key" - -1. Check key format (OpenAI keys start with `sk-`) -2. Verify you have credits in your provider account -3. Check for spaces around the key in your .env file: - ```env - # Wrong - has spaces - OPENAI_API_KEY = sk-your-key - - # Correct - OPENAI_API_KEY=sk-your-key - ``` -4. Test your key directly: - ```bash - curl https://api.openai.com/v1/models \ - -H "Authorization: Bearer YOUR_KEY" - ``` - ---- - - -## Fix: Slow or Timeout Errors - -### Increase timeouts for local models: - -If you're using Ollama or LM Studio: - -```yaml -environment: - - API_CLIENT_TIMEOUT=600 # 10 minutes - - ESPERANTO_LLM_TIMEOUT=180 # 3 minutes -``` - -**Recommended timeouts by setup:** -- Cloud APIs (OpenAI, Anthropic): Default (300s) -- Local Ollama with GPU: 600s -- Local Ollama with CPU: 1200s -- Remote LM Studio: 900s - -### Use faster models: - -- **Cloud APIs:** OpenAI, Anthropic, Groq (fastest) -- **Local models:** Try smaller models first - - Fast: `gemma2:2b`, `phi3:mini` - - Medium: `llama3:8b`, `mistral:7b` - - Slow: `llama3:70b`, `mixtral:8x7b` - -### Preload local models: - -```bash -# This prevents first-run delays -ollama run llama3 -# Press Ctrl+D to exit after model loads -``` - ---- - -## Still Stuck? - -### Collect diagnostics: - -```bash -# Container status -docker ps - -# Container logs (last 100 lines) -docker logs --tail 100 open-notebook > logs.txt - -# Or for docker compose -docker compose logs --tail 100 > logs.txt - -# Check resource usage -docker stats --no-stream -``` - -### Get help: - -1. **[Discord](https://discord.gg/37XJPXfz2w)** - Fastest response from community -2. **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Bug reports and features -3. **[Full Troubleshooting Guide](common-issues.md)** - More detailed solutions - -**Before asking:** -- Include your `docker-compose.yml` (remove API keys!) 
-- Include relevant logs -- Describe your setup (local vs remote, OS, Docker version) -- What you've already tried - ---- - -## Quick Reference: API_URL Settings - -| Scenario | API_URL Value | Example | -|----------|---------------|---------| -| **Local access only** | Not needed | Leave unset | -| **Remote on same network** | `http://SERVER_IP:5055` | `http://192.168.1.100:5055` | -| **Remote with hostname** | `http://HOSTNAME:5055` | `http://myserver.local:5055` | -| **Behind reverse proxy (no SSL)** | `http://DOMAIN:5055` | `http://notebook.local:5055` | -| **Behind reverse proxy (SSL)** | `https://DOMAIN/api` | `https://notebook.example.com/api` | - -**Remember:** The API_URL is from your **browser's perspective**, not the server's! diff --git a/docs/user-guide/chat.md b/docs/user-guide/chat.md deleted file mode 100644 index a0d1c230..00000000 --- a/docs/user-guide/chat.md +++ /dev/null @@ -1,422 +0,0 @@ -# Chat User Guide - -The Chat Assistant in Open Notebook provides an intelligent interface for interacting with your research materials. This guide will help you master the chat features to get the most insightful and productive conversations with AI about your content. - -## Chat Interface Basics - -### Starting Your First Chat - -1. **Navigate to Your Notebook**: Open the notebook containing your research materials -2. **Access the Chat Panel**: Look for the chat interface in your notebook layout -3. **Review Your Context**: Check which sources are available for the conversation -4. **Start Typing**: Enter your question or topic in the chat input field -5. **Send Your Message**: Press Enter or click the send button to begin - -### Understanding the Chat Layout - -The chat interface consists of several key components: - -- **Message History**: Shows your conversation thread -- **Context Panel**: Displays which sources are available to the AI -- **Input Field**: Where you type your messages -- **Action Buttons**: Options to save messages as notes or manage the conversation - -### Basic Chat Commands and Interactions - -#### Effective Question Patterns - -**Research Questions**: -- "What are the main arguments in [source name]?" -- "How do these sources relate to each other?" -- "What evidence supports [specific claim]?" - -**Analysis Requests**: -- "Analyze the methodology used in [source]" -- "Compare the findings across all sources" -- "Identify gaps in the current research" - -**Synthesis Queries**: -- "Synthesize the key insights from all sources" -- "What conclusions can be drawn from this evidence?" -- "How do these findings challenge existing theory?" - -## Context Configuration Options - -Context management is crucial for effective AI conversations. Open Notebook gives you complete control over what information the AI can access. 
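-
-Before diving into the levels below, it helps to have a rough feel for what context size costs. The sketch that follows is a back-of-the-envelope estimate, not Open Notebook code: the ~4 characters per token ratio is a common rule of thumb for English text, and the $3 per million input tokens price is an assumption; substitute your provider's real rates.
-
-```python
-def estimate_context_cost(num_chars: int, price_per_million_tokens: float = 3.0) -> float:
-    """Rough cost of including content in the chat context.
-
-    Assumes ~4 characters per token; real tokenizers vary by model.
-    """
-    estimated_tokens = num_chars / 4
-    return estimated_tokens / 1_000_000 * price_per_million_tokens
-
-# A large PDF (~400,000 characters) sent as full content is roughly
-# 100,000 tokens, or about $0.30 of input per message at $3/1M tokens.
-print(f"${estimate_context_cost(400_000):.2f}")
-```
-
-The token and character counters in the chat interface give the exact numbers for your current context; the estimate above is only meant to build intuition for why "Summary" is the sensible default for large sources.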
- -### Context Levels Explained - -#### Not in Context -- **What it means**: The AI cannot see this content -- **When to use**: For sensitive information or irrelevant sources -- **Benefits**: Maintains privacy, reduces costs, keeps conversations focused - -#### Summary Context -- **What it means**: AI receives a condensed version of the content -- **When to use**: For background information or supporting materials -- **Benefits**: Provides context without overwhelming the conversation, cost-effective - -#### Full Content Context -- **What it means**: AI has access to the complete source material -- **When to use**: For detailed analysis or when precise citations are needed -- **Benefits**: Most comprehensive analysis, detailed responses - -### Strategic Context Management - -#### The Minimalist Approach -Start with minimal context and expand as needed: -1. Begin with "Summary" context for background sources -2. Add "Full Content" only for primary sources -3. Gradually include more context if the AI needs additional information - -#### The Focused Approach -Select context based on your current research question: -- **For broad overview**: Use summary context for multiple sources -- **For detailed analysis**: Use full content for 1-2 key sources -- **For comparison**: Use full content for sources being compared - -#### The Cost-Conscious Approach -Balance insight quality with API costs: -- Use summary context as the default -- Reserve full content for the most important sources -- Remove sources from context when they're no longer relevant - -### Dynamic Context Adjustment - -You can modify context settings during conversations: - -1. **Mid-Conversation Changes**: Adjust context based on how the conversation develops -2. **Follow-up Questions**: Add relevant sources when diving deeper into topics -3. **Topic Shifts**: Remove irrelevant sources and add new ones for different questions - -## Multiple Chat Sessions - -Open Notebook allows you to maintain separate chat threads for different aspects of your research. - -### Creating and Managing Multiple Chats - -#### When to Start a New Chat Session - -**Different Research Questions**: -- Each major research question deserves its own chat -- Separate exploratory from analytical conversations -- Keep methodology discussions separate from findings - -**Different Source Sets**: -- Create chats for different groups of sources -- Separate primary and secondary source discussions -- Use different chats for different time periods or topics - -**Different Analysis Stages**: -- Initial exploration chat -- Deep analysis chat -- Synthesis and conclusion chat - -#### Best Practices for Multiple Chats - -1. **Descriptive Names**: Give each chat a clear, descriptive name -2. **Consistent Context**: Maintain appropriate context settings for each chat's purpose -3. **Regular Reviews**: Periodically review and organize your chat sessions -4. 
**Cross-Reference**: Note connections between different chat sessions - -### Chat Organization Strategies - -#### By Research Phase -- **Phase 1**: Initial exploration and question formulation -- **Phase 2**: Deep analysis and evidence gathering -- **Phase 3**: Synthesis and conclusion development - -#### By Source Type -- **Academic Sources**: Scholarly papers and research -- **Media Sources**: News articles and reports -- **Primary Sources**: Original documents and data - -#### By Topic or Theme -- **Main Research Question**: Core topic investigation -- **Related Topics**: Supporting or tangential inquiries -- **Methodology**: Research approach and methods - -## Chat History Management - -### Reviewing Past Conversations - -#### Finding Important Information -- **Search Within Chats**: Use keywords to find specific discussions -- **Bookmark Key Messages**: Save important AI responses as notes -- **Review Summaries**: Periodically summarize key insights from chats - -#### Organizing Chat History -- **Regular Cleanup**: Archive or delete outdated conversations -- **Export Key Insights**: Move important findings to permanent notes -- **Create Indexes**: Maintain lists of important chat topics - -### Using Chat History for Research Development - -#### Tracking Research Evolution -- **Question Development**: See how your research questions evolved -- **Insight Building**: Track how insights developed over time -- **Gap Identification**: Notice what topics need more exploration - -#### Building on Previous Conversations -- **Reference Earlier Chats**: Mention previous discussions in new messages -- **Continuing Threads**: Pick up where previous conversations left off -- **Connecting Insights**: Link findings across different chat sessions - -## Best Practices for Effective AI Conversations - -### Asking Better Questions - -#### Be Specific and Clear -**Instead of**: "What does this say?" -**Try**: "What are the main arguments in Smith's 2023 paper about climate policy?" - -**Instead of**: "Are these related?" -**Try**: "How do the findings in Source A support or contradict the theory presented in Source B?" - -#### Provide Context for Your Questions -- Explain why you're asking the question -- Mention your research goals -- Reference previous parts of the conversation - -#### Use Progressive Questioning -1. **Start Broad**: "What are the main themes in these sources?" -2. **Get Specific**: "Can you elaborate on the methodology used in Source A?" -3. **Synthesize**: "How do these findings relate to my research question about X?" - -### Optimizing AI Responses - -#### Request Specific Formats -- "Please provide a bullet-point summary" -- "Create a comparison table" -- "List the evidence for each argument" - -#### Ask for Citations -- "Please include specific quotes to support your analysis" -- "Which sources support this conclusion?" -- "Can you provide page numbers for these references?" - -#### Encourage Critical Analysis -- "What are the limitations of this study?" -- "What alternative interpretations are possible?" -- "Where might there be bias in this source?" 
- -### Managing Long Conversations - -#### Breaking Down Complex Topics -- Address one aspect at a time -- Use follow-up questions to drill down -- Summarize key points before moving to new topics - -#### Maintaining Conversation Flow -- Reference previous points in the conversation -- Ask the AI to summarize before continuing -- Use transitional phrases to connect ideas - -## Citation Features in Chat - -### Understanding AI Citations - -When the AI references your sources, it provides: -- **Source Identification**: Which document the information came from -- **Specific References**: Page numbers or sections when available -- **Quote Attribution**: Direct quotes with proper attribution - -### Requesting Better Citations - -#### Ask for Specific References -- "Please provide the exact quote that supports this point" -- "Which page in the document contains this information?" -- "Can you cite the specific study mentioned?" - -#### Verify Citation Accuracy -- **Cross-Check**: Verify quotes against original sources -- **Context Check**: Ensure citations are used appropriately -- **Completeness**: Make sure important citations aren't missing - -### Using Citations for Your Research - -#### Building Bibliography -- Save cited sources for your reference list -- Note page numbers for future reference -- Track which sources are most frequently cited - -#### Fact-Checking -- Use citations to verify AI claims -- Cross-reference information across sources -- Identify potential contradictions - -## Advanced Chat Features - -### Using the Ask Feature - -The Ask feature provides sophisticated research assistance: - -1. **Query Strategy**: AI determines what to search for -2. **Individual Analysis**: Each query gets processed separately -3. **Final Synthesis**: All results are combined into a coherent answer - -#### Optimizing Ask Queries -- **Be Specific**: Clear, focused questions get better results -- **Provide Context**: Explain what you're trying to understand -- **Multiple Angles**: Ask questions that explore different aspects - -### Integration with Notes - -#### Saving Chat Messages as Notes -1. **Identify Valuable Responses**: Look for insights worth preserving -2. **Click "Save as Note"**: Convert any AI message to a permanent note -3. **Edit and Enhance**: Customize the note with your own insights -4. 
**Link to Sources**: Connect the note to relevant source materials - -#### Using Notes to Enhance Chats -- Reference your notes in conversations -- Ask the AI to build on your existing notes -- Use notes as starting points for new discussions - -## Troubleshooting Common Chat Issues - -### AI Responses Lack Depth - -**Problem**: AI gives superficial answers -**Solutions**: -- Provide more specific context -- Ask follow-up questions -- Request examples or evidence -- Use full content context for key sources - -### AI Misunderstands Questions - -**Problem**: AI doesn't address your actual question -**Solutions**: -- Rephrase your question more clearly -- Provide additional context -- Break complex questions into smaller parts -- Reference specific sources or sections - -### Context Limitations - -**Problem**: AI doesn't have access to needed information -**Solutions**: -- Adjust context settings -- Add relevant sources to full content context -- Upload additional sources if needed -- Provide direct quotes in your messages - -### High API Costs - -**Problem**: Chat usage is expensive -**Solutions**: -- Use summary context as default -- Remove unnecessary sources from context -- Choose more cost-effective models -- Be more targeted with questions - -## Model Selection and Configuration - -### Choosing the Right Model - -#### For Different Task Types - -**Research and Analysis**: -- Claude Sonnet or GPT-4 for deep analysis -- Gemini Pro for comprehensive understanding -- Grok for creative insights - -**Quick Questions**: -- GPT-5-mini for fast, cost-effective responses -- Gemini Flash for quick summaries -- Ollama models for privacy-focused usage - -**Specialized Tasks**: -- Use model strengths for specific capabilities -- Consider language requirements -- Balance cost with quality needs - -### Customizing Model Behavior - -#### Through Context Management -- Adjust what information the model sees -- Control the level of detail available -- Focus on relevant source materials - -#### Through Question Framing -- Specify the type of response you want -- Request specific formats or structures -- Guide the model's analytical approach - -## Privacy and Security Considerations - -### Data Protection - -#### What Gets Shared -- Only sources you've explicitly added to context -- Your chat messages and questions -- Selected content based on context settings - -#### What Remains Private -- Sources set to "Not in Context" -- Other notebooks and their contents -- Personal information not explicitly shared - -### Best Practices for Privacy - -1. **Review Context Settings**: Regularly audit what's shared -2. **Sensitive Information**: Keep private data out of context -3. **Model Selection**: Consider privacy implications of different providers -4. 
**Regular Cleanup**: Remove outdated or sensitive conversations - -## Future Chat Enhancements - -### Planned Features - -#### Enhanced Context Management -- **Smart Context Suggestions**: AI-recommended context settings -- **Dynamic Context**: Automatic adjustment based on conversation -- **Context History**: Track how context affected conversations - -#### Improved Collaboration -- **Shared Chats**: Collaborate with others on conversations -- **Chat Templates**: Pre-configured chat setups for common tasks -- **Workflow Integration**: Connect chats to broader research workflows - -#### Advanced Analysis -- **Multi-Modal Support**: Image and video analysis in chat -- **Real-Time Updates**: Live updates as new sources are added -- **Predictive Insights**: AI suggestions for research directions - -## Getting the Most from Chat - -### Regular Practices - -1. **Daily Check-ins**: Brief conversations to maintain research momentum -2. **Weekly Reviews**: Summarize key insights and plan next steps -3. **Monthly Audits**: Review chat history and extract key learnings - -### Continuous Improvement - -- **Experiment with Different Approaches**: Try various questioning styles -- **Learn from Responses**: Notice what types of questions work best -- **Share Insights**: Contribute to the community with your discoveries - -### Integration with Overall Workflow - -- **Connect to Note-Taking**: Use chat insights to inform your notes -- **Link to Source Analysis**: Let chat guide your source selection -- **Support Writing**: Use chat insights in your research outputs - -## Getting Help and Community - -### Resources for Chat Support - -1. **Documentation**: This guide and other help materials -2. **Community Forum**: [GitHub Discussions](https://github.com/lfnovo/open-notebook/discussions) -3. **Feature Requests**: Suggest improvements in the discussions -4. **Bug Reports**: Report issues through GitHub Issues - -### Sharing Your Experience - -- **Share Successful Strategies**: Help others learn from your experience -- **Request Features**: Suggest new chat capabilities -- **Provide Feedback**: Help improve the chat experience for everyone - -The chat feature in Open Notebook is designed to be your research partner. With practice and experimentation, you'll develop a conversation style that maximizes the insights you can gain from your research materials. \ No newline at end of file diff --git a/docs/user-guide/index.md b/docs/user-guide/index.md deleted file mode 100644 index 9578eba4..00000000 --- a/docs/user-guide/index.md +++ /dev/null @@ -1,94 +0,0 @@ -# User Guide - -This comprehensive user guide covers all aspects of using Open Notebook effectively. Each section provides detailed instructions, best practices, and practical examples to help you master the platform. - -## Core Interface & Navigation - -### 📱 **[Interface Overview](interface-overview.md)** -Understanding Open Notebook's three-column design and navigation. -- Three-column layout (Sources, Notes, Chat) -- Navigation basics and menu structure -- Settings and preferences -- Mobile responsiveness -- Customization options - -## Content Management - -### 📚 **[Notebooks](notebooks.md)** -Organizing your research across multiple notebooks. -- Creating and managing notebooks -- Organization strategies -- Notebook settings and configuration -- Import/export capabilities -- Best practices for productivity - -### 📄 **[Sources](sources.md)** -Adding and managing all types of research content. 
-- Supported file types and formats -- Adding sources step-by-step -- Source management and organization -- Processing and transformations -- Troubleshooting source issues - -### 📝 **[Notes](notes.md)** -Creating and managing both manual and AI-generated notes. -- Manual note creation -- AI-assisted note generation -- Note templates and formatting -- Organization and linking -- Export and sharing options - -## AI Interaction - -### 💬 **[Chat](chat.md)** -Conversing with AI using your research context. -- Chat interface basics -- Context configuration options -- Multiple chat sessions -- Chat history management -- Best practices for effective conversations - -### 🔍 **[Search](search.md)** -Finding information across all your content. -- Full-text search capabilities -- Vector search functionality -- Advanced search techniques -- Search integration with chat and notes -- Performance optimization - ---- - -## How to Use This Guide - -### For New Users -1. Start with **[Interface Overview](interface-overview.md)** to understand the layout -2. Learn **[Notebooks](notebooks.md)** for organizing your work -3. Master **[Sources](sources.md)** to add content effectively -4. Explore **[Notes](notes.md)** and **[Chat](chat.md)** for AI-powered insights - -### For Experienced Users -- Jump to specific sections for advanced techniques -- Use as a reference for best practices -- Explore integration patterns between features - -### Common Workflows -- **Research Projects**: Notebooks → Sources → Notes → Chat -- **Content Creation**: Sources → Chat → Notes → Export -- **Learning**: Sources → Search → Notes → Chat - -## Next Steps - -After mastering the basics, explore: -- **[Features](../features/index.md)** - Advanced capabilities like podcasts and transformations -- **[Deployment](../deployment/index.md)** - Production deployment and scaling -- **[Development](../development/index.md)** - API usage and customization - -## Need Help? - -- 💬 **[Discord Community](https://discord.gg/37XJPXfz2w)** - Get help and share ideas -- 🐛 **[GitHub Issues](https://github.com/lfnovo/open-notebook/issues)** - Report bugs and request features -- 📖 **[Troubleshooting](../troubleshooting/index.md)** - Common issues and solutions - ---- - -*This user guide is designed to be your complete reference for using Open Notebook effectively. Each section builds on the others, but you can also use individual sections as standalone references.* \ No newline at end of file diff --git a/docs/user-guide/interface-overview.md b/docs/user-guide/interface-overview.md deleted file mode 100644 index fc24fc0a..00000000 --- a/docs/user-guide/interface-overview.md +++ /dev/null @@ -1,251 +0,0 @@ -# Interface Overview - -Open Notebook features a clean, intuitive interface designed to streamline your research workflow. This guide covers the layout, navigation, and interaction patterns to help you work efficiently with the platform. - -## Interface Design Philosophy - -Open Notebook follows a three-column layout inspired by Google Notebook LM but enhanced with additional features and customization options. 
The design prioritizes: - -- **Workspace Organization**: Clear separation between content management and AI interaction -- **Context Awareness**: Visual indicators for what information is available to the AI -- **Privacy Control**: Granular control over data sharing with AI models -- **Streamlined Workflow**: Logical progression from source management to knowledge creation - -## Three-Column Layout - -### Main Workspace (Left Side) -The main workspace is divided into two columns that manage your research materials: - -#### Sources Column (Left) -- **Add Source Button**: Quick access to add new research materials -- **Source Cards**: Visual representations of your documents, URLs, and other content -- **Context Indicators**: Visual cues showing whether sources are included in AI context -- **Source Actions**: Edit, delete, and transformation options for each source - -#### Notes Column (Right) -- **Write a Note Button**: Create manual notes and observations -- **Note Cards**: Display both human-written and AI-generated notes -- **Note Types**: Visual distinction between manual and AI-generated content -- **Note Actions**: Edit, delete, and organization options - -### Chat Interface (Right Side) -The chat interface provides AI interaction capabilities: - -#### Chat Tab -- **Session Management**: Create, rename, and switch between conversation sessions -- **Message History**: Scrollable conversation history with the AI assistant -- **Context Display**: Shows what content is available to the AI (token count and character count) -- **Input Field**: Type questions and interact with your knowledge base - -#### Podcast Tab -- **Episode Generation**: Create podcast episodes from your research materials -- **Profile Selection**: Choose from pre-configured episode profiles -- **Speaker Configuration**: Select and customize podcast speakers -- **Generation Controls**: Start podcast creation with custom instructions - -## Navigation Structure - -### Main Navigation -Open Notebook uses a page-based navigation system accessible through the sidebar: - -- **📒 Notebooks**: Main workspace for research projects -- **🔍 Ask and Search**: Query your knowledge base across all notebooks -- **🎙️ Podcasts**: Manage podcast profiles and view generated episodes -- **🤖 Models**: Configure AI providers and model settings -- **💱 Transformations**: Create and manage content transformation prompts -- **⚙️ Settings**: Application configuration and preferences - -### Notebook Navigation -Within each notebook: -- **Back to List**: Return to the notebook overview -- **Refresh**: Reload current notebook content -- **Notebook Header**: Edit name and description, archive/unarchive options -- **Session Controls**: Manage chat sessions and conversation history - -## Settings and Preferences - -### Location -Access settings through the **⚙️ Settings** page in the main navigation. 
- -### Key Configuration Options - -#### Content Processing -- **Document Engine**: Choose between auto, docling, or simple processing -- **URL Engine**: Select from auto, firecrawl, jina, or simple web scraping -- **Embedding Options**: Configure automatic content embedding for vector search - -#### File Management -- **Auto-Delete Files**: Automatically remove uploaded files after processing -- **YouTube Languages**: Set preferred languages for transcript download - -#### Quality Settings -- **Processing Accuracy**: Balance between speed and accuracy for different content types -- **API Integration**: Configure external service API keys for enhanced processing - -## Context Control System - -### Three-Level Context System -Open Notebook provides granular control over what information AI models can access: - -#### Not in Context -- Sources and notes marked as "not in context" are invisible to the AI -- Useful for keeping sensitive or irrelevant information private -- Reduces API costs by limiting context size - -#### Summary Only -- AI receives condensed summaries of the content -- Balances information access with cost optimization -- AI can request full content if needed for specific queries - -#### Full Content -- AI has access to complete document text -- Provides maximum context for detailed analysis -- Higher API costs but most comprehensive responses - -### Context Indicators -Visual indicators throughout the interface show: -- **Token Count**: Current context size in tokens -- **Character Count**: Total characters in context -- **Context Composition**: What sources and notes are included -- **Cost Estimation**: Approximate API usage for current context - -## Mobile Responsiveness - -### Responsive Design -Open Notebook is built with Next.js, providing: -- **Adaptive Layout**: Columns collapse and stack on smaller screens -- **Touch-Friendly**: Buttons and interactions optimized for mobile devices -- **Scrollable Interface**: All content accessible through touch scrolling - -### Mobile Usage Patterns -- **Vertical Stacking**: Three-column layout becomes vertically stacked -- **Collapsible Sections**: Expandable areas to save screen space -- **Touch Navigation**: Tap-friendly buttons and controls -- **Readable Text**: Responsive text sizing for different screen sizes - -### Mobile Limitations -- **Complex Interactions**: Some advanced features work better on desktop -- **File Upload**: Limited file management capabilities on mobile browsers -- **Multi-tasking**: Reduced ability to reference multiple sources simultaneously - -## Customization Options - -### Interface Customization -- **Sidebar State**: Collapsed or expanded sidebar based on preference -- **Page Layout**: Wide or narrow layout options -- **Theme**: Follows system theme preferences (light/dark) - -### Content Customization -- **Transformation Prompts**: Create custom AI prompts for content analysis -- **Episode Profiles**: Configure podcast generation with custom speakers and styles -- **Model Selection**: Choose different AI models for different tasks - -### Workflow Customization -- **Session Management**: Organize conversations by topic or project -- **Note Organization**: Manual and AI-assisted note creation -- **Source Processing**: Choose processing engines based on content type - -## Common UI Patterns - -### Action Buttons -- **Primary Actions**: Bold, colored buttons for main actions (Create, Save, Generate) -- **Secondary Actions**: Subtle buttons for supporting actions (Edit, Delete, Refresh) -- **Icon 
Buttons**: Symbolic representations for common actions (📝, 🔄, 🗑️) - -### Status Indicators -- **Loading States**: Spinners and progress indicators during processing -- **Success Messages**: Toast notifications for completed actions -- **Error Handling**: Clear error messages with actionable suggestions -- **Warning States**: Alerts for missing configuration or potential issues - -### Content Cards -- **Source Cards**: Preview, metadata, and action buttons for documents -- **Note Cards**: Content preview with creation date and type indicators -- **Message Cards**: Chat history with clear sender identification - -### Expandable Sections -- **Context View**: Collapsible JSON view of AI context -- **Help Sections**: Expandable guidance for configuration options -- **Session History**: Collapsible list of previous conversations - -## Interaction Patterns - -### Content Management -1. **Add Source**: Click "Add Source" → Choose input method → Process content -2. **Create Note**: Click "Write a Note" → Enter content → Save -3. **Transform Content**: Select source → Choose transformation → Generate insight - -### AI Interaction -1. **Set Context**: Select sources/notes for AI access -2. **Ask Question**: Type in chat input → Receive AI response -3. **Save Response**: Click "💾 New Note" to save AI responses - -### Session Management -1. **Create Session**: Click "Create New Session" → Name session → Start chatting -2. **Switch Session**: Select from session list → Load conversation history -3. **Rename Session**: Edit session name → Save changes - -### Content Discovery -1. **Search**: Use search page → Enter query → Review results -2. **Filter**: Choose search type (text/vector) → Specify content types -3. **Navigate**: Click search results → View source content - -## Screenshots Reference - -The following screenshots illustrate key interface elements: - -- **New Notebook**: ![New Notebook](/assets/new_notebook.png) - Notebook creation interface -- **Add Source**: ![Add Source](/assets/add_source.png) - Source addition dialog -- **Source List**: ![Source List](/assets/asset_list.png) - Three-column layout with sources -- **Context Control**: ![Context](/assets/context.png) - Context management interface -- **Transformations**: ![Transformations](/assets/transformations.png) - Content transformation tools -- **Human Note**: ![Human Note](/assets/human_note.png) - Manual note creation -- **AI Note**: ![AI Note](/assets/ai_note.png) - AI-generated note saving -- **Podcast Interface**: ![Podcast](/assets/podcast_listen.png) - Podcast generation interface -- **Search**: ![Search](/assets/search.png) - Search and discovery interface - -## Keyboard Shortcuts - -Currently, Open Notebook relies primarily on mouse/touch interactions. 
Standard browser shortcuts apply: - -- **Ctrl/Cmd + R**: Refresh page -- **Ctrl/Cmd + F**: Find text on page -- **Tab**: Navigate between form fields -- **Enter**: Submit forms and chat messages -- **Escape**: Close dialogs and expandable sections - -## Performance Considerations - -### Interface Responsiveness -- **Lazy Loading**: Content loads as needed to maintain performance -- **Caching**: Frequently accessed data is cached for faster loading -- **Optimized Rendering**: Efficient display of large document lists - -### Resource Management -- **Context Limits**: Token count displays help manage API costs -- **Memory Usage**: Efficient handling of large documents and conversations -- **Network Optimization**: Minimal data transfer for interface updates - -## Tips for Efficient Use - -### Organization -- Use descriptive notebook names and descriptions -- Keep related sources together in the same notebook -- Create focused chat sessions for different research aspects - -### Context Management -- Start with "Summary Only" context to save costs -- Use "Full Content" only when detailed analysis is needed -- Regular review and cleanup of unused sources - -### Search Strategy -- Use specific keywords for text search -- Try vector search for conceptual queries -- Combine different search types for comprehensive results - -### Workflow Optimization -- Create transformation prompts for recurring analysis tasks -- Use episode profiles for consistent podcast generation -- Organize notes by topic or research phase - -This interface overview should help you navigate Open Notebook effectively and take advantage of its powerful features while maintaining control over your research data and AI interactions. \ No newline at end of file diff --git a/docs/user-guide/notebooks.md b/docs/user-guide/notebooks.md deleted file mode 100644 index df8be143..00000000 --- a/docs/user-guide/notebooks.md +++ /dev/null @@ -1,396 +0,0 @@ -# Notebooks User Guide - -Notebooks are the core organizational unit in Open Notebook, providing a structured way to collect, organize, and analyze research materials. This comprehensive guide covers everything you need to know about creating, managing, and optimizing your notebooks for maximum productivity. - -## Table of Contents - -1. [Creating and Managing Notebooks](#creating-and-managing-notebooks) -2. [Notebook Organization Strategies](#notebook-organization-strategies) -3. [Switching Between Notebooks](#switching-between-notebooks) -4. [Notebook Settings and Configuration](#notebook-settings-and-configuration) -5. [Sharing and Collaboration Features](#sharing-and-collaboration-features) -6. [Import/Export Capabilities](#importexport-capabilities) -7. [Best Practices for Notebook Organization](#best-practices-for-notebook-organization) - -## Creating and Managing Notebooks - -### Creating a New Notebook - -1. **Access the Notebooks Page**: Navigate to **📒 Notebooks** in the sidebar -2. **Click "New Notebook"**: Find the expandable "➕ New Notebook" section -3. **Provide Essential Information**: - - **Name**: Choose a clear, descriptive name that will help you identify the notebook later - - **Description**: Write a detailed description explaining the notebook's purpose, scope, and research goals - -#### Writing Effective Notebook Descriptions - -Your notebook description is crucial for AI interactions. The more detailed and specific you make it, the better the AI will understand your research context. 
Include: - -- **Research objectives**: What you're trying to accomplish -- **Target audience**: Who will use this research -- **Key topics**: Main areas of focus -- **Methodology**: How you plan to approach the research -- **Expected outcomes**: What you hope to achieve - -**Example Description:** -``` -Research notebook focused on climate change impacts on coastal cities. -Collecting scientific papers, policy documents, and case studies from -major coastal metropolitan areas. Goal is to understand adaptation -strategies and policy recommendations for urban planning departments. -Target audience: city planners and policy makers. -``` - -### Managing Existing Notebooks - -#### Viewing Notebook List - -The main Notebooks page displays all your notebooks with: -- **Name and description**: Clear identification of each notebook -- **Creation and update timestamps**: Track when notebooks were created and last modified -- **Archive status**: Visual distinction between active and archived notebooks - -#### Editing Notebook Details - -1. **Open the notebook** you want to edit -2. **Click on the description area** (or "click to add a description" if empty) -3. **Modify the details**: - - Update the name in the text input field - - Edit the description in the text area - - Use the placeholder text as guidance for comprehensive descriptions -4. **Save changes** by clicking the "💾 Save" button - -#### Archiving and Unarchiving - -**To Archive a Notebook:** -1. Open the notebook -2. Expand the description section -3. Click "🗃️ Archive" -4. The notebook will be moved to the archived section - -**To Unarchive a Notebook:** -1. Find the notebook in the "🗃️ Archived Notebooks" section -2. Open the notebook -3. Expand the description section -4. Click "🗃️ Unarchive" - -**Important Notes:** -- Archived notebooks remain searchable and accessible -- Archived notebooks don't appear in the main notebook list -- You can still add sources, notes, and use all features in archived notebooks - -#### Deleting Notebooks - -**⚠️ Warning**: Notebook deletion is permanent and cannot be undone. - -1. Open the notebook you want to delete -2. Expand the description section -3. Click "☠️ Delete forever" -4. 
Confirm the deletion - -This will permanently remove: -- The notebook and all its metadata -- All sources associated with the notebook -- All notes and AI-generated insights -- All chat sessions and conversation history - -## Notebook Organization Strategies - -### Topic-Based Organization - -Organize notebooks around specific research topics or projects: - -**Examples:** -- `Climate Change Research` -- `Machine Learning Fundamentals` -- `Urban Planning Strategies` -- `Digital Marketing Trends 2024` - -### Project-Based Organization - -Create notebooks for specific projects or deliverables: - -**Examples:** -- `Q1 Market Analysis Report` -- `PhD Literature Review - Chapter 2` -- `Product Launch Strategy - Widget X` -- `Client Proposal - ABC Corporation` - -### Temporal Organization - -Organize notebooks by time periods or phases: - -**Examples:** -- `Weekly Research Notes - January 2024` -- `Conference Prep - Tech Summit 2024` -- `Quarterly Planning - Q2 2024` -- `Course Materials - Spring Semester` - -### Hierarchical Organization - -Use naming conventions to create logical hierarchies: - -**Examples:** -- `01 - Project Alpha - Literature Review` -- `02 - Project Alpha - Data Analysis` -- `03 - Project Alpha - Final Report` - -Or: -- `Marketing - SEO Research` -- `Marketing - Social Media Strategies` -- `Marketing - Content Calendar` - -### Hybrid Organization - -Combine multiple strategies for complex research needs: - -**Examples:** -- `2024-Q1 - Climate Policy - Legislative Analysis` -- `PhD-Ch3 - Urban Planning - Case Studies` -- `Client-ABC - Market Research - Competitive Analysis` - -## Switching Between Notebooks - -### Quick Navigation - -1. **From Any Notebook**: Click the "🔙 Back to the list" button in the notebook header -2. **From the Notebooks List**: Click "Open" on any notebook card -3. 
**Browser Navigation**: Use browser back/forward buttons (maintains session state) - -### Session Management - -Open Notebook maintains separate session states for each notebook: - -- **Context Settings**: Each notebook remembers which sources are in context -- **Chat History**: Conversation threads are preserved per notebook -- **UI State**: Column layouts and interface preferences are maintained - -### Workflow Optimization - -**For Active Research:** -- Keep 2-3 primary notebooks open in different browser tabs -- Use the "🔄 Refresh" button to update content without losing session state -- Bookmark frequently accessed notebooks - -**For Reference Work:** -- Use the archived notebooks section for completed projects -- Maintain a "Reference Materials" notebook for general resources -- Create template notebooks for recurring project types - -## Notebook Settings and Configuration - -### Notebook-Level Settings - -Each notebook maintains its own configuration: - -**Description Management:** -- Detailed purpose and scope definitions -- Research objectives and methodologies -- Target audience and expected outcomes -- Context for AI interactions - -**Archive Status:** -- Active notebooks appear in the main list -- Archived notebooks are collapsed but remain accessible -- Archive status affects search and display but not functionality - -### Default Content Settings - -**Context Configuration:** -- Set default context levels for new sources -- Configure AI interaction preferences -- Establish source visibility standards - -**Transformation Settings:** -- Default transformation prompts for the notebook -- Custom transformation patterns -- Model preferences for content processing - -### Integration Settings - -**Model Configuration:** -- Preferred language models for chat interactions -- Embedding models for search and context -- Speech-to-text and text-to-speech preferences - -**API Access:** -- Programmatic access to notebook content -- Integration with external tools and services -- Authentication and permission settings - -## Sharing and Collaboration Features - -### Current Sharing Capabilities - -**Export Options:** -- Full notebook content export -- Individual source and note export -- Generated insights and transformations -- Chat conversation histories - -**API Access:** -- RESTful API for programmatic access -- Authentication via API keys or password protection -- Full CRUD operations on notebooks and content - -### Collaboration Workflows - -**Team Research:** -1. **Shared Environment**: Deploy Open Notebook on shared infrastructure -2. **Notebook Conventions**: Establish naming and organization standards -3. **Content Guidelines**: Define source quality and annotation standards -4. 
**Review Processes**: Implement peer review for critical research - -**Knowledge Sharing:** -- Export notebooks as comprehensive research packages -- Generate podcast summaries for team consumption -- Create shareable transformation outputs -- Maintain citation and reference standards - -### Security and Privacy - -**Access Control:** -- Password protection for public deployments -- API authentication for programmatic access -- Local deployment options for sensitive research - -**Data Management:** -- Local storage with no external data sharing -- Configurable AI provider selection -- Full control over context and content sharing - -## Import/Export Capabilities - -### Supported Import Formats - -**Direct Source Integration:** -- **Web Links**: Automatic content extraction from URLs -- **Documents**: PDF, DOCX, TXT, Markdown, EPUB -- **Media**: Audio files, video files, YouTube videos -- **Office Files**: Excel, PowerPoint, and other Office formats -- **Text Content**: Direct paste or manual entry - -**Bulk Import Strategies:** -- Create notebooks with standardized source organization -- Use consistent naming conventions for imported materials -- Maintain metadata and source attribution -- Document import dates and processing notes - -### Export Options - -**Notebook Export:** -- Complete notebook structure with all sources and notes -- Chat conversation histories and AI interactions -- Transformation outputs and generated insights -- Metadata including creation dates and source information - -**Content Export:** -- Individual source materials with annotations -- Generated notes and AI insights -- Podcast episodes and audio content -- Search results and analysis summaries - -**Integration Export:** -- API-accessible data structures -- JSON format for programmatic processing -- Structured data for external tool integration -- Citation and reference formatting - -### Migration Strategies - -**From Other Research Tools:** -1. **Export existing content** from current tools -2. **Prepare import materials** in supported formats -3. **Create notebook structure** matching your organizational needs -4. **Import content systematically** with proper categorization -5. 
**Verify content integrity** and fix any import issues - -**Platform Migration:** -- Export complete notebooks before moving between instances -- Maintain consistent API configurations -- Preserve chat histories and AI interactions -- Document custom transformations and settings - -## Best Practices for Notebook Organization - -### Naming Conventions - -**Consistent Patterns:** -- Use descriptive, searchable names -- Include date or version information when relevant -- Maintain consistent capitalization and formatting -- Avoid special characters that might cause issues - -**Effective Examples:** -- `2024-Q1-Market-Research-SaaS-Tools` -- `PhD-Literature-Review-Urban-Planning` -- `Client-ABC-Competitive-Analysis-Phase-1` -- `Personal-Learning-Machine-Learning-Basics` - -### Content Organization - -**Source Management:** -- Add sources consistently as you find them -- Use descriptive titles that explain the source's relevance -- Include creation dates and source attribution -- Maintain a balance between comprehensive and focused content - -**Note-Taking Strategy:** -- Create manual notes for personal insights and observations -- Save AI-generated insights that provide value -- Use consistent formatting for different types of notes -- Link related notes and sources when appropriate - -### Maintenance Workflows - -**Regular Review:** -- **Weekly**: Review active notebooks and update descriptions -- **Monthly**: Archive completed projects and clean up unused content -- **Quarterly**: Evaluate notebook organization and optimize structure -- **Annually**: Export important notebooks and review retention policies - -**Quality Control:** -- Verify source accuracy and reliability -- Update notebook descriptions as research evolves -- Remove outdated or irrelevant content -- Maintain consistent citation and reference standards - -### Performance Optimization - -**Context Management:** -- Use minimum necessary context for AI interactions -- Set appropriate context levels for different source types -- Monitor API usage and costs -- Optimize embedding and search performance - -**Storage Management:** -- Regular cleanup of temporary files and cached content -- Monitor database size and performance -- Archive old notebooks to improve system performance -- Maintain backup strategies for important research - -### Advanced Workflows - -**Research Methodology:** -1. **Planning Phase**: Create notebook with detailed research objectives -2. **Collection Phase**: Systematically add sources with proper organization -3. **Analysis Phase**: Use transformations and AI interactions for insights -4. **Synthesis Phase**: Generate comprehensive notes and summaries -5. **Communication Phase**: Create podcasts and shareable content -6. **Archive Phase**: Preserve completed research for future reference - -**Multi-Notebook Projects:** -- Use consistent naming conventions across related notebooks -- Maintain cross-references between related research -- Create summary notebooks that reference detailed research -- Develop template notebooks for recurring project types - ---- - -## Conclusion - -Effective notebook management in Open Notebook requires thoughtful organization, consistent practices, and strategic use of the platform's features. By following these guidelines and adapting them to your specific research needs, you can create a powerful knowledge management system that enhances your research capabilities and maintains long-term value. 
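As noted under Sharing and Collaboration, the REST API exposes full CRUD operations on notebooks and content, so much of the routine maintenance above (quarterly exports, archiving, creating template notebooks) can be scripted. The sketch below is hypothetical: the `/api/notebooks` routes and payload fields are assumptions modeled on the documented source endpoints, so verify them against the OpenAPI docs at `http://localhost:5055/docs` before relying on them.

```python
# Hypothetical sketch of scripted notebook maintenance via the REST API.
# Endpoint paths and field names are assumptions -- confirm them against the
# live OpenAPI docs (http://localhost:5055/docs), and add an Authorization
# header if your deployment uses password protection or API keys.
import requests

BASE_URL = "http://localhost:5055"

def list_notebooks() -> list[dict]:
    """Fetch all notebooks, e.g. for a quarterly review or backup pass."""
    resp = requests.get(f"{BASE_URL}/api/notebooks", timeout=30)
    resp.raise_for_status()
    return resp.json()

def create_notebook(name: str, description: str) -> dict:
    """Create a notebook that follows the naming conventions above."""
    resp = requests.post(
        f"{BASE_URL}/api/notebooks",
        json={"name": name, "description": description},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    create_notebook(
        "2024-Q1-Market-Research-SaaS-Tools",
        "Competitive landscape research for the Q1 market analysis report.",
    )
    for nb in list_notebooks():
        print(nb.get("name"))
```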
- -Remember that Open Notebook is designed to be flexible and adaptable to your workflow. Experiment with different organizational strategies, naming conventions, and content management approaches to find what works best for your research style and objectives. - -For additional support and advanced features, consult the [API documentation](http://localhost:5055/docs) and explore the [transformation system](../features/transformations.md) for custom content processing capabilities. \ No newline at end of file diff --git a/docs/user-guide/notes.md b/docs/user-guide/notes.md deleted file mode 100644 index cf0bf2a5..00000000 --- a/docs/user-guide/notes.md +++ /dev/null @@ -1,298 +0,0 @@ -# Notes User Guide - -Notes are the heart of your research workflow in Open Notebook. They provide a flexible system for capturing, organizing, and building upon your insights. This guide covers everything you need to know to get the most out of your note-taking experience. - -## Manual Note Creation - -### Creating Your First Note - -The simplest way to create a note is manually: - -1. **Access the Notes Panel**: Inside any notebook page, you'll find a dedicated column for your notes -2. **Click "Add Note"**: Look for the "Add Note" button in the notes section -3. **Add Title and Content**: Enter a descriptive title and write your note content -4. **Save**: Your note is automatically saved and becomes part of your notebook - -### Best Practices for Manual Notes - -- **Use descriptive titles**: Make your notes easy to find later with clear, specific titles -- **Keep notes focused**: Each note should cover a single concept or idea -- **Use consistent formatting**: Develop a personal style for structuring your notes -- **Date important insights**: Add timestamps for time-sensitive information - -## AI-Assisted Note Generation - -Open Notebook provides powerful AI features to help you create more insightful notes with less effort. - -### Creating Notes from Source Insights - -Transform research insights into permanent notes: - -1. **Generate Source Insights**: Use [Transformations](/features/transformations.html) to analyze your sources -2. **Review the Insight**: Read through the AI-generated insight to ensure it captures what you need -3. **Click "Save as Note"**: Convert the insight directly into a note with one click -4. **Edit and Enhance**: Customize the note title and content as needed - -### Creating Notes from Chat Conversations - -Capture valuable AI conversations as notes: - -1. **Chat with the Assistant**: Have a conversation about your research topic -2. **Identify Useful Responses**: Look for AI messages that provide valuable insights -3. **Click "Save as Note"**: Use the "Save as Note" button on any AI message -4. 
**Review and Edit**: The message becomes a note that you can modify and expand - -### Using Transformations for Note Enhancement - -Transformations can help you create different types of notes: - -- **Summary Notes**: Create concise overviews of complex sources -- **Key Insights**: Extract the most important points from your research -- **Reflective Questions**: Generate thought-provoking questions about your content -- **Action Items**: Identify next steps based on your research - -## Note Templates and Formatting - -### Suggested Note Templates - -#### Research Summary Template -``` -# [Source Title] - Summary - -## Key Points -- Main argument 1 -- Main argument 2 -- Main argument 3 - -## Notable Quotes -> "Important quote here" - -## My Thoughts -[Your analysis and reflection] - -## Questions for Further Research -- Question 1 -- Question 2 - -## Related Topics -- Topic 1 -- Topic 2 -``` - -#### Insight Development Template -``` -# [Insight Title] - -## Context -[What prompted this insight] - -## Core Idea -[The main insight] - -## Supporting Evidence -- Evidence 1 -- Evidence 2 - -## Implications -[What this means for your research] - -## Next Steps -[How to build on this insight] -``` - -#### Meeting/Interview Notes Template -``` -# [Meeting/Interview Title] -**Date**: [Date] -**Participants**: [Names] - -## Key Discussion Points -- Point 1 -- Point 2 - -## Important Quotes -> "Quote 1" -> "Quote 2" - -## Action Items -- [ ] Action 1 -- [ ] Action 2 - -## Follow-up Questions -- Question 1 -- Question 2 -``` - -### Formatting Tips - -- **Use Markdown**: Notes support full Markdown formatting -- **Structure with Headers**: Use `#`, `##`, `###` for hierarchy -- **Emphasize Important Points**: Use **bold** and *italic* text -- **Create Lists**: Use bullet points and numbered lists -- **Include Quotes**: Use `>` for blockquotes -- **Add Code**: Use backticks for inline code or triple backticks for code blocks - -## Linking and Cross-Referencing - -### Current Capabilities - -While Open Notebook is working on advanced linking features, you can currently: - -- **Reference source materials** in your notes -- **Mention related notes** by title -- **Create thematic connections** through consistent tagging and naming - -### Best Practices for Cross-Referencing - -1. **Use Consistent Terminology**: Develop a personal vocabulary for your research area -2. **Create Index Notes**: Build master notes that link to related topics -3. **Reference Sources**: Always note which sources contributed to your insights -4. 
**Build Concept Maps**: Create notes that outline relationships between ideas - -### Future Linking Features - -The roadmap includes: -- **Zettelkasten-style linking**: Direct links between notes -- **Automatic relationship detection**: AI-powered suggestions for related notes -- **Visual mind mapping**: Canvas-like interface for note relationships - -## Organization and Tagging - -### Organizational Strategies - -#### By Research Phase -- **Collection Notes**: Initial gathering of information -- **Analysis Notes**: Deeper examination of sources -- **Synthesis Notes**: Combining insights from multiple sources -- **Conclusion Notes**: Final thoughts and outcomes - -#### By Content Type -- **Source Summaries**: Notes derived from external sources -- **Personal Insights**: Your original thoughts and analysis -- **Questions**: Areas needing further research -- **Action Items**: Tasks and next steps - -#### By Topic or Theme -- **Project-Specific Notes**: Organize by current research project -- **Concept Notes**: Group by theoretical frameworks or key concepts -- **Method Notes**: Organize by research methodology or approach - -### Naming Conventions - -Develop consistent naming patterns: - -- **Date-based**: `2024-01-15-Meeting-Notes` -- **Topic-based**: `Climate-Change-Policy-Summary` -- **Source-based**: `Smith-2023-Key-Insights` -- **Type-based**: `Action-Items-Q1-2024` - -### Using the Search Function - -Open Notebook provides powerful search capabilities: - -1. **Full-Text Search**: Find notes containing specific words or phrases -2. **Vector Search**: Discover semantically related content -3. **Combined Search**: Use both methods for comprehensive results - -## Export and Sharing Options - -### Current Export Capabilities - -While Open Notebook continues to develop export features, you can currently: - -- **Copy Note Content**: Select and copy text from individual notes -- **Search and Review**: Use the search function to find and review notes across notebooks -- **Reference in Chat**: Notes become part of your knowledge base for AI conversations - -### Sharing Through the Platform - -- **Notebook Collaboration**: Share entire notebooks with collaborators -- **Context Sharing**: Include notes in AI chat contexts -- **Citation References**: Notes can be referenced in AI-generated responses - -### Best Practices for Export Preparation - -1. **Consistent Formatting**: Use standard Markdown formatting for easy conversion -2. **Complete Citations**: Include full source information in your notes -3. **Clear Structure**: Organize notes with clear headings and sections -4. **Regular Reviews**: Keep notes updated and relevant - -## Advanced Note-Taking Strategies - -### The Progressive Summarization Method - -1. **Initial Capture**: Create a comprehensive note from your source -2. **First Pass**: Highlight the most important points -3. **Second Pass**: Bold the most critical insights -4. 
**Third Pass**: Create a short summary at the top - -### The Cornell Note Method - -Structure your notes with: -- **Main Content**: Primary information and insights -- **Key Points**: Bullet points of essential information -- **Summary**: Brief overview at the bottom - -### The Zettelkasten Approach - -While full Zettelkasten features are coming, you can start: -- **Atomic Notes**: One idea per note -- **Unique Titles**: Distinctive, searchable titles -- **Conceptual Connections**: Link ideas through references -- **Regular Review**: Revisit and update notes regularly - -## Troubleshooting Common Issues - -### Note Organization Problems - -**Issue**: Too many notes, hard to find information -**Solution**: -- Use consistent naming conventions -- Create index notes for major topics -- Regularly review and archive old notes - -### AI-Generated Notes Need Improvement - -**Issue**: AI notes don't capture what you need -**Solution**: -- Customize your transformation prompts -- Edit AI-generated notes after creation -- Provide more specific context to the AI - -### Missing Context in Notes - -**Issue**: Notes make sense now but not later -**Solution**: -- Always include source information -- Add context about why the note was created -- Use the suggested templates for structure - -## Future Note Features - -The Open Notebook roadmap includes exciting developments: - -### Canvas-Like Interface -- **Visual Note Arrangement**: Organize notes spatially -- **AI Collaboration**: Work with AI directly on note content -- **Mind Mapping**: Create visual connections between ideas - -### Enhanced Linking -- **Automatic Suggestions**: AI-powered recommendations for related notes -- **Bidirectional Links**: See connections from both sides -- **Link Strength**: Understand the relationship between connected notes - -### Smart Organization -- **Auto-Tagging**: AI-generated tags based on content -- **Similarity Detection**: Find related notes automatically -- **Trend Analysis**: Identify patterns in your note-taking - -## Getting Help - -If you need assistance with notes: - -1. **Check the Documentation**: Review this guide and other docs -2. **Community Discussions**: Join the [GitHub Discussions](https://github.com/lfnovo/open-notebook/discussions/categories/ideas) -3. **Feature Requests**: Suggest new note features in the discussions -4. **Report Issues**: Use GitHub Issues for bugs or problems - -Remember, effective note-taking is a skill that develops over time. Experiment with different approaches and find what works best for your research style and goals. \ No newline at end of file diff --git a/docs/user-guide/search.md b/docs/user-guide/search.md deleted file mode 100644 index adbc13c6..00000000 --- a/docs/user-guide/search.md +++ /dev/null @@ -1,351 +0,0 @@ -# Search User Guide - -Open Notebook provides powerful search capabilities to help you find information quickly across your entire knowledge base. This guide covers both traditional search methods and AI-powered question answering. - -## Overview - -Open Notebook offers two main search approaches: - -1. **Direct Search** - Find specific content using text or vector search -2. **Ask Your Knowledge Base** - Get AI-generated answers based on your content - -## Direct Search - -### Search Types - -#### Text Search -Text search uses full-text indexing with BM25 ranking to find exact matches and similar terms across your content. 
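To make the ranking concrete, here is a small, self-contained sketch of BM25 scoring. It is illustrative only: Open Notebook's real ranking happens inside the database's full-text index, which also applies the stemming and tokenization features listed below, while this toy version just lowercases and splits on whitespace.

```python
# Toy BM25 ranker, for intuition only (not Open Notebook's implementation).
import math
from collections import Counter

def bm25_rank(query: str, docs: list[str], k1: float = 1.5, b: float = 0.75) -> list[tuple[float, str]]:
    """Score documents against a query with Okapi BM25 and return them best-first."""
    corpus = [doc.lower().split() for doc in docs]  # toy tokenizer: no stemming
    n = len(corpus)
    avg_len = sum(len(tokens) for tokens in corpus) / n
    terms = query.lower().split()
    # Document frequency: how many documents contain each query term.
    df = {t: sum(1 for tokens in corpus if t in tokens) for t in terms}
    scored = []
    for doc, tokens in zip(docs, corpus):
        tf = Counter(tokens)
        score = 0.0
        for t in terms:
            if df[t] == 0:
                continue
            idf = math.log((n - df[t] + 0.5) / (df[t] + 0.5) + 1)  # rare terms weigh more
            norm = 1 - b + b * len(tokens) / avg_len  # longer docs are penalized
            score += idf * tf[t] * (k1 + 1) / (tf[t] + k1 * norm)
        scored.append((score, doc))
    return sorted(scored, reverse=True)

docs = [
    "Neural network architecture for image recognition",
    "Cooking recipes and weeknight kitchen tips",
    "Training a neural network with backpropagation",
]
for score, doc in bm25_rank("neural network architecture", docs):
    print(f"{score:.3f}  {doc}")
```

Note how the `idf` term makes rarer words dominate the score; this is why the tips below recommend specific keywords over generic ones for text search.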
- -**Best for:** -- Finding specific keywords, phrases, or terms -- Locating exact quotes or references -- Technical terms and proper nouns -- When you know approximately what you're looking for - -**Search Coverage:** -- **Sources**: Title, full text content, embedded chunks, and insights -- **Notes**: Title and content - -**Features:** -- Highlighted search results show matching terms -- BM25 relevance scoring -- Stemming and lowercase matching -- Punctuation and camel case tokenization - -#### Vector Search -Vector search uses semantic embeddings to find conceptually similar content, even when exact keywords don't match. - -**Best for:** -- Finding concepts and ideas -- Discovering related content -- Exploring themes and topics -- When you're not sure of exact terminology - -**Requirements:** -- An embedding model must be configured (see [Models Guide](../models.md)) -- Content must be processed with embeddings - -**Search Coverage:** -- **Sources**: Embedded content chunks and insights -- **Notes**: Full note content (with embeddings) - -**Features:** -- Cosine similarity scoring -- Configurable minimum similarity threshold (default: 0.2) -- Semantic understanding of content relationships - -### Search Interface - -#### Basic Search -1. Go to the **Search** tab in the "Ask and Search" page -2. Enter your search query -3. Select search type (Text or Vector) -4. Choose what to search: - - **Search Sources**: Include imported documents and content - - **Search Notes**: Include your personal notes -5. Click **Search** - -#### Search Results -Results are displayed with: -- **Relevance/Similarity Score**: Higher scores indicate better matches -- **Title**: Content title or note title -- **Content Preview**: Matching text excerpt -- **Source Link**: Click to view the full source or note -- **Highlights**: Matching terms highlighted in text search - -### Search Tips - -#### Text Search Best Practices -- Use specific keywords for better results -- Try different variations of terms -- Use quotes for exact phrase matching -- Include technical terms and acronyms -- Be specific rather than general - -**Examples:** -``` -machine learning algorithms -"neural network architecture" -API documentation -React hooks -``` - -#### Vector Search Best Practices -- Use natural language descriptions -- Focus on concepts rather than exact words -- Describe what you're looking for thematically -- Use complete sentences or phrases - -**Examples:** -``` -How to optimize database performance -Strategies for team collaboration -Best practices for code review -User interface design principles -``` - -### Search Filters and Options - -#### Content Type Filters -- **Search Sources**: Include imported documents, PDFs, web pages, etc. -- **Search Notes**: Include your personal notes and AI-generated content - -#### Search Parameters -- **Limit**: Maximum number of results (default: 100, max: 1000) -- **Minimum Score**: For vector search, set similarity threshold (0.0 to 1.0) - -### Advanced Search Techniques - -#### Combining Search Types -1. Start with vector search for broad concept discovery -2. Use text search for specific details -3. Cross-reference results between search types - -#### Iterative Search Strategy -1. Begin with broader terms -2. Refine based on initial results -3. Use discovered keywords for follow-up searches -4. 
Explore related concepts found in results - -#### Search Result Analysis -- Pay attention to similarity/relevance scores -- Look for patterns in top results -- Use result previews to assess relevance -- Follow source links for full context - -## Ask Your Knowledge Base - -The Ask feature uses AI to generate comprehensive answers based on your content, combining multiple search queries automatically. - -### How It Works - -1. **Query Strategy**: AI analyzes your question and generates multiple search queries -2. **Individual Searches**: Each query is processed using vector search -3. **Individual Answers**: AI generates answers for each search result -4. **Final Answer**: All individual answers are combined into a comprehensive response - -### Requirements - -- **Embedding Model**: Required for vector search functionality -- **Three AI Models**: - - **Query Strategy Model**: Powerful model for search planning (GPT-4, Claude, etc.) - - **Individual Answer Model**: Can be faster/cheaper model (GPT-4 Mini, etc.) - - **Final Answer Model**: Powerful model for synthesis (GPT-4, Claude, etc.) - -### Using the Ask Feature - -1. Go to the **Ask Your Knowledge Base** tab -2. Enter your question in natural language -3. Select your AI models for each processing stage -4. Click **Ask** - -### Model Selection Guidelines - -#### Query Strategy Model -**Recommended**: GPT-4, Claude Sonnet, Gemini Pro, Llama 3.2 -- Needs strong reasoning for search strategy -- Determines what information to look for -- Critical for answer quality - -#### Individual Answer Model -**Recommended**: GPT-4 Mini, Gemini Flash, cheaper models -- Processes individual search results -- Can use faster models for efficiency -- Multiple instances run in parallel - -#### Final Answer Model -**Recommended**: GPT-4, Claude Sonnet, Gemini Pro -- Synthesizes all information -- Creates coherent final response -- Benefits from strong language capabilities - -### Question Types - -#### Factual Questions -``` -What are the main benefits of microservices architecture? -How does React handle state management? -What security measures are recommended for APIs? -``` - -#### Analytical Questions -``` -Compare different database indexing strategies -Analyze the pros and cons of remote work policies -What are the trade-offs between SQL and NoSQL databases? -``` - -#### Synthesis Questions -``` -Summarize the key findings from my research on user experience -What patterns emerge from my project retrospectives? -How do different sources approach machine learning optimization? -``` - -### Answer Features - -#### Citations and References -- Answers include links to source documents -- Click citations to view original content -- Source attribution for fact-checking -- Transparency in information sources - -#### Saving Answers -- Save AI-generated answers as notes -- Select target notebook -- Preserved as "AI" note type -- Maintains question-answer format - -### Best Practices - -#### Effective Questions -- Be specific about what you need -- Provide context when helpful -- Ask follow-up questions to drill down -- Use natural language - -#### Question Examples -**Good:** -``` -How do the papers in my collection approach neural network optimization? -What are the common themes in my customer feedback notes? -Based on my research, what are the best practices for API design? -``` - -**Less Effective:** -``` -Tell me about AI -What's in my notes? 
-Help me understand stuff -``` - -#### Managing Model Costs -- Use cheaper models for individual answers -- Reserve powerful models for strategy and final synthesis -- Monitor token usage in model settings -- Consider using local models for frequent queries - -## Search Performance Optimization - -### Content Preparation -- **Source Processing**: Ensure sources are properly imported and processed -- **Note Organization**: Well-structured notes improve search results -- **Embedding Coverage**: Verify content has embeddings for vector search - -### Search Strategy -- **Progressive Refinement**: Start broad, then narrow down -- **Mixed Approach**: Combine text and vector search -- **Result Evaluation**: Review search scores and relevance - -### System Optimization -- **Embedding Model**: Choose appropriate model for your use case -- **Index Health**: Ensure search indices are properly maintained -- **Content Volume**: Balance between comprehensive coverage and search speed - -## Integration with Notes and Chat - -### Saving Search Results -- **Direct Saving**: Save useful search results as notes -- **Answer Preservation**: Save AI-generated answers for reference -- **Notebook Organization**: Organize saved searches by topic - -### Search in Workflow -1. **Research Phase**: Use search to gather relevant information -2. **Analysis Phase**: Ask targeted questions about findings -3. **Synthesis Phase**: Combine insights into new notes -4. **Review Phase**: Search for related content and updates - -### Chat Integration -- Use search results to inform chat conversations -- Ask follow-up questions based on search findings -- Reference search results in chat for context - -## Troubleshooting - -### Common Issues - -#### No Vector Search Available -**Problem**: Vector search option not showing -**Solution**: Configure an embedding model in the Models section - -#### Poor Search Results -**Problem**: Search returns irrelevant results -**Solutions**: -- Try different keywords or phrases -- Switch between text and vector search -- Check search filters (sources/notes) -- Verify content has been properly processed - -#### Ask Feature Not Working -**Problem**: Ask feature shows errors -**Solutions**: -- Ensure embedding model is configured -- Verify all three AI models are selected -- Check model API keys and settings -- Confirm content has embeddings - -#### Slow Search Performance -**Problem**: Search takes too long -**Solutions**: -- Reduce search limit -- Use more specific queries -- Check system resources -- Consider content volume optimization - -### Getting Help - -If you encounter issues: -1. Check the [Troubleshooting Guide](../troubleshooting/) -2. Verify model configurations -3. Review search query syntax -4. Check system requirements - -## Advanced Features - -### Search Result Analysis -- Review relevance scores to understand match quality -- Use highlighted excerpts to verify result accuracy -- Follow source links for full context - -### Batch Processing -- Use Ask feature for processing multiple related questions -- Save answers as notes for systematic knowledge building -- Create question templates for consistent analysis - -### Integration Workflows -- Combine search with transformation features -- Use search results as input for AI analysis -- Create knowledge maps from search patterns - -## Conclusion - -Open Notebook's search capabilities provide both precision and discovery tools for your knowledge base. 
By combining traditional text search with modern vector search and AI-powered question answering, you can efficiently find information and generate insights from your content. - -Remember to: -- Choose the right search type for your needs -- Configure appropriate AI models for Ask feature -- Save valuable results as notes -- Use iterative search strategies for best results -- Leverage both search types for comprehensive coverage - -The search system grows more valuable as you add more content and develop better search strategies tailored to your specific knowledge domains. \ No newline at end of file diff --git a/docs/user-guide/sources.md b/docs/user-guide/sources.md deleted file mode 100644 index 30fb317f..00000000 --- a/docs/user-guide/sources.md +++ /dev/null @@ -1,310 +0,0 @@ -# Sources Guide - -Open Notebook serves as your central hub for research materials, supporting a wide variety of content formats. This guide covers everything you need to know about adding, managing, and organizing sources in your notebooks. - -## Supported File Types and Formats - -Open Notebook leverages the powerful [content-core](https://github.com/lfnovo/content-core) library to process various content types with intelligent engine selection. - -### 📄 Documents -- **PDF** - Research papers, reports, books -- **EPUB** - E-books and digital publications -- **Microsoft Office**: - - Word documents (.docx, .doc) - - PowerPoint presentations (.pptx, .ppt) - - Excel spreadsheets (.xlsx, .xls) -- **Text files** - Plain text (.txt), Markdown (.md) -- **HTML** - Web pages and HTML files - -### 🎥 Media Files -- **Video formats**: - - MP4, AVI, MOV, WMV - - Automatic transcription to text -- **Audio formats**: - - MP3, WAV, M4A, AAC - - Speech-to-text conversion - -### 🌐 Web Content -- **URLs** - Any web page, blog post, or article -- **YouTube videos** - Automatic transcript extraction -- **News articles** - Automatic content extraction - -### 🖼️ Images -- **JPG, PNG, TIFF** - With OCR text recognition -- **Screenshots** - Perfect for capturing visual information - -### 📦 Archives -- **ZIP, TAR, GZ** - Compressed file support - -## Adding Sources Step-by-Step - -### Method 1: Adding Links - -1. **Navigate to your notebook** -2. **Click "Add Source"** -3. **Select "Link" option** -4. **Enter the URL** in the text field -5. **Configure options** (see Configuration Options below) -6. **Click "Process"** - -**Examples:** -- Research articles: `https://arxiv.org/abs/2301.00001` -- YouTube videos: `https://www.youtube.com/watch?v=dQw4w9WgXcQ` -- News articles: `https://example.com/article` -- Blog posts: `https://blog.example.com/post` - -### Method 2: Uploading Files - -1. **Navigate to your notebook** -2. **Click "Add Source"** -3. **Select "Upload" option** -4. **Click "Choose File"** and select your document -5. **Configure options** (see Configuration Options below) -6. **Click "Process"** - -**Supported formats:** -- Documents: PDF, DOCX, PPTX, XLSX, EPUB, TXT, MD -- Media: MP4, MP3, WAV, M4A (requires speech-to-text model) -- Images: JPG, PNG, TIFF (with OCR) -- Archives: ZIP, TAR, GZ - -### Method 3: Adding Text Content - -1. **Navigate to your notebook** -2. **Click "Add Source"** -3. **Select "Text" option** -4. **Paste or type your content** in the text area -5. **Configure options** (see Configuration Options below) -6. 
**Click "Process"** - -**Use cases:** -- Meeting notes or transcripts -- Research findings -- Interview transcripts -- Code snippets or documentation - -## Configuration Options - -### Transformations -Apply AI-powered transformations to extract insights from your sources: - -- **Summary** - Generate concise summaries -- **Key Points** - Extract main ideas and takeaways -- **Questions** - Generate questions for further research -- **Analysis** - Provide detailed analysis of content -- **Custom transformations** - Create your own prompts - -### Embedding Options -Choose how content should be embedded for vector search: - -- **Ask every time** - Prompt for each source -- **Always embed** - Automatically embed all sources -- **Never embed** - Skip embedding (can be done later) - -**Note:** Embedding enables AI-powered search and context retrieval but uses tokens from your AI provider. - -### File Management -- **Delete after processing** - Remove uploaded files from server after processing -- **Keep files** - Retain files on server (useful for archival) - -## Source Management and Organization - -### Viewing Source Details -Click the **"Expand"** button on any source to view: -- Full extracted content -- Generated insights (transformations) -- Processing metadata -- Embedded chunk information - -### Context Configuration -Control how sources are included in AI conversations: - -- **🚫 Not in Context** - Exclude from AI context -- **📄 Summary** - Include summary only (recommended) -- **📋 Full Content** - Include complete content (uses more tokens) - -### Source Metadata -Each source includes: -- **Title** - Extracted or custom title -- **Topics** - Automatically detected or manually added tags -- **Created/Updated** - Timestamps for tracking -- **Embedded chunks** - Number of vector embeddings -- **Insights count** - Number of generated insights - -### Searching Sources -Use the search functionality to find specific sources: -- **Text search** - Search titles and content -- **Vector search** - Semantic similarity search -- **Filter by notebook** - View sources from specific notebooks -- **Filter by type** - URLs, uploads, or text content - -## Source Processing and Transformation - -### Content Extraction Engines -Open Notebook uses intelligent engine selection: - -- **Docling** - PDF and Office documents (default) -- **PyMuPDF** - Lightweight PDF processing -- **Firecrawl** - Enhanced web scraping -- **Jina** - Advanced content extraction -- **BeautifulSoup** - Standard web scraping - -### Processing Workflow -1. **Upload/URL submission** - Source is received -2. **Engine selection** - Best extraction method chosen -3. **Content extraction** - Text and metadata extracted -4. **Transformation application** - AI insights generated -5. **Embedding creation** - Vector embeddings for search -6. **Storage** - Content saved to database - -### Speech-to-Text Processing -For audio and video files: -1. **Audio extraction** - Video converted to audio -2. **Transcription** - Speech converted to text -3. **Content processing** - Standard text processing applied - -**Requirements:** -- Speech-to-text model configured (OpenAI Whisper, etc.) 
-- Compatible audio/video format - -## Best Practices - -### Content Organization -- **Use descriptive titles** - Edit auto-generated titles for clarity -- **Add relevant topics** - Tag sources for better categorization -- **Group related sources** - Keep related materials in same notebook -- **Regular cleanup** - Remove outdated or irrelevant sources - -### Performance Optimization -- **Selective embedding** - Only embed sources you'll search -- **Context management** - Use summary context when possible -- **Batch processing** - Add multiple sources at once -- **File cleanup** - Enable automatic file deletion - -### Cost Management -- **Monitor token usage** - Track embedding and transformation costs -- **Use summary context** - Reduce token consumption in conversations -- **Selective transformations** - Only apply needed transformations -- **Provider selection** - Choose cost-effective AI providers - -## Limitations and Considerations - -### File Size Limits -- **Maximum upload size** - Depends on server configuration -- **Processing time** - Large files take longer to process -- **Memory usage** - Very large files may cause processing issues - -### Format Limitations -- **Scanned PDFs** - May require OCR processing -- **Password-protected files** - Cannot be processed -- **Corrupted files** - Will fail processing gracefully -- **Proprietary formats** - Some formats may not be supported - -### Language Support -- **YouTube transcripts** - Configurable preferred languages -- **Multi-language content** - Supported by AI models -- **OCR accuracy** - Varies by image quality and language - -### Privacy and Security -- **File storage** - Temporary files deleted after processing -- **Content persistence** - Extracted text stored in database -- **AI processing** - Content sent to configured AI providers -- **Access control** - Password protection available - -## Troubleshooting Source Issues - -### Common Problems and Solutions - -#### "Unsupported file type" error -**Solution:** -- Check the supported formats list above -- Ensure file is not corrupted -- Try converting to a supported format - -#### "No transcript found" for YouTube videos -**Solution:** -- Verify video has captions/subtitles -- Check YouTube transcript language preferences -- Try manually uploading audio if available - -#### "Processing failed" for documents -**Solution:** -- Ensure file is not password-protected -- Check file size (try smaller files) -- Verify file is not corrupted -- Try different processing engine in settings - -#### "Audio/video upload disabled" warning -**Solution:** -- Configure speech-to-text model in Models -- Ensure provider API keys are set -- Check model availability - -#### Embedding fails or takes too long -**Solution:** -- Check embedding model configuration -- Verify API key and quota limits -- Try processing without embedding first -- Check content length (very long content may fail) - -### Getting Help -- **Check server logs** - Enable debug logging for detailed error info -- **GitHub Issues** - Report bugs or request features -- **Discord Community** - Get help from other users -- **Documentation** - Review setup and configuration guides - -## Advanced Features - -### Custom Transformations -Create your own AI-powered transformations: -1. Navigate to **Settings → Transformations** -2. Click **"Create New"** -3. Define your prompt template -4. Set default application preferences -5. 
Test with sample content - -### Bulk Operations -- **Multiple file upload** - Select multiple files at once -- **Batch transformations** - Apply to multiple sources -- **Bulk embedding** - Process multiple sources for search - -### API Integration -Use the REST API for programmatic source management: -- **Create sources** - `POST /api/sources` -- **List sources** - `GET /api/sources` -- **Get source details** - `GET /api/sources/{id}` -- **Update source** - `PUT /api/sources/{id}` -- **Delete source** - `DELETE /api/sources/{id}` - -### Automation -- **Auto-embedding** - Configure default embedding behavior -- **Default transformations** - Apply specific transformations to all sources -- **File cleanup** - Automatic deletion of temporary files -- **Regular processing** - Schedule source updates - -## Integration Examples - -### Research Workflow -1. **Add research papers** (PDF uploads) -2. **Include relevant articles** (URL links) -3. **Add meeting notes** (text content) -4. **Apply analysis transformation** to extract insights -5. **Enable embedding** for cross-source search -6. **Use summary context** for efficient AI conversations - -### Content Creation Workflow -1. **Gather reference materials** (mixed formats) -2. **Apply summary transformations** for quick overviews -3. **Extract key points** for outline creation -4. **Use full content context** for detailed writing -5. **Search across sources** for specific information - -### Learning and Study Workflow -1. **Upload course materials** (PDFs, videos) -2. **Add supplementary articles** (web links) -3. **Create study notes** (text content) -4. **Apply question generation** for self-testing -5. **Use vector search** for concept lookup -6. **Generate summaries** for review - -This comprehensive sources guide should help you make the most of Open Notebook's powerful content processing capabilities. Remember to experiment with different configurations to find the workflow that works best for your specific use case. \ No newline at end of file diff --git a/frontend/src/CLAUDE.md b/frontend/src/CLAUDE.md new file mode 100644 index 00000000..1d09ad5a --- /dev/null +++ b/frontend/src/CLAUDE.md @@ -0,0 +1,159 @@ +# Frontend Architecture + +Next.js React application providing UI for Open Notebook research assistant. Three-layer architecture: **pages** (Next.js App Router), **components** (feature-specific UI), and **lib** (data fetching, state management, utilities). + +## High-Level Data Flow + +``` +Pages (Next.js) → Components (feature-specific) → Hooks (queries/mutations) + ↓ + Stores (auth/modal state) → API module → Backend +``` + +User interactions trigger mutations/queries via hooks, which communicate with the backend through the API module. Store state (auth, modals) flows back to components via hooks. Child CLAUDE.md files document specific modules in detail: + +- **`lib/api/CLAUDE.md`**: Axios client, FormData handling, interceptors +- **`lib/hooks/CLAUDE.md`**: TanStack Query wrappers, SSE streaming, context building +- **`lib/stores/CLAUDE.md`**: Zustand auth/modal state, localStorage persistence +- **`components/ui/CLAUDE.md`**: Radix UI primitives, CVA styling, accessibility + +## Architectural Layers + +### Pages (`src/app/`) — Next.js App Router +- `(auth)/login`: Authentication entry point +- `(dashboard)/`: Protected routes (notebooks, sources, search, models, etc.) 
+- Directory-based routing; each `page.tsx` is a route endpoint
+- **Key pattern**: Pages call hooks to fetch data, then render components with that state
+- **Route groups** `(auth)`, `(dashboard)` organize routes by feature without affecting the URL
+
+### Components (`src/components/`) — Feature-Specific UI
+- **layout**: `AppShell.tsx`, `AppSidebar.tsx` — main layout wrapper used by all pages
+- **providers**: `ThemeProvider`, `QueryProvider`, `ModalProvider` — app-wide context setup
+- **auth**: `LoginForm.tsx` — authentication UI
+- **common**: `CommandPalette`, `ErrorBoundary`, `ContextToggle`, `ModelSelector` — shared across pages
+- **ui**: Reusable Radix UI building blocks (see child CLAUDE.md)
+- **source**, **notebooks**, **search**, **podcasts**: Feature-specific components consuming hooks
+
+**Component composition pattern**: Pages → Feature components → UI components. Feature components handle page-level state (loading, error); UI components remain stateless and styled.
+
+### Lib (`src/lib/`) — Data & State Layer
+
+#### `lib/api/` — Backend Communication
+- **`client.ts`**: Central Axios instance with auth interceptor, FormData handling, 10-min timeout
+- **`query-client.ts`**: TanStack Query configuration
+- **Resource modules** (`sources.ts`, `chat.ts`, `notebooks.ts`, etc.): Endpoint-specific functions returning typed responses
+- **Pattern**: All requests go through `apiClient`; auth token auto-added from localStorage
+
+#### `lib/hooks/` — React Query + Custom Logic
+- **Query hooks**: `useNotebookSources`, `useSources`, `useSource` — TanStack Query wrappers with cache keys
+- **Mutation hooks**: `useCreateSource`, `useUpdateSource`, `useDeleteSource` — mutations with toast feedback + cache invalidation
+- **Complex hooks**: `useNotebookChat`, `useSourceChat` — session management, message streaming, context building
+- **SSE streaming**: `useAsk` — parses newline-delimited JSON from backend for multi-stage workflows
+- **Pattern**: Hooks return `{ data, isLoading, error, refetch }` + action functions; cache invalidation on mutations
+
+#### `lib/stores/` — Application State
+- **`auth-store.ts`**: Authentication state (token, isAuthenticated) with 30-second check caching
+- **Zustand + persist middleware**: Auto-syncs sensitive state to localStorage
+- **Pattern**: Store actions (`login()`, `logout()`, `checkAuth()`) update state; consumed via hooks in components
+
+#### `lib/types/` — TypeScript Definitions
+- API request/response shapes, domain models (Notebook, Source, Note, etc.)
+- Ensures type safety across API calls and store mutations
+
+## Data & Control Flow Walkthrough
+
+### Example: Notebook Chat
+1. **Page** (`notebooks/[id]/page.tsx`) fetches initial data, passes `notebookId` to `ChatColumn` component
+2. **Hook call** (`useNotebookChat()`):
+   - Queries sessions for notebook via TanStack Query
+   - Sets up message state + context building logic
+   - Returns `{ messages, sendMessage(), setModelOverride() }`
+3. **Component renders**: `ChatColumn` displays messages, text input
+4. **User sends message**: Component calls the hook's `sendMessage()` action
+5. **Hook execution**:
+   - Builds context from selected sources/notes via `buildContext()` helper
+   - Calls `chatApi.sendMessage()` (from API module)
+   - Client-side optimistic update: adds message to local state before response
+6. **Backend response** arrives, TanStack Query updates cache
+7. **Cache invalidation** on other source/note mutations ensures stale views refetch
+
+### Example: File Upload with Source Creation
+1. 
**Component** (`SourceDialog`) renders form with file picker +2. **Hook** (`useFileUpload`): + - Converts file to FormData (JSON fields stringified) + - Calls `sourcesApi.create()` with FormData + - API client interceptor deletes Content-Type header (lets browser set multipart boundary) +3. **Toast notifications** show progress +4. **Cache invalidation** on success: `queryClient.invalidateQueries(['sources'])` +5. **Related queries** auto-refetch: notebooks, sources list, etc. + +## Key Patterns & Cross-Layer Coordination + +### Caching & Invalidation +- **Query keys**: `QUERY_KEYS.notebook(id)`, `QUERY_KEYS.sources(notebookId)` — hierarchical structure +- **Broad invalidation**: `['sources']` invalidates all source queries; trade-off between accuracy + performance +- **Auto-refetch**: `refetchOnWindowFocus: true` on frequently-changing data (sources, notebooks) + +### Auth & Protected Routes +- **Middleware** (`src/middleware.ts`): Redirects unauthenticated users to `/login` +- **Auth store**: Validates token via `/notebooks` API call (actual validation, not JWT decode) +- **Interceptor**: Adds `Bearer {token}` to all requests; 401 response clears auth and redirects to login + +### Modal State Management +- **Modal hooks**: Components query modal state from stores +- **Context**: Modals pass data (e.g., notebook ID) to child components +- **Pattern**: One store per modal type; triggered by button clicks + data passing via hook arguments + +### Error Handling +- **API errors**: All request failures propagate to consuming code; components show toast notifications +- **Toast feedback**: Mutations show success/error toasts (from `sonner` library) +- **Error boundary**: App-level error boundary catches React render errors; shows fallback UI + +### FormData Handling +- **JSON fields**: Nested objects (arrays, objects) must be JSON stringified before FormData +- **Content-Type header**: Removed by interceptor for FormData requests (lets browser set boundary) +- **Example**: `sources` array converted to string via `JSON.stringify()` before appending to FormData + +## Component Organization Within Features + +- **Feature folders** (`source/`, `notebooks/`, `podcasts/`): Group related components +- **Composition**: Larger components nest smaller ones; no deep prop drilling (state lifted to hooks) +- **Dialog patterns**: Features define dialog components for inline actions (edit, create, delete) +- **Props**: Components accept data + action callbacks from parent or hooks + +## Providers & Context Setup + +**Root layout** (`app/layout.tsx`) wraps app with: +1. `ThemeProvider` — next-themes for light/dark mode +2. `QueryProvider` — TanStack Query client +3. `ErrorBoundary` — React error boundary +4. `ConnectionGuard` — checks backend connectivity on startup +5. 
`Toaster` — sonner toast notification system + +## Important Gotchas & Design Decisions + +- **Token storage**: Stored in localStorage under `auth-storage` key (Zustand persist); consumed by API interceptor +- **Base URL discovery**: API client fetches base URL from runtime config on first request (async; can be slow on startup) +- **Optimistic updates**: Chat messages added to state before server confirmation; removed on error +- **Modal lifecycle**: Dialogs not auto-reset; parent must clear form state after submit +- **Focus management**: Dialog auto-focuses first input; can cause layout shifts if inputs are conditional +- **Cache invalidation breadth**: Trade-off between precision + simplicity; broad invalidation simpler but may over-fetch + +## How to Add a New Feature + +1. **Create page**: `app/(dashboard)/feature/page.tsx` — calls hooks, renders components +2. **Create feature components**: `components/feature/` — compose UI + business logic +3. **Add hooks** (if data needed): `lib/hooks/useFeature.ts` — TanStack Query wrapper +4. **Add API module** (if backend call needed): `lib/api/feature.ts` — resource-specific functions +5. **Add types**: `lib/types/api.ts` — request/response shapes +6. **Use UI components**: Import from `components/ui/` for consistent styling +7. **Handle auth**: Middleware redirects unauthenticated users; no special handling needed in component + +## Testing + +- **Hooks**: Mock API functions, wrap in `QueryClientProvider`, assert query/mutation behavior +- **Components**: Mock hooks via `vi.fn()`, test rendering + user interactions +- **API calls**: Mock `axios` interceptors; test request/response shapes +- **Stores**: Mock store state, test mutations via `act()`, assert state changes + +See child CLAUDE.md files for module-specific testing patterns. diff --git a/frontend/src/components/ui/CLAUDE.md b/frontend/src/components/ui/CLAUDE.md new file mode 100644 index 00000000..ff45b175 --- /dev/null +++ b/frontend/src/components/ui/CLAUDE.md @@ -0,0 +1,64 @@ +# UI Components Module + +Radix UI-based accessible component library with CVA styling, composed building blocks, and theming support. 
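+
+To make the styling approach concrete, here is a minimal sketch of the CVA-plus-`cn()` pattern described under Important Patterns below. It is illustrative only: the real `button.tsx` defines more variants and sizes, and `cn()` is assumed to come from `@/lib/utils`.
+
+```tsx
+import * as React from "react"
+import { cva, type VariantProps } from "class-variance-authority"
+
+import { cn } from "@/lib/utils"
+
+// One cva() definition holds every variant/size combination.
+const buttonVariants = cva(
+  "inline-flex items-center justify-center rounded-md text-sm font-medium",
+  {
+    variants: {
+      variant: {
+        default: "bg-primary text-primary-foreground",
+        destructive: "bg-destructive text-white",
+      },
+      size: { default: "h-9 px-4", sm: "h-8 px-3" },
+    },
+    defaultVariants: { variant: "default", size: "default" },
+  }
+)
+
+function Button({
+  className,
+  variant,
+  size,
+  ...props
+}: React.ComponentProps<"button"> & VariantProps<typeof buttonVariants>) {
+  return (
+    <button
+      // data-slot supports the testing/styling isolation convention
+      data-slot="button"
+      className={cn(buttonVariants({ variant, size }), className)}
+      {...props}
+    />
+  )
+}
+```
+
+Rendering `<Button variant="destructive" />` resolves to the destructive classes, while a `className` prop can still extend or override them through `cn()`.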
+ +## Key Components + +- **Primitives** (`button.tsx`, `dialog.tsx`, `select.tsx`, `dropdown-menu.tsx`): Radix UI wrappers with Tailwind styling +- **Composite components** (`checkbox-list.tsx`, `wizard-container.tsx`, `command.tsx`): Multi-part patterns combining primitives +- **Form components** (`input.tsx`, `textarea.tsx`, `label.tsx`, `form-section.tsx`): Input handling with accessibility +- **Feedback** (`alert.tsx`, `alert-dialog.tsx`, `sonner.tsx`, `progress.tsx`): User notifications and status +- **Layout** (`card.tsx`, `accordion.tsx`, `tabs.tsx`, `scroll-area.tsx`): Structural wrappers +- **Utilities** (`badge.tsx`, `separator.tsx`, `tooltip.tsx`, `popover.tsx`, `collapsible.tsx`): Small focused components + +## Important Patterns + +- **Radix UI wrappers**: Components delegate to Radix primitives; apply Tailwind classes via `cn()` utility +- **CVA (Class Variance Authority)**: `button.tsx` and similar use CVA for variant/size combinations +- **Composition via Slot**: `Button` uses `asChild` prop + `Slot` from radix to render as any element type +- **Data slots**: All components have `data-slot` attributes for testing/styling isolation +- **Controlled styling**: Classes hardcoded in components; use `className` prop to override/extend +- **Animations**: Radix `data-[state]` selectors for open/close animations (fade-in, zoom-in) +- **Accessibility first**: ARIA attributes from Radix (aria-invalid, sr-only labels, focus rings) +- **Dark mode support**: Uses Tailwind dark: prefix for color scheme (e.g., `dark:border-input`) + +## Key Dependencies + +- `@radix-ui/*`: Unstyled accessible primitives (dialog, select, dropdown-menu, etc.) +- `class-variance-authority`: CVA for variant patterns +- `lucide-react`: Icon library (XIcon in dialog close button) +- `@/lib/utils`: `cn()` utility for class merging + +## How to Add New Components + +1. Create `.tsx` file wrapping Radix primitive or composing existing components +2. Add `data-slot="component-name"` to root element +3. Use `cn()` to merge default classes with `className` prop +4. Export both component and variants (if using CVA) +5. 
Document prop shape and usage in JSDoc
+
+## Important Quirks & Gotchas
+
+- **Slot forwarding**: `asChild={true}` on Button passes all props to the child; ensure the child accepts them
+- **Form state in dialogs**: Dialogs are not reset automatically; the parent must manually clear form state
+- **Focus management**: Dialog auto-focuses the first input; can cause layout shifts if inputs are conditionally rendered
+- **Z-index stacking**: Fixed elements (Dialog overlay, dropdown menus) use z-50; be careful with other fixed elements
+- **Click outside closes dropdown**: Radix dropdowns auto-close on outside click; may conflict with hover-triggered actions
+- **SVG size inference**: Button uses `[&_svg:not([class*='size-'])]:size-4` to default icons without an explicit size class to 4x4; be explicit if a different size is needed
+- **CSS-in-JS conflicts**: Hardcoded Tailwind classes may conflict with global CSS; specificity matters
+- **Dark mode class**: Requires the `dark` class on the document root; not automatic with prefers-color-scheme alone
+
+## Testing Patterns
+
+```typescript
+// Test component rendering with props
+render(<Button variant="destructive">Delete</Button>)
+expect(screen.getByRole('button')).toHaveClass('bg-destructive')
+
+// Test Dialog interaction
+render(<Dialog open><DialogContent>Content</DialogContent></Dialog>)
+expect(screen.getByText('Content')).toBeInTheDocument()
+
+// Test accessibility
+expect(screen.getByRole('dialog')).toBeInTheDocument()
+```
diff --git a/frontend/src/lib/api/CLAUDE.md b/frontend/src/lib/api/CLAUDE.md
new file mode 100644
index 00000000..2307eb06
--- /dev/null
+++ b/frontend/src/lib/api/CLAUDE.md
@@ -0,0 +1,66 @@
+# API Module
+
+Axios-based client and resource-specific API modules for backend communication with auth, FormData handling, and error recovery.
+
+## Key Components
+
+- **`client.ts`**: Central Axios instance with request/response interceptors, auth headers, base URL resolution
+- **Resource modules** (`sources.ts`, `notebooks.ts`, `chat.ts`, `search.ts`, etc.): Endpoint-specific functions returning typed responses
+- **`query-client.ts`**: TanStack Query client configuration with default options
+- **`models.ts`, `notes.ts`, `embeddings.ts`, `settings.ts`**: Additional resource APIs
+
+## Important Patterns
+
+- **Single axios instance**: `apiClient` with 10-minute timeout (for slow LLM operations)
+- **Request interceptor**: Auto-fetches base URL from config, adds Bearer auth from localStorage `auth-storage`
+- **FormData handling**: Auto-removes Content-Type header for FormData to let browser set multipart boundary
+- **Response interceptor**: 401 clears auth and redirects to `/login`
+- **Async base URL resolution**: `getApiUrl()` fetches from runtime config on first request
+- **Typed responses**: All functions return typed data via `response.data`; errors propagate to the caller
+- **Namespaced modules**: Resource modules export namespaced objects (e.g., `sourcesApi.list()`, `sourcesApi.create()`)
+
+## Key Dependencies
+
+- `axios`: HTTP client library
+- `@/lib/config`: `getApiUrl()` for dynamic base URL
+- `@/lib/types/api`: TypeScript types for request/response shapes
+
+## How to Add New API Modules
+
+1. Create new file (e.g., `transforms.ts`)
+2. Import `apiClient`
+3. Export namespaced object with methods:
+   ```typescript
+   export const transformsApi = {
+     list: async () => {
+       const response = await apiClient.get('/transforms')
+       return response.data
+     },
+   }
+   ```
+4. 
Add types to `@/lib/types/api` if new response shapes needed + +## Important Quirks & Gotchas + +- **Base URL delay**: First request waits for `getApiUrl()` to resolve; can be slow on startup +- **FormData fields as JSON strings**: Nested objects (arrays, objects) must be JSON stringified in FormData (e.g., `notebooks`, `transformations`) +- **Timeout for streaming**: 10-minute timeout may not cover very long-running LLM operations; consider extending if needed +- **Auth token management**: Token stored in localStorage `auth-storage` key; uses Zustand persist middleware +- **Headers mutation in interceptor**: Mutating `config.headers` directly; be careful with middleware order +- **No retry logic**: Failed requests not automatically retried; must be handled in consuming code +- **Content-Type header precedence**: FormData interceptor deletes Content-Type after checking; subsequent interceptors won't re-add it + +## Usage Example + +```typescript +// Basic list +const sources = await sourcesApi.list({ notebook_id: notebookId }) + +// File upload with FormData +const response = await sourcesApi.create({ + type: 'upload', + file: fileObj, + notebook_id: notebookId, + async_processing: true +}) + +// With auth token (auto-added by interceptor) +const notes = await notesApi.list() +``` diff --git a/frontend/src/lib/hooks/CLAUDE.md b/frontend/src/lib/hooks/CLAUDE.md new file mode 100644 index 00000000..2e969a52 --- /dev/null +++ b/frontend/src/lib/hooks/CLAUDE.md @@ -0,0 +1,64 @@ +# Hooks Module + +React hooks for API data fetching, state management, and complex workflows (chat, streaming, file handling). + +## Key Components + +- **Query hooks** (`useNotebookSources`, `useSource`, `useSources`): TanStack Query wrappers for source data with infinite scroll and refetch strategies +- **Mutation hooks** (`useCreateSource`, `useUpdateSource`, `useDeleteSource`, `useFileUpload`, `useRetrySource`): Server mutations with toast notifications and cache invalidation +- **Chat hooks** (`useNotebookChat`, `useSourceChat`): Complex session management, context building, and message streaming +- **Streaming hooks** (`useAsk`): SSE parsing for multi-stage Ask workflows (strategy → answers → final answer) +- **Model/config hooks** (`useModels`, `useSettings`, `useTransformations`): Application-level settings and model management +- **Utility hooks** (`useMediaQuery`, `useToast`, `useNavigation`, `useAuth`): UI state and auth checking + +## Important Patterns + +- **TanStack Query integration**: All data hooks use `useQuery`/`useMutation` with `QUERY_KEYS` for cache consistency +- **Optimistic updates**: Mutations add local state before server response (e.g., notebook chat messages) +- **Cache invalidation**: Broad invalidation of query keys on mutations (e.g., `['sources']` catches all source queries) +- **Auto-refetch on return**: `refetchOnWindowFocus: true` on frequently-changing data (sources, notebooks) +- **Manual refetch controls**: Hooks return `refetch()` for parent components to trigger refresh +- **SSE streaming pattern**: `useAsk` manually parses newline-delimited JSON from `/api/search/ask`; handles incomplete buffers +- **Status polling**: `useSourceStatus` auto-refetches every 2s while `status === 'running' | 'queued' | 'new'` +- **Context building**: `useNotebookChat.buildContext()` assembles selected sources + notes with token/char counts + +## Key Dependencies + +- `@tanstack/react-query`: Data fetching and caching +- `sonner`: Toast notifications +- `@/lib/api/*`: API module exports 
(sourcesApi, chatApi, searchApi, etc.)
+- `@/lib/types/api`: TypeScript response types
+- Zustand stores: `useAuthStore`, modal managers
+
+## How to Add New Hooks
+
+1. **Data queries**: Create `useQuery` hook wrapping API call; use `QUERY_KEYS.entityName(id)` for cache key
+2. **Mutations**: Create `useMutation` hook with `onSuccess` cache invalidation + toast feedback
+3. **Complex state**: Use `useState` + callbacks for local state (see `useAsk`, `useNotebookChat`)
+4. **Return shape**: Export object with both state and action functions for composability
+
+## Important Quirks & Gotchas
+
+- **Cache invalidation breadth**: Invalidating `['sources']` affects ALL source queries; be precise if performance matters
+- **Optimistic updates + error handling**: `useNotebookChat` removes optimistic messages on error; ensure cleanup
+- **SSE buffer handling**: `useAsk` keeps incomplete lines in buffer between reads; incomplete JSON silently skipped
+- **Model override timing**: `useNotebookChat` stores pending model override if no session exists; applied on session creation
+- **Pagination cursor**: `useNotebookSources` uses offset-based pagination; `nextOffset` calculated from page size
+- **Status polling race**: `useSourceStatus` may refetch stale data before server catches up; retry logic has 3-attempt limit
+- **Keyboard trap in dialogs**: Some hooks manage modal state; ensure Dialog/Modal components handle escape key properly
+- **Form data handling**: `useFileUpload` and source creation convert JSON fields to strings in FormData
+
+## Testing Patterns
+
+```typescript
+// Mock API
+const mockApi = {
+  list: vi.fn().mockResolvedValue([...])
+}
+
+// Test hook with a QueryClientProvider wrapper
+renderHook(() => useSources(), { wrapper: QueryClientProvider })
+
+// Assert mutations trigger cache invalidation
+await waitFor(() => expect(queryClient.invalidateQueries).toHaveBeenCalled())
+```
diff --git a/frontend/src/lib/stores/CLAUDE.md b/frontend/src/lib/stores/CLAUDE.md
new file mode 100644
index 00000000..1214b7ff
--- /dev/null
+++ b/frontend/src/lib/stores/CLAUDE.md
@@ -0,0 +1,68 @@
+# Stores Module
+
+Zustand-based state management for authentication, modals, and application-level settings with localStorage persistence.
+
+## Key Components
+
+- **`auth-store.ts`**: Authentication state (token, isAuthenticated) with login, logout, auth checking, and Zustand persistence
+- **Modal stores** (imported via hooks): Modal visibility and data state management
+- **Settings persistence**: Auto-saves sensitive state (token, auth status) to localStorage via Zustand persist middleware
+
+## Important Patterns
+
+- **Zustand create + persist**: State + actions combined in single store; `persist` middleware auto-syncs to localStorage
+- **Selective persistence**: `partialize` option limits what's saved (e.g., only `token` and `isAuthenticated`, not `isLoading`)
+- **Hydration tracking**: `setHasHydrated()` marks when localStorage data loaded; used to avoid hydration mismatch in SSR
+- **Auth caching**: 30-second cache on `checkAuth()` to avoid excessive API calls; stores `lastAuthCheck` timestamp
+- **Network resilience**: Handles 401 globally in API interceptor; graceful degradation if API unreachable
+- **API validation**: Uses actual API call (`/notebooks` endpoint) to validate token instead of parsing JWT
+
+## Key Dependencies
+
+- `zustand`: State management library
+- `@/lib/config`: `getApiUrl()` for dynamic server discovery
+- localStorage: Browser persistence API
+
+## How to Add New Stores
+
+1. 
Create new file (e.g., `settings-store.ts`) +2. Define interface extending store state and actions +3. Use `create()(persist(...))` for persistence, or plain `create()` for ephemeral state: + ```typescript + export const useSettingsStore = create()( + persist((set) => ({ + theme: 'dark', + setTheme: (theme) => set({ theme }) + }), { + name: 'settings-storage' + }) + ) + ``` + +## Important Quirks & Gotchas + +- **Hydration mismatch**: Server-side rendered stores must check `hasHydrated` before rendering to prevent SSR mismatches +- **localStorage key collision**: Persist middleware uses `name` option as localStorage key; ensure unique per store +- **Token not validated**: `login()` only checks HTTP 200 response; doesn't decode or validate JWT structure +- **Auth check race condition**: Multiple simultaneous `checkAuth()` calls return early if one already in progress (`isCheckingAuth`) +- **Error messages from HTTP**: Shows 401/403/5xx status codes to user; helps with debugging but may leak info +- **Network timeout handling**: Network errors in `checkAuthRequired()` set `authRequired: null` (safe default); `login()` shows generic message +- **Logout doesn't invalidate session**: Client-side logout only clears local token; server session may still be valid +- **Double authentication**: Both `login()` and `checkAuth()` test same `/notebooks` endpoint; could be optimized with dedicated endpoint + +## Testing Patterns + +```typescript +// Mock store +const mockAuthStore = { + isAuthenticated: true, + token: 'test-token', + checkAuth: vi.fn().mockResolvedValue(true), + login: vi.fn().mockResolvedValue(true), + logout: vi.fn() +} + +// Test store mutations +act(() => store.setState({ theme: 'light' })) +expect(store.getState().theme).toBe('light') +``` diff --git a/new_docs/0-START-HERE/quick-start-local.md b/new_docs/0-START-HERE/quick-start-local.md new file mode 100644 index 00000000..8b8c901a --- /dev/null +++ b/new_docs/0-START-HERE/quick-start-local.md @@ -0,0 +1,260 @@ +# Quick Start - Local & Private (5 minutes) + +Get Open Notebook running with **100% local AI** using Ollama. No cloud API keys needed, completely private. + +## Prerequisites + +1. **Docker Desktop** installed + - [Download here](https://www.docker.com/products/docker-desktop/) + - Already have it? Skip to step 2 + +2. **Ollama** installed (for local LLM) + - [Download here](https://ollama.ai/) + - Or use Docker image (easier): `ollama/ollama` + +## Step 1: Choose Your Setup (1 min) + +### 🏠 Local Machine (Same Computer) +Everything runs on your machine. Recommended for testing/learning. + +### 🌐 Remote Server (Raspberry Pi, NAS, Cloud VM) +Run on a different computer, access from another. Needs network configuration. + +--- + +## Step 2: Create Configuration (1 min) + +Create a new folder `open-notebook-local` and add this file: + +**docker-compose.yml**: +```yaml +services: + surrealdb: + image: surrealdb/surrealdb:v2 + command: start --user root --pass password --bind 0.0.0.0:8000 memory + ports: + - "8000:8000" + + open_notebook: + image: lfnovo/open_notebook:v1-latest-single + ports: + - "8502:8502" # Web UI (React frontend) + - "5055:5055" # API (required!) 
+    environment:
+      # NO API KEYS NEEDED - Using Ollama (free, local)
+      - OLLAMA_API_BASE=http://ollama:11434
+
+      # Database (required)
+      - SURREAL_URL=ws://surrealdb:8000/rpc
+      - SURREAL_USER=root
+      - SURREAL_PASSWORD=password
+      - SURREAL_NAMESPACE=open_notebook
+      - SURREAL_DATABASE=open_notebook
+    volumes:
+      - ./notebook_data:/app/data
+      - ./surreal_data:/mydata
+    depends_on:
+      - surrealdb
+    restart: always
+
+  ollama:
+    image: ollama/ollama:latest
+    ports:
+      - "11434:11434"
+    volumes:
+      - ./ollama_models:/root/.ollama
+    environment:
+      # Optional: set GPU support if available
+      - OLLAMA_NUM_GPU=0
+    restart: always
+```
+
+**That's it!** No API keys, no secrets, completely private.
+
+---
+
+## Step 3: Start Services (1 min)
+
+Open a terminal in your `open-notebook-local` folder:
+
+```bash
+docker compose up -d
+```
+
+Wait 10-15 seconds for all services to start.
+
+---
+
+## Step 4: Download a Model (2-3 min)
+
+Ollama needs at least one language model. Pick one:
+
+```bash
+# Small & fast (recommended for testing)
+docker exec open_notebook-ollama-1 ollama pull mistral
+
+# OR: Better quality but slower
+docker exec open_notebook-ollama-1 ollama pull neural-chat
+
+# OR: Even better quality, more VRAM needed
+docker exec open_notebook-ollama-1 ollama pull llama2
+```
+
+This downloads the model (takes 1-5 minutes depending on your connection).
+
+---
+
+## Step 5: Access Open Notebook (instant)
+
+Open your browser:
+```
+http://localhost:8502
+```
+
+You should see the Open Notebook interface.
+
+---
+
+## Step 6: Configure Local Model (1 min)
+
+1. Click **Settings** (top right) → **Models**
+2. Set:
+   - **Language Model**: `ollama/mistral` (or whichever model you downloaded)
+   - **Embedding Model**: `ollama/nomic-embed-text` (auto-downloads if missing)
+3. Click **Save**
+
+---
+
+## Step 7: Create Your First Notebook (1 min)
+
+1. Click **New Notebook**
+2. Name: "My Private Research"
+3. Click **Create**
+
+---
+
+## Step 8: Add Local Content (1 min)
+
+1. Click **Add Source**
+2. Choose **Text**
+3. Paste some text or a local document
+4. Click **Add**
+
+---
+
+## Step 9: Chat With Your Content (1 min)
+
+1. Go to **Chat**
+2. Type: "What did you learn from this?"
+3. Click **Send**
+4. Watch the local Ollama model respond!
+
+---
+
+## Verification Checklist
+
+- [ ] Docker is running
+- [ ] You can access `http://localhost:8502`
+- [ ] Models are configured
+- [ ] You created a notebook
+- [ ] Chat works with the local model
+
+**All checked?** 🎉 You have a completely **private, offline** research assistant!
+
+---
+
+## Advantages of Local Setup
+
+✅ **No API costs** - Free forever
+✅ **No internet required** - True offline capability
+✅ **Privacy first** - Your data never leaves your machine
+✅ **No subscriptions** - No monthly bills
+
+**Trade-off:** Slower than cloud models (depends on your CPU/GPU)
+
+---
+
+## Troubleshooting
+
+### "ollama: command not found"
+
+The `ollama` binary lives inside the container, so run it through `docker exec` (your container name may differ):
+```bash
+docker ps  # find the Ollama container name
+docker exec <container-name> ollama pull mistral
+```
+
+### Model Download Stuck
+
+Check your internet connection and restart:
+```bash
+docker compose restart ollama
+```
+
+Then retry the model pull command.
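+
+If the pull succeeds but the model still doesn't appear, you can ask Ollama directly which models it has installed. A quick sanity check, assuming the default `11434` port mapping from the compose file above:
+
+```bash
+# Lists installed models as JSON; an empty "models" array means no pull has completed
+curl http://localhost:11434/api/tags
+```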
+ +### "Address already in use" Error + +```bash +docker compose down +docker compose up -d +``` + +### Low Performance + +Check if GPU is available: +```bash +# Show available GPUs +docker exec open_notebook-ollama-1 ollama ps + +# Enable GPU in docker-compose.yml: +# - OLLAMA_NUM_GPU=1 +``` + +Then restart: `docker compose restart ollama` + +### Adding More Models + +```bash +# List available models +docker exec open_notebook-ollama-1 ollama list + +# Pull additional model +docker exec open_notebook-ollama-1 ollama pull neural-chat +``` + +--- + +## Next Steps + +**Now that it's running:** + +1. **Add Your Own Content**: PDFs, documents, articles (see 3-USER-GUIDE) +2. **Explore Features**: Podcasts, transformations, search +3. **Full Documentation**: [See all features](../3-USER-GUIDE/index.md) +4. **Scale Up**: Deploy to a server with better hardware for faster responses +5. **Benchmark Models**: Try different models to find the speed/quality tradeoff you prefer + +--- + +## Going Further + +- **Switch models**: Change in Settings → Models anytime +- **Add more models**: Run `ollama pull ` and they'll appear in Settings +- **Deploy to server**: Same docker-compose.yml works anywhere +- **Use cloud hybrid**: Keep some local models, add OpenAI/Anthropic for complex tasks + +--- + +## Common Model Choices + +| Model | Speed | Quality | VRAM | Best For | +|-------|-------|---------|------|----------| +| **mistral** | Fast | Good | 4GB | Testing, general use | +| **neural-chat** | Medium | Better | 6GB | Balanced, recommended | +| **llama2** | Slow | Best | 8GB+ | Complex reasoning | +| **phi** | Very Fast | Fair | 2GB | Minimal hardware | + +--- + +**Need Help?** Join our [Discord community](https://discord.gg/37XJPXfz2w) - many users run local setups! diff --git a/new_docs/1-INSTALLATION/docker-compose.md b/new_docs/1-INSTALLATION/docker-compose.md new file mode 100644 index 00000000..630b7c9d --- /dev/null +++ b/new_docs/1-INSTALLATION/docker-compose.md @@ -0,0 +1,324 @@ +# Docker Compose Installation (Recommended) + +Multi-container setup with separate services. **Best for most users.** + +## Prerequisites + +- **Docker Desktop** installed ([Download](https://www.docker.com/products/docker-desktop/)) +- **5-10 minutes** of your time +- **API key** for at least one AI provider (OpenAI recommended for beginners) + +## Step 1: Get an API Key (2 min) + +Choose at least one AI provider. **OpenAI or OpenRouter recommended if you're unsure:** + +``` +OpenAI: https://platform.openai.com/api-keys +Anthropic: https://console.anthropic.com/ +Google: https://aistudio.google.com/ +Groq: https://console.groq.com/ +``` + +Add at least $5 in credits to your account. + +(Skip this if using Ollama for free local models) + +--- + +## Step 2: Create Configuration (2 min) + +Create a folder `open-notebook` and add this file: + +**docker-compose.yml**: +```yaml +services: + surrealdb: + image: surrealdb/surrealdb:v2 + command: start --user root --pass password --bind 0.0.0.0:8000 memory + ports: + - "8000:8000" + volumes: + - surreal_data:/mydata + + api: + image: lfnovo/open_notebook:v1-latest + ports: + - "5055:5055" + environment: + # AI Provider (choose ONE) + - OPENAI_API_KEY=sk-... # Your OpenAI key + # - ANTHROPIC_API_KEY=sk-ant-... # Or Anthropic + # - GOOGLE_API_KEY=... 
# Or Google + + # Database + - SURREAL_URL=ws://surrealdb:8000/rpc + - SURREAL_USER=root + - SURREAL_PASSWORD=password + - SURREAL_NAMESPACE=open_notebook + - SURREAL_DATABASE=open_notebook + + # API Configuration + - API_URL=http://localhost:5055 + depends_on: + - surrealdb + volumes: + - ./data:/app/data + restart: always + + frontend: + image: lfnovo/open_notebook-frontend:v1-latest + ports: + - "3000:3000" + environment: + - NEXT_PUBLIC_API_URL=http://localhost:5055 + depends_on: + - api + restart: always + +volumes: + surreal_data: +``` + +**Edit the file:** +- Replace `sk-...` with your actual OpenAI API key +- (Or use Anthropic, Google, Groq keys instead) +- If you have multiple keys, uncomment the ones you want + +--- + +## Step 3: Start Services (2 min) + +Open terminal in the `open-notebook` folder: + +```bash +docker compose up -d +``` + +Wait 15-20 seconds for all services to start: +``` +✅ surrealdb running on :8000 +✅ api running on :5055 +✅ frontend running on :3000 +``` + +Check status: +```bash +docker compose ps +``` + +--- + +## Step 4: Verify Installation (1 min) + +**API Health:** +```bash +curl http://localhost:5055/health +# Should return: {"status": "healthy"} +``` + +**Frontend Access:** +Open browser to: +``` +http://localhost:3000 +``` + +You should see the Open Notebook interface! + +--- + +## Step 5: First Notebook (2 min) + +1. Click **New Notebook** +2. Name: "My Research" +3. Description: "Getting started" +4. Click **Create** + +Done! You now have a fully working Open Notebook instance. 🎉 + +--- + +## Configuration + +### Using Different AI Providers + +Change `environment` section in `docker-compose.yml`: + +```yaml +# For Anthropic (Claude) +- ANTHROPIC_API_KEY=sk-ant-... + +# For Google Gemini +- GOOGLE_API_KEY=... + +# For Groq (fast, free tier available) +- GROQ_API_KEY=... 
+ +# For local Ollama (free, offline) +- OLLAMA_BASE_URL=http://ollama:11434 +``` + +### Adding Ollama (Free Local Models) + +Add to `docker-compose.yml`: + +```yaml + ollama: + image: ollama/ollama:latest + ports: + - "11434:11434" + volumes: + - ollama_models:/root/.ollama + restart: always + +volumes: + surreal_data: + ollama_models: +``` + +Then update API service: +```yaml +environment: + - OLLAMA_BASE_URL=http://ollama:11434 +``` + +Restart and pull a model: +```bash +docker compose restart +docker exec open_notebook-ollama-1 ollama pull mistral +``` + +--- + +## Environment Variables Reference + +| Variable | Purpose | Example | +|----------|---------|---------| +| `OPENAI_API_KEY` | OpenAI API key | `sk-proj-...` | +| `ANTHROPIC_API_KEY` | Anthropic/Claude key | `sk-ant-...` | +| `SURREAL_URL` | Database connection | `ws://surrealdb:8000/rpc` | +| `SURREAL_USER` | Database user | `root` | +| `SURREAL_PASSWORD` | Database password | `password` | +| `SURREAL_NAMESPACE` | The namespace | `open_notebook` | +| `SURREAL_DATABASE` | The database to use | `open_notebook` | +| `API_URL` | API external URL | `http://localhost:5055` | +| `NEXT_PUBLIC_API_URL` | Frontend API URL | `http://localhost:5055` | + +--- + +## Common Tasks + +### Stop Services +```bash +docker compose down +``` + +### View Logs +```bash +# All services +docker compose logs -f + +# Specific service +docker compose logs -f api +``` + +### Restart Services +```bash +docker compose restart +``` + +### Update to Latest Version +```bash +docker compose down +docker compose pull +docker compose up -d +``` + +### Remove All Data +```bash +docker compose down -v +``` + +--- + +## Troubleshooting + +### "Cannot connect to API" Error + +1. Check if Docker is running: +```bash +docker ps +``` + +2. Check if services are running: +```bash +docker compose ps +``` + +3. Check API logs: +```bash +docker compose logs api +``` + +4. Wait longer - services can take 20-30 seconds to start on first run + +--- + +### Port Already in Use + +If you get "Port 3000 already in use", change the port: + +```yaml +ports: + - "3001:3000" # Use 3001 instead +``` + +Then access at `http://localhost:3001` + +--- + +### API Key Not Working + +1. Double-check your API key in the file (no extra spaces) +2. Verify key is valid at provider's website +3. Check you added credits to your account +4. Restart: `docker compose restart api` + +--- + +### Database Connection Issues + +Check SurrealDB is running: +```bash +docker compose logs surrealdb +``` + +Reset database: +```bash +docker compose down -v +docker compose up -d +``` + +--- + +## Next Steps + +1. **Add Content**: Sources, notebooks, documents +2. **Configure Models**: Settings → Models (choose your preferences) +3. **Explore Features**: Chat, search, transformations +4. 
**Read Guide**: [User Guide](../3-USER-GUIDE/index.md) + +--- + +## Production Deployment + +For production use, see: +- [Security Hardening](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/security.md) +- [Reverse Proxy](https://github.com/lfnovo/open-notebook/blob/main/docs/deployment/reverse-proxy.md) + +--- + +## Getting Help + +- **Discord**: [Community support](https://discord.gg/37XJPXfz2w) +- **Issues**: [GitHub Issues](https://github.com/lfnovo/open-notebook/issues) +- **Docs**: [Full documentation](../index.md) diff --git a/open_notebook/CLAUDE.md b/open_notebook/CLAUDE.md new file mode 100644 index 00000000..808d3c57 --- /dev/null +++ b/open_notebook/CLAUDE.md @@ -0,0 +1,242 @@ +# Open Notebook Core Backend + +The `open_notebook` module is the heart of the system: a multi-layer backend orchestrating AI-powered research workflows. It bridges domain models, asynchronous database operations, LangGraph-based content processing, and multi-provider AI model management. + +## Purpose + +Encapsulates the entire backend architecture: +1. **Data layer**: SurrealDB persistence with async CRUD and migrations +2. **Domain layer**: Research models (Notebook, Source, Note, etc.) with embedded relationships +3. **Workflow layer**: LangGraph state machines for content ingestion, chat, and transformations +4. **AI provisioning**: Multi-provider model management with smart fallback logic +5. **Support services**: Context building, tokenization, and utility functions + +All components communicate through async/await patterns and use Pydantic for validation. + +## Architecture Overview + +``` +┌─────────────────────────────────────────────────────────────┐ +│ API / Streamlit UI │ +└──────────────────────┬──────────────────────────────────────┘ + │ + ┌──────────────────┴──────────────────┐ + │ │ +┌───▼────────────────────┐ ┌──────────▼────────────────┐ +│ Graphs (LangGraph) │ │ Domain Models (Data) │ +│ - source.py (ingestion) │ │ - Notebook, Source, Note │ +│ - chat.py │ │ - ChatSession, Asset │ +│ - ask.py (search) │ │ - SourceInsight, Embedding│ +│ - transformation.py │ │ - Transformation, Settings│ +└───┬────────────────────┘ │ - EpisodeProfile, Podcast │ + │ └──────────┬─────────────────┘ + │ │ + └───────────────────┬───────────────┘ + │ + ┌───────────────────┴────────────────────┐ + │ │ +┌───▼─────────────────┐ ┌──────────────▼──────┐ +│ AI Module (Models) │ │ Utils (Helpers) │ +│ - ModelManager │ │ - ContextBuilder │ +│ - DefaultModels │ │ - TokenUtils │ +│ - provision_langchain│ │ - TextUtils │ +│ - Multi-provider AI │ │ - VersionUtils │ +└───┬─────────────────┘ └──────────┬──────────┘ + │ │ + └───────────────────┬───────────────┘ + │ + ┌──────────────▼────────────────┐ + │ Database (SurrealDB) │ + │ - repository.py (CRUD ops) │ + │ - async_migrate.py (schema) │ + │ - Configuration │ + └────────────────────────────────┘ +``` + +## Component Catalog + +### Core Layers + +**See dedicated CLAUDE.md files for detailed patterns and usage:** + +- **`database/`**: Async repository pattern (repo_query, repo_create, repo_upsert), connection pooling, and automatic schema migrations on API startup. See `database/CLAUDE.md`. + +- **`domain/`**: Core data models using Pydantic with SurrealDB persistence. Two base classes: `ObjectModel` (mutable records with auto-increment IDs and embedding) and `RecordModel` (singleton configuration). Includes search functions (text_search, vector_search). See `domain/CLAUDE.md`. + +- **`graphs/`**: LangGraph state machines for async workflows. 
Content ingestion (source.py), conversational agents (chat.py), search synthesis (ask.py), and transformations. Uses provision_langchain_model() for smart model selection with token-aware fallback. See `graphs/CLAUDE.md`. + +- **`ai/`**: Centralized AI model lifecycle via Esperanto library. ModelManager factory with intelligent fallback (large context detection, type-specific defaults, config override). Supports 8+ providers (OpenAI, Anthropic, Google, Groq, Ollama, Mistral, DeepSeek, xAI). See `ai/CLAUDE.md`. + +- **`utils/`**: Cross-cutting utilities: ContextBuilder (flexible context assembly from sources/notes/insights with token budgeting), TextUtils (truncation, cleaning), TokenUtils (GPT token counting), VersionUtils (schema compatibility). See `utils/CLAUDE.md`. + +- **`podcasts/`**: Podcast generation models: SpeakerProfile (TTS voice config), EpisodeProfile (generation settings), PodcastEpisode (job tracking via surreal-commands). See `podcasts/CLAUDE.md`. + +### Configuration & Exceptions + +- **`config.py`**: Paths for data folder, uploads, LangGraph checkpoints, and tiktoken cache. Auto-creates directories. +- **`exceptions.py`**: Hierarchy of OpenNotebookError subclasses for database, file, network, authentication, and rate-limit failures. + +## Data Flow: Content Ingestion + +``` +User uploads file/URL + │ + ▼ +┌─────────────────────────────────────┐ +│ source.py (LangGraph state machine) │ +├─────────────────────────────────────┤ +│ 1. content_process() │ +│ - extract_content() from file/URL│ +│ - Use ContentSettings defaults │ +│ - speech_to_text model from DB │ +│ │ +│ 2. save_source() │ +│ - Update Source with full_text │ +│ - Preserve title if empty │ +│ │ +│ 3. trigger_transformations() │ +│ - Parallel fan-out to each TXN │ +└────────────────┬────────────────────┘ + │ + ▼ + ┌──────────────┐ + │ transformation.py (parallel) + │ - Apply prompt to source text + │ - Generate insights + │ - Auto-embed results + └──────────────┘ + │ + ▼ + ┌────────────────────┐ + │ Database Storage │ + │ - Source.full_text │ + │ - SourceInsight │ + │ - Embeddings │ + │ - (async job) │ + └────────────────────┘ +``` + +**Fire-and-forget embeddings**: Source.vectorize() returns command_id without awaiting; embedding happens asynchronously via surreal-commands job system. + +## Data Flow: Chat & Search + +``` +User message in chat + │ + ▼ +┌──────────────────────────┐ +│ ContextBuilder │ +│ - Select sources/notes │ +│ - Token budget limiting │ +│ - Priority weighting │ +└──────────┬───────────────┘ + │ + ▼ +┌──────────────────────────────────┐ +│ chat.py or ask.py (LangGraph) │ +│ - Load context from above │ +│ - provision_langchain_model() │ +│ * Auto-upgrade for large text │ +│ * Apply model_id override │ +│ - Call LLM with context │ +│ - Store message in SqliteSaver │ +└──────────┬───────────────────────┘ + │ + ▼ + ┌──────────────┐ + │ LLM Response │ + │ (persisted) │ + └──────────────┘ +``` + +## Key Patterns Across Layers + +### Async/Await Everywhere +All database operations, model provisioning, and graph execution are async. Mix with sync code only via `asyncio.run()` or LangGraph's async bridges (see graphs/CLAUDE.md for workarounds). + +### Type-Driven Dispatch +Model types (language, embedding, speech_to_text, text_to_speech) drive factory logic in ModelManager. Domain model IDs encode their type: `notebook:uuid`, `source:uuid`, `note:uuid`. 
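+
+A minimal sketch of what this prefix-based resolution amounts to (illustrative only; the real logic lives in `domain/base.py`, and the registry here is a stand-in for the table populated as each domain module is imported):
+
+```python
+# Hypothetical registry: table name -> domain class.
+DOMAIN_CLASSES: dict[str, type] = {}
+
+def resolve_class(record_id: str):
+    """Resolve 'notebook:uuid' to the Notebook class via its table prefix."""
+    table = record_id.split(":", 1)[0]
+    # Mirrors the gotcha under Important Quirks below: if the subclass was
+    # never imported, its table was never registered and the lookup
+    # quietly returns None.
+    return DOMAIN_CLASSES.get(table)
+```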
+ +### Smart Fallback Logic +`provision_langchain_model()` auto-detects large contexts (105K+ tokens) and upgrades to dedicated large_context_model. Falls back to default_chat_model if specific type not found. + +### Fire-and-Forget Jobs +Time-consuming operations (embedding, podcast generation) return command_id immediately. Caller polls surreal-commands for status; no blocking. + +### Embedding on Save +Domain models with `needs_embedding()=True` auto-generate embeddings in `save()`. Search functions (text_search, vector_search) use embeddings for semantic matching. + +### Relationship Management +SurrealDB graph edges link entities: Notebook→Source (has), Source→Note (artifact), Note→Source (refers_to). See `relate()` in domain/base.py. + +## Integration Points + +**API startup** (`api/main.py`): +- AsyncMigrationManager.run_migration_up() on lifespan startup +- Ensures schema is current before handling requests + +**Streamlit UI** (`pages/stream_app/`): +- Calls domain models directly to fetch/create notebooks, sources, notes +- Invokes graphs (chat, source, ask) via async wrapper +- Relies on API for migrations (deprecated check in UI) + +**Background Jobs** (`surreal_commands`): +- Source.vectorize() submits async embedding job +- PodcastEpisode.get_job_status() polls job queue +- Decouples long-running operations from request flow + +## Important Quirks & Gotchas + +1. **Token counting rough estimate**: Uses cl100k_base encoding; may differ 5-10% from actual model +2. **Large context threshold hard-coded**: 105,000 token limit for large_context_model upgrade (not configurable) +3. **Async loop gymnastics in graphs**: ThreadPoolExecutor workaround for LangGraph sync nodes calling async functions (fragile) +4. **DefaultModels always fresh**: get_instance() bypasses singleton cache to pick up live config changes +5. **Polymorphic model.get()**: Resolves subclass from ID prefix; fails silently if subclass not imported +6. **RecordID string inconsistency**: repo_update() accepts both "table:id" format and full RecordID +7. **Snapshot profiles**: podcast profiles stored as dicts, so config updates don't affect past episodes +8. **No connection pooling**: Each repo_* creates new connection (adequate for HTTP but inefficient for bulk) +9. **Circular import guard**: utils imports domain; domain must not import utils (breaks on import) +10. **SqliteSaver shared location**: LangGraph checkpoints from LANGGRAPH_CHECKPOINT_FILE env var; all graphs use same file + +## How to Add New Feature + +**New data model**: +1. Create class inheriting from `ObjectModel` with `table_name` ClassVar +2. Define Pydantic fields and validators +3. Override `needs_embedding()` if searchable +4. Add custom methods for domain logic (get_X, add_to_Y) +5. Register in domain/__init__.py exports + +**New workflow**: +1. Create state machine in graphs/WORKFLOW.py using StateGraph +2. Import domain models and provision_langchain_model() +3. Define nodes as async functions taking State, returning dict +4. Compile with graph.compile() +5. Invoke from API endpoint or Streamlit page + +**New AI model type**: +1. Add type string to Model class +2. Add AIFactory.create_* method in Esperanto +3. Handle in ModelManager.get_model() +4. 
Add DefaultModels field + getter + +## Key Dependencies + +- **surrealdb**: AsyncSurreal client, RecordID type +- **pydantic**: Validation, field_validator +- **langgraph**: StateGraph, Send, SqliteSaver, async/sync bridging +- **langchain_core**: Messages, OutputParser, RunnableConfig +- **esperanto**: Multi-provider AI model abstraction (OpenAI, Anthropic, Google, Groq, Ollama, etc.) +- **content-core**: File/URL content extraction +- **ai_prompter**: Jinja2 template rendering for prompts +- **surreal_commands**: Async job queue for embeddings, podcast generation +- **loguru**: Structured logging throughout +- **tiktoken**: GPT token encoding for context window estimation + +## Codebase Statistics + +- **Modules**: 6 core layers + support services +- **Async operations**: Database, AI provisioning, graph execution, embedding, job tracking +- **Supported AI providers**: 8+ (OpenAI, Anthropic, Google, Groq, Ollama, Mistral, DeepSeek, xAI, OpenRouter) +- **Domain models**: Notebook, Source, Note, SourceInsight, SourceEmbedding, ChatSession, Asset, Transformation, ContentSettings, EpisodeProfile, SpeakerProfile, PodcastEpisode +- **Graph workflows**: 6 (source, chat, source_chat, ask, transformation, prompt) diff --git a/open_notebook/ai/CLAUDE.md b/open_notebook/ai/CLAUDE.md new file mode 100644 index 00000000..604a54da --- /dev/null +++ b/open_notebook/ai/CLAUDE.md @@ -0,0 +1,109 @@ +# AI Module + +Model configuration, provisioning, and management for multi-provider AI integration via Esperanto. + +## Purpose + +Centralizes AI model lifecycle: database models for model metadata (provider, type), default model configuration, and factory for instantiating LLM/embedding/speech models at runtime with fallback logic. + +## Architecture Overview + +**Two-tier system**: +1. **Database models** (`Model`, `DefaultModels`): Metadata storage and default configuration +2. **ModelManager**: Factory for provisioning models with intelligent fallback (large context detection, config override) + +All models use Esperanto library as provider abstraction (OpenAI, Anthropic, Google, Groq, Ollama, Mistral, DeepSeek, xAI, OpenRouter). 
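+
+Condensed to pseudologic, the provisioning decision documented in the catalog below looks roughly like this. It is a sketch, not the actual implementation, and it assumes `large_context_model` stores a model ID that `get_model()` accepts:
+
+```python
+from open_notebook.ai.models import model_manager
+from open_notebook.utils import token_count
+
+async def pick_model(content: str, model_id: str | None = None, default_type: str = "chat"):
+    # 1. Very large context: upgrade to the dedicated large-context model.
+    if token_count(content) > 105_000:
+        defaults = await model_manager.get_defaults()
+        return await model_manager.get_model(defaults.large_context_model)
+    # 2. An explicit per-request override wins next.
+    if model_id:
+        return await model_manager.get_model(model_id)
+    # 3. Otherwise fall back to the configured default for this type.
+    return await model_manager.get_default_model(default_type)
+```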
+ +## Component Catalog + +### models.py + +#### Model (ObjectModel) +- Database record: name, provider, type (language/embedding/speech_to_text/text_to_speech) +- `get_models_by_type()`: Async query to fetch all models of a specific type +- Stores provider-model pairs for AI factory instantiation + +#### DefaultModels (RecordModel) +- Singleton configuration record (record_id: `open_notebook:default_models`) +- Fields: default_chat_model, default_transformation_model, large_context_model, default_text_to_speech_model, default_speech_to_text_model, default_embedding_model, default_tools_model +- `get_instance()`: Always fetches fresh from database (overrides parent caching for real-time updates) +- Returns fresh instance on each call (no singleton cache) + +#### ModelManager +- Stateless factory for instantiating AI models +- `get_model(model_id)`: Retrieves Model by ID, creates via AIFactory.create_* based on type +- `get_defaults()`: Fetches DefaultModels configuration +- `get_default_model(model_type)`: Smart lookup (e.g., "chat" → default_chat_model, "transformation" → default_transformation_model with fallback to chat) +- `get_speech_to_text()`, `get_text_to_speech()`, `get_embedding_model()`: Type-specific convenience methods with assertions +- **Global instance**: `model_manager` singleton exported for use throughout app + +### provision.py + +#### provision_langchain_model() +- Factory for LangGraph nodes needing LLM provisioning +- **Smart fallback logic**: + - If tokens > 105,000: Use `large_context_model` + - Elif `model_id` specified: Use specific model + - Else: Use default model for type (e.g., "chat", "transformation") +- Returns LangChain-compatible model via `.to_langchain()` +- Logs model selection decision + +## Common Patterns + +- **Type dispatch**: Model.type field drives factory logic (4 model types) +- **Provider abstraction**: Esperanto handles provider differences; ModelManager unaware of provider specifics +- **Fresh defaults**: DefaultModels.get_instance() always fetches from database (not cached) for live config updates +- **Config override**: provision_langchain_model() accepts kwargs passed to AIFactory.create_* methods +- **Token-based selection**: provision_langchain_model() detects large contexts and upgrades model automatically +- **Type assertions**: get_speech_to_text(), get_embedding_model() assert returned type (safety check) + +## Key Dependencies + +- `esperanto`: AIFactory.create_language(), create_embedding(), create_speech_to_text(), create_text_to_speech() +- `open_notebook.database.repository`: repo_query, ensure_record_id +- `open_notebook.domain.base`: ObjectModel, RecordModel base classes +- `open_notebook.utils`: token_count() for context size detection +- `loguru`: Logging for model selection decisions + +## Important Quirks & Gotchas + +- **Token counting rough estimate**: provision_langchain_model() uses token_count() which estimates via cl100k_base encoding (may differ 5-10% from actual model) +- **Large context threshold hard-coded**: 105,000 token threshold for large_context_model upgrade (not configurable) +- **DefaultModels.get_instance() fresh fetch**: Intentionally bypasses parent singleton cache to pick up live config changes; creates new instance each call +- **Type-specific getters use assertions**: get_speech_to_text() asserts isinstance (catches misconfiguration early) +- **No validation of model existence**: ModelManager.get_model() raises ValueError if model not found (not caught upstream) +- **Esperanto caching**: Actual 
model instances cached by Esperanto (not by ModelManager); ModelManager stateless +- **Fallback chain specificity**: "transformation" type falls back to default_chat_model if not explicitly set (convention-based) +- **kwargs passed through**: provision_langchain_model() passes kwargs to AIFactory but doesn't validate what's accepted + +## How to Extend + +1. **Add new model type**: Add type string to Model.type enum, add create_* method in AIFactory, handle in ModelManager.get_model() +2. **Add new default configuration**: Extend DefaultModels with new field (e.g., default_vision_model), add getter in ModelManager +3. **Change fallback logic**: Modify provision_langchain_model() token threshold or fallback chain +4. **Add model filtering**: Extend Model.get_models_by_type() with additional filters (e.g., by provider) +5. **Implement model caching**: Wrap ModelManager methods with functools.lru_cache (be aware of kwargs mutability) + +## Usage Example + +```python +from open_notebook.ai.models import model_manager + +# Get default chat model +chat_model = await model_manager.get_default_model("chat") + +# Get specific model by ID +embedding_model = await model_manager.get_model("model:openai_embedding") + +# Get embedding model with config override +embedding_model = await model_manager.get_embedding_model(temperature=0.1) + +# Provision model for LangGraph (auto-detects large context) +from open_notebook.ai.provision import provision_langchain_model +langchain_model = await provision_langchain_model( + content=long_text, + model_id=None, # Use default + default_type="chat", + temperature=0.7 +) +``` diff --git a/open_notebook/ai/__init__.py b/open_notebook/ai/__init__.py new file mode 100644 index 00000000..617d6373 --- /dev/null +++ b/open_notebook/ai/__init__.py @@ -0,0 +1,2 @@ +# AI infrastructure module +# Contains model configuration, provisioning, and management diff --git a/open_notebook/domain/models.py b/open_notebook/ai/models.py similarity index 100% rename from open_notebook/domain/models.py rename to open_notebook/ai/models.py diff --git a/open_notebook/graphs/utils.py b/open_notebook/ai/provision.py similarity index 95% rename from open_notebook/graphs/utils.py rename to open_notebook/ai/provision.py index 05264e2a..3548b82f 100644 --- a/open_notebook/graphs/utils.py +++ b/open_notebook/ai/provision.py @@ -2,7 +2,7 @@ from langchain_core.language_models.chat_models import BaseChatModel from loguru import logger -from open_notebook.domain.models import model_manager +from open_notebook.ai.models import model_manager from open_notebook.utils import token_count diff --git a/open_notebook/database/CLAUDE.md b/open_notebook/database/CLAUDE.md new file mode 100644 index 00000000..17d808d1 --- /dev/null +++ b/open_notebook/database/CLAUDE.md @@ -0,0 +1,124 @@ +# Database Module + +SurrealDB abstraction layer providing repository pattern for CRUD operations and async migration management. + +## Purpose + +Encapsulates all database interactions: connection pooling, async CRUD operations, relationship management, and schema migrations. Provides clean interface for domain models and API endpoints to interact with SurrealDB without direct query knowledge. + +## Architecture Overview + +Two-tier system: +1. **Repository Layer** (repository.py): Raw async CRUD operations on SurrealDB via AsyncSurreal client +2. **Migration Layer** (async_migrate.py): Schema versioning and migration execution + +Both leverage connection context manager for lifecycle management and automatic cleanup. 
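+
+The shared connection lifecycle can be sketched roughly as follows (simplified; the real `db_connection()` in `repository.py` also resolves the env-var fallbacks described below):
+
+```python
+import os
+from contextlib import asynccontextmanager
+
+from surrealdb import AsyncSurreal
+
+@asynccontextmanager
+async def db_connection():
+    # Open, authenticate, and select namespace/database...
+    db = AsyncSurreal(os.environ["SURREAL_URL"])
+    await db.signin({
+        "username": os.environ["SURREAL_USER"],
+        "password": os.environ["SURREAL_PASSWORD"],
+    })
+    await db.use(os.environ["SURREAL_NAMESPACE"], os.environ["SURREAL_DATABASE"])
+    try:
+        yield db
+    finally:
+        # ...and always close on exit, success or failure.
+        await db.close()
+```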
+ +## Component Catalog + +### repository.py + +**Connection Management** +- `get_database_url()`: Resolves `SURREAL_URL` or constructs from `SURREAL_ADDRESS`/`SURREAL_PORT` (backward compatible) +- `get_database_password()`: Falls back from `SURREAL_PASSWORD` to legacy `SURREAL_PASS` env var +- `db_connection()`: Async context manager handling sign-in, namespace/database selection, and cleanup + - Opens AsyncSurreal, authenticates, selects namespace/database, yields connection, closes on exit + +**Query Operations** +- `repo_query(query_str, vars)`: Execute raw SurrealQL with parameter substitution; returns list of dicts +- `repo_create(table, data)`: Insert record; auto-adds `created`/`updated` timestamps; removes any existing `id` field +- `repo_insert(table, data_list, ignore_duplicates)`: Bulk insert multiple records; optionally ignores "already contains" errors +- `repo_upsert(table, id, data, add_timestamp)`: MERGE operation for create-or-update; optionally adds `updated` timestamp +- `repo_update(table, id, data)`: Update existing record by table+id or full record_id; auto-adds `updated`, parses ISO dates +- `repo_delete(record_id)`: Delete record by RecordID +- `repo_relate(source, relationship, target, data)`: Create graph relationship; optional relationship data + +**Utilities** +- `parse_record_ids(obj)`: Recursively converts SurrealDB RecordID objects to strings (deep tree traversal) +- `ensure_record_id(value)`: Coerces string or RecordID to RecordID type + +### async_migrate.py + +**Migration Classes** +- `AsyncMigration`: Single migration wrapper + - `from_file(path)`: Load .surrealql file; strips comments and whitespace + - `run(bump)`: Execute SQL; call bump_version() on success (bump=True) or lower_version() (bump=False) + +- `AsyncMigrationRunner`: Sequences multiple migrations + - `run_all()`: Execute pending migrations from current_version to end + - `run_one_up()`: Run next migration + - `run_one_down()`: Rollback latest migration + +- `AsyncMigrationManager`: Main orchestrator + - Loads 9 up migrations + 9 down migrations (hard-coded in __init__) + - `get_current_version()`: Query max version from _sbl_migrations table + - `needs_migration()`: Boolean check (current < total migrations available) + - `run_migration_up()`: Run all pending migrations with logging + +**Version Tracking** +- `get_latest_version()`: Query max version; returns 0 if _sbl_migrations table missing +- `get_all_versions()`: Fetch all migration records; returns empty list on error +- `bump_version()`: INSERT new entry into _sbl_migrations with version + applied_at timestamp +- `lower_version()`: DELETE latest migration record (rollback) + +### migrate.py + +**Backward Compatibility** +- `MigrationManager`: Sync wrapper around AsyncMigrationManager + - `get_current_version()`: Wraps async call with asyncio.run() + - `needs_migration` property: Checks if migration pending + - `run_migration_up()`: Execute migrations synchronously + +## Common Patterns + +- **Async-first design**: All operations async via AsyncSurreal; sync wrapper provided for legacy code +- **Connection per operation**: Each repo_* function opens/closes connection (no pooling); designed for serverless/stateless API +- **Auto-timestamping**: repo_create() and repo_update() auto-set `created`/`updated` fields +- **Error resilience**: RuntimeError for transaction conflicts (retriable); catches and re-raises other exceptions +- **RecordID polymorphism**: Functions accept string or RecordID; coerced to consistent type +- **Graceful 
degradation**: Migration queries catch exceptions and treat table-not-found as version 0 + +## Key Dependencies + +- `surrealdb`: AsyncSurreal client, RecordID type +- `loguru`: Logging with context (debug/error/success levels) +- Python stdlib: `os` (env vars), `datetime` (timestamps), `contextlib` (async context manager) + +## Important Quirks & Gotchas + +- **No connection pooling**: Each repo_* operation creates new connection; adequate for HTTP request-scoped operations but inefficient for bulk workloads +- **Hard-coded migration files**: AsyncMigrationManager lists migrations 1-9 explicitly; adding new migration requires code change (not auto-discovery) +- **Record ID format inconsistency**: repo_update() accepts both `table:id` format and full RecordID; path handling can be subtle +- **ISO date parsing**: repo_update() parses `created` field from string to datetime if present; assumes ISO format +- **Timestamp overwrite risk**: repo_create() always sets new timestamps; can't preserve original created time on reimport +- **Transaction conflict handling**: RuntimeError from transaction conflicts logged without stack trace (prevents log spam) +- **Graceful null returns**: get_all_versions() returns [] on table missing; allows migration system to bootstrap cleanly + +## How to Extend + +1. **Add new CRUD operation**: Follow repo_* pattern (open connection, execute query, handle errors, close) +2. **Add migration**: Create migration file in `/migrations/N.surrealql` and `/migrations/N_down.surrealql`; update AsyncMigrationManager to load new files +3. **Change timestamp behavior**: Modify repo_create()/repo_update() to not auto-set `updated` field if caller-provided +4. **Implement connection pooling**: Replace db_connection context manager with pool.acquire() pattern (for high-throughput scenarios) + +## Integration Points + +- **API startup** (api/main.py): FastAPI lifespan handler calls AsyncMigrationManager.run_migration_up() on server start +- **Domain models** (domain/*.py): All models call repo_* functions for persistence +- **Commands** (commands/*.py): Background jobs use repo_* for state updates +- **Streamlit UI** (pages/*.py): Deprecated migration check; relies on API to run migrations + +## Usage Example + +```python +from open_notebook.database.repository import repo_create, repo_query, repo_update + +# Create +record = await repo_create("notebooks", {"title": "Research"}) + +# Query +results = await repo_query("SELECT * FROM notebooks WHERE title = $title", {"title": "Research"}) + +# Update +await repo_update("notebooks", record["id"], {"title": "Updated Research"}) +``` diff --git a/open_notebook/database/async_migrate.py b/open_notebook/database/async_migrate.py index f3057588..da333c08 100644 --- a/open_notebook/database/async_migrate.py +++ b/open_notebook/database/async_migrate.py @@ -96,26 +96,26 @@ class AsyncMigrationManager: def __init__(self): """Initialize migration manager.""" self.up_migrations = [ - AsyncMigration.from_file("migrations/1.surrealql"), - AsyncMigration.from_file("migrations/2.surrealql"), - AsyncMigration.from_file("migrations/3.surrealql"), - AsyncMigration.from_file("migrations/4.surrealql"), - AsyncMigration.from_file("migrations/5.surrealql"), - AsyncMigration.from_file("migrations/6.surrealql"), - AsyncMigration.from_file("migrations/7.surrealql"), - AsyncMigration.from_file("migrations/8.surrealql"), - AsyncMigration.from_file("migrations/9.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/1.surrealql"), + 
AsyncMigration.from_file("open_notebook/database/migrations/2.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/3.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/4.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/5.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/6.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/7.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/8.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/9.surrealql"), ] self.down_migrations = [ - AsyncMigration.from_file("migrations/1_down.surrealql"), - AsyncMigration.from_file("migrations/2_down.surrealql"), - AsyncMigration.from_file("migrations/3_down.surrealql"), - AsyncMigration.from_file("migrations/4_down.surrealql"), - AsyncMigration.from_file("migrations/5_down.surrealql"), - AsyncMigration.from_file("migrations/6_down.surrealql"), - AsyncMigration.from_file("migrations/7_down.surrealql"), - AsyncMigration.from_file("migrations/8_down.surrealql"), - AsyncMigration.from_file("migrations/9_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/1_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/2_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/3_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/4_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/5_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/6_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/7_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/8_down.surrealql"), + AsyncMigration.from_file("open_notebook/database/migrations/9_down.surrealql"), ] self.runner = AsyncMigrationRunner( up_migrations=self.up_migrations, diff --git a/migrations/1.surrealql b/open_notebook/database/migrations/1.surrealql similarity index 100% rename from migrations/1.surrealql rename to open_notebook/database/migrations/1.surrealql diff --git a/migrations/1_down.surrealql b/open_notebook/database/migrations/1_down.surrealql similarity index 100% rename from migrations/1_down.surrealql rename to open_notebook/database/migrations/1_down.surrealql diff --git a/migrations/2.surrealql b/open_notebook/database/migrations/2.surrealql similarity index 100% rename from migrations/2.surrealql rename to open_notebook/database/migrations/2.surrealql diff --git a/migrations/2_down.surrealql b/open_notebook/database/migrations/2_down.surrealql similarity index 100% rename from migrations/2_down.surrealql rename to open_notebook/database/migrations/2_down.surrealql diff --git a/migrations/3.surrealql b/open_notebook/database/migrations/3.surrealql similarity index 100% rename from migrations/3.surrealql rename to open_notebook/database/migrations/3.surrealql diff --git a/migrations/3_down.surrealql b/open_notebook/database/migrations/3_down.surrealql similarity index 100% rename from migrations/3_down.surrealql rename to open_notebook/database/migrations/3_down.surrealql diff --git a/migrations/4.surrealql b/open_notebook/database/migrations/4.surrealql similarity index 100% rename from migrations/4.surrealql rename to open_notebook/database/migrations/4.surrealql diff --git a/migrations/4_down.surrealql b/open_notebook/database/migrations/4_down.surrealql similarity index 100% rename from 
migrations/4_down.surrealql rename to open_notebook/database/migrations/4_down.surrealql diff --git a/migrations/5.surrealql b/open_notebook/database/migrations/5.surrealql similarity index 100% rename from migrations/5.surrealql rename to open_notebook/database/migrations/5.surrealql diff --git a/migrations/5_down.surrealql b/open_notebook/database/migrations/5_down.surrealql similarity index 100% rename from migrations/5_down.surrealql rename to open_notebook/database/migrations/5_down.surrealql diff --git a/migrations/6.surrealql b/open_notebook/database/migrations/6.surrealql similarity index 100% rename from migrations/6.surrealql rename to open_notebook/database/migrations/6.surrealql diff --git a/migrations/6_down.surrealql b/open_notebook/database/migrations/6_down.surrealql similarity index 100% rename from migrations/6_down.surrealql rename to open_notebook/database/migrations/6_down.surrealql diff --git a/migrations/7.surrealql b/open_notebook/database/migrations/7.surrealql similarity index 100% rename from migrations/7.surrealql rename to open_notebook/database/migrations/7.surrealql diff --git a/migrations/7_down.surrealql b/open_notebook/database/migrations/7_down.surrealql similarity index 100% rename from migrations/7_down.surrealql rename to open_notebook/database/migrations/7_down.surrealql diff --git a/migrations/8.surrealql b/open_notebook/database/migrations/8.surrealql similarity index 100% rename from migrations/8.surrealql rename to open_notebook/database/migrations/8.surrealql diff --git a/migrations/8_down.surrealql b/open_notebook/database/migrations/8_down.surrealql similarity index 100% rename from migrations/8_down.surrealql rename to open_notebook/database/migrations/8_down.surrealql diff --git a/migrations/9.surrealql b/open_notebook/database/migrations/9.surrealql similarity index 100% rename from migrations/9.surrealql rename to open_notebook/database/migrations/9.surrealql diff --git a/migrations/9_down.surrealql b/open_notebook/database/migrations/9_down.surrealql similarity index 100% rename from migrations/9_down.surrealql rename to open_notebook/database/migrations/9_down.surrealql diff --git a/open_notebook/domain/CLAUDE.md b/open_notebook/domain/CLAUDE.md new file mode 100644 index 00000000..81075fdb --- /dev/null +++ b/open_notebook/domain/CLAUDE.md @@ -0,0 +1,100 @@ +# Domain Module + +Core data models for notebooks, sources, notes, and settings with async SurrealDB persistence, auto-embedding, and relationship management. + +## Purpose + +Two base classes support different persistence patterns: **ObjectModel** (mutable records with auto-increment IDs) and **RecordModel** (singleton configuration with fixed IDs). 
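A hedged sketch of how the two bases are typically subclassed (`Bookmark` and `UISettings` are hypothetical models, used here only to contrast the two patterns):

```python
# Hypothetical subclasses illustrating the two persistence patterns.
from typing import ClassVar, Optional

from open_notebook.domain.base import ObjectModel, RecordModel


class Bookmark(ObjectModel):
    table_name: ClassVar[str] = "bookmark"  # records get IDs like "bookmark:abc123"
    url: str
    title: Optional[str] = None

    def needs_embedding(self) -> bool:
        return True  # opt in to auto-embedding on save()


class UISettings(RecordModel):
    record_id: ClassVar[str] = "open_notebook:ui_settings"  # one fixed record per subclass
    theme: str = "dark"
```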
+ +## Key Components + +### base.py +- **ObjectModel**: Base for notebooks, sources, notes + - `save()`: Create/update with auto-embedding for searchable content + - `delete()`: Remove by ID + - `relate(relationship, target_id)`: Create graph relationships (reference, artifact, refers_to) + - `get(id)`: Polymorphic fetch; resolves subclass from ID prefix + - `get_all(order_by)`: Fetch all records from table + - Integrates with ModelManager for automatic embedding + +- **RecordModel**: Singleton configuration (ContentSettings, DefaultPrompts) + - Fixed record_id per subclass + - `update()`: Upsert to database + - Lazy DB loading via `_load_from_db()` + +### notebook.py +- **Notebook**: Research project container + - `get_sources()`, `get_notes()`, `get_chat_sessions()`: Navigate relationships + +- **Source**: Content item (file/URL) + - `vectorize()`: Submit async embedding job (returns command_id, fire-and-forget) + - `get_status()`, `get_processing_progress()`: Track job via surreal_commands + - `get_context()`: Returns summary for LLM context + - `add_insight()`: Generate and store insights with embeddings + +- **Note**: Standalone or linked notes + - `needs_embedding()`: Always True (searchable) + - `add_to_notebook()`: Link to notebook + +- **SourceInsight, SourceEmbedding**: Derived content models +- **ChatSession**: Conversation container with optional model_override +- **Asset**: File/URL reference helper + +- **Search functions**: + - `text_search()`: Full-text keyword search + - `vector_search()`: Semantic search via embeddings (default minimum_score=0.2) + +### content_settings.py +- **ContentSettings**: Singleton for processing engines, embedding strategy, file deletion, YouTube languages + +### transformation.py +- **Transformation**: Reusable prompts for content transformation +- **DefaultPrompts**: Singleton with transformation instructions + +## Important Patterns + +- **Async/await**: All DB operations async; always use await +- **Polymorphic get()**: `ObjectModel.get(id)` determines subclass from ID prefix (table:id format) +- **Auto-embedding**: `save()` generates embeddings if `needs_embedding()` returns True +- **Nullable fields**: Declare via `nullable_fields` ClassVar to allow None in database +- **Timestamps**: `created` and `updated` auto-managed as ISO strings +- **Fire-and-forget jobs**: `source.vectorize()` returns command_id without waiting + +## Key Dependencies + +- `surrealdb`: RecordID type for relationships +- `pydantic`: Validation and field_validator decorators +- `open_notebook.database.repository`: CRUD and relationship functions +- `open_notebook.ai.models`: ModelManager for embeddings +- `surreal_commands`: Async job submission (vectorization, insights) +- `loguru`: Logging + +## Quirks & Gotchas + +- **Polymorphic resolution**: `ObjectModel.get()` fails if subclass not imported (search subclasses list) +- **RecordModel singleton**: __new__ returns existing instance; call `clear_instance()` in tests +- **Source.command field**: Stored as RecordID; auto-parsed from strings via field_validator +- **Text truncation**: `Note.get_context(short)` hardcodes 100-char limit +- **Embedding async**: Only Note and SourceInsight embed on save; Source too large (uses async job) +- **Relationship strings**: Must match SurrealDB schema (reference, artifact, refers_to) + +## How to Add New Model + +1. Inherit from ObjectModel with table_name ClassVar +2. Define Pydantic fields with validators +3. Override `needs_embedding()` if searchable +4. 
Add custom methods for domain logic (get_X, add_to_Y) +5. Implement `_prepare_save_data()` if custom serialization needed + +## Usage + +```python +notebook = Notebook(name="Research", description="My project") +await notebook.save() + +obj = await ObjectModel.get("notebook:123") # Polymorphic fetch + +# Search +await text_search("quantum", results=5) +await vector_search("quantum computing", results=10, minimum_score=0.3) +``` diff --git a/open_notebook/domain/base.py b/open_notebook/domain/base.py index f9992e25..76844890 100644 --- a/open_notebook/domain/base.py +++ b/open_notebook/domain/base.py @@ -2,7 +2,7 @@ from typing import Any, ClassVar, Dict, List, Optional, Type, TypeVar, Union, cast from loguru import logger -from pydantic import BaseModel, ValidationError, field_validator, model_validator +from pydantic import BaseModel, ConfigDict, ValidationError, field_validator, model_validator from open_notebook.database.repository import ( ensure_record_id, @@ -111,7 +111,7 @@ def get_embedding_content(self) -> Optional[str]: return None async def save(self) -> None: - from open_notebook.domain.models import model_manager + from open_notebook.ai.models import model_manager try: self.model_validate(self.model_dump(), strict=True) @@ -211,19 +211,20 @@ def parse_datetime(cls, value): class RecordModel(BaseModel): + model_config = ConfigDict( + validate_assignment=True, + arbitrary_types_allowed=True, + extra="allow", + from_attributes=True, + defer_build=True, + ) + record_id: ClassVar[str] auto_save: ClassVar[bool] = ( False # Default to False, can be overridden in subclasses ) _instances: ClassVar[Dict[str, "RecordModel"]] = {} # Store instances by record_id - class Config: - validate_assignment = True - arbitrary_types_allowed = True - extra = "allow" - from_attributes = True - defer_build = True - def __new__(cls, **kwargs): # If an instance already exists for this record_id, return it if cls.record_id in cls._instances: diff --git a/open_notebook/domain/notebook.py b/open_notebook/domain/notebook.py index 2f589a68..3507509f 100644 --- a/open_notebook/domain/notebook.py +++ b/open_notebook/domain/notebook.py @@ -2,13 +2,13 @@ from typing import Any, ClassVar, Dict, List, Literal, Optional, Tuple, Union from loguru import logger -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator from surreal_commands import submit_command from surrealdb import RecordID +from open_notebook.ai.models import model_manager from open_notebook.database.repository import ensure_record_id, repo_query from open_notebook.domain.base import ObjectModel -from open_notebook.domain.models import model_manager from open_notebook.exceptions import DatabaseOperationError, InvalidInputError from open_notebook.utils import split_text @@ -142,6 +142,8 @@ async def save_as_note(self, notebook_id: Optional[str] = None) -> Any: class Source(ObjectModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + table_name: ClassVar[str] = "source" asset: Optional[Asset] = None title: Optional[str] = None @@ -151,9 +153,6 @@ class Source(ObjectModel): default=None, description="Link to surreal-commands processing job" ) - class Config: - arbitrary_types_allowed = True - @field_validator("command", mode="before") @classmethod def parse_command(cls, value): diff --git a/open_notebook/graphs/CLAUDE.md b/open_notebook/graphs/CLAUDE.md new file mode 100644 index 00000000..3406576b --- /dev/null +++ b/open_notebook/graphs/CLAUDE.md @@ -0,0 +1,61 @@ +# 
Graphs Module + +LangGraph-based workflow orchestration for content processing, chat interactions, and AI-powered transformations. + +## Key Components + +- **`chat.py`**: Conversational agent with message history, notebook context, and model override support +- **`source_chat.py`**: Source-focused chat with ContextBuilder for insights/content injection and context tracking +- **`ask.py`**: Multi-search strategy agent (generates search terms, retrieves results, synthesizes answers) +- **`source.py`**: Content ingestion pipeline (extract → save → transform with content-core) +- **`transformation.py`**: Single-node transformation executor with prompt templating via ai_prompter +- **`prompt.py`**: Generic pattern chain for arbitrary prompt-based LLM calls +- **`tools.py`**: Minimal tool library (currently just `get_current_timestamp()`) + +## Important Patterns + +- **Async/sync bridging in graphs**: Both `chat.py` and `source_chat.py` use `asyncio.new_event_loop()` workaround because LangGraph nodes are sync but `provision_langchain_model()` is async +- **State machines via StateGraph**: Each graph compiles to stateful runnable; conditional edges fan out work (ask.py, source.py do parallel transforms) +- **Prompt templating**: `ai_prompter.Prompter` with Jinja2 templates referenced by path ("chat/system", "ask/entry", etc.) +- **Model provisioning via context**: Config dict passed to node via `RunnableConfig`; defaults fall back to state overrides +- **Checkpointing**: `chat.py` and `source_chat.py` use SqliteSaver for message history (LangGraph's built-in persistence) +- **Content extraction**: `source.py` uses content-core library with provider/model from DefaultModels; URLs and files both supported + +## Quirks & Edge Cases + +- **Async loop gymnastics**: ThreadPoolExecutor workaround needed because LangGraph invokes sync nodes but we call async functions; fragile if event loop state changes +- **`clean_thinking_content()` ubiquitous**: Strips `...` tags from model responses (handles extended thinking models) +- **source_chat.py builds context twice**: ContextBuilder runs during node execution to fetch source/insights; rebuilds list from context_data (inefficient but safe) +- **source.py embedding is async**: `source.vectorize()` returns job command ID; not awaited (fire-and-forget) +- **transformation.py nullable source**: Accepts `input_text` or `source.full_text` (falls back to second if first missing) +- **ask.py hard-coded vector_search**: No fallback to text search despite commented code suggesting it was planned +- **SqliteSaver location**: Checkpoints stored in path from `LANGGRAPH_CHECKPOINT_FILE` env var; connection shared across graphs + +## Key Dependencies + +- `langgraph`: StateGraph, Send, END, START, SqliteSaver checkpoint persistence +- `langchain_core`: Messages, OutputParser, RunnableConfig +- `ai_prompter`: Prompter for Jinja2 template rendering +- `content_core`: `extract_content()` for file/URL processing +- `open_notebook.ai.provision`: `provision_langchain_model()` (async factory with fallback logic) +- `open_notebook.domain.notebook`: Domain models (Source, Note, SourceInsight, vector_search) +- `loguru`: Logging + +## Usage Example + +```python +# Invoke a graph with config override +config = {"configurable": {"model_id": "model:custom_id"}} +result = await chat_graph.ainvoke( + {"messages": [HumanMessage(content="...")], "notebook": notebook}, + config=config +) + +# Source processing (content → save → transform) +result = await source_graph.ainvoke({ + 
"content_state": {...}, # ProcessSourceState from content-core + "apply_transformations": [t1, t2], + "source_id": "source:123", + "embed": True +}) +``` diff --git a/open_notebook/graphs/ask.py b/open_notebook/graphs/ask.py index 8a0846b1..bb11d128 100644 --- a/open_notebook/graphs/ask.py +++ b/open_notebook/graphs/ask.py @@ -9,8 +9,8 @@ from pydantic import BaseModel, Field from typing_extensions import TypedDict +from open_notebook.ai.provision import provision_langchain_model from open_notebook.domain.notebook import vector_search -from open_notebook.graphs.utils import provision_langchain_model from open_notebook.utils import clean_thinking_content diff --git a/open_notebook/graphs/chat.py b/open_notebook/graphs/chat.py index 442396a4..408e0198 100644 --- a/open_notebook/graphs/chat.py +++ b/open_notebook/graphs/chat.py @@ -5,16 +5,15 @@ from ai_prompter import Prompter from langchain_core.messages import AIMessage, SystemMessage from langchain_core.runnables import RunnableConfig - -from open_notebook.utils import clean_thinking_content from langgraph.checkpoint.sqlite import SqliteSaver from langgraph.graph import END, START, StateGraph from langgraph.graph.message import add_messages from typing_extensions import TypedDict +from open_notebook.ai.provision import provision_langchain_model from open_notebook.config import LANGGRAPH_CHECKPOINT_FILE from open_notebook.domain.notebook import Notebook -from open_notebook.graphs.utils import provision_langchain_model +from open_notebook.utils import clean_thinking_content class ThreadState(TypedDict): @@ -26,7 +25,7 @@ class ThreadState(TypedDict): def call_model_with_messages(state: ThreadState, config: RunnableConfig) -> dict: - system_prompt = Prompter(prompt_template="chat").render(data=state) # type: ignore[arg-type] + system_prompt = Prompter(prompt_template="chat/system").render(data=state) # type: ignore[arg-type] payload = [SystemMessage(content=system_prompt)] + state.get("messages", []) model_id = config.get("configurable", {}).get("model_id") or state.get( "model_override" diff --git a/open_notebook/graphs/prompt.py b/open_notebook/graphs/prompt.py index 176fdbfb..b454904d 100644 --- a/open_notebook/graphs/prompt.py +++ b/open_notebook/graphs/prompt.py @@ -6,7 +6,7 @@ from langgraph.graph import END, START, StateGraph from typing_extensions import TypedDict -from open_notebook.graphs.utils import provision_langchain_model +from open_notebook.ai.provision import provision_langchain_model class PatternChainState(TypedDict): diff --git a/open_notebook/graphs/source.py b/open_notebook/graphs/source.py index da0b3d63..68e16f5b 100644 --- a/open_notebook/graphs/source.py +++ b/open_notebook/graphs/source.py @@ -9,8 +9,8 @@ from loguru import logger from typing_extensions import Annotated, TypedDict +from open_notebook.ai.models import Model, ModelManager from open_notebook.domain.content_settings import ContentSettings -from open_notebook.domain.models import Model, ModelManager from open_notebook.domain.notebook import Asset, Source from open_notebook.domain.transformation import Transformation from open_notebook.graphs.transformation import graph as transform_graph diff --git a/open_notebook/graphs/source_chat.py b/open_notebook/graphs/source_chat.py index 99217a8d..dc353bfd 100644 --- a/open_notebook/graphs/source_chat.py +++ b/open_notebook/graphs/source_chat.py @@ -5,16 +5,15 @@ from ai_prompter import Prompter from langchain_core.messages import AIMessage, SystemMessage from langchain_core.runnables import RunnableConfig - 
-from open_notebook.utils import clean_thinking_content from langgraph.checkpoint.sqlite import SqliteSaver from langgraph.graph import END, START, StateGraph from langgraph.graph.message import add_messages from typing_extensions import TypedDict +from open_notebook.ai.provision import provision_langchain_model from open_notebook.config import LANGGRAPH_CHECKPOINT_FILE from open_notebook.domain.notebook import Source, SourceInsight -from open_notebook.graphs.utils import provision_langchain_model +from open_notebook.utils import clean_thinking_content from open_notebook.utils.context_builder import ContextBuilder @@ -111,7 +110,7 @@ def build_context(): } # Apply the source_chat prompt template - system_prompt = Prompter(prompt_template="source_chat").render(data=prompt_data) + system_prompt = Prompter(prompt_template="source_chat/system").render(data=prompt_data) payload = [SystemMessage(content=system_prompt)] + state.get("messages", []) # Handle async model provisioning from sync context diff --git a/open_notebook/graphs/transformation.py b/open_notebook/graphs/transformation.py index 8c86237a..cb4906d6 100644 --- a/open_notebook/graphs/transformation.py +++ b/open_notebook/graphs/transformation.py @@ -4,9 +4,9 @@ from langgraph.graph import END, START, StateGraph from typing_extensions import TypedDict +from open_notebook.ai.provision import provision_langchain_model from open_notebook.domain.notebook import Source from open_notebook.domain.transformation import DefaultPrompts, Transformation -from open_notebook.graphs.utils import provision_langchain_model from open_notebook.utils import clean_thinking_content diff --git a/open_notebook/plugins/podcasts.py b/open_notebook/plugins/podcasts.py deleted file mode 100644 index 9afabac4..00000000 --- a/open_notebook/plugins/podcasts.py +++ /dev/null @@ -1,293 +0,0 @@ -from typing import ClassVar, List, Optional - -from loguru import logger -from podcastfy.client import generate_podcast -from pydantic import Field, field_validator, model_validator - -from open_notebook.config import DATA_FOLDER -from open_notebook.domain.notebook import ObjectModel - - -class PodcastEpisode(ObjectModel): - table_name: ClassVar[str] = "podcast_episode" - name: str - template: str - instructions: str - text: str - audio_file: str - - -class PodcastConfig(ObjectModel): - table_name: ClassVar[str] = "podcast_config" - name: str - podcast_name: str - podcast_tagline: str - output_language: str = Field(default="English") - person1_role: List[str] - person2_role: List[str] - conversation_style: List[str] - engagement_technique: List[str] - dialogue_structure: List[str] - transcript_model: Optional[str] = None - transcript_model_provider: Optional[str] = None - user_instructions: Optional[str] = None - ending_message: Optional[str] = None - creativity: float = Field(ge=0, le=1) - provider: str = Field(default="openai") - voice1: str - voice2: str - model: str - - # Backwards compatibility - @field_validator("person1_role", "person2_role", mode="before") - @classmethod - def split_string_to_list(cls, value): - if isinstance(value, str): - return [item.strip() for item in value.split(",")] - return value - - @model_validator(mode="after") - def validate_voices(self) -> "PodcastConfig": - if not self.voice1 or not self.voice2: - raise ValueError("Both voice1 and voice2 must be provided") - return self - - async def generate_episode( - self, - episode_name: str, - text: str, - instructions: str = "", - longform: bool = False, - chunks: int = 8, - min_chunk_size=600, - 
): - self.user_instructions = ( - instructions if instructions else self.user_instructions - ) - conversation_config = { - "max_num_chunks": chunks, - "min_chunk_size": min_chunk_size, - "conversation_style": self.conversation_style, - "roles_person1": self.person1_role, - "roles_person2": self.person2_role, - "dialogue_structure": self.dialogue_structure, - "podcast_name": self.podcast_name, - "podcast_tagline": self.podcast_tagline, - "output_language": self.output_language, - "user_instructions": self.user_instructions, - "engagement_techniques": self.engagement_technique, - "creativity": self.creativity, - "text_to_speech": { - "output_directories": { - "transcripts": f"{DATA_FOLDER}/podcasts/transcripts", - "audio": f"{DATA_FOLDER}/podcasts/audio", - }, - "temp_audio_dir": f"{DATA_FOLDER}/podcasts/audio/tmp", - "ending_message": "Thank you for listening to this episode. Don't forget to subscribe to our podcast for more interesting conversations.", - "default_tts_model": self.provider, - self.provider: { - "default_voices": { - "question": self.voice1, - "answer": self.voice2, - }, - "model": self.model, - }, - "audio_format": "mp3", - }, - } - - api_key_label = None - llm_model_name = None - tts_model = None - - if self.transcript_model_provider: - if self.transcript_model_provider == "openai": - api_key_label = "OPENAI_API_KEY" - llm_model_name = self.transcript_model - elif self.transcript_model_provider == "anthropic": - api_key_label = "ANTHROPIC_API_KEY" - llm_model_name = self.transcript_model - elif self.transcript_model_provider == "gemini": - api_key_label = "GOOGLE_API_KEY" - llm_model_name = self.transcript_model - - if self.provider == "google": - tts_model = "gemini" - elif self.provider == "openai": - tts_model = "openai" - elif self.provider == "anthropic": - tts_model = "anthropic" - elif self.provider == "vertexai": - tts_model = "geminimulti" - elif self.provider == "elevenlabs": - tts_model = "elevenlabs" - - logger.info( - f"Generating episode {episode_name} with config {conversation_config} and using model {llm_model_name}, tts model {tts_model}" - ) - - try: - audio_file = generate_podcast( - conversation_config=conversation_config, - text=text, - tts_model=tts_model, - llm_model_name=llm_model_name, - api_key_label=api_key_label, - longform=longform, - ) - episode = PodcastEpisode( - name=episode_name, - template=self.name, - instructions=instructions, - text=str(text), - audio_file=audio_file, - ) - await episode.save() - except Exception as e: - logger.error(f"Failed to generate episode {episode_name}: {e}") - raise - - @field_validator( - "name", "podcast_name", "podcast_tagline", "output_language", "model" - ) - @classmethod - def validate_required_strings(cls, value: str, field) -> str: - if value is None or value.strip() == "": - raise ValueError(f"{field.field_name} cannot be None or empty string") - return value.strip() - - @field_validator("creativity") - def validate_creativity(cls, value): - if not 0 <= value <= 1: - raise ValueError("Creativity must be between 0 and 1") - return value - - -conversation_styles = [ - "Analytical", - "Argumentative", - "Informative", - "Humorous", - "Casual", - "Formal", - "Inspirational", - "Debate-style", - "Interview-style", - "Storytelling", - "Satirical", - "Educational", - "Philosophical", - "Speculative", - "Motivational", - "Fun", - "Technical", - "Light-hearted", - "Serious", - "Investigative", - "Debunking", - "Didactic", - "Thought-provoking", - "Controversial", - "Sarcastic", - "Emotional", - "Exploratory", 
- "Fast-paced", - "Slow-paced", - "Introspective", -] - -# Dialogue Structures -dialogue_structures = [ - "Topic Introduction", - "Opening Monologue", - "Guest Introduction", - "Icebreakers", - "Historical Context", - "Defining Terms", - "Problem Statement", - "Overview of the Issue", - "Deep Dive into Subtopics", - "Pro Arguments", - "Con Arguments", - "Cross-examination", - "Expert Interviews", - "Case Studies", - "Myth Busting", - "Q&A Session", - "Rapid-fire Questions", - "Summary of Key Points", - "Recap", - "Key Takeaways", - "Actionable Tips", - "Call to Action", - "Future Outlook", - "Closing Remarks", - "Resource Recommendations", - "Trending Topics", - "Closing Inspirational Quote", - "Final Reflections", -] - -# Podcast Participant Roles -participant_roles = [ - "Main Summarizer", - "Questioner/Clarifier", - "Optimist", - "Skeptic", - "Specialist", - "Thesis Presenter", - "Counterargument Provider", - "Professor", - "Student", - "Moderator", - "Host", - "Co-host", - "Expert Guest", - "Novice", - "Devil's Advocate", - "Analyst", - "Storyteller", - "Fact-checker", - "Comedian", - "Interviewer", - "Interviewee", - "Historian", - "Visionary", - "Strategist", - "Critic", - "Enthusiast", - "Mediator", - "Commentator", - "Researcher", - "Reporter", - "Advocate", - "Debater", - "Explorer", -] - -# Engagement Techniques -engagement_techniques = [ - "Rhetorical Questions", - "Anecdotes", - "Analogies", - "Humor", - "Metaphors", - "Storytelling", - "Quizzes", - "Personal Testimonials", - "Quotes", - "Jokes", - "Emotional Appeals", - "Provocative Statements", - "Sarcasm", - "Pop Culture References", - "Thought Experiments", - "Puzzles and Riddles", - "Role-playing", - "Debates", - "Catchphrases", - "Statistics and Facts", - "Open-ended Questions", - "Challenges to Assumptions", - "Evoking Curiosity", -] diff --git a/open_notebook/podcasts/CLAUDE.md b/open_notebook/podcasts/CLAUDE.md new file mode 100644 index 00000000..95a8f6c6 --- /dev/null +++ b/open_notebook/podcasts/CLAUDE.md @@ -0,0 +1,68 @@ +# Podcasts Module + +Domain models for podcast generation featuring speaker and episode profile management with job tracking. + +## Purpose + +Encapsulates podcast metadata and configuration: speaker profiles (voice/personality config), episode profiles (generation settings), and podcast episodes (with job status tracking via surreal-commands). + +## Architecture Overview + +Two-tier profile system: +- **SpeakerProfile**: TTS provider/model + 1-4 speaker configurations (name, voice_id, backstory, personality) +- **EpisodeProfile**: Generation settings (outline/transcript models, segment count, briefing template) +- **PodcastEpisode**: Generated episode record linking profiles, content, and async job + +All inherit from `ObjectModel` (SurrealDB base class with table_name and save/load). 
+ +## Component Catalog + +### SpeakerProfile +- Validates 1-4 speakers with required fields: name, voice_id, backstory, personality +- Stores TTS provider/model (e.g., "elevenlabs", "openai") +- `get_by_name()` async query by profile name +- Raises ValueError on invalid speaker counts or missing fields + +### EpisodeProfile +- Configures outline/transcript generation: provider, model, num_segments (3-20 validated) +- References speaker_config by name +- Stores default_briefing template for episode generation +- `get_by_name()` async query + +### PodcastEpisode +- Stores episode_profile and speaker_profile as dicts (snapshots of config at generation time) +- Optional audio_file path, transcript/outline dicts +- **Job tracking**: command field links to surreal-commands RecordID +- `get_job_status()` fetches async job status via surreal-commands library +- `_prepare_save_data()` ensures command field is always RecordID format for database + +## Common Patterns + +- **Profile snapshots**: episode_profile and speaker_profile stored as dicts to freeze config at generation time +- **Field validation**: Pydantic validators enforce constraints (segment count, speaker count, required fields) +- **Async database access**: `get_by_name()` queries via repo_query +- **Job tracking**: command field delegates to surreal-commands; get_job_status() returns "unknown" on failure +- **Record ID handling**: ensure_record_id() converts string to RecordID before save + +## Key Dependencies + +- `pydantic`: Field validators, ObjectModel inheritance +- `surrealdb`: RecordID type for job references +- `open_notebook.database.repository`: repo_query, ensure_record_id +- `open_notebook.domain.base`: ObjectModel base class +- `surreal_commands` (optional): get_command_status() for job status + +## Important Quirks & Gotchas + +- **Snapshot approach**: Episode/speaker profiles stored as dicts (not references), so profile updates don't retroactively affect past episodes +- **Job status resilience**: get_job_status() catches all exceptions and returns "unknown" (no error propagation) +- **validate_speakers executes late**: Validators run at instantiation; bulk inserts may not trigger full validation +- **RecordID coercion**: ensure_record_id() handles both string and RecordID inputs; command field parsed during deserialization +- **No cascade delete**: Removing a profile doesn't cascade to episodes using it + +## How to Extend + +1. **Add new speaker field**: Add to required_fields list in validate_speakers() +2. **Add episode config field**: Validate in EpisodeProfile, update briefing generation code +3. **Add job metadata**: Extend PodcastEpisode with new fields (e.g., progress tracking) +4. 
**Change job provider**: Replace surreal-commands with alternative job queue library; update get_job_status() diff --git a/open_notebook/podcasts/__init__.py b/open_notebook/podcasts/__init__.py new file mode 100644 index 00000000..526ebe25 --- /dev/null +++ b/open_notebook/podcasts/__init__.py @@ -0,0 +1,2 @@ +# Podcasts module +# Contains podcast episode models, profiles, and generation logic diff --git a/open_notebook/domain/podcast.py b/open_notebook/podcasts/models.py similarity index 97% rename from open_notebook/domain/podcast.py rename to open_notebook/podcasts/models.py index ad5ef443..5545f947 100644 --- a/open_notebook/domain/podcast.py +++ b/open_notebook/podcasts/models.py @@ -1,6 +1,6 @@ from typing import Any, ClassVar, Dict, List, Optional, Union -from pydantic import Field, field_validator +from pydantic import ConfigDict, Field, field_validator from surrealdb import RecordID from open_notebook.database.repository import ensure_record_id, repo_query @@ -114,8 +114,7 @@ class PodcastEpisode(ObjectModel): default=None, description="Link to surreal-commands job" ) - class Config: - arbitrary_types_allowed = True + model_config = ConfigDict(arbitrary_types_allowed=True) async def get_job_status(self) -> Optional[str]: """Get the status of the associated command""" diff --git a/open_notebook/utils/CLAUDE.md b/open_notebook/utils/CLAUDE.md new file mode 100644 index 00000000..aec35396 --- /dev/null +++ b/open_notebook/utils/CLAUDE.md @@ -0,0 +1,113 @@ +# Utils Module + +Utility functions and helpers for context building, text processing, tokenization, and versioning. + +## Purpose + +Provides cross-cutting concerns: building LLM context from sources/insights, text utilities (truncation, cleaning), token counting, and version management. + +## Architecture Overview + +**Four core utilities**: +1. **context_builder.py**: Flexible context assembly from sources, notes, insights with token budgeting +2. **text_utils.py**: Text truncation, whitespace cleaning, formatting helpers +3. **token_utils.py**: Token counting for LLM context windows (wrapper around encoding library) +4. **version_utils.py**: Version parsing, comparison, and schema compatibility checks + +Each utility is stateless and can be imported independently. 
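A quick sketch of the token-budgeting helpers in practice (function names are from this doc; exact signatures are assumptions, and counts are cl100k_base estimates rather than per-model truth):

```python
from open_notebook.utils.token_utils import fits_in_context, remaining_tokens, token_count

budget = 2000
used = token_count("Relevant source excerpt...")  # rough estimate, may differ 5-10% from the model
left = remaining_tokens(budget, used)

if fits_in_context("Next candidate item...", left):
    ...  # safe to add this item to the LLM context
```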
+ +## Component Catalog + +### context_builder.py +- **ContextItem**: Dataclass for individual context piece (id, type, content, priority, token_count) +- **ContextConfig**: Configuration for context building (sources/notes/insights selection, max tokens, priority weights) +- **ContextBuilder**: Main class assembling context + - `add_source()`: Include source by ID with inclusion level + - `add_note()`: Include note by ID + - `add_insight()`: Include insight by ID + - `build()`: Assemble context respecting token budget and priorities + - Uses vector_search to fetch source/insight content from SurrealDB + - Returns list of ContextItem objects sorted by priority + +**Key behavior**: +- Token counting is automatic (calculated in ContextItem.__post_init__) +- Max token enforcement via priority weighting (higher priority items included first) +- Type-specific fetching: sources → Source.full_text, notes → Note.content, insights → SourceInsight.content +- Raises DatabaseOperationError if source/note fetch fails + +### text_utils.py +- **truncate_text(text, max_chars, suffix="...")**: Truncates string, adds ellipsis +- **clean_text(text)**: Removes extra whitespace, normalizes newlines +- **extract_sentences(text, max_count)**: Splits text into sentences up to limit +- **normalize_whitespace(text)**: Collapse multiple spaces/newlines into single +- **format_for_llm(text)**: Combines cleaning + normalization for LLM consumption + +**Key behavior**: All functions are pure (no side effects); safe for high-volume processing + +### token_utils.py +- **token_count(text)**: Returns estimated token count for string (via encoding library) +- **remaining_tokens(max_tokens, used)**: Returns remaining tokens in budget +- **fits_in_context(text, max_tokens)**: Boolean check if text fits token budget + +**Key behavior**: Uses fixed encoding (cl100k_base for GPT models); may differ slightly from actual model tokenization + +### version_utils.py +- **parse_version(version_string)**: Parses "1.2.3" format; returns Version namedtuple +- **compare_versions(v1, v2)**: Returns -1 (v1 < v2), 0 (equal), 1 (v1 > v2) +- **is_compatible(current, required)**: Checks if current version meets requirement (e.g., current >= required) +- **schema_version_check()**: Validates database schema version on startup + +**Key behavior**: Assumes semantic versioning (MAJOR.MINOR.PATCH); non-standard formats raise ValueError + +## Common Patterns + +- **Dataclass-driven config**: ContextConfig used by ContextBuilder (immutable after init) +- **Token budgeting**: ContextBuilder respects max_tokens constraint; prioritizes high-priority items +- **Error handling resilience**: token_count() returns estimate; context_builder catches DB errors gracefully +- **Pure text functions**: text_utils functions are stateless utilities (no class needed) +- **Lazy evaluation**: ContextBuilder doesn't fetch items until build() called +- **Type hints throughout**: All functions use Optional, List, Dict for clarity + +## Key Dependencies + +- `open_notebook.domain.notebook`: Source, Note, SourceInsight models; vector_search function +- `open_notebook.exceptions`: DatabaseOperationError, NotFoundError +- `tiktoken` (via token_utils.py): Token encoding for GPT models +- `loguru`: Logging in context_builder (debug-level) + +## Important Quirks & Gotchas + +- **Token count estimation**: Uses cl100k_base encoding; may differ 5-10% from actual model tokens +- **Priority weights default**: If not specified, ContextConfig uses default weights (source=1, note=0.8, 
insight=1.2) +- **Vector search required**: ContextBuilder assumes vector_search is available on Notebook model; fails if not +- **Source.full_text vs content**: Uses full_text field (may include extracted text + metadata) +- **Type-specific fetch logic**: ContextItem.content stores raw dict; caller must parse (e.g., dict["content"]) +- **Circular import risk**: context_builder imports from domain.notebook; avoid domain importing utils +- **Max tokens hard limit**: ContextBuilder stops adding items once max_tokens exceeded (not prorated) +- **No caching**: Every build() call re-fetches from database (use cache layer if needed) +- **Whitespace normalization lossy**: clean_text() may change intended formatting (code blocks, poetry, etc.) + +## How to Extend + +1. **Add new context source type**: Create fetch method in ContextBuilder; update ContextConfig.sources dict +2. **Add text preprocessing**: Add new function to text_utils (e.g., remove_urls, extract_keywords) +3. **Change tokenization**: Replace tiktoken with alternative library in token_utils; update all calls +4. **Add context filtering**: Extend ContextConfig with filter_by_date, filter_by_topic fields +5. **Implement caching**: Wrap ContextBuilder.build() with functools.lru_cache (be aware of mutability) + +## Usage Example + +```python +from open_notebook.utils.context_builder import ContextBuilder, ContextConfig + +config = ContextConfig( + sources={"source:123": "full", "source:456": "summary"}, + max_tokens=2000, +) +builder = ContextBuilder(notebook, config) +context_items = await builder.build() + +# context_items is List[ContextItem] sorted by priority +for item in context_items: + print(f"{item.type}:{item.id} ({item.token_count} tokens)") +``` diff --git a/prompts/CLAUDE.md b/prompts/CLAUDE.md new file mode 100644 index 00000000..976a1a75 --- /dev/null +++ b/prompts/CLAUDE.md @@ -0,0 +1,190 @@ +# Prompts Module + +Jinja2 prompt templates for multi-provider AI workflows in Open Notebook. + +## Purpose + +Centralized prompt repository using `ai_prompter` library to: +1. Separate prompt engineering from Python application logic +2. Provide reusable Jinja2 templates with variable injection +3. Support multi-stage prompt chains (orchestrated by LangGraph workflows) +4. Ensure consistency across similar workflows (chat, search, content generation) + +## Architecture Overview + +**Template Organization by Workflow**: +- **`ask/`**: Multi-stage search synthesis (entry → query_process → final_answer) +- **`chat/`**: Conversational agent with notebook context (system prompt only) +- **`source_chat/`**: Source-focused chat with insight injection (system prompt only) +- **`podcast/`**: Podcast generation pipeline (outline → transcript) + +**Rendering Pattern** (all workflows): +```python +from ai_prompter import Prompter + +# Load template + render with variables +system_prompt = Prompter(prompt_template="ask/entry", parser=parser).render( + data=state +) + +# Then invoke LLM +model = await provision_langchain_model(system_prompt, ...) +response = await model.ainvoke(system_prompt) +``` + +See detailed workflow integration in `open_notebook/graphs/CLAUDE.md` for how each template fits into chat.py, ask.py, source_chat.py. + +## Prompt Engineering Patterns + +### 1. 
Multi-Stage Chain (Ask Workflow) + +Three-template chain for intelligent search: + +``` +entry.jinja (user question → search strategy) + ↓ +query_process.jinja (run each search, generate sub-answer) + ↓ (multiple parallel) +final_answer.jinja (synthesize all results into final response) +``` + +**Key pattern**: `entry.jinja` generates JSON-structured reasoning (via PydanticOutputParser). Each `query_process.jinja` invocation receives one search term + retrieved results. `final_answer.jinja` combines all answers with proper source citation. + +### 2. Conditional Variable Injection (Podcast Workflow) + +Templates accept optional variables for context assembly: + +```jinja +{% if notebook %} +# PROJECT INFORMATION +{{ notebook }} +{% endif %} + +{% if context %} +# CONTEXT +{{ context }} +{% endif %} +``` + +Enabled by Jinja2's conditional blocks. Critical for podcast outline (handles list or string context) and source_chat (injects variable notebook/insight data). + +### 3. Repeated Emphasis on Citation Format (Ask & Chat) + +All response-generating templates emphasize source citation rules: +- Document ID syntax: `[source:id]`, `[note:id]`, `[insight:id]` +- "Do not make up document IDs" repeated multiple times +- Example citations provided inline + +**Rationale**: LLMs naturally hallucinate citations without explicit guidance; repetition + examples reduce hallucination. + +### 4. Format Instructions Delegation + +Templates accept external `{{ format_instructions }}` variable: + +```jinja +# OUTPUT FORMATTING +{{ format_instructions }} +``` + +Allows caller to inject JSON schema, XML format, or other output constraints without modifying template. Decouples prompt from output format evolution. + +### 5. JSON Output with Extended Thinking Support + +Podcast templates include extended thinking pattern: + +```jinja +IMPORTANT OUTPUT FORMAT: +- If you use extended thinking with tags, put ALL your reasoning inside tags +- Put the final JSON output OUTSIDE and AFTER any tags +``` + +Guides models with extended thinking capability to separate reasoning from output (cleaner parsing downstream). + +## File Catalog + +**`ask/` - Search Synthesis Pipeline**: +- **entry.jinja**: Analyzes user question, generates search strategy with JSON output (term + instructions per search) +- **query_process.jinja**: Accepts one search term + retrieved results, generates sub-answer with citations +- **final_answer.jinja**: Combines all sub-answers into coherent final response, enforces source citation + +**`chat/` - Conversational Agent**: +- **system.jinja**: Single system prompt for general chat. Uses conditional blocks for optional notebook context. Emphasizes citation format. + +**`source_chat/` - Source-Focused Chat**: +- **system.jinja**: Single system prompt for source-specific discussion. Injects source metadata (ID, title, topics) + selected context. Conditional blocks for optional notebook/context data. + +**`podcast/` - Podcast Generation**: +- **outline.jinja**: Takes briefing + content + speaker profiles (list support via Jinja2 for-loop). Generates JSON outline with segments (name, description, size). +- **transcript.jinja**: Takes outline + segment index + optional existing transcript. Generates JSON dialogue array (speaker name + dialogue). Iterates speakers with for-loop. 
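Tying the ask templates together, a hedged sketch of the chain as driven from Python — the `data` keys below are assumptions inferred from this catalog, not verified against the templates:

```python
from ai_prompter import Prompter

question = "What is RAG?"
entry_prompt = Prompter(prompt_template="ask/entry").render(data={"question": question})
# An LLM call on entry_prompt yields the JSON search strategy (term + instructions per search)

sub_prompt = Prompter(prompt_template="ask/query_process").render(
    data={"term": "retrieval augmented generation", "results": ["...retrieved chunks..."]}
)
# ask.py fans out one such call per search term in parallel

final_prompt = Prompter(prompt_template="ask/final_answer").render(
    data={"answers": ["...sub-answer 1...", "...sub-answer 2..."]}
)
# The final synthesis call; the template enforces the [source:id] citation rules
```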
+ +## Key Dependencies + +- **ai_prompter**: Prompter class for Jinja2 template rendering with optional OutputParser binding +- **Jinja2** (transitive via ai_prompter): Template syntax (if/for, filters, variable interpolation) +- **No external AI calls**: Templates are pure text; LLM invocation happens in calling code (graphs/) + +## How to Add New Template + +1. **Create subdirectory** in `prompts/` matching workflow name (e.g., `prompts/new_workflow/`) +2. **Define .jinja file(s)** with Jinja2 syntax: + - Use `{{ variable_name }}` for scalar injection + - Use `{% if condition %} ... {% endif %}` for optional sections + - Use `{% for item in list %} ... {% endfor %}` for iteration +3. **Document template variables** as inline comments (follow existing templates) +4. **Reference in calling code** (graphs/): + ```python + from ai_prompter import Prompter + prompt = Prompter(prompt_template="new_workflow/template_name").render(data=context_dict) + ``` +5. **If structured output needed**: Pass `parser=PydanticOutputParser(...)` to Prompter +6. **Document in graphs/CLAUDE.md** how new template fits into workflow chain + +## Important Quirks & Gotchas + +1. **Template path syntax**: Uses forward slashes without `.jinja` extension in Prompter. `"ask/entry"` maps to `/prompts/ask/entry.jinja` +2. **Variable key convention**: All data passed as `data=dict` arg to `.render()`. Template accesses variables directly (e.g., `{{ question }}`). Ensure dict keys match template variable names. +3. **OutputParser binding**: When using PydanticOutputParser, Prompter auto-injects `{{ format_instructions }}` into template. If template doesn't have this placeholder, parser is ignored. +4. **Jinja2 whitespace sensitivity**: Template indentation doesn't affect output, but raw newlines do. Use explicit `\n` or trim filters if output formatting matters. +5. **Conditional blocks are loose**: Jinja2 if-condition evaluates any truthy value (non-empty string, list, dict). `{% if variable %}` is False for empty string/"" but True for any non-empty content. +6. **For-loop list assumption**: Templates using `{% for item in list %}` don't validate list type. If caller passes string instead of list, iteration happens character-by-character (bug risk). +7. **No template composition/inheritance**: Templates are flat (no `{% extends %}` or `{% include %}`). Each workflow keeps templates independent to avoid coupling. +8. **Citation ID format is caller's responsibility**: Templates emphasize citation rules but don't validate. If caller returns wrong ID format, template can't catch it upstream. +9. **Parser extraction happens post-render**: OutputParser.parse() is called AFTER `.render()` returns string. If template has syntax errors, render fails before parsing logic runs. +10. **Template cache**: Prompter likely caches loaded templates. File edits require app restart if using cached instance. 
+ +## Testing Patterns + +**Manual render test**: +```python +from ai_prompter import Prompter + +prompt = Prompter(prompt_template="ask/entry").render( + data={"question": "What is RAG?"} +) +print(prompt) # Inspect Jinja2 output before sending to LLM +``` + +**With parser**: +```python +from pydantic import BaseModel +from langchain_core.output_parsers.pydantic import PydanticOutputParser + +class Strategy(BaseModel): + reasoning: str + searches: list + +parser = PydanticOutputParser(pydantic_object=Strategy) +prompt = Prompter(prompt_template="ask/entry", parser=parser).render( + data={"question": "..."} +) +# prompt now includes {{ format_instructions }} substitution +``` + +**Integration test** (invoke full graph): +See `open_notebook/graphs/ask.py` for how entry.jinja is invoked inside ask_graph workflow. + +## Reference Documentation + +- **Jinja2 syntax guide**: See existing templates for for-loop, if-conditional, variable interpolation patterns +- **Graph integration**: `open_notebook/graphs/CLAUDE.md` documents which template is used in which workflow +- **Sub-directory CLAUDE.md files**: `ask/CLAUDE.md`, `chat/CLAUDE.md`, `podcast/CLAUDE.md` (if created) provide template-specific implementation notes diff --git a/prompts/chat.jinja b/prompts/chat/system.jinja similarity index 100% rename from prompts/chat.jinja rename to prompts/chat/system.jinja diff --git a/prompts/source_chat.jinja b/prompts/source_chat/system.jinja similarity index 100% rename from prompts/source_chat.jinja rename to prompts/source_chat/system.jinja diff --git a/pyproject.toml b/pyproject.toml index 569fee9c..1d8c0f7a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "open-notebook" -version = "1.2.4" +version = "1.3.0" description = "An open source implementation of a research assistant, inspired by Google Notebook LM" authors = [ {name = "Luis Novo", email = "lfnovo@gmail.com"} @@ -17,26 +17,25 @@ dependencies = [ "uvicorn>=0.24.0", "pydantic>=2.9.2", "loguru>=0.7.2", - "langchain>=0.3.3", - "langgraph>=0.2.38", - "tiktoken>=0.8.0", - "langgraph-checkpoint-sqlite>=2.0.0", - "langchain-community>=0.3.3", - "langchain-openai>=0.2.3", - "langchain-anthropic>=0.2.3", - "langchain-ollama>=0.2.0", - "langchain-google-genai>=2.1.10", - "langchain-groq>=0.2.1", - "langchain_mistralai>=0.2.1", - "langchain_deepseek>=0.1.3", + "langchain>=1.2.0", + "langgraph>=1.0.5", + "tiktoken>=0.12.0", + "langgraph-checkpoint-sqlite>=3.0.1", + "langchain-community>=0.4.1", + "langchain-openai>=1.1.6", + "langchain-anthropic>=1.3.0", + "langchain-ollama>=1.0.1", + "langchain-google-genai>=4.1.2", + "langchain-groq>=1.1.1", + "langchain_mistralai>=1.1.1", + "langchain_deepseek>=1.0.0", + "langchain-google-vertexai>=3.2.0", "tomli>=2.0.2", - "groq>=0.12.0", "python-dotenv>=1.0.1", "httpx[socks]>=0.27.0", "content-core>=1.0.2", "ai-prompter>=0.3", - "esperanto>=2.8.3", - "langchain-google-vertexai>=2.0.28", + "esperanto>=2.13", "surrealdb>=1.0.4", "podcast-creator>=0.7.0", "surreal-commands>=1.2.0", diff --git a/setup_guide/docker.env b/setup_guide/docker.env index b7962da4..63f3e0e1 100644 --- a/setup_guide/docker.env +++ b/setup_guide/docker.env @@ -9,5 +9,5 @@ SURREAL_URL="ws://localhost/rpc:8000" SURREAL_USER="root" SURREAL_PASSWORD="root" SURREAL_NAMESPACE="open_notebook" -SURREAL_DATABASE="staging" +SURREAL_DATABASE="open_notebook" diff --git a/tests/test_domain.py b/tests/test_domain.py index 0c10ab4d..3f57ae2f 100644 --- a/tests/test_domain.py +++ b/tests/test_domain.py @@ -8,13 +8,13 @@ 
import pytest from pydantic import ValidationError +from open_notebook.ai.models import ModelManager from open_notebook.domain.base import RecordModel from open_notebook.domain.content_settings import ContentSettings -from open_notebook.domain.models import ModelManager from open_notebook.domain.notebook import Note, Notebook, Source -from open_notebook.domain.podcast import EpisodeProfile, SpeakerProfile from open_notebook.domain.transformation import Transformation from open_notebook.exceptions import InvalidInputError +from open_notebook.podcasts.models import EpisodeProfile, SpeakerProfile # ============================================================================ # TEST SUITE 1: RecordModel Singleton Pattern diff --git a/tests/test_models_api.py b/tests/test_models_api.py index 08c49389..60319ced 100644 --- a/tests/test_models_api.py +++ b/tests/test_models_api.py @@ -8,6 +8,7 @@ def client(): """Create test client after environment variables have been cleared by conftest.""" from api.main import app + return TestClient(app) @@ -17,64 +18,78 @@ class TestModelCreation: @pytest.mark.asyncio @patch("open_notebook.database.repository.repo_query") @patch("api.routers.models.Model.save") - async def test_create_duplicate_model_same_case(self, mock_save, mock_repo_query, client): + async def test_create_duplicate_model_same_case( + self, mock_save, mock_repo_query, client + ): """Test that creating a duplicate model with same case returns 400.""" # Mock repo_query to return a duplicate model - mock_repo_query.return_value = [{"id": "model:123", "name": "gpt-4", "provider": "openai", "type": "language"}] + mock_repo_query.return_value = [ + { + "id": "model:123", + "name": "gpt-4", + "provider": "openai", + "type": "language", + } + ] # Attempt to create duplicate response = client.post( "/api/models", - json={ - "name": "gpt-4", - "provider": "openai", - "type": "language" - } + json={"name": "gpt-4", "provider": "openai", "type": "language"}, ) assert response.status_code == 400 - assert response.json()["detail"] == "Model 'gpt-4' already exists for provider 'openai'" + assert ( + response.json()["detail"] + == "Model 'gpt-4' already exists for provider 'openai'" + ) @pytest.mark.asyncio @patch("open_notebook.database.repository.repo_query") @patch("api.routers.models.Model.save") - async def test_create_duplicate_model_different_case(self, mock_save, mock_repo_query, client): + async def test_create_duplicate_model_different_case( + self, mock_save, mock_repo_query, client + ): """Test that creating a duplicate model with different case returns 400.""" # Mock repo_query to return a duplicate model (case-insensitive match) - mock_repo_query.return_value = [{"id": "model:123", "name": "gpt-4", "provider": "openai", "type": "language"}] + mock_repo_query.return_value = [ + { + "id": "model:123", + "name": "gpt-4", + "provider": "openai", + "type": "language", + } + ] # Attempt to create duplicate with different case response = client.post( "/api/models", - json={ - "name": "GPT-4", - "provider": "OpenAI", - "type": "language" - } + json={"name": "GPT-4", "provider": "OpenAI", "type": "language"}, ) assert response.status_code == 400 - assert response.json()["detail"] == "Model 'GPT-4' already exists for provider 'OpenAI'" + assert ( + response.json()["detail"] + == "Model 'GPT-4' already exists for provider 'OpenAI'" + ) @pytest.mark.asyncio @patch("open_notebook.database.repository.repo_query") - async def test_create_same_model_name_different_provider(self, mock_repo_query, client): + 
async def test_create_same_model_name_different_provider( + self, mock_repo_query, client + ): """Test that creating a model with same name but different provider is allowed.""" - from open_notebook.domain.models import Model + from open_notebook.ai.models import Model # Mock repo_query to return empty (no duplicate found for different provider) mock_repo_query.return_value = [] # Patch the save method on the Model class - with patch.object(Model, 'save', new_callable=AsyncMock) as mock_save: + with patch.object(Model, "save", new_callable=AsyncMock) as mock_save: # Attempt to create same model name with different provider (anthropic) response = client.post( "/api/models", - json={ - "name": "gpt-4", - "provider": "anthropic", - "type": "language" - } + json={"name": "gpt-4", "provider": "anthropic", "type": "language"}, ) # Should succeed because provider is different @@ -124,7 +139,9 @@ def env_side_effect(key): @patch("api.routers.models.os.environ.get") @patch("api.routers.models.AIFactory.get_available_providers") - def test_mode_specific_env_vars_llm_embedding(self, mock_esperanto, mock_env, client): + def test_mode_specific_env_vars_llm_embedding( + self, mock_esperanto, mock_env, client + ): """Test mode-specific env vars (LLM + EMBEDDING) enable only those 2 modes.""" # Mock environment: only LLM and EMBEDDING specific vars are set @@ -193,7 +210,9 @@ def env_side_effect(key): @patch("api.routers.models.os.environ.get") @patch("api.routers.models.AIFactory.get_available_providers") - def test_mixed_config_generic_and_mode_specific(self, mock_esperanto, mock_env, client): + def test_mixed_config_generic_and_mode_specific( + self, mock_esperanto, mock_env, client + ): """Test mixed config: generic + mode-specific (generic should enable all).""" # Mock environment: both generic and mode-specific vars are set diff --git a/uv.lock b/uv.lock index dabab2bc..86e3d7df 100644 --- a/uv.lock +++ b/uv.lock @@ -31,7 +31,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.13.2" +version = "3.13.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -42,42 +42,42 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/35/74/b321e7d7ca762638cdf8cdeceb39755d9c745aff7a64c8789be96ddf6e96/aiohttp-3.13.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4647d02df098f6434bafd7f32ad14942f05a9caa06c7016fdcc816f343997dd0", size = 743409, upload-time = "2025-10-28T20:56:00.354Z" }, - { url = "https://files.pythonhosted.org/packages/99/3d/91524b905ec473beaf35158d17f82ef5a38033e5809fe8742e3657cdbb97/aiohttp-3.13.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e3403f24bcb9c3b29113611c3c16a2a447c3953ecf86b79775e7be06f7ae7ccb", size = 497006, upload-time = "2025-10-28T20:56:01.85Z" }, - { url = "https://files.pythonhosted.org/packages/eb/d3/7f68bc02a67716fe80f063e19adbd80a642e30682ce74071269e17d2dba1/aiohttp-3.13.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:43dff14e35aba17e3d6d5ba628858fb8cb51e30f44724a2d2f0c75be492c55e9", size = 493195, upload-time = "2025-10-28T20:56:03.314Z" }, - { url = 
"https://files.pythonhosted.org/packages/98/31/913f774a4708775433b7375c4f867d58ba58ead833af96c8af3621a0d243/aiohttp-3.13.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e2a9ea08e8c58bb17655630198833109227dea914cd20be660f52215f6de5613", size = 1747759, upload-time = "2025-10-28T20:56:04.904Z" }, - { url = "https://files.pythonhosted.org/packages/e8/63/04efe156f4326f31c7c4a97144f82132c3bb21859b7bb84748d452ccc17c/aiohttp-3.13.2-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53b07472f235eb80e826ad038c9d106c2f653584753f3ddab907c83f49eedead", size = 1704456, upload-time = "2025-10-28T20:56:06.986Z" }, - { url = "https://files.pythonhosted.org/packages/8e/02/4e16154d8e0a9cf4ae76f692941fd52543bbb148f02f098ca73cab9b1c1b/aiohttp-3.13.2-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e736c93e9c274fce6419af4aac199984d866e55f8a4cec9114671d0ea9688780", size = 1807572, upload-time = "2025-10-28T20:56:08.558Z" }, - { url = "https://files.pythonhosted.org/packages/34/58/b0583defb38689e7f06798f0285b1ffb3a6fb371f38363ce5fd772112724/aiohttp-3.13.2-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ff5e771f5dcbc81c64898c597a434f7682f2259e0cd666932a913d53d1341d1a", size = 1895954, upload-time = "2025-10-28T20:56:10.545Z" }, - { url = "https://files.pythonhosted.org/packages/6b/f3/083907ee3437425b4e376aa58b2c915eb1a33703ec0dc30040f7ae3368c6/aiohttp-3.13.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3b6fb0c207cc661fa0bf8c66d8d9b657331ccc814f4719468af61034b478592", size = 1747092, upload-time = "2025-10-28T20:56:12.118Z" }, - { url = "https://files.pythonhosted.org/packages/ac/61/98a47319b4e425cc134e05e5f3fc512bf9a04bf65aafd9fdcda5d57ec693/aiohttp-3.13.2-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:97a0895a8e840ab3520e2288db7cace3a1981300d48babeb50e7425609e2e0ab", size = 1606815, upload-time = "2025-10-28T20:56:14.191Z" }, - { url = "https://files.pythonhosted.org/packages/97/4b/e78b854d82f66bb974189135d31fce265dee0f5344f64dd0d345158a5973/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9e8f8afb552297aca127c90cb840e9a1d4bfd6a10d7d8f2d9176e1acc69bad30", size = 1723789, upload-time = "2025-10-28T20:56:16.101Z" }, - { url = "https://files.pythonhosted.org/packages/ed/fc/9d2ccc794fc9b9acd1379d625c3a8c64a45508b5091c546dea273a41929e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:ed2f9c7216e53c3df02264f25d824b079cc5914f9e2deba94155190ef648ee40", size = 1718104, upload-time = "2025-10-28T20:56:17.655Z" }, - { url = "https://files.pythonhosted.org/packages/66/65/34564b8765ea5c7d79d23c9113135d1dd3609173da13084830f1507d56cf/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:99c5280a329d5fa18ef30fd10c793a190d996567667908bef8a7f81f8202b948", size = 1785584, upload-time = "2025-10-28T20:56:19.238Z" }, - { url = "https://files.pythonhosted.org/packages/30/be/f6a7a426e02fc82781afd62016417b3948e2207426d90a0e478790d1c8a4/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:2ca6ffef405fc9c09a746cb5d019c1672cd7f402542e379afc66b370833170cf", size = 1595126, upload-time = "2025-10-28T20:56:20.836Z" }, - { url = "https://files.pythonhosted.org/packages/e5/c7/8e22d5d28f94f67d2af496f14a83b3c155d915d1fe53d94b66d425ec5b42/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_s390x.whl", 
hash = "sha256:47f438b1a28e926c37632bff3c44df7d27c9b57aaf4e34b1def3c07111fdb782", size = 1800665, upload-time = "2025-10-28T20:56:22.922Z" }, - { url = "https://files.pythonhosted.org/packages/d1/11/91133c8b68b1da9fc16555706aa7276fdf781ae2bb0876c838dd86b8116e/aiohttp-3.13.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9acda8604a57bb60544e4646a4615c1866ee6c04a8edef9b8ee6fd1d8fa2ddc8", size = 1739532, upload-time = "2025-10-28T20:56:25.924Z" }, - { url = "https://files.pythonhosted.org/packages/17/6b/3747644d26a998774b21a616016620293ddefa4d63af6286f389aedac844/aiohttp-3.13.2-cp311-cp311-win32.whl", hash = "sha256:868e195e39b24aaa930b063c08bb0c17924899c16c672a28a65afded9c46c6ec", size = 431876, upload-time = "2025-10-28T20:56:27.524Z" }, - { url = "https://files.pythonhosted.org/packages/c3/63/688462108c1a00eb9f05765331c107f95ae86f6b197b865d29e930b7e462/aiohttp-3.13.2-cp311-cp311-win_amd64.whl", hash = "sha256:7fd19df530c292542636c2a9a85854fab93474396a52f1695e799186bbd7f24c", size = 456205, upload-time = "2025-10-28T20:56:29.062Z" }, - { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, - { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, - { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" }, - { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, - { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, - { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, - { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" }, - { url = 
"https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, - { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, - { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, - { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, - { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" }, - { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, - { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" }, - { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" }, - { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f1/4c/a164164834f03924d9a29dc3acd9e7ee58f95857e0b467f6d04298594ebb/aiohttp-3.13.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b6073099fb654e0a068ae678b10feff95c5cae95bbfcbfa7af669d361a8aa6b", size = 746051, upload-time = "2026-01-03T17:29:43.287Z" }, + { url = "https://files.pythonhosted.org/packages/82/71/d5c31390d18d4f58115037c432b7e0348c60f6f53b727cad33172144a112/aiohttp-3.13.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cb93e166e6c28716c8c6aeb5f99dfb6d5ccf482d29fe9bf9a794110e6d0ab64", size = 499234, upload-time = "2026-01-03T17:29:44.822Z" }, + { url = "https://files.pythonhosted.org/packages/0e/c9/741f8ac91e14b1d2e7100690425a5b2b919a87a5075406582991fb7de920/aiohttp-3.13.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28e027cf2f6b641693a09f631759b4d9ce9165099d2b5d92af9bd4e197690eea", size = 494979, upload-time = "2026-01-03T17:29:46.405Z" }, + { url = "https://files.pythonhosted.org/packages/75/b5/31d4d2e802dfd59f74ed47eba48869c1c21552c586d5e81a9d0d5c2ad640/aiohttp-3.13.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b61b7169ababd7802f9568ed96142616a9118dd2be0d1866e920e77ec8fa92a", size = 1748297, upload-time = "2026-01-03T17:29:48.083Z" }, + { url = "https://files.pythonhosted.org/packages/1a/3e/eefad0ad42959f226bb79664826883f2687d602a9ae2941a18e0484a74d3/aiohttp-3.13.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:80dd4c21b0f6237676449c6baaa1039abae86b91636b6c91a7f8e61c87f89540", size = 1707172, upload-time = "2026-01-03T17:29:49.648Z" }, + { url = "https://files.pythonhosted.org/packages/c5/3a/54a64299fac2891c346cdcf2aa6803f994a2e4beeaf2e5a09dcc54acc842/aiohttp-3.13.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:65d2ccb7eabee90ce0503c17716fc77226be026dcc3e65cce859a30db715025b", size = 1805405, upload-time = "2026-01-03T17:29:51.244Z" }, + { url = "https://files.pythonhosted.org/packages/6c/70/ddc1b7169cf64075e864f64595a14b147a895a868394a48f6a8031979038/aiohttp-3.13.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5b179331a481cb5529fca8b432d8d3c7001cb217513c94cd72d668d1248688a3", size = 1899449, upload-time = "2026-01-03T17:29:53.938Z" }, + { url = "https://files.pythonhosted.org/packages/a1/7e/6815aab7d3a56610891c76ef79095677b8b5be6646aaf00f69b221765021/aiohttp-3.13.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d4c940f02f49483b18b079d1c27ab948721852b281f8b015c058100e9421dd1", size = 1748444, upload-time = "2026-01-03T17:29:55.484Z" }, + { url = "https://files.pythonhosted.org/packages/6b/f2/073b145c4100da5511f457dc0f7558e99b2987cf72600d42b559db856fbc/aiohttp-3.13.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f9444f105664c4ce47a2a7171a2418bce5b7bae45fb610f4e2c36045d85911d3", size = 1606038, upload-time = "2026-01-03T17:29:57.179Z" }, + { url = "https://files.pythonhosted.org/packages/0a/c1/778d011920cae03ae01424ec202c513dc69243cf2db303965615b81deeea/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:694976222c711d1d00ba131904beb60534f93966562f64440d0c9d41b8cdb440", size = 1724156, upload-time = "2026-01-03T17:29:58.914Z" }, + { url = "https://files.pythonhosted.org/packages/0e/cb/3419eabf4ec1e9ec6f242c32b689248365a1cf621891f6f0386632525494/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = 
"sha256:f33ed1a2bf1997a36661874b017f5c4b760f41266341af36febaf271d179f6d7", size = 1722340, upload-time = "2026-01-03T17:30:01.962Z" }, + { url = "https://files.pythonhosted.org/packages/7a/e5/76cf77bdbc435bf233c1f114edad39ed4177ccbfab7c329482b179cff4f4/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e636b3c5f61da31a92bf0d91da83e58fdfa96f178ba682f11d24f31944cdd28c", size = 1783041, upload-time = "2026-01-03T17:30:03.609Z" }, + { url = "https://files.pythonhosted.org/packages/9d/d4/dd1ca234c794fd29c057ce8c0566b8ef7fd6a51069de5f06fa84b9a1971c/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:5d2d94f1f5fcbe40838ac51a6ab5704a6f9ea42e72ceda48de5e6b898521da51", size = 1596024, upload-time = "2026-01-03T17:30:05.132Z" }, + { url = "https://files.pythonhosted.org/packages/55/58/4345b5f26661a6180afa686c473620c30a66afdf120ed3dd545bbc809e85/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2be0e9ccf23e8a94f6f0650ce06042cefc6ac703d0d7ab6c7a917289f2539ad4", size = 1804590, upload-time = "2026-01-03T17:30:07.135Z" }, + { url = "https://files.pythonhosted.org/packages/7b/06/05950619af6c2df7e0a431d889ba2813c9f0129cec76f663e547a5ad56f2/aiohttp-3.13.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9af5e68ee47d6534d36791bbe9b646d2a7c7deb6fc24d7943628edfbb3581f29", size = 1740355, upload-time = "2026-01-03T17:30:09.083Z" }, + { url = "https://files.pythonhosted.org/packages/3e/80/958f16de79ba0422d7c1e284b2abd0c84bc03394fbe631d0a39ffa10e1eb/aiohttp-3.13.3-cp311-cp311-win32.whl", hash = "sha256:a2212ad43c0833a873d0fb3c63fa1bacedd4cf6af2fee62bf4b739ceec3ab239", size = 433701, upload-time = "2026-01-03T17:30:10.869Z" }, + { url = "https://files.pythonhosted.org/packages/dc/f2/27cdf04c9851712d6c1b99df6821a6623c3c9e55956d4b1e318c337b5a48/aiohttp-3.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:642f752c3eb117b105acbd87e2c143de710987e09860d674e068c4c2c441034f", size = 457678, upload-time = "2026-01-03T17:30:12.719Z" }, + { url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" }, + { url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" }, + { url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" }, + { url = "https://files.pythonhosted.org/packages/8d/a8/5a35dc56a06a2c90d4742cbf35294396907027f80eea696637945a106f25/aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29", size = 1737839, upload-time = "2026-01-03T17:30:19.422Z" }, + { url = "https://files.pythonhosted.org/packages/bf/62/4b9eeb331da56530bf2e198a297e5303e1c1ebdceeb00fe9b568a65c5a0c/aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3", size = 1703932, upload-time = "2026-01-03T17:30:21.756Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f6/af16887b5d419e6a367095994c0b1332d154f647e7dc2bd50e61876e8e3d/aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d", size = 1771906, upload-time = "2026-01-03T17:30:23.932Z" }, + { url = "https://files.pythonhosted.org/packages/ce/83/397c634b1bcc24292fa1e0c7822800f9f6569e32934bdeef09dae7992dfb/aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463", size = 1871020, upload-time = "2026-01-03T17:30:26Z" }, + { url = "https://files.pythonhosted.org/packages/86/f6/a62cbbf13f0ac80a70f71b1672feba90fdb21fd7abd8dbf25c0105fb6fa3/aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc", size = 1755181, upload-time = "2026-01-03T17:30:27.554Z" }, + { url = "https://files.pythonhosted.org/packages/0a/87/20a35ad487efdd3fba93d5843efdfaa62d2f1479eaafa7453398a44faf13/aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf", size = 1561794, upload-time = "2026-01-03T17:30:29.254Z" }, + { url = "https://files.pythonhosted.org/packages/de/95/8fd69a66682012f6716e1bc09ef8a1a2a91922c5725cb904689f112309c4/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033", size = 1697900, upload-time = "2026-01-03T17:30:31.033Z" }, + { url = "https://files.pythonhosted.org/packages/e5/66/7b94b3b5ba70e955ff597672dad1691333080e37f50280178967aff68657/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f", size = 1728239, upload-time = "2026-01-03T17:30:32.703Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/6f72f77f9f7d74719692ab65a2a0252584bf8d5f301e2ecb4c0da734530a/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679", size = 1740527, upload-time = "2026-01-03T17:30:34.695Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b4/75ec16cbbd5c01bdaf4a05b19e103e78d7ce1ef7c80867eb0ace42ff4488/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423", size = 1554489, upload-time = "2026-01-03T17:30:36.864Z" }, + { url = "https://files.pythonhosted.org/packages/52/8f/bc518c0eea29f8406dcf7ed1f96c9b48e3bc3995a96159b3fc11f9e08321/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce", size = 1767852, upload-time = "2026-01-03T17:30:39.433Z" }, + { url = "https://files.pythonhosted.org/packages/9d/f2/a07a75173124f31f11ea6f863dc44e6f09afe2bca45dd4e64979490deab1/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a", size = 1722379, upload-time = "2026-01-03T17:30:41.081Z" }, + { url = 
"https://files.pythonhosted.org/packages/3c/4a/1a3fee7c21350cac78e5c5cef711bac1b94feca07399f3d406972e2d8fcd/aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046", size = 428253, upload-time = "2026-01-03T17:30:42.644Z" }, + { url = "https://files.pythonhosted.org/packages/d9/b7/76175c7cb4eb73d91ad63c34e29fc4f77c9386bba4a65b53ba8e05ee3c39/aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57", size = 455407, upload-time = "2026-01-03T17:30:44.195Z" }, ] [[package]] @@ -95,11 +95,11 @@ wheels = [ [[package]] name = "aiosqlite" -version = "0.22.0" +version = "0.22.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/3a/0d/449c024bdabd0678ae07d804e60ed3b9786facd3add66f51eee67a0fccea/aiosqlite-0.22.0.tar.gz", hash = "sha256:7e9e52d72b319fcdeac727668975056c49720c995176dc57370935e5ba162bb9", size = 14707, upload-time = "2025-12-13T18:32:45.762Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/8a/64761f4005f17809769d23e518d915db74e6310474e733e3593cfc854ef1/aiosqlite-0.22.1.tar.gz", hash = "sha256:043e0bd78d32888c0a9ca90fc788b38796843360c855a7262a532813133a0650", size = 14821, upload-time = "2025-12-23T19:25:43.997Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/39/b2181148075272edfbbd6d87e6cd78cc71dca243446fa3b381fd4116950b/aiosqlite-0.22.0-py3-none-any.whl", hash = "sha256:96007fac2ce70eda3ca1bba7a3008c435258a592b8fbf2ee3eeaa36d33971a09", size = 17263, upload-time = "2025-12-13T18:32:44.619Z" }, + { url = "https://files.pythonhosted.org/packages/00/b7/e3bf5133d697a08128598c8d0abc5e16377b51465a33756de24fa7dee953/aiosqlite-0.22.1-py3-none-any.whl", hash = "sha256:21c002eb13823fad740196c5a2e9d8e62f6243bd9e7e4a1f87fb5e44ecb4fceb", size = 17405, upload-time = "2025-12-23T19:25:42.139Z" }, ] [[package]] @@ -297,11 +297,11 @@ wheels = [ [[package]] name = "certifi" -version = "2025.11.12" +version = "2026.1.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" }, ] [[package]] @@ -532,7 +532,7 @@ wheels = [ [[package]] name = "cyclopts" -version = "4.3.0" +version = "4.4.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, @@ -540,9 +540,9 @@ dependencies = [ { name = "rich" }, { 
name = "rich-rst" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1b/0f/fe026df2ab8301e30a2b0bd425ff1462ad858fd4f991c1ac0389c2059c24/cyclopts-4.3.0.tar.gz", hash = "sha256:e95179cd0a959ce250ecfb2f0262a5996a92c1f9467bccad2f3d829e6833cef5", size = 151411, upload-time = "2025-11-25T02:59:33.572Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8f/21/732453ae69d65d72fe37a34f8b1a455c72313b8b0a905b876da20ff7e81a/cyclopts-4.4.3.tar.gz", hash = "sha256:03797c71b49a39dcad8324d6655363056fb998e2ba0240940050331a7f63fe65", size = 159360, upload-time = "2025-12-28T18:57:03.831Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/7a/e8/77a231ae531cf38765b75ddf27dae28bb5f70b41d8bb4f15ce1650e93f57/cyclopts-4.3.0-py3-none-any.whl", hash = "sha256:91a30b69faf128ada7cfeaefd7d9649dc222e8b2a8697f1fc99e4ee7b7ca44f3", size = 187184, upload-time = "2025-11-25T02:59:32.21Z" }, + { url = "https://files.pythonhosted.org/packages/8b/28/03f9b8fbf396b3f2eaf65a7ff441ba2fb7dd397109d563a4e556dc5b3efb/cyclopts-4.4.3-py3-none-any.whl", hash = "sha256:951611a9d4d88d9916716ae281faca9af1cb79b88bb4f22bd0192cff54e7dec6", size = 196707, upload-time = "2025-12-28T18:57:04.884Z" }, ] [[package]] @@ -649,11 +649,11 @@ wheels = [ [[package]] name = "docutils" -version = "0.22.3" +version = "0.22.4" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d9/02/111134bfeb6e6c7ac4c74594e39a59f6c0195dc4846afbeac3cba60f1927/docutils-0.22.3.tar.gz", hash = "sha256:21486ae730e4ca9f622677b1412b879af1791efcfba517e4c6f60be543fc8cdd", size = 2290153, upload-time = "2025-11-06T02:35:55.655Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/b6/03bb70946330e88ffec97aefd3ea75ba575cb2e762061e0e62a213befee8/docutils-0.22.4.tar.gz", hash = "sha256:4db53b1fde9abecbb74d91230d32ab626d94f6badfc575d6db9194a49df29968", size = 2291750, upload-time = "2025-12-18T19:00:26.443Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/11/a8/c6a4b901d17399c77cd81fb001ce8961e9f5e04d3daf27e8925cb012e163/docutils-0.22.3-py3-none-any.whl", hash = "sha256:bd772e4aca73aff037958d44f2be5229ded4c09927fcf8690c577b66234d6ceb", size = 633032, upload-time = "2025-11-06T02:35:52.391Z" }, + { url = "https://files.pythonhosted.org/packages/02/10/5da547df7a391dcde17f59520a231527b8571e6f46fc8efb02ccb370ab12/docutils-0.22.4-py3-none-any.whl", hash = "sha256:d0013f540772d1420576855455d050a2180186c91c15779301ac2ccb3eeb68de", size = 633196, upload-time = "2025-12-18T19:00:18.077Z" }, ] [[package]] @@ -671,15 +671,15 @@ wheels = [ [[package]] name = "esperanto" -version = "2.12.1" +version = "2.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/7f/7d/856ecb7ab6b05fa212ef4cc9186be5008373aeced2dc1ebc99013c96fa3c/esperanto-2.12.1.tar.gz", hash = "sha256:177f001363d7710a7bdf747adc2c3f5e00aebd6b759c3ac6642e2c8df8a4e1cf", size = 811783, upload-time = "2025-12-16T00:53:46.798Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b5/8c/9f655703422fc895f4c327316a8b4c824ec334374a8f6e2dea61f2512362/esperanto-2.13.0.tar.gz", hash = "sha256:78df58492700d4cdfe9dd715313c48e5e4b816ecb87dc22a56d03610431c640e", size = 742118, upload-time = "2026-01-04T22:14:24.996Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/65/48/5d8c6bc2b5db29f3b8f8be7c0964f8d78bd40b3586133f71b773530f67af/esperanto-2.12.1-py3-none-any.whl", hash = 
"sha256:341c239d3c7a14d556a3b4225c6e1b5f66d635e34ec8026117fc18f4f608b3c3", size = 152050, upload-time = "2025-12-16T00:53:48.023Z" }, + { url = "https://files.pythonhosted.org/packages/8c/44/e1c9aa604f252a351d67398c886bb9b1cd572881289864d3e0d45212e61d/esperanto-2.13.0-py3-none-any.whl", hash = "sha256:5b9d12eb3d03f63acb7a77c0c17ed90b32b761ded1f122bc44bb4e8b6625cec0", size = 152019, upload-time = "2026-01-04T22:14:24.031Z" }, ] [[package]] @@ -714,15 +714,15 @@ wheels = [ [[package]] name = "fakeredis" -version = "2.32.1" +version = "2.33.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "redis" }, { name = "sortedcontainers" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/56/14/b47b8471303af7deed7080290c14cff27a831fa47b38f45643e6bf889cee/fakeredis-2.32.1.tar.gz", hash = "sha256:dd8246db159f0b66a1ced7800c9d5ef07769e3d2fde44b389a57f2ce2834e444", size = 171582, upload-time = "2025-11-06T01:40:57.836Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5f/f9/57464119936414d60697fcbd32f38909bb5688b616ae13de6e98384433e0/fakeredis-2.33.0.tar.gz", hash = "sha256:d7bc9a69d21df108a6451bbffee23b3eba432c21a654afc7ff2d295428ec5770", size = 175187, upload-time = "2025-12-16T19:45:52.269Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/d2/c28f6909864bfdb7411bb8f39fabedb5a50da1cbd7da5a1a3a46dfea2eab/fakeredis-2.32.1-py3-none-any.whl", hash = "sha256:e80c8886db2e47ba784f7dfe66aad6cd2eab76093c6bfda50041e5bc890d46cf", size = 118964, upload-time = "2025-11-06T01:40:55.885Z" }, + { url = "https://files.pythonhosted.org/packages/6e/78/a850fed8aeef96d4a99043c90b818b2ed5419cd5b24a4049fd7cfb9f1471/fakeredis-2.33.0-py3-none-any.whl", hash = "sha256:de535f3f9ccde1c56672ab2fdd6a8efbc4f2619fc2f1acc87b8737177d71c965", size = 119605, upload-time = "2025-12-16T19:45:51.08Z" }, ] [package.optional-dependencies] @@ -732,7 +732,7 @@ lua = [ [[package]] name = "fastapi" -version = "0.124.4" +version = "0.128.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -740,14 +740,14 @@ dependencies = [ { name = "starlette" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cd/21/ade3ff6745a82ea8ad88552b4139d27941549e4f19125879f848ac8f3c3d/fastapi-0.124.4.tar.gz", hash = "sha256:0e9422e8d6b797515f33f500309f6e1c98ee4e85563ba0f2debb282df6343763", size = 378460, upload-time = "2025-12-12T15:00:43.891Z" } +sdist = { url = "https://files.pythonhosted.org/packages/52/08/8c8508db6c7b9aae8f7175046af41baad690771c9bcde676419965e338c7/fastapi-0.128.0.tar.gz", hash = "sha256:1cc179e1cef10a6be60ffe429f79b829dce99d8de32d7acb7e6c8dfdf7f2645a", size = 365682, upload-time = "2025-12-27T15:21:13.714Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3e/57/aa70121b5008f44031be645a61a7c4abc24e0e888ad3fc8fda916f4d188e/fastapi-0.124.4-py3-none-any.whl", hash = "sha256:6d1e703698443ccb89e50abe4893f3c84d9d6689c0cf1ca4fad6d3c15cf69f15", size = 113281, upload-time = "2025-12-12T15:00:42.44Z" }, + { url = "https://files.pythonhosted.org/packages/5c/05/5cbb59154b093548acd0f4c7c474a118eda06da25aa75c616b72d8fcd92a/fastapi-0.128.0-py3-none-any.whl", hash = "sha256:aebd93f9716ee3b4f4fcfe13ffb7cf308d99c9f3ab5622d8877441072561582d", size = 103094, upload-time = "2025-12-27T15:21:12.154Z" }, ] [[package]] name = "fastmcp" -version = "2.14.1" +version = "2.14.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "authlib" }, @@ -767,18 +767,18 @@ dependencies = [ { 
name = "uvicorn" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9e/50/d38e4371bdc34e709f4731b1e882cb7bc50e51c1a224859d4cd381b3a79b/fastmcp-2.14.1.tar.gz", hash = "sha256:132725cbf77b68fa3c3d165eff0cfa47e40c1479457419e6a2cfda65bd84c8d6", size = 8263331, upload-time = "2025-12-15T02:26:27.102Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/1e/e3528227688c248283f6d86869b1e900563ffc223eff00f4f923d2750365/fastmcp-2.14.2.tar.gz", hash = "sha256:bd23d1b808b6f446444f10114dac468b11bfb9153ed78628f5619763d0cf573e", size = 8272966, upload-time = "2025-12-31T15:26:13.433Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/82/72401d09dc27c27fdf72ad6c2fe331e553e3c3646e01b5ff16473191033d/fastmcp-2.14.1-py3-none-any.whl", hash = "sha256:fb3e365cc1d52573ab89caeba9944dd4b056149097be169bce428e011f0a57e5", size = 412176, upload-time = "2025-12-15T02:26:25.356Z" }, + { url = "https://files.pythonhosted.org/packages/0d/67/8456d39484fcb7afd0defed21918e773ed59a98b39e5b633328527c88367/fastmcp-2.14.2-py3-none-any.whl", hash = "sha256:e33cd622e1ebd5110af6a981804525b6cd41072e3c7d68268ed69ef3be651aca", size = 413279, upload-time = "2025-12-31T15:26:11.178Z" }, ] [[package]] name = "filelock" -version = "3.20.1" +version = "3.20.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a7/23/ce7a1126827cedeb958fc043d61745754464eb56c5937c35bbf2b8e26f34/filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c", size = 19476, upload-time = "2025-12-15T23:54:28.027Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c1/e0/a75dbe4bca1e7d41307323dad5ea2efdd95408f74ab2de8bd7dba9b51a1a/filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", size = 19510, upload-time = "2026-01-02T15:33:32.582Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a", size = 16666, upload-time = "2025-12-15T23:54:26.874Z" }, + { url = "https://files.pythonhosted.org/packages/9a/30/ab407e2ec752aa541704ed8f93c11e2a5d92c168b8a755d818b74a3c5c2d/filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8", size = 16697, upload-time = "2026-01-02T15:33:31.133Z" }, ] [[package]] @@ -792,7 +792,7 @@ wheels = [ [[package]] name = "firecrawl-py" -version = "4.11.0" +version = "4.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -803,9 +803,9 @@ dependencies = [ { name = "requests" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/33/a8/8bf5173020ca19feb955c9ce7535b9bdab585c239d583075c3e27fb68b22/firecrawl_py-4.11.0.tar.gz", hash = "sha256:9cb11d44c95c2574baf3a5f87dc9a7e57ed08a6996ea68e8f6da76825faee98e", size = 155548, upload-time = "2025-12-14T21:37:59.33Z" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/bb/4864c17040eea3f68979cb077b6c7d8a66032140d9fcbcbfe934d0cf8f3e/firecrawl_py-4.12.0.tar.gz", hash = "sha256:a914d0ce759cd2b77dba6f63f2b647a3d527dc380e98243bc5620e031881bc3f", size = 160692, upload-time = "2025-12-19T05:39:22.297Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/ce/6a/71c15be08a65e86764e632e28e05ee09cddf9f3aa6b349d12963da9dd023/firecrawl_py-4.11.0-py3-none-any.whl", hash = "sha256:12963e358519e01996832674e48e1b8f6024573e78fdf0f7d1caf14da8fadcdf", size = 194528, upload-time = "2025-12-14T21:37:57.913Z" }, + { url = "https://files.pythonhosted.org/packages/89/a8/32317c2d007b42ed7814cf7440fcac98ecb6cdb9a44feab4f85ae458f86e/firecrawl_py-4.12.0-py3-none-any.whl", hash = "sha256:0807fa7e881570749a52bd9971e1e1f2373e5aafd21b6f30d194b1b8a32aa58f", size = 201511, upload-time = "2025-12-19T05:39:20.605Z" }, ] [[package]] @@ -901,7 +901,7 @@ requests = [ [[package]] name = "google-cloud-aiplatform" -version = "1.130.0" +version = "1.132.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "docstring-parser" }, @@ -918,9 +918,9 @@ dependencies = [ { name = "shapely" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fe/2b/fda8309b8cd6d7b8fe9b866d115b7433a1ea3a6a050566ba0ec8559972f0/google_cloud_aiplatform-1.130.0.tar.gz", hash = "sha256:f66aeb23f0a6848fc2d5bbdf1b5777c3cf8e06056f73ef815317abf89d5a0262", size = 9808235, upload-time = "2025-12-10T14:03:06.473Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/ab/059eeadb017fc4ef9fce952b862d553d07a75f8ce928e15c1cd0094f42e5/google_cloud_aiplatform-1.132.0.tar.gz", hash = "sha256:345d113ffe2b6f705810453418dc75f3e0ad9354e499be75f6dac5f1464d30b4", size = 9919155, upload-time = "2025-12-17T06:10:11.076Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/46/61/780ce1aadf576a1e8b9acedde13f356691cd0878d5749db35903dfe33a54/google_cloud_aiplatform-1.130.0-py2.py3-none-any.whl", hash = "sha256:f578ccee55655dd9e2300cfcafb178e47c3dfdcf746ad465234b875d3e955929", size = 8145868, upload-time = "2025-12-10T14:03:03.944Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c6/6aafb41ec97b6ccf103434a28bc5cc7779a955799a68707dfa89969e9490/google_cloud_aiplatform-1.132.0-py2.py3-none-any.whl", hash = "sha256:4e68673a61f6f5700f607fb35baf38015b06351175706fc25b6bc5f436741bd8", size = 8180348, upload-time = "2025-12-17T06:10:07.887Z" }, ] [[package]] @@ -1010,7 +1010,7 @@ wheels = [ [[package]] name = "google-genai" -version = "1.55.0" +version = "1.56.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1024,9 +1024,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1d/7c/19b59750592702305ae211905985ec8ab56f34270af4a159fba5f0214846/google_genai-1.55.0.tar.gz", hash = "sha256:ae9f1318fedb05c7c1b671a4148724751201e8908a87568364a309804064d986", size = 477615, upload-time = "2025-12-11T02:49:28.624Z" } +sdist = { url = "https://files.pythonhosted.org/packages/70/ad/d3ac5a102135bd3f1e4b1475ca65d2bd4bcc22eb2e9348ac40fe3fadb1d6/google_genai-1.56.0.tar.gz", hash = "sha256:0491af33c375f099777ae207d9621f044e27091fafad4c50e617eba32165e82f", size = 340451, upload-time = "2025-12-17T12:35:05.412Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3e/86/a5a8e32b2d40b30b5fb20e7b8113fafd1e38befa4d1801abd5ce6991065a/google_genai-1.55.0-py3-none-any.whl", hash = "sha256:98c422762b5ff6e16b8d9a1e4938e8e0ad910392a5422e47f5301498d7f373a1", size = 703389, upload-time = "2025-12-11T02:49:27.105Z" }, + { url = "https://files.pythonhosted.org/packages/84/93/94bc7a89ef4e7ed3666add55cd859d1483a22737251df659bf1aa46e9405/google_genai-1.56.0-py3-none-any.whl", hash = 
"sha256:9e6b11e0c105ead229368cb5849a480e4d0185519f8d9f538d61ecfcf193b052", size = 426563, upload-time = "2025-12-17T12:35:03.717Z" }, ] [[package]] @@ -1247,11 +1247,11 @@ wheels = [ [[package]] name = "humanize" -version = "4.14.0" +version = "4.15.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b6/43/50033d25ad96a7f3845f40999b4778f753c3901a11808a584fed7c00d9f5/humanize-4.14.0.tar.gz", hash = "sha256:2fa092705ea640d605c435b1ca82b2866a1b601cdf96f076d70b79a855eba90d", size = 82939, upload-time = "2025-10-15T13:04:51.214Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/66/a3921783d54be8a6870ac4ccffcd15c4dc0dd7fcce51c6d63b8c63935276/humanize-4.15.0.tar.gz", hash = "sha256:1dd098483eb1c7ee8e32eb2e99ad1910baefa4b75c3aff3a82f4d78688993b10", size = 83599, upload-time = "2025-12-20T20:16:13.19Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/5b/9512c5fb6c8218332b530f13500c6ff5f3ce3342f35e0dd7be9ac3856fd3/humanize-4.14.0-py3-none-any.whl", hash = "sha256:d57701248d040ad456092820e6fde56c930f17749956ac47f4f655c0c547bfff", size = 132092, upload-time = "2025-10-15T13:04:49.404Z" }, + { url = "https://files.pythonhosted.org/packages/c5/7b/bca5613a0c3b542420cf92bd5e5fb8ebd5435ce1011a091f66bb7693285e/humanize-4.15.0-py3-none-any.whl", hash = "sha256:b1186eb9f5a9749cd9cb8565aee77919dd7c8d076161cf44d70e59e3301e1769", size = 132203, upload-time = "2025-12-20T20:16:11.67Z" }, ] [[package]] @@ -1301,14 +1301,14 @@ wheels = [ [[package]] name = "importlib-metadata" -version = "8.7.0" +version = "8.7.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "zipp" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, ] [[package]] @@ -1408,26 +1408,26 @@ wheels = [ [[package]] name = "jaraco-context" -version = "6.0.1" +version = "6.0.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "backports-tarfile", marker = "python_full_version < '3.12'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/df/ad/f3777b81bf0b6e7bc7514a1656d3e637b2e8e15fab2ce3235730b3e7a4e6/jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3", size = 13912, upload-time = "2024-08-20T03:39:27.358Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/8d/7d/41acf8e22d791bde812cb6c2c36128bb932ed8ae066bcb5e39cb198e8253/jaraco_context-6.0.2.tar.gz", hash = "sha256:953ae8dddb57b1d791bf72ea1009b32088840a7dd19b9ba16443f62be919ee57", size = 14994, upload-time = "2025-12-24T19:21:35.784Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ff/db/0c52c4cf5e4bd9f5d7135ec7669a3a767af21b3a308e1ed3674881e52b62/jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4", size = 6825, upload-time = "2024-08-20T03:39:25.966Z" }, + { url = "https://files.pythonhosted.org/packages/c7/0c/1e0096ced9c55f9c6c6655446798df74165780375d3f5ab5f33751e087ae/jaraco_context-6.0.2-py3-none-any.whl", hash = "sha256:55fc21af4b4f9ca94aa643b6ee7fe13b1e4c01abf3aeb98ca4ad9c80b741c786", size = 6988, upload-time = "2025-12-24T19:21:34.557Z" }, ] [[package]] name = "jaraco-functools" -version = "4.3.0" +version = "4.4.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "more-itertools" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f7/ed/1aa2d585304ec07262e1a83a9889880701079dde796ac7b1d1826f40c63d/jaraco_functools-4.3.0.tar.gz", hash = "sha256:cfd13ad0dd2c47a3600b439ef72d8615d482cedcff1632930d6f28924d92f294", size = 19755, upload-time = "2025-08-18T20:05:09.91Z" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/27/056e0638a86749374d6f57d0b0db39f29509cce9313cf91bdc0ac4d91084/jaraco_functools-4.4.0.tar.gz", hash = "sha256:da21933b0417b89515562656547a77b4931f98176eb173644c0d35032a33d6bb", size = 19943, upload-time = "2025-12-21T09:29:43.6Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b4/09/726f168acad366b11e420df31bf1c702a54d373a83f968d94141a8c3fde0/jaraco_functools-4.3.0-py3-none-any.whl", hash = "sha256:227ff8ed6f7b8f62c56deff101545fa7543cf2c8e7b82a7c2116e672f29c26e8", size = 10408, upload-time = "2025-08-18T20:05:08.69Z" }, + { url = "https://files.pythonhosted.org/packages/fd/c4/813bb09f0985cb21e959f21f2464169eca882656849adf727ac7bb7e1767/jaraco_functools-4.4.0-py3-none-any.whl", hash = "sha256:9eec1e36f45c818d9bf307c8948eb03b2b56cd44087b3cdc989abca1f20b9176", size = 10481, upload-time = "2025-12-21T09:29:42.27Z" }, ] [[package]] @@ -1654,7 +1654,7 @@ wheels = [ [[package]] name = "langchain-classic" -version = "1.0.0" +version = "1.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "langchain-core" }, @@ -1665,9 +1665,9 @@ dependencies = [ { name = "requests" }, { name = "sqlalchemy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d9/b1/a66babeccb2c05ed89690a534296688c0349bee7a71641e91ecc2afd72fd/langchain_classic-1.0.0.tar.gz", hash = "sha256:a63655609254ebc36d660eb5ad7c06c778b2e6733c615ffdac3eac4fbe2b12c5", size = 10514930, upload-time = "2025-10-17T16:02:47.887Z" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/4b/bd03518418ece4c13192a504449b58c28afee915dc4a6f4b02622458cb1b/langchain_classic-1.0.1.tar.gz", hash = "sha256:40a499684df36b005a1213735dc7f8dca8f5eb67978d6ec763e7a49780864fdc", size = 10516020, upload-time = "2025-12-23T22:55:22.615Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/74/74/246f809a3741c21982f985ca0113ec92d3c84896308561cc4414823f6951/langchain_classic-1.0.0-py3-none-any.whl", hash = "sha256:97f71f150c10123f5511c08873f030e35ede52311d729a7688c721b4e1e01f33", size = 1040701, upload-time = "2025-10-17T16:02:46.35Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/0f/eab87f017d7fe28e8c11fff614f4cdbfae32baadb77d0f79e9f922af1df2/langchain_classic-1.0.1-py3-none-any.whl", hash = "sha256:131d83a02bb80044c68fedc1ab4ae885d5b8f8c2c742d8ab9e7534ad9cda8e80", size = 1040666, upload-time = "2025-12-23T22:55:21.025Z" }, ] [[package]] @@ -1695,7 +1695,7 @@ wheels = [ [[package]] name = "langchain-core" -version = "1.2.1" +version = "1.2.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jsonpatch" }, @@ -1707,9 +1707,9 @@ dependencies = [ { name = "typing-extensions" }, { name = "uuid-utils" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f5/a0/2177f4ef4dfbea8edeba377b7b4889d177b8356ce186640e4651b240fd4d/langchain_core-1.2.1.tar.gz", hash = "sha256:131e6ad105b47ec2adc4d4d973f569276688f48cd890ba44603d48e76d9993ce", size = 802986, upload-time = "2025-12-15T14:32:50.845Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b9/ce/ba5ed5ea6df22965b2893c2ed28ebb456204962723d408904c4acfa5e942/langchain_core-1.2.6.tar.gz", hash = "sha256:b4e7841dd7f8690375aa07c54739178dc2c635147d475e0c2955bf82a1afa498", size = 833343, upload-time = "2026-01-02T21:35:44.749Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/cc/95/98c47dbb4b6098934ff70e0f52efef3a85505dbcccc9eb63587e21fde4c9/langchain_core-1.2.1-py3-none-any.whl", hash = "sha256:2f63859f85dc3d95f768e35fed605702e3ff5aa3e92c7b253103119613e79768", size = 475972, upload-time = "2025-12-15T14:32:49.698Z" }, + { url = "https://files.pythonhosted.org/packages/6f/40/0655892c245d8fbe6bca6d673ab5927e5c3ab7be143de40b52289a0663bc/langchain_core-1.2.6-py3-none-any.whl", hash = "sha256:aa6ed954b4b1f4504937fe75fdf674317027e9a91ba7a97558b0de3dc8004e34", size = 489096, upload-time = "2026-01-02T21:35:43.391Z" }, ] [[package]] @@ -1727,7 +1727,7 @@ wheels = [ [[package]] name = "langchain-google-genai" -version = "4.0.0" +version = "4.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filetype" }, @@ -1735,9 +1735,9 @@ dependencies = [ { name = "langchain-core" }, { name = "pydantic" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d9/76/acb0d8b33a69a97ebca89f237db3ff3484a5d253ae874bad84d790d8ca1f/langchain_google_genai-4.0.0.tar.gz", hash = "sha256:22b1546abe677ddea8daebf4be1de162266d278dd0643cdc2a24f45979e5cf8e", size = 271354, upload-time = "2025-12-08T16:14:46.793Z" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/04/c8d2840d96f05485abeb5288bd88ec8c5fb7a24065968201fa54969a47d8/langchain_google_genai-4.1.2.tar.gz", hash = "sha256:aa0dd7807a9a15651d10cd228c574f23fe46e2ce62921bf21d73a63869ecd814", size = 276143, upload-time = "2025-12-19T04:10:57.799Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/d0/f05cb2ce4ad5b4f4c3addd5978e6b776479db68dcfe55d09e30a8974eda7/langchain_google_genai-4.0.0-py3-none-any.whl", hash = "sha256:42c592a222a075ce87f424d466ba6d39194eb58fbb9a84006c401d08034a4feb", size = 63553, upload-time = "2025-12-08T16:14:45.834Z" }, + { url = "https://files.pythonhosted.org/packages/f6/2f/a63dde25c9d11340d0f5f538a9fea77571b4b4e73294ad58fa6ea84079a0/langchain_google_genai-4.1.2-py3-none-any.whl", hash = "sha256:89790f2e3ca113f7e45883f541a834120d279e21f235fffc491c81cd1af11fdd", size = 65640, upload-time = "2025-12-19T04:10:56.386Z" }, ] [[package]] @@ -1805,16 +1805,16 @@ wheels = [ [[package]] name = "langchain-openai" -version = "1.1.3" +version = "1.1.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = 
"langchain-core" }, { name = "openai" }, { name = "tiktoken" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/93/67/6126a1c645b34388edee917473e51b2158812af1fcc8fedc23a330478329/langchain_openai-1.1.3.tar.gz", hash = "sha256:d8be85e4d1151258e1d2ed29349179ad971499115948b01364c2a1ab0474b1bf", size = 1038144, upload-time = "2025-12-12T22:28:08.611Z" } +sdist = { url = "https://files.pythonhosted.org/packages/ae/67/228dc28b4498ea16422577013b5bb4ba35a1b99f8be975d6747c7a9f7e6a/langchain_openai-1.1.6.tar.gz", hash = "sha256:e306612654330ae36fb6bbe36db91c98534312afade19e140c3061fe4208dac8", size = 1038310, upload-time = "2025-12-18T17:58:52.84Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d1/11/2b3b4973495fc5f0456ed5c8c88a6ded7ca34c8608c72faafa87088acf5a/langchain_openai-1.1.3-py3-none-any.whl", hash = "sha256:58945d9e87c1ab3a91549c3f3744c6c9571511cdc3cf875b8842aaec5b3e32a6", size = 84585, upload-time = "2025-12-12T22:28:07.066Z" }, + { url = "https://files.pythonhosted.org/packages/db/5b/1f6521df83c1a8e8d3f52351883b59683e179c0aa1bec75d0a77a394c9e7/langchain_openai-1.1.6-py3-none-any.whl", hash = "sha256:c42d04a67a85cee1d994afe400800d2b09ebf714721345f0b651eb06a02c3948", size = 84701, upload-time = "2025-12-18T17:58:51.527Z" }, ] [[package]] @@ -1897,20 +1897,20 @@ wheels = [ [[package]] name = "langgraph-sdk" -version = "0.3.0" +version = "0.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, { name = "orjson" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2b/1b/f328afb4f24f6e18333ff357d9580a3bb5b133ff2c7aae34fef7f5b87f31/langgraph_sdk-0.3.0.tar.gz", hash = "sha256:4145bc3c34feae227ae918341f66d3ba7d1499722c1ef4a8aae5ea828897d1d4", size = 130366, upload-time = "2025-12-12T22:19:30.323Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/d3/b6be0b0aba2a53a8920a2b0b4328a83121ec03eea9952e576d06a4182f6f/langgraph_sdk-0.3.1.tar.gz", hash = "sha256:f6dadfd2444eeff3e01405a9005c95fb3a028d4bd954ebec80ea6150084f92bb", size = 130312, upload-time = "2025-12-18T22:11:47.42Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/48/ee4d7afb3c3d38bd2ebe51a4d37f1ed7f1058dd242f35994b562203067aa/langgraph_sdk-0.3.0-py3-none-any.whl", hash = "sha256:c1ade483fba17ae354ee920e4779042b18d5aba875f2a858ba569f62f628f26f", size = 66489, upload-time = "2025-12-12T22:19:29.228Z" }, + { url = "https://files.pythonhosted.org/packages/ab/fe/0c1c9c01a154eba62b20b02fabe811fd94a2b810061ae9e4d8462b8cf85a/langgraph_sdk-0.3.1-py3-none-any.whl", hash = "sha256:0b856923bfd20bf3441ce9d03bef488aa333fb610e972618799a9d584436acad", size = 66517, upload-time = "2025-12-18T22:11:46.625Z" }, ] [[package]] name = "langsmith" -version = "0.4.59" +version = "0.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "httpx" }, @@ -1922,39 +1922,39 @@ dependencies = [ { name = "uuid-utils" }, { name = "zstandard" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/61/71/d61524c3205bde7ec90423d997cf1a228d8adf2811110ec91ed40c8e8a34/langsmith-0.4.59.tar.gz", hash = "sha256:6b143214c2303dafb29ab12dcd05ac50bdfc60dac01c6e0450e50cee1d2415e0", size = 992784, upload-time = "2025-12-11T02:40:52.231Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5f/48/fb62df712cfd77804999f3bc08e3cba33ecb81064dd2973dd67cd68eaf93/langsmith-0.6.0.tar.gz", hash = "sha256:b60f1785aed4dac5e01f24db01aa18fa1af258bad4531e045e739438daa3f8c2", size = 883012, upload-time = "2026-01-02T18:42:13.721Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/63/54/4577ef9424debea2fa08af338489d593276520d2e2f8950575d292be612c/langsmith-0.4.59-py3-none-any.whl", hash = "sha256:97c26399286441a7b7b06b912e2801420fbbf3a049787e609d49dc975ab10bc5", size = 413051, upload-time = "2025-12-11T02:40:50.523Z" }, + { url = "https://files.pythonhosted.org/packages/10/c6/322df2c18ab462712c968415fb31779ed3e1fd1976357fd78f31f51b2632/langsmith-0.6.0-py3-none-any.whl", hash = "sha256:f7570175aed705b1f4c4dae724c07980a737b8b565252444d11394dda9931e8c", size = 283280, upload-time = "2026-01-02T18:42:11.966Z" }, ] [[package]] name = "librt" -version = "0.7.4" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/93/e4/b59bdf1197fdf9888452ea4d2048cdad61aef85eb83e99dc52551d7fdc04/librt-0.7.4.tar.gz", hash = "sha256:3871af56c59864d5fd21d1ac001eb2fb3b140d52ba0454720f2e4a19812404ba", size = 145862, upload-time = "2025-12-15T16:52:43.862Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/84/64/44089b12d8b4714a7f0e2f33fb19285ba87702d4be0829f20b36ebeeee07/librt-0.7.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3485b9bb7dfa66167d5500ffdafdc35415b45f0da06c75eb7df131f3357b174a", size = 54709, upload-time = "2025-12-15T16:51:16.699Z" }, - { url = "https://files.pythonhosted.org/packages/26/ef/6fa39fb5f37002f7d25e0da4f24d41b457582beea9369eeb7e9e73db5508/librt-0.7.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:188b4b1a770f7f95ea035d5bbb9d7367248fc9d12321deef78a269ebf46a5729", size = 56663, upload-time = "2025-12-15T16:51:17.856Z" }, - { url = "https://files.pythonhosted.org/packages/9d/e4/cbaca170a13bee2469c90df9e47108610b4422c453aea1aec1779ac36c24/librt-0.7.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1b668b1c840183e4e38ed5a99f62fac44c3a3eef16870f7f17cfdfb8b47550ed", size = 161703, upload-time = "2025-12-15T16:51:19.421Z" }, - { url = "https://files.pythonhosted.org/packages/d0/32/0b2296f9cc7e693ab0d0835e355863512e5eac90450c412777bd699c76ae/librt-0.7.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0e8f864b521f6cfedb314d171630f827efee08f5c3462bcbc2244ab8e1768cd6", size = 171027, upload-time = "2025-12-15T16:51:20.721Z" }, - { url = "https://files.pythonhosted.org/packages/d8/33/c70b6d40f7342716e5f1353c8da92d9e32708a18cbfa44897a93ec2bf879/librt-0.7.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4df7c9def4fc619a9c2ab402d73a0c5b53899abe090e0100323b13ccb5a3dd82", size = 184700, upload-time = "2025-12-15T16:51:22.272Z" }, - { url = "https://files.pythonhosted.org/packages/e4/c8/555c405155da210e4c4113a879d378f54f850dbc7b794e847750a8fadd43/librt-0.7.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f79bc3595b6ed159a1bf0cdc70ed6ebec393a874565cab7088a219cca14da727", size = 180719, upload-time = "2025-12-15T16:51:23.561Z" }, - { url = "https://files.pythonhosted.org/packages/6b/88/34dc1f1461c5613d1b73f0ecafc5316cc50adcc1b334435985b752ed53e5/librt-0.7.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:77772a4b8b5f77d47d883846928c36d730b6e612a6388c74cba33ad9eb149c11", size = 174535, upload-time = "2025-12-15T16:51:25.031Z" }, - { url = "https://files.pythonhosted.org/packages/b6/5a/f3fafe80a221626bcedfa9fe5abbf5f04070989d44782f579b2d5920d6d0/librt-0.7.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:064a286e6ab0b4c900e228ab4fa9cb3811b4b83d3e0cc5cd816b2d0f548cb61c", size = 195236, upload-time = 
"2025-12-15T16:51:26.328Z" }, - { url = "https://files.pythonhosted.org/packages/d8/77/5c048d471ce17f4c3a6e08419be19add4d291e2f7067b877437d482622ac/librt-0.7.4-cp311-cp311-win32.whl", hash = "sha256:42da201c47c77b6cc91fc17e0e2b330154428d35d6024f3278aa2683e7e2daf2", size = 42930, upload-time = "2025-12-15T16:51:27.853Z" }, - { url = "https://files.pythonhosted.org/packages/fb/3b/514a86305a12c3d9eac03e424b07cd312c7343a9f8a52719aa079590a552/librt-0.7.4-cp311-cp311-win_amd64.whl", hash = "sha256:d31acb5886c16ae1711741f22504195af46edec8315fe69b77e477682a87a83e", size = 49240, upload-time = "2025-12-15T16:51:29.037Z" }, - { url = "https://files.pythonhosted.org/packages/ba/01/3b7b1914f565926b780a734fac6e9a4d2c7aefe41f4e89357d73697a9457/librt-0.7.4-cp311-cp311-win_arm64.whl", hash = "sha256:114722f35093da080a333b3834fff04ef43147577ed99dd4db574b03a5f7d170", size = 42613, upload-time = "2025-12-15T16:51:30.194Z" }, - { url = "https://files.pythonhosted.org/packages/f3/e7/b805d868d21f425b7e76a0ea71a2700290f2266a4f3c8357fcf73efc36aa/librt-0.7.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7dd3b5c37e0fb6666c27cf4e2c88ae43da904f2155c4cfc1e5a2fdce3b9fcf92", size = 55688, upload-time = "2025-12-15T16:51:31.571Z" }, - { url = "https://files.pythonhosted.org/packages/59/5e/69a2b02e62a14cfd5bfd9f1e9adea294d5bcfeea219c7555730e5d068ee4/librt-0.7.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9c5de1928c486201b23ed0cc4ac92e6e07be5cd7f3abc57c88a9cf4f0f32108", size = 57141, upload-time = "2025-12-15T16:51:32.714Z" }, - { url = "https://files.pythonhosted.org/packages/6e/6b/05dba608aae1272b8ea5ff8ef12c47a4a099a04d1e00e28a94687261d403/librt-0.7.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:078ae52ffb3f036396cc4aed558e5b61faedd504a3c1f62b8ae34bf95ae39d94", size = 165322, upload-time = "2025-12-15T16:51:33.986Z" }, - { url = "https://files.pythonhosted.org/packages/8f/bc/199533d3fc04a4cda8d7776ee0d79955ab0c64c79ca079366fbc2617e680/librt-0.7.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce58420e25097b2fc201aef9b9f6d65df1eb8438e51154e1a7feb8847e4a55ab", size = 174216, upload-time = "2025-12-15T16:51:35.384Z" }, - { url = "https://files.pythonhosted.org/packages/62/ec/09239b912a45a8ed117cb4a6616d9ff508f5d3131bd84329bf2f8d6564f1/librt-0.7.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b719c8730c02a606dc0e8413287e8e94ac2d32a51153b300baf1f62347858fba", size = 189005, upload-time = "2025-12-15T16:51:36.687Z" }, - { url = "https://files.pythonhosted.org/packages/46/2e/e188313d54c02f5b0580dd31476bb4b0177514ff8d2be9f58d4a6dc3a7ba/librt-0.7.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3749ef74c170809e6dee68addec9d2458700a8de703de081c888e92a8b015cf9", size = 183960, upload-time = "2025-12-15T16:51:37.977Z" }, - { url = "https://files.pythonhosted.org/packages/eb/84/f1d568d254518463d879161d3737b784137d236075215e56c7c9be191cee/librt-0.7.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b35c63f557653c05b5b1b6559a074dbabe0afee28ee2a05b6c9ba21ad0d16a74", size = 177609, upload-time = "2025-12-15T16:51:40.584Z" }, - { url = "https://files.pythonhosted.org/packages/5d/43/060bbc1c002f0d757c33a1afe6bf6a565f947a04841139508fc7cef6c08b/librt-0.7.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1ef704e01cb6ad39ad7af668d51677557ca7e5d377663286f0ee1b6b27c28e5f", size = 199269, upload-time = "2025-12-15T16:51:41.879Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/7f/708f8f02d8012ee9f366c07ea6a92882f48bd06cc1ff16a35e13d0fbfb08/librt-0.7.4-cp312-cp312-win32.whl", hash = "sha256:c66c2b245926ec15188aead25d395091cb5c9df008d3b3207268cd65557d6286", size = 43186, upload-time = "2025-12-15T16:51:43.149Z" }, - { url = "https://files.pythonhosted.org/packages/f1/a5/4e051b061c8b2509be31b2c7ad4682090502c0a8b6406edcf8c6b4fe1ef7/librt-0.7.4-cp312-cp312-win_amd64.whl", hash = "sha256:71a56f4671f7ff723451f26a6131754d7c1809e04e22ebfbac1db8c9e6767a20", size = 49455, upload-time = "2025-12-15T16:51:44.336Z" }, - { url = "https://files.pythonhosted.org/packages/d0/d2/90d84e9f919224a3c1f393af1636d8638f54925fdc6cd5ee47f1548461e5/librt-0.7.4-cp312-cp312-win_arm64.whl", hash = "sha256:419eea245e7ec0fe664eb7e85e7ff97dcdb2513ca4f6b45a8ec4a3346904f95a", size = 42828, upload-time = "2025-12-15T16:51:45.498Z" }, +version = "0.7.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/29/47f29026ca17f35cf299290292d5f8331f5077364974b7675a353179afa2/librt-0.7.7.tar.gz", hash = "sha256:81d957b069fed1890953c3b9c3895c7689960f233eea9a1d9607f71ce7f00b2c", size = 145910, upload-time = "2026-01-01T23:52:22.87Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/56/30b5c342518005546df78841cb0820ae85a17e7d07d521c10ef367306d0d/librt-0.7.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a487b71fbf8a9edb72a8c7a456dda0184642d99cd007bc819c0b7ab93676a8ee", size = 54709, upload-time = "2026-01-01T23:51:02.774Z" }, + { url = "https://files.pythonhosted.org/packages/72/78/9f120e3920b22504d4f3835e28b55acc2cc47c9586d2e1b6ba04c3c1bf01/librt-0.7.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f4d4efb218264ecf0f8516196c9e2d1a0679d9fb3bb15df1155a35220062eba8", size = 56663, upload-time = "2026-01-01T23:51:03.838Z" }, + { url = "https://files.pythonhosted.org/packages/1c/ea/7d7a1ee7dfc1151836028eba25629afcf45b56bbc721293e41aa2e9b8934/librt-0.7.7-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b8bb331aad734b059c4b450cd0a225652f16889e286b2345af5e2c3c625c3d85", size = 161705, upload-time = "2026-01-01T23:51:04.917Z" }, + { url = "https://files.pythonhosted.org/packages/45/a5/952bc840ac8917fbcefd6bc5f51ad02b89721729814f3e2bfcc1337a76d6/librt-0.7.7-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:467dbd7443bda08338fc8ad701ed38cef48194017554f4c798b0a237904b3f99", size = 171029, upload-time = "2026-01-01T23:51:06.09Z" }, + { url = "https://files.pythonhosted.org/packages/fa/bf/c017ff7da82dc9192cf40d5e802a48a25d00e7639b6465cfdcee5893a22c/librt-0.7.7-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50d1d1ee813d2d1a3baf2873634ba506b263032418d16287c92ec1cc9c1a00cb", size = 184704, upload-time = "2026-01-01T23:51:07.549Z" }, + { url = "https://files.pythonhosted.org/packages/77/ec/72f3dd39d2cdfd6402ab10836dc9cbf854d145226062a185b419c4f1624a/librt-0.7.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7e5070cf3ec92d98f57574da0224f8c73faf1ddd6d8afa0b8c9f6e86997bc74", size = 180719, upload-time = "2026-01-01T23:51:09.062Z" }, + { url = "https://files.pythonhosted.org/packages/78/86/06e7a1a81b246f3313bf515dd9613a1c81583e6fd7843a9f4d625c4e926d/librt-0.7.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:bdb9f3d865b2dafe7f9ad7f30ef563c80d0ddd2fdc8cc9b8e4f242f475e34d75", size = 174537, upload-time = "2026-01-01T23:51:10.611Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/08/f9fb2edc9c7a76e95b2924ce81d545673f5b034e8c5dd92159d1c7dae0c6/librt-0.7.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8185c8497d45164e256376f9da5aed2bb26ff636c798c9dabe313b90e9f25b28", size = 195238, upload-time = "2026-01-01T23:51:11.762Z" }, + { url = "https://files.pythonhosted.org/packages/ba/56/ea2d2489d3ea1f47b301120e03a099e22de7b32c93df9a211e6ff4f9bf38/librt-0.7.7-cp311-cp311-win32.whl", hash = "sha256:44d63ce643f34a903f09ff7ca355aae019a3730c7afd6a3c037d569beeb5d151", size = 42939, upload-time = "2026-01-01T23:51:13.192Z" }, + { url = "https://files.pythonhosted.org/packages/58/7b/c288f417e42ba2a037f1c0753219e277b33090ed4f72f292fb6fe175db4c/librt-0.7.7-cp311-cp311-win_amd64.whl", hash = "sha256:7d13cc340b3b82134f8038a2bfe7137093693dcad8ba5773da18f95ad6b77a8a", size = 49240, upload-time = "2026-01-01T23:51:14.264Z" }, + { url = "https://files.pythonhosted.org/packages/7c/24/738eb33a6c1516fdb2dfd2a35db6e5300f7616679b573585be0409bc6890/librt-0.7.7-cp311-cp311-win_arm64.whl", hash = "sha256:983de36b5a83fe9222f4f7dcd071f9b1ac6f3f17c0af0238dadfb8229588f890", size = 42613, upload-time = "2026-01-01T23:51:15.268Z" }, + { url = "https://files.pythonhosted.org/packages/56/72/1cd9d752070011641e8aee046c851912d5f196ecd726fffa7aed2070f3e0/librt-0.7.7-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a85a1fc4ed11ea0eb0a632459ce004a2d14afc085a50ae3463cd3dfe1ce43fc", size = 55687, upload-time = "2026-01-01T23:51:16.291Z" }, + { url = "https://files.pythonhosted.org/packages/50/aa/d5a1d4221c4fe7e76ae1459d24d6037783cb83c7645164c07d7daf1576ec/librt-0.7.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c87654e29a35938baead1c4559858f346f4a2a7588574a14d784f300ffba0efd", size = 57136, upload-time = "2026-01-01T23:51:17.363Z" }, + { url = "https://files.pythonhosted.org/packages/23/6f/0c86b5cb5e7ef63208c8cc22534df10ecc5278efc0d47fb8815577f3ca2f/librt-0.7.7-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c9faaebb1c6212c20afd8043cd6ed9de0a47d77f91a6b5b48f4e46ed470703fe", size = 165320, upload-time = "2026-01-01T23:51:18.455Z" }, + { url = "https://files.pythonhosted.org/packages/16/37/df4652690c29f645ffe405b58285a4109e9fe855c5bb56e817e3e75840b3/librt-0.7.7-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1908c3e5a5ef86b23391448b47759298f87f997c3bd153a770828f58c2bb4630", size = 174216, upload-time = "2026-01-01T23:51:19.599Z" }, + { url = "https://files.pythonhosted.org/packages/9a/d6/d3afe071910a43133ec9c0f3e4ce99ee6df0d4e44e4bddf4b9e1c6ed41cc/librt-0.7.7-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbc4900e95a98fc0729523be9d93a8fedebb026f32ed9ffc08acd82e3e181503", size = 189005, upload-time = "2026-01-01T23:51:21.052Z" }, + { url = "https://files.pythonhosted.org/packages/d5/18/74060a870fe2d9fd9f47824eba6717ce7ce03124a0d1e85498e0e7efc1b2/librt-0.7.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a7ea4e1fbd253e5c68ea0fe63d08577f9d288a73f17d82f652ebc61fa48d878d", size = 183961, upload-time = "2026-01-01T23:51:22.493Z" }, + { url = "https://files.pythonhosted.org/packages/7c/5e/918a86c66304af66a3c1d46d54df1b2d0b8894babc42a14fb6f25511497f/librt-0.7.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ef7699b7a5a244b1119f85c5bbc13f152cd38240cbb2baa19b769433bae98e50", size = 177610, upload-time = "2026-01-01T23:51:23.874Z" }, + { url = 
"https://files.pythonhosted.org/packages/b2/d7/b5e58dc2d570f162e99201b8c0151acf40a03a39c32ab824dd4febf12736/librt-0.7.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:955c62571de0b181d9e9e0a0303c8bc90d47670a5eff54cf71bf5da61d1899cf", size = 199272, upload-time = "2026-01-01T23:51:25.341Z" }, + { url = "https://files.pythonhosted.org/packages/18/87/8202c9bd0968bdddc188ec3811985f47f58ed161b3749299f2c0dd0f63fb/librt-0.7.7-cp312-cp312-win32.whl", hash = "sha256:1bcd79be209313b270b0e1a51c67ae1af28adad0e0c7e84c3ad4b5cb57aaa75b", size = 43189, upload-time = "2026-01-01T23:51:26.799Z" }, + { url = "https://files.pythonhosted.org/packages/61/8d/80244b267b585e7aa79ffdac19f66c4861effc3a24598e77909ecdd0850e/librt-0.7.7-cp312-cp312-win_amd64.whl", hash = "sha256:4353ee891a1834567e0302d4bd5e60f531912179578c36f3d0430f8c5e16b456", size = 49462, upload-time = "2026-01-01T23:51:27.813Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1f/75db802d6a4992d95e8a889682601af9b49d5a13bbfa246d414eede1b56c/librt-0.7.7-cp312-cp312-win_arm64.whl", hash = "sha256:a76f1d679beccccdf8c1958e732a1dfcd6e749f8821ee59d7bec009ac308c029", size = 42828, upload-time = "2026-01-01T23:51:28.804Z" }, ] [[package]] @@ -2109,14 +2109,14 @@ wheels = [ [[package]] name = "marshmallow" -version = "3.26.1" +version = "3.26.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "packaging" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825, upload-time = "2025-02-03T15:32:25.093Z" } +sdist = { url = "https://files.pythonhosted.org/packages/55/79/de6c16cc902f4fc372236926b0ce2ab7845268dcc30fb2fbb7f71b418631/marshmallow-3.26.2.tar.gz", hash = "sha256:bbe2adb5a03e6e3571b573f42527c6fe926e17467833660bebd11593ab8dfd57", size = 222095, upload-time = "2025-12-22T06:53:53.309Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878, upload-time = "2025-02-03T15:32:22.295Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/5108cb3ee4ba6501748c4908b908e55f42a5b66245b4cfe0c99326e1ef6e/marshmallow-3.26.2-py3-none-any.whl", hash = "sha256:013fa8a3c4c276c24d26d84ce934dc964e2aa794345a0f8c7e5a7191482c8a73", size = 50964, upload-time = "2025-12-22T06:53:51.801Z" }, ] [[package]] @@ -2133,7 +2133,7 @@ wheels = [ [[package]] name = "mcp" -version = "1.24.0" +version = "1.25.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2151,9 +2151,9 @@ dependencies = [ { name = "typing-inspection" }, { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d6/2c/db9ae5ab1fcdd9cd2bcc7ca3b7361b712e30590b64d5151a31563af8f82d/mcp-1.24.0.tar.gz", hash = "sha256:aeaad134664ce56f2721d1abf300666a1e8348563f4d3baff361c3b652448efc", size = 604375, upload-time = "2025-12-12T14:19:38.205Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/61/0d/5cf14e177c8ae655a2fd9324a6ef657ca4cafd3fc2201c87716055e29641/mcp-1.24.0-py3-none-any.whl", hash = "sha256:db130e103cc50ddc3dffc928382f33ba3eaef0b711f7a87c05e7ded65b1ca062", size = 232896, upload-time = "2025-12-12T14:19:36.14Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" }, ] [[package]] @@ -2284,11 +2284,11 @@ wheels = [ [[package]] name = "nodeenv" -version = "1.9.1" +version = "1.10.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" }, + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] [[package]] @@ -2336,39 +2336,39 @@ wheels = [ [[package]] name = "numpy" -version = "2.3.5" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/76/65/21b3bc86aac7b8f2862db1e808f1ea22b028e30a225a34a5ede9bf8678f2/numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0", size = 20584950, upload-time = "2025-11-16T22:52:42.067Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/43/77/84dd1d2e34d7e2792a236ba180b5e8fcc1e3e414e761ce0253f63d7f572e/numpy-2.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10", size = 17034641, upload-time = "2025-11-16T22:49:19.336Z" }, - { url = "https://files.pythonhosted.org/packages/2a/ea/25e26fa5837106cde46ae7d0b667e20f69cbbc0efd64cba8221411ab26ae/numpy-2.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218", size = 12528324, upload-time = "2025-11-16T22:49:22.582Z" }, - { url = "https://files.pythonhosted.org/packages/4d/1a/e85f0eea4cf03d6a0228f5c0256b53f2df4bc794706e7df019fc622e47f1/numpy-2.3.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d", size = 5356872, upload-time = "2025-11-16T22:49:25.408Z" }, - { url = "https://files.pythonhosted.org/packages/5c/bb/35ef04afd567f4c989c2060cde39211e4ac5357155c1833bcd1166055c61/numpy-2.3.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5", size = 6893148, 
upload-time = "2025-11-16T22:49:27.549Z" }, - { url = "https://files.pythonhosted.org/packages/f2/2b/05bbeb06e2dff5eab512dfc678b1cc5ee94d8ac5956a0885c64b6b26252b/numpy-2.3.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7", size = 14557282, upload-time = "2025-11-16T22:49:30.964Z" }, - { url = "https://files.pythonhosted.org/packages/65/fb/2b23769462b34398d9326081fad5655198fcf18966fcb1f1e49db44fbf31/numpy-2.3.5-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4", size = 16897903, upload-time = "2025-11-16T22:49:34.191Z" }, - { url = "https://files.pythonhosted.org/packages/ac/14/085f4cf05fc3f1e8aa95e85404e984ffca9b2275a5dc2b1aae18a67538b8/numpy-2.3.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e", size = 16341672, upload-time = "2025-11-16T22:49:37.2Z" }, - { url = "https://files.pythonhosted.org/packages/6f/3b/1f73994904142b2aa290449b3bb99772477b5fd94d787093e4f24f5af763/numpy-2.3.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748", size = 18838896, upload-time = "2025-11-16T22:49:39.727Z" }, - { url = "https://files.pythonhosted.org/packages/cd/b9/cf6649b2124f288309ffc353070792caf42ad69047dcc60da85ee85fea58/numpy-2.3.5-cp311-cp311-win32.whl", hash = "sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c", size = 6563608, upload-time = "2025-11-16T22:49:42.079Z" }, - { url = "https://files.pythonhosted.org/packages/aa/44/9fe81ae1dcc29c531843852e2874080dc441338574ccc4306b39e2ff6e59/numpy-2.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c", size = 13078442, upload-time = "2025-11-16T22:49:43.99Z" }, - { url = "https://files.pythonhosted.org/packages/6d/a7/f99a41553d2da82a20a2f22e93c94f928e4490bb447c9ff3c4ff230581d3/numpy-2.3.5-cp311-cp311-win_arm64.whl", hash = "sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa", size = 10458555, upload-time = "2025-11-16T22:49:47.092Z" }, - { url = "https://files.pythonhosted.org/packages/44/37/e669fe6cbb2b96c62f6bbedc6a81c0f3b7362f6a59230b23caa673a85721/numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e", size = 16733873, upload-time = "2025-11-16T22:49:49.84Z" }, - { url = "https://files.pythonhosted.org/packages/c5/65/df0db6c097892c9380851ab9e44b52d4f7ba576b833996e0080181c0c439/numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769", size = 12259838, upload-time = "2025-11-16T22:49:52.863Z" }, - { url = "https://files.pythonhosted.org/packages/5b/e1/1ee06e70eb2136797abe847d386e7c0e830b67ad1d43f364dd04fa50d338/numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5", size = 5088378, upload-time = "2025-11-16T22:49:55.055Z" }, - { url = "https://files.pythonhosted.org/packages/6d/9c/1ca85fb86708724275103b81ec4cf1ac1d08f465368acfc8da7ab545bdae/numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4", size = 6628559, upload-time = "2025-11-16T22:49:57.371Z" }, - { url = 
"https://files.pythonhosted.org/packages/74/78/fcd41e5a0ce4f3f7b003da85825acddae6d7ecb60cf25194741b036ca7d6/numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d", size = 14250702, upload-time = "2025-11-16T22:49:59.632Z" }, - { url = "https://files.pythonhosted.org/packages/b6/23/2a1b231b8ff672b4c450dac27164a8b2ca7d9b7144f9c02d2396518352eb/numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28", size = 16606086, upload-time = "2025-11-16T22:50:02.127Z" }, - { url = "https://files.pythonhosted.org/packages/a0/c5/5ad26fbfbe2012e190cc7d5003e4d874b88bb18861d0829edc140a713021/numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b", size = 16025985, upload-time = "2025-11-16T22:50:04.536Z" }, - { url = "https://files.pythonhosted.org/packages/d2/fa/dd48e225c46c819288148d9d060b047fd2a6fb1eb37eae25112ee4cb4453/numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c", size = 18542976, upload-time = "2025-11-16T22:50:07.557Z" }, - { url = "https://files.pythonhosted.org/packages/05/79/ccbd23a75862d95af03d28b5c6901a1b7da4803181513d52f3b86ed9446e/numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952", size = 6285274, upload-time = "2025-11-16T22:50:10.746Z" }, - { url = "https://files.pythonhosted.org/packages/2d/57/8aeaf160312f7f489dea47ab61e430b5cb051f59a98ae68b7133ce8fa06a/numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa", size = 12782922, upload-time = "2025-11-16T22:50:12.811Z" }, - { url = "https://files.pythonhosted.org/packages/78/a6/aae5cc2ca78c45e64b9ef22f089141d661516856cf7c8a54ba434576900d/numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013", size = 10194667, upload-time = "2025-11-16T22:50:16.16Z" }, - { url = "https://files.pythonhosted.org/packages/c6/65/f9dea8e109371ade9c782b4e4756a82edf9d3366bca495d84d79859a0b79/numpy-2.3.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310", size = 16910689, upload-time = "2025-11-16T22:52:23.247Z" }, - { url = "https://files.pythonhosted.org/packages/00/4f/edb00032a8fb92ec0a679d3830368355da91a69cab6f3e9c21b64d0bb986/numpy-2.3.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c", size = 12457053, upload-time = "2025-11-16T22:52:26.367Z" }, - { url = "https://files.pythonhosted.org/packages/16/a4/e8a53b5abd500a63836a29ebe145fc1ab1f2eefe1cfe59276020373ae0aa/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18", size = 5285635, upload-time = "2025-11-16T22:52:29.266Z" }, - { url = "https://files.pythonhosted.org/packages/a3/2f/37eeb9014d9c8b3e9c55bc599c68263ca44fdbc12a93e45a21d1d56df737/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff", size = 6801770, upload-time = "2025-11-16T22:52:31.421Z" }, - { url = 
"https://files.pythonhosted.org/packages/7d/e4/68d2f474df2cb671b2b6c2986a02e520671295647dad82484cde80ca427b/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb", size = 14391768, upload-time = "2025-11-16T22:52:33.593Z" }, - { url = "https://files.pythonhosted.org/packages/b8/50/94ccd8a2b141cb50651fddd4f6a48874acb3c91c8f0842b08a6afc4b0b21/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7", size = 16729263, upload-time = "2025-11-16T22:52:36.369Z" }, - { url = "https://files.pythonhosted.org/packages/2d/ee/346fa473e666fe14c52fcdd19ec2424157290a032d4c41f98127bfb31ac7/numpy-2.3.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425", size = 12967213, upload-time = "2025-11-16T22:52:39.38Z" }, +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/7e/7bae7cbcc2f8132271967aa03e03954fc1e48aa1f3bf32b29ca95fbef352/numpy-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:316b2f2584682318539f0bcaca5a496ce9ca78c88066579ebd11fd06f8e4741e", size = 16940166, upload-time = "2025-12-20T16:15:43.434Z" }, + { url = "https://files.pythonhosted.org/packages/0f/27/6c13f5b46776d6246ec884ac5817452672156a506d08a1f2abb39961930a/numpy-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2718c1de8504121714234b6f8241d0019450353276c88b9453c9c3d92e101db", size = 12641781, upload-time = "2025-12-20T16:15:45.701Z" }, + { url = "https://files.pythonhosted.org/packages/14/1c/83b4998d4860d15283241d9e5215f28b40ac31f497c04b12fa7f428ff370/numpy-2.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:21555da4ec4a0c942520ead42c3b0dc9477441e085c42b0fbdd6a084869a6f6b", size = 5470247, upload-time = "2025-12-20T16:15:47.943Z" }, + { url = "https://files.pythonhosted.org/packages/54/08/cbce72c835d937795571b0464b52069f869c9e78b0c076d416c5269d2718/numpy-2.4.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:413aa561266a4be2d06cd2b9665e89d9f54c543f418773076a76adcf2af08bc7", size = 6799807, upload-time = "2025-12-20T16:15:49.795Z" }, + { url = "https://files.pythonhosted.org/packages/ff/be/2e647961cd8c980591d75cdcd9e8f647d69fbe05e2a25613dc0a2ea5fb1a/numpy-2.4.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0feafc9e03128074689183031181fac0897ff169692d8492066e949041096548", size = 14701992, upload-time = "2025-12-20T16:15:51.615Z" }, + { url = "https://files.pythonhosted.org/packages/a2/fb/e1652fb8b6fd91ce6ed429143fe2e01ce714711e03e5b762615e7b36172c/numpy-2.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8fdfed3deaf1928fb7667d96e0567cdf58c2b370ea2ee7e586aa383ec2cb346", size = 16646871, upload-time = "2025-12-20T16:15:54.129Z" }, + { url = "https://files.pythonhosted.org/packages/62/23/d841207e63c4322842f7cd042ae981cffe715c73376dcad8235fb31debf1/numpy-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e06a922a469cae9a57100864caf4f8a97a1026513793969f8ba5b63137a35d25", size = 16487190, upload-time = 
"2025-12-20T16:15:56.147Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/6a842c8421ebfdec0a230e65f61e0dabda6edbef443d999d79b87c273965/numpy-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:927ccf5cd17c48f801f4ed43a7e5673a2724bd2171460be3e3894e6e332ef83a", size = 18580762, upload-time = "2025-12-20T16:15:58.524Z" }, + { url = "https://files.pythonhosted.org/packages/0a/d1/c79e0046641186f2134dde05e6181825b911f8bdcef31b19ddd16e232847/numpy-2.4.0-cp311-cp311-win32.whl", hash = "sha256:882567b7ae57c1b1a0250208cc21a7976d8cbcc49d5a322e607e6f09c9e0bd53", size = 6233359, upload-time = "2025-12-20T16:16:00.938Z" }, + { url = "https://files.pythonhosted.org/packages/fc/f0/74965001d231f28184d6305b8cdc1b6fcd4bf23033f6cb039cfe76c9fca7/numpy-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b986403023c8f3bf8f487c2e6186afda156174d31c175f747d8934dfddf3479", size = 12601132, upload-time = "2025-12-20T16:16:02.484Z" }, + { url = "https://files.pythonhosted.org/packages/65/32/55408d0f46dfebce38017f5bd931affa7256ad6beac1a92a012e1fbc67a7/numpy-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:3f3096405acc48887458bbf9f6814d43785ac7ba2a57ea6442b581dedbc60ce6", size = 10573977, upload-time = "2025-12-20T16:16:04.77Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" }, + { url = "https://files.pythonhosted.org/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" }, + { url = "https://files.pythonhosted.org/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" }, + { url = "https://files.pythonhosted.org/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" }, + { url = "https://files.pythonhosted.org/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" }, + { url = "https://files.pythonhosted.org/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" }, + { url = 
"https://files.pythonhosted.org/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" }, + { url = "https://files.pythonhosted.org/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" }, + { url = "https://files.pythonhosted.org/packages/4b/ef/088e7c7342f300aaf3ee5f2c821c4b9996a1bef2aaf6a49cc8ab4883758e/numpy-2.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b54c83f1c0c0f1d748dca0af516062b8829d53d1f0c402be24b4257a9c48ada6", size = 16819003, upload-time = "2025-12-20T16:18:03.41Z" }, + { url = "https://files.pythonhosted.org/packages/ff/ce/a53017b5443b4b84517182d463fc7bcc2adb4faa8b20813f8e5f5aeb5faa/numpy-2.4.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:aabb081ca0ec5d39591fc33018cd4b3f96e1a2dd6756282029986d00a785fba4", size = 12567105, upload-time = "2025-12-20T16:18:05.594Z" }, + { url = "https://files.pythonhosted.org/packages/77/58/5ff91b161f2ec650c88a626c3905d938c89aaadabd0431e6d9c1330c83e2/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:8eafe7c36c8430b7794edeab3087dec7bf31d634d92f2af9949434b9d1964cba", size = 5395590, upload-time = "2025-12-20T16:18:08.031Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4e/f1a084106df8c2df8132fc437e56987308e0524836aa7733721c8429d4fe/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2f585f52b2baf07ff3356158d9268ea095e221371f1074fadea2f42544d58b4d", size = 6709947, upload-time = "2025-12-20T16:18:09.836Z" }, + { url = "https://files.pythonhosted.org/packages/63/09/3d8aeb809c0332c3f642da812ac2e3d74fc9252b3021f8c30c82e99e3f3d/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32ed06d0fe9cae27d8fb5f400c63ccee72370599c75e683a6358dd3a4fb50aaf", size = 14535119, upload-time = "2025-12-20T16:18:12.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/7f/68f0fc43a2cbdc6bb239160c754d87c922f60fbaa0fa3cd3d312b8a7f5ee/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:57c540ed8fb1f05cb997c6761cd56db72395b0d6985e90571ff660452ade4f98", size = 16475815, upload-time = "2025-12-20T16:18:14.433Z" }, + { url = "https://files.pythonhosted.org/packages/11/73/edeacba3167b1ca66d51b1a5a14697c2c40098b5ffa01811c67b1785a5ab/numpy-2.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a39fb973a726e63223287adc6dafe444ce75af952d711e400f3bf2b36ef55a7b", size = 12489376, upload-time = "2025-12-20T16:18:16.524Z" }, ] [[package]] @@ -2386,14 +2386,13 @@ wheels = [ [[package]] name = "open-notebook" -version = 
"1.2.4" +version = "1.3.0" source = { editable = "." } dependencies = [ { name = "ai-prompter" }, { name = "content-core" }, { name = "esperanto" }, { name = "fastapi" }, - { name = "groq" }, { name = "httpx", extra = ["socks"] }, { name = "langchain" }, { name = "langchain-anthropic" }, @@ -2440,24 +2439,23 @@ dev = [ requires-dist = [ { name = "ai-prompter", specifier = ">=0.3" }, { name = "content-core", specifier = ">=1.0.2" }, - { name = "esperanto", specifier = ">=2.8.3" }, + { name = "esperanto", specifier = ">=2.13" }, { name = "fastapi", specifier = ">=0.104.0" }, - { name = "groq", specifier = ">=0.12.0" }, { name = "httpx", extras = ["socks"], specifier = ">=0.27.0" }, { name = "ipykernel", marker = "extra == 'dev'", specifier = ">=6.29.5" }, { name = "ipywidgets", marker = "extra == 'dev'", specifier = ">=8.1.5" }, - { name = "langchain", specifier = ">=0.3.3" }, - { name = "langchain-anthropic", specifier = ">=0.2.3" }, - { name = "langchain-community", specifier = ">=0.3.3" }, - { name = "langchain-deepseek", specifier = ">=0.1.3" }, - { name = "langchain-google-genai", specifier = ">=2.1.10" }, - { name = "langchain-google-vertexai", specifier = ">=2.0.28" }, - { name = "langchain-groq", specifier = ">=0.2.1" }, - { name = "langchain-mistralai", specifier = ">=0.2.1" }, - { name = "langchain-ollama", specifier = ">=0.2.0" }, - { name = "langchain-openai", specifier = ">=0.2.3" }, - { name = "langgraph", specifier = ">=0.2.38" }, - { name = "langgraph-checkpoint-sqlite", specifier = ">=2.0.0" }, + { name = "langchain", specifier = ">=1.2.0" }, + { name = "langchain-anthropic", specifier = ">=1.3.0" }, + { name = "langchain-community", specifier = ">=0.4.1" }, + { name = "langchain-deepseek", specifier = ">=1.0.0" }, + { name = "langchain-google-genai", specifier = ">=4.1.2" }, + { name = "langchain-google-vertexai", specifier = ">=3.2.0" }, + { name = "langchain-groq", specifier = ">=1.1.1" }, + { name = "langchain-mistralai", specifier = ">=1.1.1" }, + { name = "langchain-ollama", specifier = ">=1.0.1" }, + { name = "langchain-openai", specifier = ">=1.1.6" }, + { name = "langgraph", specifier = ">=1.0.5" }, + { name = "langgraph-checkpoint-sqlite", specifier = ">=3.0.1" }, { name = "loguru", specifier = ">=0.7.2" }, { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.11.1" }, { name = "podcast-creator", specifier = ">=0.7.0" }, @@ -2468,7 +2466,7 @@ requires-dist = [ { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.5.5" }, { name = "surreal-commands", specifier = ">=1.2.0" }, { name = "surrealdb", specifier = ">=1.0.4" }, - { name = "tiktoken", specifier = ">=0.8.0" }, + { name = "tiktoken", specifier = ">=0.12.0" }, { name = "tomli", specifier = ">=2.0.2" }, { name = "types-requests", marker = "extra == 'dev'", specifier = ">=2.32.0.20241016" }, { name = "uvicorn", specifier = ">=0.24.0" }, @@ -2484,7 +2482,7 @@ dev = [ [[package]] name = "openai" -version = "2.12.0" +version = "2.14.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2496,9 +2494,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/86/f9/fb8abeb4cdba6f24daf3d7781f42ceb1be1ff579eb20705899e617dd95f1/openai-2.12.0.tar.gz", hash = "sha256:cc6dcbcb8bccf05976d983f6516c5c1f447b71c747720f1530b61e8f858bcbc9", size = 626183, upload-time = "2025-12-15T16:17:15.097Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/d8/b1/12fe1c196bea326261718eb037307c1c1fe1dedc2d2d4de777df822e6238/openai-2.14.0.tar.gz", hash = "sha256:419357bedde9402d23bf8f2ee372fca1985a73348debba94bddff06f19459952", size = 626938, upload-time = "2025-12-19T03:28:45.742Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c3/a1/f055214448cb4b176e89459d889af9615fe7d927634fb5a2cecfb7674bc5/openai-2.12.0-py3-none-any.whl", hash = "sha256:7177998ce49ba3f90bcce8b5769a6666d90b1f328f0518d913aaec701271485a", size = 1066590, upload-time = "2025-12-15T16:17:13.301Z" }, + { url = "https://files.pythonhosted.org/packages/27/4b/7c1a00c2c3fbd004253937f7520f692a9650767aa73894d7a34f0d65d3f4/openai-2.14.0-py3-none-any.whl", hash = "sha256:7ea40aca4ffc4c4a776e77679021b47eec1160e341f42ae086ba949c9dcc9183", size = 1067558, upload-time = "2025-12-19T03:28:43.727Z" }, ] [[package]] @@ -2833,7 +2831,7 @@ wheels = [ [[package]] name = "pre-commit" -version = "4.5.0" +version = "4.5.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cfgv" }, @@ -2842,9 +2840,9 @@ dependencies = [ { name = "pyyaml" }, { name = "virtualenv" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f4/9b/6a4ffb4ed980519da959e1cf3122fc6cb41211daa58dbae1c73c0e519a37/pre_commit-4.5.0.tar.gz", hash = "sha256:dc5a065e932b19fc1d4c653c6939068fe54325af8e741e74e88db4d28a4dd66b", size = 198428, upload-time = "2025-11-22T21:02:42.304Z" } +sdist = { url = "https://files.pythonhosted.org/packages/40/f1/6d86a29246dfd2e9b6237f0b5823717f60cad94d47ddc26afa916d21f525/pre_commit-4.5.1.tar.gz", hash = "sha256:eb545fcff725875197837263e977ea257a402056661f09dae08e4b149b030a61", size = 198232, upload-time = "2025-12-16T21:14:33.552Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5d/c4/b2d28e9d2edf4f1713eb3c29307f1a63f3d67cf09bdda29715a36a68921a/pre_commit-4.5.0-py2.py3-none-any.whl", hash = "sha256:25e2ce09595174d9c97860a95609f9f852c0614ba602de3561e267547f2335e1", size = 226429, upload-time = "2025-11-22T21:02:40.836Z" }, + { url = "https://files.pythonhosted.org/packages/5d/19/fd3ef348460c80af7bb4669ea7926651d1f95c23ff2df18b9d24bab4f3fa/pre_commit-4.5.1-py2.py3-none-any.whl", hash = "sha256:3b3afd891e97337708c1674210f8eba659b52a38ea5f822ff142d10786221f77", size = 226437, upload-time = "2025-12-16T21:14:32.409Z" }, ] [[package]] @@ -2921,14 +2919,14 @@ wheels = [ [[package]] name = "proto-plus" -version = "1.26.1" +version = "1.27.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "protobuf" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/f4/ac/87285f15f7cce6d4a008f33f1757fb5a13611ea8914eb58c3d0d26243468/proto_plus-1.26.1.tar.gz", hash = "sha256:21a515a4c4c0088a773899e23c7bbade3d18f9c66c73edd4c7ee3816bc96a012", size = 56142, upload-time = "2025-03-10T15:54:38.843Z" } +sdist = { url = "https://files.pythonhosted.org/packages/01/89/9cbe2f4bba860e149108b683bc2efec21f14d5f7ed6e25562ad86acbc373/proto_plus-1.27.0.tar.gz", hash = "sha256:873af56dd0d7e91836aee871e5799e1c6f1bda86ac9a983e0bb9f0c266a568c4", size = 56158, upload-time = "2025-12-16T13:46:25.729Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/4e/6d/280c4c2ce28b1593a19ad5239c8b826871fc6ec275c21afc8e1820108039/proto_plus-1.26.1-py3-none-any.whl", hash = "sha256:13285478c2dcf2abb829db158e1047e2f1e8d63a077d94263c2b88b043c75a66", size = 50163, upload-time = "2025-03-10T15:54:37.335Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/24/3b7a0818484df9c28172857af32c2397b6d8fcd99d9468bd4684f98ebf0a/proto_plus-1.27.0-py3-none-any.whl", hash = "sha256:1baa7f81cf0f8acb8bc1f6d085008ba4171eaf669629d1b6d1673b21ed1c0a82", size = 50205, upload-time = "2025-12-16T13:46:24.76Z" }, ] [[package]] @@ -2948,16 +2946,18 @@ wheels = [ [[package]] name = "psutil" -version = "7.1.3" +version = "7.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e1/88/bdd0a41e5857d5d703287598cbf08dad90aed56774ea52ae071bae9071b6/psutil-7.1.3.tar.gz", hash = "sha256:6c86281738d77335af7aec228328e944b30930899ea760ecf33a4dba66be5e74", size = 489059, upload-time = "2025-11-02T12:25:54.619Z" } +sdist = { url = "https://files.pythonhosted.org/packages/73/cb/09e5184fb5fc0358d110fc3ca7f6b1d033800734d34cac10f4136cfac10e/psutil-7.2.1.tar.gz", hash = "sha256:f7583aec590485b43ca601dd9cea0dcd65bd7bb21d30ef4ddbf4ea6b5ed1bdd3", size = 490253, upload-time = "2025-12-29T08:26:00.169Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/94/46b9154a800253e7ecff5aaacdf8ebf43db99de4a2dfa18575b02548654e/psutil-7.1.3-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:2bdbcd0e58ca14996a42adf3621a6244f1bb2e2e528886959c72cf1e326677ab", size = 238359, upload-time = "2025-11-02T12:26:25.284Z" }, - { url = "https://files.pythonhosted.org/packages/68/3a/9f93cff5c025029a36d9a92fef47220ab4692ee7f2be0fba9f92813d0cb8/psutil-7.1.3-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31fa00f1fbc3c3802141eede66f3a2d51d89716a194bf2cd6fc68310a19880", size = 239171, upload-time = "2025-11-02T12:26:27.23Z" }, - { url = "https://files.pythonhosted.org/packages/ce/b1/5f49af514f76431ba4eea935b8ad3725cdeb397e9245ab919dbc1d1dc20f/psutil-7.1.3-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3bb428f9f05c1225a558f53e30ccbad9930b11c3fc206836242de1091d3e7dd3", size = 263261, upload-time = "2025-11-02T12:26:29.48Z" }, - { url = "https://files.pythonhosted.org/packages/e0/95/992c8816a74016eb095e73585d747e0a8ea21a061ed3689474fabb29a395/psutil-7.1.3-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d974e02ca2c8eb4812c3f76c30e28836fffc311d55d979f1465c1feeb2b68b", size = 264635, upload-time = "2025-11-02T12:26:31.74Z" }, - { url = "https://files.pythonhosted.org/packages/55/4c/c3ed1a622b6ae2fd3c945a366e64eb35247a31e4db16cf5095e269e8eb3c/psutil-7.1.3-cp37-abi3-win_amd64.whl", hash = "sha256:f39c2c19fe824b47484b96f9692932248a54c43799a84282cfe58d05a6449efd", size = 247633, upload-time = "2025-11-02T12:26:33.887Z" }, - { url = "https://files.pythonhosted.org/packages/c9/ad/33b2ccec09bf96c2b2ef3f9a6f66baac8253d7565d8839e024a6b905d45d/psutil-7.1.3-cp37-abi3-win_arm64.whl", hash = "sha256:bd0d69cee829226a761e92f28140bec9a5ee9d5b4fb4b0cc589068dbfff559b1", size = 244608, upload-time = "2025-11-02T12:26:36.136Z" }, + { url = "https://files.pythonhosted.org/packages/c5/cf/5180eb8c8bdf6a503c6919f1da28328bd1e6b3b1b5b9d5b01ae64f019616/psutil-7.2.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2e953fcfaedcfbc952b44744f22d16575d3aa78eb4f51ae74165b4e96e55f42", size = 128137, upload-time = "2025-12-29T08:26:27.759Z" }, + { url = "https://files.pythonhosted.org/packages/c5/2c/78e4a789306a92ade5000da4f5de3255202c534acdadc3aac7b5458fadef/psutil-7.2.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:05cc68dbb8c174828624062e73078e7e35406f4ca2d0866c272c2410d8ef06d1", size = 128947, upload-time = 
"2025-12-29T08:26:29.548Z" }, + { url = "https://files.pythonhosted.org/packages/29/f8/40e01c350ad9a2b3cb4e6adbcc8a83b17ee50dd5792102b6142385937db5/psutil-7.2.1-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e38404ca2bb30ed7267a46c02f06ff842e92da3bb8c5bfdadbd35a5722314d8", size = 154694, upload-time = "2025-12-29T08:26:32.147Z" }, + { url = "https://files.pythonhosted.org/packages/06/e4/b751cdf839c011a9714a783f120e6a86b7494eb70044d7d81a25a5cd295f/psutil-7.2.1-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab2b98c9fc19f13f59628d94df5cc4cc4844bc572467d113a8b517d634e362c6", size = 156136, upload-time = "2025-12-29T08:26:34.079Z" }, + { url = "https://files.pythonhosted.org/packages/44/ad/bbf6595a8134ee1e94a4487af3f132cef7fce43aef4a93b49912a48c3af7/psutil-7.2.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f78baafb38436d5a128f837fab2d92c276dfb48af01a240b861ae02b2413ada8", size = 148108, upload-time = "2025-12-29T08:26:36.225Z" }, + { url = "https://files.pythonhosted.org/packages/1c/15/dd6fd869753ce82ff64dcbc18356093471a5a5adf4f77ed1f805d473d859/psutil-7.2.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:99a4cd17a5fdd1f3d014396502daa70b5ec21bf4ffe38393e152f8e449757d67", size = 147402, upload-time = "2025-12-29T08:26:39.21Z" }, + { url = "https://files.pythonhosted.org/packages/34/68/d9317542e3f2b180c4306e3f45d3c922d7e86d8ce39f941bb9e2e9d8599e/psutil-7.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:b1b0671619343aa71c20ff9767eced0483e4fc9e1f489d50923738caf6a03c17", size = 136938, upload-time = "2025-12-29T08:26:41.036Z" }, + { url = "https://files.pythonhosted.org/packages/3e/73/2ce007f4198c80fcf2cb24c169884f833fe93fbc03d55d302627b094ee91/psutil-7.2.1-cp37-abi3-win_arm64.whl", hash = "sha256:0d67c1822c355aa6f7314d92018fb4268a76668a536f133599b91edd48759442", size = 133836, upload-time = "2025-12-29T08:26:43.086Z" }, ] [[package]] @@ -3162,7 +3162,7 @@ wheels = [ [[package]] name = "pydocket" -version = "0.15.5" +version = "0.16.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cloudpickle" }, @@ -3178,9 +3178,9 @@ dependencies = [ { name = "typer" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/86/77/842e41be3cf3592b971cf42b24cae76e282294f474dc2dbf7cd6808d1b09/pydocket-0.15.5.tar.gz", hash = "sha256:b3af47702a293dd1da2e5e0f8f73f27fd3b3c95e36de72a2f71026d16908d5ba", size = 277245, upload-time = "2025-12-12T22:28:47.32Z" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/c5/61dcfce4d50b66a3f09743294d37fab598b81bb0975054b7f732da9243ec/pydocket-0.16.3.tar.gz", hash = "sha256:78e9da576de09e9f3f410d2471ef1c679b7741ddd21b586c97a13872b69bd265", size = 297080, upload-time = "2025-12-23T23:37:33.32Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e5/c0/fdbc6e04e3369b90c6bf6567bc62871cf59e88550b94529821500dc807c1/pydocket-0.15.5-py3-none-any.whl", hash = "sha256:ad0d86c9a1bea394e875bcf8c793be2d0a7ebd1891bfe99e2e9eaf99ef0cb42e", size = 58517, upload-time = "2025-12-12T22:28:45.598Z" }, + { url = "https://files.pythonhosted.org/packages/2c/94/93b7f5981aa04f922e0d9ce7326a4587866ec7e39f7c180ffcf408e66ee8/pydocket-0.16.3-py3-none-any.whl", hash = "sha256:e2b50925356e7cd535286255195458ac7bba15f25293356651b36d223db5dd7c", size = 67087, upload-time = "2025-12-23T23:37:31.829Z" }, ] [[package]] @@ -3313,11 +3313,11 @@ wheels = [ [[package]] name = "python-multipart" -version = "0.0.20" +version = 
"0.0.21" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +sdist = { url = "https://files.pythonhosted.org/packages/78/96/804520d0850c7db98e5ccb70282e29208723f0964e88ffd9d0da2f52ea09/python_multipart-0.0.21.tar.gz", hash = "sha256:7137ebd4d3bbf70ea1622998f902b97a29434a9e8dc40eb203bbcf7c2a2cba92", size = 37196, upload-time = "2025-12-17T09:24:22.446Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, + { url = "https://files.pythonhosted.org/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, ] [[package]] @@ -3635,28 +3635,28 @@ wheels = [ [[package]] name = "ruff" -version = "0.14.9" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f6/1b/ab712a9d5044435be8e9a2beb17cbfa4c241aa9b5e4413febac2a8b79ef2/ruff-0.14.9.tar.gz", hash = "sha256:35f85b25dd586381c0cc053f48826109384c81c00ad7ef1bd977bfcc28119d5b", size = 5809165, upload-time = "2025-12-11T21:39:47.381Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/b8/1c/d1b1bba22cffec02351c78ab9ed4f7d7391876e12720298448b29b7229c1/ruff-0.14.9-py3-none-linux_armv6l.whl", hash = "sha256:f1ec5de1ce150ca6e43691f4a9ef5c04574ad9ca35c8b3b0e18877314aba7e75", size = 13576541, upload-time = "2025-12-11T21:39:14.806Z" }, - { url = "https://files.pythonhosted.org/packages/94/ab/ffe580e6ea1fca67f6337b0af59fc7e683344a43642d2d55d251ff83ceae/ruff-0.14.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ed9d7417a299fc6030b4f26333bf1117ed82a61ea91238558c0268c14e00d0c2", size = 13779363, upload-time = "2025-12-11T21:39:20.29Z" }, - { url = "https://files.pythonhosted.org/packages/7d/f8/2be49047f929d6965401855461e697ab185e1a6a683d914c5c19c7962d9e/ruff-0.14.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d5dc3473c3f0e4a1008d0ef1d75cee24a48e254c8bed3a7afdd2b4392657ed2c", size = 12925292, upload-time = "2025-12-11T21:39:38.757Z" }, - { url = "https://files.pythonhosted.org/packages/9e/e9/08840ff5127916bb989c86f18924fd568938b06f58b60e206176f327c0fe/ruff-0.14.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:84bf7c698fc8f3cb8278830fb6b5a47f9bcc1ed8cb4f689b9dd02698fa840697", size = 13362894, upload-time = "2025-12-11T21:39:02.524Z" }, - { url = "https://files.pythonhosted.org/packages/31/1c/5b4e8e7750613ef43390bb58658eaf1d862c0cc3352d139cd718a2cea164/ruff-0.14.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa733093d1f9d88a5d98988d8834ef5d6f9828d03743bf5e338bf980a19fce27", size = 13311482, upload-time = "2025-12-11T21:39:17.51Z" }, - { url = "https://files.pythonhosted.org/packages/5b/3a/459dce7a8cb35ba1ea3e9c88f19077667a7977234f3b5ab197fad240b404/ruff-0.14.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a1cfb04eda979b20c8c19550c8b5f498df64ff8da151283311ce3199e8b3648", 
size = 14016100, upload-time = "2025-12-11T21:39:41.948Z" }, - { url = "https://files.pythonhosted.org/packages/a6/31/f064f4ec32524f9956a0890fc6a944e5cf06c63c554e39957d208c0ffc45/ruff-0.14.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:1e5cb521e5ccf0008bd74d5595a4580313844a42b9103b7388eca5a12c970743", size = 15477729, upload-time = "2025-12-11T21:39:23.279Z" }, - { url = "https://files.pythonhosted.org/packages/7a/6d/f364252aad36ccd443494bc5f02e41bf677f964b58902a17c0b16c53d890/ruff-0.14.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd429a8926be6bba4befa8cdcf3f4dd2591c413ea5066b1e99155ed245ae42bb", size = 15122386, upload-time = "2025-12-11T21:39:33.125Z" }, - { url = "https://files.pythonhosted.org/packages/20/02/e848787912d16209aba2799a4d5a1775660b6a3d0ab3944a4ccc13e64a02/ruff-0.14.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab208c1b7a492e37caeaf290b1378148f75e13c2225af5d44628b95fd7834273", size = 14497124, upload-time = "2025-12-11T21:38:59.33Z" }, - { url = "https://files.pythonhosted.org/packages/f3/51/0489a6a5595b7760b5dbac0dd82852b510326e7d88d51dbffcd2e07e3ff3/ruff-0.14.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72034534e5b11e8a593f517b2f2f2b273eb68a30978c6a2d40473ad0aaa4cb4a", size = 14195343, upload-time = "2025-12-11T21:39:44.866Z" }, - { url = "https://files.pythonhosted.org/packages/f6/53/3bb8d2fa73e4c2f80acc65213ee0830fa0c49c6479313f7a68a00f39e208/ruff-0.14.9-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:712ff04f44663f1b90a1195f51525836e3413c8a773574a7b7775554269c30ed", size = 14346425, upload-time = "2025-12-11T21:39:05.927Z" }, - { url = "https://files.pythonhosted.org/packages/ad/04/bdb1d0ab876372da3e983896481760867fc84f969c5c09d428e8f01b557f/ruff-0.14.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:a111fee1db6f1d5d5810245295527cda1d367c5aa8f42e0fca9a78ede9b4498b", size = 13258768, upload-time = "2025-12-11T21:39:08.691Z" }, - { url = "https://files.pythonhosted.org/packages/40/d9/8bf8e1e41a311afd2abc8ad12be1b6c6c8b925506d9069b67bb5e9a04af3/ruff-0.14.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8769efc71558fecc25eb295ddec7d1030d41a51e9dcf127cbd63ec517f22d567", size = 13326939, upload-time = "2025-12-11T21:39:53.842Z" }, - { url = "https://files.pythonhosted.org/packages/f4/56/a213fa9edb6dd849f1cfbc236206ead10913693c72a67fb7ddc1833bf95d/ruff-0.14.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:347e3bf16197e8a2de17940cd75fd6491e25c0aa7edf7d61aa03f146a1aa885a", size = 13578888, upload-time = "2025-12-11T21:39:35.988Z" }, - { url = "https://files.pythonhosted.org/packages/33/09/6a4a67ffa4abae6bf44c972a4521337ffce9cbc7808faadede754ef7a79c/ruff-0.14.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7715d14e5bccf5b660f54516558aa94781d3eb0838f8e706fb60e3ff6eff03a8", size = 14314473, upload-time = "2025-12-11T21:39:50.78Z" }, - { url = "https://files.pythonhosted.org/packages/12/0d/15cc82da5d83f27a3c6b04f3a232d61bc8c50d38a6cd8da79228e5f8b8d6/ruff-0.14.9-py3-none-win32.whl", hash = "sha256:df0937f30aaabe83da172adaf8937003ff28172f59ca9f17883b4213783df197", size = 13202651, upload-time = "2025-12-11T21:39:26.628Z" }, - { url = "https://files.pythonhosted.org/packages/32/f7/c78b060388eefe0304d9d42e68fab8cffd049128ec466456cef9b8d4f06f/ruff-0.14.9-py3-none-win_amd64.whl", hash = "sha256:c0b53a10e61df15a42ed711ec0bda0c582039cf6c754c49c020084c55b5b0bc2", size = 14702079, upload-time = "2025-12-11T21:39:11.954Z" }, - { url = 
"https://files.pythonhosted.org/packages/26/09/7a9520315decd2334afa65ed258fed438f070e31f05a2e43dd480a5e5911/ruff-0.14.9-py3-none-win_arm64.whl", hash = "sha256:8e821c366517a074046d92f0e9213ed1c13dbc5b37a7fc20b07f79b64d62cc84", size = 13744730, upload-time = "2025-12-11T21:39:29.659Z" }, +version = "0.14.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/08/52232a877978dd8f9cf2aeddce3e611b40a63287dfca29b6b8da791f5e8d/ruff-0.14.10.tar.gz", hash = "sha256:9a2e830f075d1a42cd28420d7809ace390832a490ed0966fe373ba288e77aaf4", size = 5859763, upload-time = "2025-12-18T19:28:57.98Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/01/933704d69f3f05ee16ef11406b78881733c186fe14b6a46b05cfcaf6d3b2/ruff-0.14.10-py3-none-linux_armv6l.whl", hash = "sha256:7a3ce585f2ade3e1f29ec1b92df13e3da262178df8c8bdf876f48fa0e8316c49", size = 13527080, upload-time = "2025-12-18T19:29:25.642Z" }, + { url = "https://files.pythonhosted.org/packages/df/58/a0349197a7dfa603ffb7f5b0470391efa79ddc327c1e29c4851e85b09cc5/ruff-0.14.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:674f9be9372907f7257c51f1d4fc902cb7cf014b9980152b802794317941f08f", size = 13797320, upload-time = "2025-12-18T19:29:02.571Z" }, + { url = "https://files.pythonhosted.org/packages/7b/82/36be59f00a6082e38c23536df4e71cdbc6af8d7c707eade97fcad5c98235/ruff-0.14.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d85713d522348837ef9df8efca33ccb8bd6fcfc86a2cde3ccb4bc9d28a18003d", size = 12918434, upload-time = "2025-12-18T19:28:51.202Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/45c62a7f7e34da92a25804f813ebe05c88aa9e0c25e5cb5a7d23dd7450e3/ruff-0.14.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6987ebe0501ae4f4308d7d24e2d0fe3d7a98430f5adfd0f1fead050a740a3a77", size = 13371961, upload-time = "2025-12-18T19:29:04.991Z" }, + { url = "https://files.pythonhosted.org/packages/40/31/a5906d60f0405f7e57045a70f2d57084a93ca7425f22e1d66904769d1628/ruff-0.14.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16a01dfb7b9e4eee556fbfd5392806b1b8550c9b4a9f6acd3dbe6812b193c70a", size = 13275629, upload-time = "2025-12-18T19:29:21.381Z" }, + { url = "https://files.pythonhosted.org/packages/3e/60/61c0087df21894cf9d928dc04bcd4fb10e8b2e8dca7b1a276ba2155b2002/ruff-0.14.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7165d31a925b7a294465fa81be8c12a0e9b60fb02bf177e79067c867e71f8b1f", size = 14029234, upload-time = "2025-12-18T19:29:00.132Z" }, + { url = "https://files.pythonhosted.org/packages/44/84/77d911bee3b92348b6e5dab5a0c898d87084ea03ac5dc708f46d88407def/ruff-0.14.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c561695675b972effb0c0a45db233f2c816ff3da8dcfbe7dfc7eed625f218935", size = 15449890, upload-time = "2025-12-18T19:28:53.573Z" }, + { url = "https://files.pythonhosted.org/packages/e9/36/480206eaefa24a7ec321582dda580443a8f0671fdbf6b1c80e9c3e93a16a/ruff-0.14.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bb98fcbbc61725968893682fd4df8966a34611239c9fd07a1f6a07e7103d08e", size = 15123172, upload-time = "2025-12-18T19:29:23.453Z" }, + { url = "https://files.pythonhosted.org/packages/5c/38/68e414156015ba80cef5473d57919d27dfb62ec804b96180bafdeaf0e090/ruff-0.14.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f24b47993a9d8cb858429e97bdf8544c78029f09b520af615c1d261bf827001d", size = 14460260, upload-time = 
"2025-12-18T19:29:27.808Z" }, + { url = "https://files.pythonhosted.org/packages/b3/19/9e050c0dca8aba824d67cc0db69fb459c28d8cd3f6855b1405b3f29cc91d/ruff-0.14.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59aabd2e2c4fd614d2862e7939c34a532c04f1084476d6833dddef4afab87e9f", size = 14229978, upload-time = "2025-12-18T19:29:11.32Z" }, + { url = "https://files.pythonhosted.org/packages/51/eb/e8dd1dd6e05b9e695aa9dd420f4577debdd0f87a5ff2fedda33c09e9be8c/ruff-0.14.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:213db2b2e44be8625002dbea33bb9c60c66ea2c07c084a00d55732689d697a7f", size = 14338036, upload-time = "2025-12-18T19:29:09.184Z" }, + { url = "https://files.pythonhosted.org/packages/6a/12/f3e3a505db7c19303b70af370d137795fcfec136d670d5de5391e295c134/ruff-0.14.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b914c40ab64865a17a9a5b67911d14df72346a634527240039eb3bd650e5979d", size = 13264051, upload-time = "2025-12-18T19:29:13.431Z" }, + { url = "https://files.pythonhosted.org/packages/08/64/8c3a47eaccfef8ac20e0484e68e0772013eb85802f8a9f7603ca751eb166/ruff-0.14.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1484983559f026788e3a5c07c81ef7d1e97c1c78ed03041a18f75df104c45405", size = 13283998, upload-time = "2025-12-18T19:29:06.994Z" }, + { url = "https://files.pythonhosted.org/packages/12/84/534a5506f4074e5cc0529e5cd96cfc01bb480e460c7edf5af70d2bcae55e/ruff-0.14.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c70427132db492d25f982fffc8d6c7535cc2fd2c83fc8888f05caaa248521e60", size = 13601891, upload-time = "2025-12-18T19:28:55.811Z" }, + { url = "https://files.pythonhosted.org/packages/0d/1e/14c916087d8598917dbad9b2921d340f7884824ad6e9c55de948a93b106d/ruff-0.14.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5bcf45b681e9f1ee6445d317ce1fa9d6cba9a6049542d1c3d5b5958986be8830", size = 14336660, upload-time = "2025-12-18T19:29:16.531Z" }, + { url = "https://files.pythonhosted.org/packages/f2/1c/d7b67ab43f30013b47c12b42d1acd354c195351a3f7a1d67f59e54227ede/ruff-0.14.10-py3-none-win32.whl", hash = "sha256:104c49fc7ab73f3f3a758039adea978869a918f31b73280db175b43a2d9b51d6", size = 13196187, upload-time = "2025-12-18T19:29:19.006Z" }, + { url = "https://files.pythonhosted.org/packages/fb/9c/896c862e13886fae2af961bef3e6312db9ebc6adc2b156fe95e615dee8c1/ruff-0.14.10-py3-none-win_amd64.whl", hash = "sha256:466297bd73638c6bdf06485683e812db1c00c7ac96d4ddd0294a338c62fdc154", size = 14661283, upload-time = "2025-12-18T19:29:30.16Z" }, + { url = "https://files.pythonhosted.org/packages/74/31/b0e29d572670dca3674eeee78e418f20bdf97fa8aa9ea71380885e175ca0/ruff-0.14.10-py3-none-win_arm64.whl", hash = "sha256:e51d046cf6dda98a4633b8a8a771451107413b0f07183b2bef03f075599e44e6", size = 13729839, upload-time = "2025-12-18T19:28:48.636Z" }, ] [[package]] @@ -3746,11 +3746,11 @@ wheels = [ [[package]] name = "soupsieve" -version = "2.8" +version = "2.8.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +sdist = { url = "https://files.pythonhosted.org/packages/89/23/adf3796d740536d63a6fbda113d07e60c734b6ed5d3058d1e47fc0495e47/soupsieve-2.8.1.tar.gz", hash = "sha256:4cf733bc50fa805f5df4b8ef4740fc0e0fa6218cf3006269afd3f9d6d80fd350", size = 117856, upload-time = "2025-12-18T13:50:34.655Z" } wheels 
= [ - { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, + { url = "https://files.pythonhosted.org/packages/48/f3/b67d6ea49ca9154453b6d70b34ea22f3996b9fa55da105a79d8732227adc/soupsieve-2.8.1-py3-none-any.whl", hash = "sha256:a11fe2a6f3d76ab3cf2de04eb339c1be5b506a8a47f2ceb6d139803177f85434", size = 36710, upload-time = "2025-12-18T13:50:33.267Z" }, ] [[package]] @@ -3793,15 +3793,15 @@ wheels = [ [[package]] name = "sse-starlette" -version = "3.0.4" +version = "3.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, { name = "starlette" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/17/8b/54651ad49bce99a50fd61a7f19c2b6a79fbb072e693101fbb1194c362054/sse_starlette-3.0.4.tar.gz", hash = "sha256:5e34286862e96ead0eb70f5ddd0bd21ab1f6473a8f44419dd267f431611383dd", size = 22576, upload-time = "2025-12-14T16:22:52.493Z" } +sdist = { url = "https://files.pythonhosted.org/packages/da/34/f5df66cb383efdbf4f2db23cabb27f51b1dcb737efaf8a558f6f1d195134/sse_starlette-3.1.2.tar.gz", hash = "sha256:55eff034207a83a0eb86de9a68099bd0157838f0b8b999a1b742005c71e33618", size = 26303, upload-time = "2025-12-31T08:02:20.023Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/71/22/8ab1066358601163e1ac732837adba3672f703818f693e179b24e0d3b65c/sse_starlette-3.0.4-py3-none-any.whl", hash = "sha256:32c80ef0d04506ced4b0b6ab8fe300925edc37d26f666afb1874c754895f5dc3", size = 11764, upload-time = "2025-12-14T16:22:51.453Z" }, + { url = "https://files.pythonhosted.org/packages/b7/95/8c4b76eec9ae574474e5d2997557cebf764bcd3586458956c30631ae08f4/sse_starlette-3.1.2-py3-none-any.whl", hash = "sha256:cd800dd349f4521b317b9391d3796fa97b71748a4da9b9e00aafab32dda375c8", size = 12484, upload-time = "2025-12-31T08:02:18.894Z" }, ] [[package]] @@ -3998,7 +3998,7 @@ wheels = [ [[package]] name = "typer" -version = "0.20.0" +version = "0.21.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, @@ -4006,22 +4006,22 @@ dependencies = [ { name = "shellingham" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8f/28/7c85c8032b91dbe79725b6f17d2fffc595dff06a35c7a30a37bef73a1ab4/typer-0.20.0.tar.gz", hash = "sha256:1aaf6494031793e4876fb0bacfa6a912b551cf43c1e63c800df8b1a866720c37", size = 106492, upload-time = "2025-10-20T17:03:49.445Z" } +sdist = { url = "https://files.pythonhosted.org/packages/85/30/ff9ede605e3bd086b4dd842499814e128500621f7951ca1e5ce84bbf61b1/typer-0.21.0.tar.gz", hash = "sha256:c87c0d2b6eee3b49c5c64649ec92425492c14488096dfbc8a0c2799b2f6f9c53", size = 106781, upload-time = "2025-12-25T09:54:53.651Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/78/64/7713ffe4b5983314e9d436a90d5bd4f63b6054e2aca783a3cfc44cb95bbf/typer-0.20.0-py3-none-any.whl", hash = "sha256:5b463df6793ec1dca6213a3cf4c0f03bc6e322ac5e16e13ddd622a889489784a", size = 47028, upload-time = "2025-10-20T17:03:47.617Z" }, + { url = "https://files.pythonhosted.org/packages/e1/e4/5ebc1899d31d2b1601b32d21cfb4bba022ae6fce323d365f0448031b1660/typer-0.21.0-py3-none-any.whl", hash = "sha256:c79c01ca6b30af9fd48284058a7056ba0d3bf5cf10d0ff3d0c5b11b68c258ac6", size = 47109, upload-time = "2025-12-25T09:54:51.918Z" }, ] [[package]] name = "typer-slim" -version = "0.20.0" +version = "0.21.0" 
source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8e/45/81b94a52caed434b94da65729c03ad0fb7665fab0f7db9ee54c94e541403/typer_slim-0.20.0.tar.gz", hash = "sha256:9fc6607b3c6c20f5c33ea9590cbeb17848667c51feee27d9e314a579ab07d1a3", size = 106561, upload-time = "2025-10-20T17:03:46.642Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/3b/2f60ce16f578b1db5b8816d37d6a4d9786b33b76407fc8c13b0b86312c31/typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557", size = 106841, upload-time = "2025-12-25T09:54:55.998Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5e/dd/5cbf31f402f1cc0ab087c94d4669cfa55bd1e818688b910631e131d74e75/typer_slim-0.20.0-py3-none-any.whl", hash = "sha256:f42a9b7571a12b97dddf364745d29f12221865acef7a2680065f9bb29c7dc89d", size = 47087, upload-time = "2025-10-20T17:03:44.546Z" }, + { url = "https://files.pythonhosted.org/packages/b4/84/e97abf10e4a699194ff07fd586ec7f4cf867d9d04bead559a65f9e7aff84/typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e", size = 47174, upload-time = "2025-12-25T09:54:54.609Z" }, ] [[package]] @@ -4119,15 +4119,15 @@ wheels = [ [[package]] name = "uvicorn" -version = "0.38.0" +version = "0.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "click" }, { name = "h11" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cb/ce/f06b84e2697fef4688ca63bdb2fdf113ca0a3be33f94488f2cadb690b0cf/uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d", size = 80605, upload-time = "2025-10-18T13:46:44.63Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ee/d9/d88e73ca598f4f6ff671fb5fde8a32925c2e08a637303a1d12883c7305fa/uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02", size = 68109, upload-time = "2025-10-18T13:46:42.958Z" }, + { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, ] [[package]]