0 → 1: Content Creation [Basic]
Generate new content from scratch based on descriptions or prompts.
Examples: Generate file from description; Generate images from text prompts.
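A minimal sketch of the first example of this 0 → 1 pattern (generating a file directly from a description). The generateText helper and the prompt wording are hypothetical placeholders for whichever model backend is configured, not an API from this repository.

```js
import { writeFile } from 'node:fs/promises';

// Hypothetical model call -- stands in for the configured text-generation backend.
async function generateText(prompt) {
  // Placeholder implementation: wire this to the selected default model.
  return `// TODO: generated from prompt:\n// ${prompt}\n`;
}

// 0 -> 1 creation: turn a plain-language description directly into a new file.
async function generateFileFromDescription(description, outputPath) {
  const content = await generateText(`Create the full file contents for: ${description}`);
  await writeFile(outputPath, content, 'utf8');
  return outputPath;
}
```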
diff --git a/desktop/src/main/resources/welcome/modules/app-state.js b/desktop/src/main/resources/welcome/modules/app-state.js
index 0a2b4c948..03c33c68d 100644
--- a/desktop/src/main/resources/welcome/modules/app-state.js
+++ b/desktop/src/main/resources/welcome/modules/app-state.js
@@ -35,6 +35,7 @@ class AppState {
     return {
       defaultModel: 'GPT4o',
       parsingModel: 'GPT4oMini',
+      imageModel: '',
       workingDir: '',
       temperature: 0.3,
       autoFix: false,
diff --git a/desktop/src/main/resources/welcome/modules/models.js b/desktop/src/main/resources/welcome/modules/models.js
index c91d838ac..8317babc9 100644
--- a/desktop/src/main/resources/welcome/modules/models.js
+++ b/desktop/src/main/resources/welcome/modules/models.js
@@ -8,38 +8,13 @@ class ModelManager {
   }
 
-  populateModelSelections() {
-    console.log('[populateModelSelections] Called');
-
-    const modelSelect = this.document.getElementById('model-selection');
-    const parsingModelSelect = this.document.getElementById('parsing-model');
-
-    if (!modelSelect || !parsingModelSelect) {
-      console.warn('[populateModelSelections] Model select elements not found.');
-      return;
-    }
-    // Ensure we have appState and availableModels
-    const currentModels = this.getAvailableModels();
-    if (!this.appState || !currentModels) {
-      console.warn('[populateModelSelections] Missing required dependencies.');
-      return;
-    }
-
-    const savedDefaultModel = this.appState.taskSettings.defaultModel;
-    const savedParsingModel = this.appState.taskSettings.parsingModel;
-
-    this.clearModelSelections(modelSelect, parsingModelSelect);
-    this.addAvailableModels(modelSelect, parsingModelSelect);
-    this.setSelectedModels(modelSelect, parsingModelSelect, savedDefaultModel, savedParsingModel);
-  }
-
-  clearModelSelections(modelSelect, parsingModelSelect) {
+  clearModelSelections(modelSelect, parsingModelSelect, imageModelSelect) {
     modelSelect.innerHTML = '';
     parsingModelSelect.innerHTML = '';
+    imageModelSelect.innerHTML = '';
   }
 
-  addAvailableModels(modelSelect, parsingModelSelect) {
+  addAvailableModels(modelSelect, parsingModelSelect, imageModelSelect) {
     const addedModels = new Set();
     const currentModels = this.getAvailableModels();
@@ -49,7 +24,7 @@ class ModelManager {
       if (key && currentModels[provider]) {
         currentModels[provider].forEach(model => {
           if (!addedModels.has(model.id)) {
-            this.addModelOption(modelSelect, parsingModelSelect, model, provider);
+            this.addModelOption(modelSelect, parsingModelSelect, imageModelSelect, model, provider);
             addedModels.add(model.id);
           }
         });
@@ -58,11 +33,40 @@ class ModelManager {
     }
 
     if (modelSelect.options.length === 0) {
-      this.addDefaultOptions(modelSelect, parsingModelSelect);
+      this.addDefaultOptions(modelSelect, parsingModelSelect, imageModelSelect);
+    }
+  }
+
+  populateModelSelections() {
+    console.log('[populateModelSelections] Called');
+
+    const modelSelect = this.document.getElementById('model-selection');
+    const parsingModelSelect = this.document.getElementById('parsing-model');
+    const imageModelSelect = this.document.getElementById('image-model');
+
+    if (!modelSelect || !parsingModelSelect || !imageModelSelect) {
+      console.warn('[populateModelSelections] Model select elements not found.');
+      return;
+    }
+    // Ensure we have appState and availableModels
+    const currentModels = this.getAvailableModels();
+    if (!this.appState || !currentModels) {
+      console.warn('[populateModelSelections] Missing required dependencies.');
+      return;
     }
+
+    const savedDefaultModel = this.appState.taskSettings.defaultModel;
+    const savedParsingModel = this.appState.taskSettings.parsingModel;
+    const savedImageModel = this.appState.taskSettings.imageModel;
+
+    this.clearModelSelections(modelSelect, parsingModelSelect, imageModelSelect);
+    this.addAvailableModels(modelSelect, parsingModelSelect, imageModelSelect);
+    this.setSelectedModels(modelSelect, parsingModelSelect, imageModelSelect, savedDefaultModel, savedParsingModel, savedImageModel);
   }
 
-  addModelOption(modelSelect, parsingModelSelect, model, provider) {
+  addModelOption(modelSelect, parsingModelSelect, imageModelSelect, model, provider) {
     const option = document.createElement('option');
     option.value = model.id;
     option.textContent = `${model.name} (${provider})`;
@@ -74,9 +78,15 @@ class ModelManager {
     parsingOption.textContent = `${model.name} (${provider})`;
     parsingOption.title = model.description;
     parsingModelSelect.appendChild(parsingOption);
+
+    const imageOption = document.createElement('option');
+    imageOption.value = model.id;
+    imageOption.textContent = `${model.name} (${provider})`;
+    imageOption.title = model.description;
+    imageModelSelect.appendChild(imageOption);
   }
 
-  addDefaultOptions(modelSelect, parsingModelSelect) {
+  addDefaultOptions(modelSelect, parsingModelSelect, imageModelSelect) {
     const defaultOption = document.createElement('option');
     defaultOption.value = 'GPT4o';
     defaultOption.textContent = 'GPT-4o (OpenAI) - Configure API key';
@@ -86,9 +96,14 @@ class ModelManager {
     defaultParsingOption.value = 'GPT4oMini';
     defaultParsingOption.textContent = 'GPT-4o Mini (OpenAI) - Configure API key';
     parsingModelSelect.appendChild(defaultParsingOption);
+
+    const defaultImageOption = document.createElement('option');
+    defaultImageOption.value = 'DALL-E-3';
+    defaultImageOption.textContent = 'DALL-E 3 (OpenAI) - Configure API key';
+    imageModelSelect.appendChild(defaultImageOption);
   }
 
-  setSelectedModels(modelSelect, parsingModelSelect, savedDefaultModel, savedParsingModel) {
+  setSelectedModels(modelSelect, parsingModelSelect, imageModelSelect, savedDefaultModel, savedParsingModel, savedImageModel) {
     if (savedDefaultModel && Array.from(modelSelect.options).some(opt => opt.value === savedDefaultModel)) {
       modelSelect.value = savedDefaultModel;
     } else if (modelSelect.options.length > 0) {
@@ -102,6 +117,14 @@ class ModelManager {
       parsingModelSelect.selectedIndex = 0;
       this.appState.updateTaskSetting('parsingModel', parsingModelSelect.value);
     }
+
+    if (savedImageModel && Array.from(imageModelSelect.options).some(opt => opt.value === savedImageModel)) {
+      imageModelSelect.value = savedImageModel;
+    } else if (imageModelSelect.options.length > 0) {
+      imageModelSelect.selectedIndex = 0;
+      this.appState.updateTaskSetting('imageModel', imageModelSelect.value);
+    }
   }
 }
diff --git a/desktop/src/main/resources/welcome/modules/state.js b/desktop/src/main/resources/welcome/modules/state.js
index 0a7d98777..83124407e 100644
--- a/desktop/src/main/resources/welcome/modules/state.js
+++ b/desktop/src/main/resources/welcome/modules/state.js
@@ -12,6 +12,7 @@ class AppState {
     return {
       defaultModel: this.localStorage.getItem('defaultModel') || 'GPT4o',
       parsingModel: this.localStorage.getItem('parsingModel') || 'GPT4oMini',
+      imageModel: this.localStorage.getItem('imageModel') || '',
       workingDir: this.localStorage.getItem('workingDir') || '.',
       autoFix: this.localStorage.getItem('autoFix') === 'true',
       temperature: parseFloat(this.localStorage.getItem('temperature')) || 0.2,
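For orientation, a sketch of how the new image-model plumbing is expected to be used on the welcome page. Only the element id 'image-model' and the 'imageModel' task setting come from the diff above; the constructor arguments and the change listener are assumptions for illustration, not code from this PR.

```js
// Hedged usage sketch -- constructor signatures are assumed, not taken from this PR.
const appState = new AppState(window.localStorage);
const modelManager = new ModelManager(document, appState);

// The page is expected to provide a <select id="image-model"> alongside
// 'model-selection' and 'parsing-model'; populateModelSelections() fills all three.
modelManager.populateModelSelections();

// Persist a manual change the same way the saved default/parsing models are persisted.
document.getElementById('image-model').addEventListener('change', (e) => {
  appState.updateTaskSetting('imageModel', e.target.value);
});
```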
diff --git a/desktop/src/main/resources/welcome/modules/task-config.js b/desktop/src/main/resources/welcome/modules/task-config.js
index dfcbfcd31..893574a6c 100644
--- a/desktop/src/main/resources/welcome/modules/task-config.js
+++ b/desktop/src/main/resources/welcome/modules/task-config.js
@@ -275,7 +275,85 @@ class TaskConfigManager {
       name: 'Meta-Cognitive Reflection',
       description: 'Reflect on thinking process and strategies',
       category: 'Reasoning',
-      configFields: []
+      configFields: [
+        { id: 'construct_narrative', label: 'Construct Narrative', type: 'checkbox', default: true, tooltip: 'Construct a coherent narrative from the elements' },
+        { id: 'identify_plot_points', label: 'Identify Plot Points', type: 'checkbox', default: true, tooltip: 'Identify key plot points and story arcs' },
+        { id: 'predict_outcomes', label: 'Predict Outcomes', type: 'checkbox', default: true, tooltip: 'Predict narrative outcomes and resolutions' },
+        { id: 'alternatives', label: 'Alternative Narratives', type: 'number', min: 1, max: 10, default: 3, tooltip: 'Number of narrative paths to explore (1-10)' },
+        { id: 'analyze_motivations', label: 'Analyze Motivations', type: 'checkbox', default: true, tooltip: 'Analyze character motivations and stakeholder perspectives' },
+        { id: 'find_inconsistencies', label: 'Find Inconsistencies', type: 'checkbox', default: true, tooltip: 'Identify narrative inconsistencies or gaps' },
+        { id: 'generate_images', label: 'Generate Images', type: 'checkbox', default: false, tooltip: 'Generate images for key narrative elements' },
+        { id: 'image_model', label: 'Image Model', type: 'select', options: ['DallE3', 'DallE2'], default: 'DallE3', tooltip: 'Image generation model to use' },
+        { id: 'image_width', label: 'Image Width', type: 'number', min: 256, max: 2048, default: 1024, tooltip: 'Width of generated images in pixels (256-2048)' },
+        { id: 'image_height', label: 'Image Height', type: 'number', min: 256, max: 2048, default: 1024, tooltip: 'Height of generated images in pixels (256-2048)' }
+      ]
     },
     {
       id: 'MultiPerspectiveAnalysis',
@@ -408,20 +486,142 @@ class TaskConfigManager {
       name: 'Temporal Reasoning',
       description: 'Reason about time-dependent relationships',
       category: 'Reasoning',
-      configFields: []
+      configFields: [
+        { id: 'target_word_count', label: 'Target Word Count', type: 'number', min: 500, max: 50000, default: 5000, tooltip: 'Target word count for the complete narrative (500-50000)' },
+        { id: 'number_of_acts', label: 'Number of Acts', type: 'number', min: 1, max: 7, default: 3, tooltip: 'Number of acts in the narrative structure (typically 3 or 5)' },
+        { id: 'scenes_per_act', label: 'Scenes Per Act', type: 'number', min: 1, max: 10, default: 3, tooltip: 'Average number of scenes per act (1-10)' },
+        { id: 'writing_style', label: 'Writing Style', type: 'select', options: ['literary', 'thriller', 'technical', 'conversational', 'academic', 'journalistic'], default: 'literary', tooltip: 'Writing style for the narrative' },
+        { id: 'point_of_view', label: 'Point of View', type: 'select', options: ['first person', 'second person', 'third person limited', 'third person omniscient'], default: 'third person limited', tooltip: 'Narrative point of view' },
+        { id: 'tone', label: 'Tone', type: 'select', options: ['dramatic', 'humorous', 'suspenseful', 'reflective', 'optimistic', 'dark', 'neutral'], default: 'dramatic', tooltip: 'Overall tone of the narrative' },
+        { id: 'detailed_descriptions', label: 'Detailed Descriptions', type: 'checkbox', default: true, tooltip: 'Include detailed scene descriptions' },
+        { id: 'include_dialogue', label: 'Include Dialogue', type: 'checkbox', default: true, tooltip: 'Include character dialogue in scenes' },
+        { id: 'show_internal_thoughts', label: 'Show Internal Thoughts', type: 'checkbox', default: true, tooltip: 'Show internal character thoughts and feelings' },
+        { id: 'revision_passes', label: 'Revision Passes', type: 'number', min: 0, max: 5, default: 1, tooltip: 'Number of revision passes for each scene (0-5)' },
+        { id: 'generate_scene_images', label: 'Generate Scene Images', type: 'checkbox', default: false, tooltip: 'Generate images for each scene' },
+        { id: 'generate_cover_image', label: 'Generate Cover Image', type: 'checkbox', default: false, tooltip: 'Generate a cover image for the narrative' },
+        { id: 'image_model', label: 'Image Model', type: 'select', options: ['DallE3', 'DallE2'], default: 'DallE3', tooltip: 'Image generation model to use' },
+        { id: 'image_width', label: 'Image Width', type: 'number', min: 256, max: 2048, default: 1024, tooltip: 'Width of generated images in pixels (256-2048)' },
+        { id: 'image_height', label: 'Image Height', type: 'number', min: 256, max: 2048, default: 1024, tooltip: 'Height of generated images in pixels (256-2048)' }
+      ]
     },
     {
       id: 'NarrativeReasoning',
       name: 'Narrative Reasoning',
       description: 'Understand and analyze narrative structures',
-      category: 'Reasoning',
+      category: 'Writing',
       configFields: []
     },
     {
       id: 'NarrativeGeneration',
       name: 'Narrative Generation',
       description: 'Generate coherent narrative content',
-      category: 'Reasoning',
+      category: 'Writing',
       configFields: []
     },
     {
@@ -527,7 +727,30 @@ class TaskConfigManager {
       name: 'Sub-Planning',
       description: 'Create and execute sub-plans within a larger plan',
       category: 'Planning & Orchestration',
-      configFields: []
+      configFields: [
+        { id: 'purpose', label: 'Purpose', type: 'textarea', placeholder: 'Describe the specific purpose or use case for this sub-planning configuration', tooltip: 'Supplemental description of the purpose of this configuration', rows: 3 },
+        { id: 'cognitiveMode', label: 'Cognitive Mode', type: 'select', options: ['Waterfall', 'Adaptive', 'Parallel', 'Iterative', 'Exploratory'], default: 'Waterfall', tooltip: 'Cognitive strategy to use for sub-planning' },
+        { id: 'taskSettings', label: 'Sub-Task Configurations', type: 'subtasks', tooltip: 'Configure which task types are available within sub-plans' }
+      ]
     }
   ];
 }
@@ -600,11 +823,12 @@ class TaskConfigManager {
     });
   }
 
-// Create modal HTML
+  // Create modal HTML
   createTaskConfigModal(taskType, existingConfig) {
     const modal = this.document.createElement('div');
     modal.className = 'modal';
     modal.id = 'task-config-modal';
+    modal.dataset.taskType = taskType.id;
 
     // Default config name to task type name if not provided
     const configName = existingConfig?.name || taskType.id;
@@ -629,7 +853,7 @@ class TaskConfigManager {
       ${taskType.description}
+      Configure which task types are available within sub-plans.
+      Each task type can have its own configuration that will be used
+      when executing within a sub-plan context.
+      ${task.description}
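The new configFields entries above follow a small declarative schema (id, label, type, default, plus min/max or options). As a quick illustration of how such a declaration can be collapsed into an initial configuration object, here is a hedged sketch; defaultConfigFor is a hypothetical helper, not a function in this PR.

```js
// Hypothetical helper (not part of this PR): derive an initial config object
// from a task type's configFields declaration as defined in task-config.js.
function defaultConfigFor(taskType) {
  const config = {};
  for (const field of taskType.configFields) {
    if ('default' in field) {
      config[field.id] = field.default; // e.g. image_width -> 1024
    }
  }
  return config;
}

// Usage sketch: for the Temporal Reasoning entry above this yields something like
// { target_word_count: 5000, number_of_acts: 3, ..., image_model: 'DallE3', image_height: 1024 }.
```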
+An Interactive Guide to Content Generation Workflows
+Generate new content from scratch based on descriptions or prompts.
+Transform existing content into a different form or format.
+Combine multiple inputs to create unified, synthesized content.
+Create new content by combining and modifying existing pieces through analogy.
+Generate additions to existing content, continuing from where it left off.
+Generate complex content through multiple stages of refinement and transformation.
+Repeatedly refine content through multiple editing passes.
+Identify and correct errors using feedback loops and external validators.
+Real-world workflows that combine multiple patterns for complex tasks.
+Hierarchical generation of module specifications [Hierarchical]
+Generate code from module specifications [0→1 Creation]
+Transform code into test suites [1→1 Transformation]
+Iteratively fix code until tests pass [Error Correction]
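The final step of this workflow is the error-correction pattern described above: generate, validate with an external check, feed the errors back, and repeat. A schematic sketch under assumed helpers; generateFix and runTests are hypothetical stand-ins, not APIs from this repository.

```js
// Schematic error-correction loop (hypothetical helpers, for illustration only):
// runTests(code) runs the external validator and resolves to { passed, errors };
// generateFix(code, errors) asks a model for a revised version of the code.
async function fixUntilTestsPass(code, { maxAttempts = 5 } = {}) {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const { passed, errors } = await runTests(code);  // external validation
    if (passed) return code;                          // done: tests are green
    code = await generateFix(code, errors);           // feed errors back into generation
  }
  throw new Error('Tests still failing after maximum attempts');
}
```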