diff --git a/desktop/src/main/kotlin/com/simiacryptus/cognotik/UpdateManager.kt b/desktop/src/main/kotlin/com/simiacryptus/cognotik/UpdateManager.kt index 573368a54..074950e27 100644 --- a/desktop/src/main/kotlin/com/simiacryptus/cognotik/UpdateManager.kt +++ b/desktop/src/main/kotlin/com/simiacryptus/cognotik/UpdateManager.kt @@ -3,7 +3,7 @@ package com.simiacryptus.cognotik import com.google.gson.Gson import com.google.gson.annotations.SerializedName import com.simiacryptus.cognotik.SystemTrayManager.Companion.confirm -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent import com.simiacryptus.cognotik.util.LoggerFactory import java.awt.BorderLayout import java.awt.BorderLayout.* diff --git a/docs/image_agents.md b/docs/image_agents.md new file mode 100644 index 000000000..44e7623f1 --- /dev/null +++ b/docs/image_agents.md @@ -0,0 +1,524 @@ +# Image Agents Documentation + +## Overview + +The Cognotik framework provides two specialized agents for working with images in chat sessions: + +1. **ImageGenerationAgent** - Generates images from text prompts +2. **ImageModificationAgent** - Analyzes and modifies existing images using multimodal models + +Both agents integrate seamlessly with the chat system and support proper file management, display, and transcript recording. + +## ImageGenerationAgent + +### Purpose +Converts user text requests into optimized image generation prompts and renders the resulting images. 
+ +### Basic Usage + +```kotlin +val imageAgent = ImageGenerationAgent( + prompt = "Transform the user request into an image generation prompt", + name = "ImageGenerator", + textModel = chatModel, // Model for prompt optimization + imageModel = ImageModel.DallE3, // Image generation model + imageClient = openAIClient, // API client + temperature = 0.3, + width = 1024, + height = 1024 +) + +// Generate an image +val result: ImageAndText = imageAgent.answer( + listOf("Create a serene mountain landscape at sunset") +) + +println(result.text) // Optimized prompt used +// result.image is a BufferedImage +``` + +### Key Features + +- **Automatic Prompt Optimization**: Uses a text model to refine user requests into effective image prompts +- **Prompt Length Management**: Automatically shortens prompts that exceed model limits +- **Multiple Format Support**: Handles both URL and base64-encoded image responses + +### Configuration Parameters + +| Parameter | Type | Description | Default | +|-----------|------|-------------|---------| +| `prompt` | String | System prompt for prompt optimization | "Transform the user request..." | +| `name` | String? | Agent identifier | null | +| `textModel` | ChatInterface | Model for prompt refinement | Required | +| `imageModel` | ImageModel? | Image generation model | Required | +| `imageClient` | ImageClientInterface? | API client | Required | +| `temperature` | Double | Creativity level (0.0-1.0) | 0.3 | +| `width` | Int | Output image width | 1024 | +| `height` | Int | Output image height | 1024 | + +## ImageModificationAgent + +### Purpose +Analyzes and modifies images based on text instructions using multimodal chat models. 
+ +### Basic Usage + +```kotlin +val modificationAgent = ImageModificationAgent( + prompt = "Analyze and describe the image based on the user's request", + name = "ImageModifier", + model = multimodalChatModel, + temperature = 0.3 +) + +// Load an image +val inputImage: BufferedImage = ImageIO.read(File("input.png")) + +// Modify the image +val result: ImageAndText = modificationAgent.answer( + ImageAndText( + text = "Add a vintage filter and describe the mood", + image = inputImage + ) +) + +println(result.text) // Description of modifications +// result.image is the modified BufferedImage +``` + +### Key Features + +- **Multimodal Processing**: Sends both image and text to the model +- **Image Analysis**: Can describe, analyze, or modify images +- **Base64 Encoding**: Automatically handles image encoding for API transmission + +## Integration with ChatSocketManager + +### Setting Up Image Support in Chat + +```kotlin +class ImageChatSocketManager( + session: Session, + model: ChatInterface, + parsingModel: ChatInterface, + storage: StorageInterface?, + applicationClass: Class +) : ChatSocketManager( + session = session, + model = model, + parsingModel = parsingModel, + systemPrompt = "You are an AI assistant with image generation capabilities.", + storage = storage, + applicationClass = applicationClass +) { + + private val imageAgent = ImageGenerationAgent( + textModel = model, + imageModel = ImageModel.DallE3, + imageClient = openAIClient, + width = 1024, + height = 1024 + ) + + override fun respond( + task: SessionTask, + userMessage: String, + currentChatMessages: List, + transcriptStream: OutputStream? 
+ ): String { + // Check if user wants to generate an image + if (userMessage.contains("generate image", ignoreCase = true) || + userMessage.contains("create image", ignoreCase = true)) { + return handleImageGeneration(task, userMessage, transcriptStream) + } + + // Default text response + return super.respond(task, userMessage, currentChatMessages, transcriptStream) + } + + private fun handleImageGeneration( + task: SessionTask, + userMessage: String, + transcriptStream: OutputStream? + ): String { + val result = imageAgent.answer(listOf(userMessage)) + + // Save the image and get display link + val imageHtml = saveAndDisplayImage(task, result.image, "generated") + + // Write to transcript + transcriptStream?.write( + """ + ## Generated Image + Prompt: ${result.text} + + $imageHtml + + """.trimIndent().toByteArray() + ) + transcriptStream?.flush() + + return """ + Generated image with prompt: "${result.text}" + + $imageHtml + """.trimIndent() + } +} +``` + +### Saving and Displaying Images + +```kotlin +fun saveAndDisplayImage( + task: SessionTask, + image: BufferedImage, + prefix: String = "image" +): String { + // Generate unique filename + val filename = "${prefix}_${UUID.randomUUID()}.png" + + // Create file in session directory + val (link, file) = task.createFile(filename) + + // Save the image + file?.let { + ImageIO.write(image, "png", it) + } + + // Return HTML for display + return """ + + $prefix + + """.trimIndent() +} +``` + +### Managing Multiple Images + +```kotlin +class ImageGalleryManager(private val task: SessionTask) { + private val images = mutableListOf>() // (description, link) + + fun addImage(image: BufferedImage, description: String): String { + val (link, file) = task.createFile("image_${images.size}.png") + file?.let { ImageIO.write(image, "png", it) } + images.add(description to link) + return link + } + + fun renderGallery(): String = buildString { + append("") + } +} +``` + +## Transcript Management + +### Writing Images to Transcript 
+ +```kotlin +fun writeImageToTranscript( + transcriptStream: OutputStream?, + imageLink: String, + description: String, + prompt: String? = null +) { + val markdown = buildString { + appendLine("### Image") + if (prompt != null) { + appendLine("**Prompt:** $prompt") + appendLine() + } + appendLine("**Description:** $description") + appendLine() + appendLine("![Image]($imageLink)") + appendLine() + } + + transcriptStream?.write(markdown.toByteArray()) + transcriptStream?.flush() +} +``` + +### Filtering Transcript Links + +The `transcriptFilter()` extension function ensures links work correctly in exported transcripts: + +```kotlin +fun String.transcriptFilter() = this.let { + Regex("""(href=|src=['"])?fileIndex/[A-Za-z0-9\-_]+/""").replace(it) { matchResult -> + matchResult.groupValues[1] + } +} + +// Usage in transcript export +val transcriptContent = originalContent.transcriptFilter() +``` + +## Complete Example: Image Chat Session + +```kotlin +class ImageChatServer : ChatServer( + applicationName = "Image Chat", + path = "/imageChat" +) { + override fun newSession(session: Session): SocketManager { + return ImageEnabledChatSocketManager( + session = session, + model = ChatModel.GPT4Turbo.instance(apiKey), + parsingModel = ChatModel.GPT35Turbo.instance(apiKey), + storage = storage, + applicationClass = this::class.java + ) + } +} + +class ImageEnabledChatSocketManager( + session: Session, + model: ChatInterface, + parsingModel: ChatInterface, + storage: StorageInterface?, + applicationClass: Class +) : ChatSocketManager( + session = session, + model = model, + parsingModel = parsingModel, + systemPrompt = """ + You are an AI assistant with image generation and modification capabilities. + When users request images, you can generate or modify them. 
+ Commands: + - "generate image: [description]" - Creates a new image + - "modify image: [instructions]" - Modifies the last generated image + """.trimIndent(), + storage = storage, + applicationClass = applicationClass +) { + + private val imageGenerator = ImageGenerationAgent( + textModel = model, + imageModel = ImageModel.DallE3, + imageClient = openAIClient, + width = 1024, + height = 1024 + ) + + private val imageModifier = ImageModificationAgent( + model = model, + temperature = 0.3 + ) + + private var lastImage: BufferedImage? = null + private val galleryManager = ImageGalleryManager(newTask()) + + override fun respond( + task: SessionTask, + userMessage: String, + currentChatMessages: List, + transcriptStream: OutputStream? + ): String { + return when { + userMessage.startsWith("generate image:", ignoreCase = true) -> { + handleImageGeneration( + task, + userMessage.substringAfter(":", "").trim(), + transcriptStream + ) + } + + userMessage.startsWith("modify image:", ignoreCase = true) && lastImage != null -> { + handleImageModification( + task, + userMessage.substringAfter(":", "").trim(), + lastImage!!, + transcriptStream + ) + } + + userMessage.equals("show gallery", ignoreCase = true) -> { + galleryManager.renderGallery() + } + + else -> super.respond(task, userMessage, currentChatMessages, transcriptStream) + } + } + + private fun handleImageGeneration( + task: SessionTask, + prompt: String, + transcriptStream: OutputStream? + ): String { + task.add("Generating image...") + + val result = imageGenerator.answer(listOf(prompt)) + lastImage = result.image + + val link = galleryManager.addImage(result.image, prompt) + val imageHtml = """ +
+

Generated Image

+

Optimized Prompt: ${result.text}

+ + Generated + +
+ """.trimIndent() + + // Write to transcript + writeImageToTranscript( + transcriptStream, + link, + prompt, + result.text + ) + + task.complete(imageHtml) + return "Image generated successfully. ${result.text}" + } + + private fun handleImageModification( + task: SessionTask, + instructions: String, + inputImage: BufferedImage, + transcriptStream: OutputStream? + ): String { + task.add("Modifying image...") + + val result = imageModifier.answer( + listOf(ImageAndText(text = instructions, image = inputImage)) + ) + lastImage = result.image + + val link = galleryManager.addImage(result.image, instructions) + val imageHtml = """ +
+

Modified Image

+

Instructions: $instructions

+

Analysis: ${result.text}

+ + Modified + +
+ """.trimIndent() + + // Write to transcript + writeImageToTranscript( + transcriptStream, + link, + instructions, + result.text + ) + + task.complete(imageHtml) + return "Image modified. ${result.text}" + } +} +``` + +## Best Practices + +### 1. File Management +- Always use `task.createFile()` to ensure proper session isolation +- Use descriptive filenames with timestamps or UUIDs +- Clean up temporary files when sessions end + +### 2. Display Optimization +```kotlin +// Responsive image display +fun responsiveImageHtml(link: String, alt: String) = """ + $alt +""".trimIndent() +``` + +### 3. Transcript Integration +- Always write images to transcript with context +- Use relative links for portability +- Include both markdown and HTML formats + +### 4. Error Handling +```kotlin +fun safeImageGeneration( + agent: ImageGenerationAgent, + prompt: String, + task: SessionTask +): ImageAndText? { + return try { + agent.answer(listOf(prompt)) + } catch (e: Exception) { + task.error(e) + log.error("Image generation failed", e) + null + } +} +``` + +### 5. 
Performance Considerations +- Cache generated images when appropriate +- Use thumbnails for galleries +- Implement lazy loading for large image sets +- Consider async generation for better UX + +## Advanced Features + +### Batch Image Generation +```kotlin +fun generateImageBatch( + prompts: List, + agent: ImageGenerationAgent, + task: SessionTask +): List { + val tabs = TabbedDisplay(task) + return prompts.mapIndexed { index, prompt -> + val subTask = newTask(root = false) + tabs["Image ${index + 1}"] = subTask.placeholder + agent.answer(listOf(prompt)).also { + val link = saveAndDisplayImage(subTask, it.image, "batch_$index") + subTask.complete(link) + } + }.also { + tabs.update() + } +} +``` + +### Image Comparison View +```kotlin +fun compareImages( + original: BufferedImage, + modified: BufferedImage, + task: SessionTask +): String { + val originalLink = saveAndDisplayImage(task, original, "original") + val modifiedLink = saveAndDisplayImage(task, modified, "modified") + + return """ +
+
+

Original

+ $originalLink +
+
+

Modified

+ $modifiedLink +
+
+ """.trimIndent() +} +``` diff --git a/experiment/newssite/src/main/kotlin/com/example/news/api/NewsServiceLoader.kt b/experiment/newssite/src/main/kotlin/com/example/news/api/NewsServiceLoader.kt index cae53521f..ef42a9dd0 100644 --- a/experiment/newssite/src/main/kotlin/com/example/news/api/NewsServiceLoader.kt +++ b/experiment/newssite/src/main/kotlin/com/example/news/api/NewsServiceLoader.kt @@ -1,7 +1,7 @@ package com.example.news.api import com.google.common.util.concurrent.MoreExecutors -import com.simiacryptus.cognotik.actors.ProxyAgent +import com.simiacryptus.cognotik.agents.ProxyAgent import com.simiacryptus.cognotik.chat.model.AnthropicModels import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.AbbrevWhitelistYamlDescriber diff --git a/gradle.properties b/gradle.properties index 1e81dc2e7..bd6f7adc8 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,7 +1,7 @@ pluginName=Cognotik pluginRepositoryUrl=https://github.com/SimiaCryptus/Cognotik libraryGroup=com.simiacryptus -libraryVersion=2.0.18 +libraryVersion=2.0.19 gradleVersion=8.13 org.gradle.caching=true diff --git a/intellij/src/main/kotlin/cognotik/actions/SelectionAction.kt b/intellij/src/main/kotlin/cognotik/actions/SelectionAction.kt index 50993a685..d8b17b0f5 100644 --- a/intellij/src/main/kotlin/cognotik/actions/SelectionAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/SelectionAction.kt @@ -13,7 +13,7 @@ import com.intellij.psi.PsiElement import com.intellij.psi.PsiFile import com.intellij.psi.PsiManager import com.intellij.psi.PsiRecursiveElementVisitor -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent import com.simiacryptus.cognotik.util.* abstract class SelectionAction( diff --git a/intellij/src/main/kotlin/cognotik/actions/agent/CustomFileSetPatchServer.kt b/intellij/src/main/kotlin/cognotik/actions/agent/CustomFileSetPatchServer.kt index 
e6f67a412..8195b2531 100644 --- a/intellij/src/main/kotlin/cognotik/actions/agent/CustomFileSetPatchServer.kt +++ b/intellij/src/main/kotlin/cognotik/actions/agent/CustomFileSetPatchServer.kt @@ -1,6 +1,6 @@ package cognotik.actions.agent -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.diff.PatchProcessor diff --git a/intellij/src/main/kotlin/cognotik/actions/agent/DocumentedMassPatchServer.kt b/intellij/src/main/kotlin/cognotik/actions/agent/DocumentedMassPatchServer.kt index 2e5162d3a..571ca791b 100644 --- a/intellij/src/main/kotlin/cognotik/actions/agent/DocumentedMassPatchServer.kt +++ b/intellij/src/main/kotlin/cognotik/actions/agent/DocumentedMassPatchServer.kt @@ -1,7 +1,7 @@ package cognotik.actions.agent import com.google.common.util.concurrent.Futures -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.diff.PatchProcessor diff --git a/intellij/src/main/kotlin/cognotik/actions/agent/MultiStepPatchAction.kt b/intellij/src/main/kotlin/cognotik/actions/agent/MultiStepPatchAction.kt index e364f1467..eeae4a6ed 100644 --- a/intellij/src/main/kotlin/cognotik/actions/agent/MultiStepPatchAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/agent/MultiStepPatchAction.kt @@ -7,9 +7,9 @@ import com.intellij.openapi.actionSystem.AnActionEvent import com.intellij.openapi.actionSystem.PlatformDataKeys import com.intellij.openapi.application.ApplicationManager import com.simiacryptus.cognotik.CognotikAppServer -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent -import com.simiacryptus.cognotik.actors.ParsedResponse 
+import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedResponse import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.config.AppSettingsState diff --git a/intellij/src/main/kotlin/cognotik/actions/editor/CustomEditAction.kt b/intellij/src/main/kotlin/cognotik/actions/editor/CustomEditAction.kt index e93a005d6..5db4130d1 100644 --- a/intellij/src/main/kotlin/cognotik/actions/editor/CustomEditAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/editor/CustomEditAction.kt @@ -5,7 +5,7 @@ import com.intellij.openapi.actionSystem.ActionUpdateThread import com.intellij.openapi.diagnostic.Logger import com.intellij.openapi.progress.ProgressIndicator import com.intellij.openapi.project.Project -import com.simiacryptus.cognotik.actors.ProxyAgent +import com.simiacryptus.cognotik.agents.ProxyAgent import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.util.UITools import javax.swing.JOptionPane diff --git a/intellij/src/main/kotlin/cognotik/actions/editor/PasteAction.kt b/intellij/src/main/kotlin/cognotik/actions/editor/PasteAction.kt index aaed8ef05..5f204bee5 100644 --- a/intellij/src/main/kotlin/cognotik/actions/editor/PasteAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/editor/PasteAction.kt @@ -10,7 +10,7 @@ import com.intellij.openapi.actionSystem.ActionUpdateThread import com.intellij.openapi.actionSystem.AnActionEvent import com.intellij.openapi.progress.ProgressIndicator import com.intellij.openapi.project.Project -import com.simiacryptus.cognotik.actors.ProxyAgent +import com.simiacryptus.cognotik.agents.ProxyAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.util.ComputerLanguage diff --git 
a/intellij/src/main/kotlin/cognotik/actions/find/FindResultsChatAction.kt b/intellij/src/main/kotlin/cognotik/actions/find/FindResultsChatAction.kt index ce362cbf1..faf8e342a 100644 --- a/intellij/src/main/kotlin/cognotik/actions/find/FindResultsChatAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/find/FindResultsChatAction.kt @@ -12,7 +12,7 @@ import com.intellij.usages.ReadWriteAccessUsageInfo2UsageAdapter import com.intellij.usages.Usage import com.intellij.usages.UsageView import com.simiacryptus.cognotik.CognotikAppServer -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.platform.Session diff --git a/intellij/src/main/kotlin/cognotik/actions/find/FindResultsModificationAction.kt b/intellij/src/main/kotlin/cognotik/actions/find/FindResultsModificationAction.kt index d8417504d..5231a2854 100644 --- a/intellij/src/main/kotlin/cognotik/actions/find/FindResultsModificationAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/find/FindResultsModificationAction.kt @@ -14,7 +14,7 @@ import com.intellij.usages.Usage import com.intellij.usages.UsageInfo2UsageAdapter import com.intellij.usages.UsageView import com.simiacryptus.cognotik.CognotikAppServer -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.model.User diff --git a/intellij/src/main/kotlin/cognotik/actions/generate/CreateImageAction.kt b/intellij/src/main/kotlin/cognotik/actions/generate/CreateImageAction.kt index eac3881d8..caab22e68 100644 --- a/intellij/src/main/kotlin/cognotik/actions/generate/CreateImageAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/generate/CreateImageAction.kt @@ 
-10,10 +10,9 @@ import com.intellij.openapi.ui.DialogWrapper import com.intellij.openapi.vfs.VirtualFile import com.intellij.openapi.vfs.VirtualFileManager import com.intellij.util.ui.JBUI -import com.simiacryptus.cognotik.actors.ImageAgent -import com.simiacryptus.cognotik.actors.ImageResponse +import com.simiacryptus.cognotik.agents.ImageGenerationAgent +import com.simiacryptus.cognotik.agents.ImageAndText import com.simiacryptus.cognotik.config.AppSettingsState -import com.simiacryptus.cognotik.config.imageModel import com.simiacryptus.cognotik.util.* import java.awt.GridBagConstraints import java.awt.GridBagLayout @@ -28,181 +27,183 @@ import javax.imageio.ImageIO import javax.swing.* class CreateImageAction : BaseAction() { - inner class ImageGenerationDialog(project: Project) : DialogWrapper(project) { - private val fileNameField = JTextField(generateDefaultFileName(), 20) - private val instructionsArea = JTextArea(3, 20) - - init { - log.debug("Initializing ImageGenerationDialog") - title = "Generate Image" - init() - } - - private fun generateDefaultFileName(): String { - val timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss")) - return "generated_image_$timestamp.png" - } + inner class ImageGenerationDialog(project: Project) : DialogWrapper(project) { + private val fileNameField = JTextField(generateDefaultFileName(), 20) + private val instructionsArea = JTextArea(3, 20) + + init { + log.debug("Initializing ImageGenerationDialog") + title = "Generate Image" + init() + } - override fun createCenterPanel(): JComponent { - return JPanel(GridBagLayout()).apply { - val c = GridBagConstraints() - c.fill = GridBagConstraints.HORIZONTAL - c.insets = JBUI.insets(5) - c.gridx = 0; c.gridy = 0 - add(JLabel("Output filename:"), c) - c.gridx = 1; c.gridy = 0 - add(fileNameField, c) - c.gridx = 0; c.gridy = 1 - add(JLabel("Special instructions:"), c) - c.gridx = 1; c.gridy = 1 - c.fill = GridBagConstraints.BOTH - 
add(JScrollPane(instructionsArea), c) - } - } + private fun generateDefaultFileName(): String { + val timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss")) + return "generated_image_$timestamp.png" + } - fun getFileName() = fileNameField.text - fun getInstructions() = instructionsArea.text + override fun createCenterPanel(): JComponent { + return JPanel(GridBagLayout()).apply { + val c = GridBagConstraints() + c.fill = GridBagConstraints.HORIZONTAL + c.insets = JBUI.insets(5) + c.gridx = 0; c.gridy = 0 + add(JLabel("Output filename:"), c) + c.gridx = 1; c.gridy = 0 + add(fileNameField, c) + c.gridx = 0; c.gridy = 1 + add(JLabel("Special instructions:"), c) + c.gridx = 1; c.gridy = 1 + c.fill = GridBagConstraints.BOTH + add(JScrollPane(instructionsArea), c) + } } - override fun getActionUpdateThread() = ActionUpdateThread.BGT + fun getFileName() = fileNameField.text + fun getInstructions() = instructionsArea.text + } + + override fun getActionUpdateThread() = ActionUpdateThread.BGT + + override fun handle(e: AnActionEvent) { + log.info("Starting CreateImageAction handler") + val rootRef = AtomicReference(null) + val codeFiles: MutableSet = mutableSetOf() + val dialog = ImageGenerationDialog(e.project!!) + if (!dialog.showAndGet()) { + log.debug("Dialog cancelled by user") + return + } + UITools.runAsync(e.project, "Creating Image", true) { progress -> + try { + progress.text = "Analyzing code files..." 
+ log.debug("Beginning code analysis") + fun codeSummary() = codeFiles.filter { + rootRef.get()?.resolve(it)?.toFile()?.exists() ?: false + }.associateWith { rootRef.get()?.resolve(it)?.toFile()?.readText(Charsets.UTF_8) }.entries.joinToString( + "\n\n" + ) { (path, code) -> + val extension = + path.toString().split('.').lastOrNull()?.let { /*escapeHtml4*/(it)/*.indent(" ")*/ } + "# $path\n```$extension\n${code}\n```" + } - override fun handle(e: AnActionEvent) { - log.info("Starting CreateImageAction handler") - val rootRef = AtomicReference(null) - val codeFiles: MutableSet = mutableSetOf() - val dialog = ImageGenerationDialog(e.project!!) - if (!dialog.showAndGet()) { - log.debug("Dialog cancelled by user") - return + val dataContext = e.dataContext + val virtualFiles = PlatformDataKeys.VIRTUAL_FILE_ARRAY.getData(dataContext) + log.debug("Found ${virtualFiles?.size ?: 0} virtual files") + progress.text = "Determining root directory..." + val folder = e.getSelectedFolder() + rootRef.set( + if (null != folder) { + log.debug("Using selected folder as root: {}", folder.toFile) + folder.toFile.toPath() + } else if (1 == virtualFiles?.size) { + log.debug("Using parent of single file as root") + e.getSelectedFile()?.parent?.toNioPath() + } else { + log.debug("Using module root as root directory") + getModuleRootForFile( + e.getSelectedFile()?.parent?.toFile ?: throw RuntimeException("No file selected") + ).toPath() + } + ) + progress.text = "Collecting files..." + + val root = rootRef.get() ?: throw RuntimeException("Root path not set") + if (!Files.exists(root)) { + throw IOException("Root directory does not exist: $root") } - UITools.runAsync(e.project, "Creating Image", true) { progress -> - try { - progress.text = "Analyzing code files..." 
- log.debug("Beginning code analysis") - fun codeSummary() = codeFiles.filter { - rootRef.get()?.resolve(it)?.toFile()?.exists() ?: false - }.associateWith { rootRef.get()?.resolve(it)?.toFile()?.readText(Charsets.UTF_8) }.entries.joinToString( - "\n\n" - ) { (path, code) -> - val extension = - path.toString().split('.').lastOrNull()?.let { /*escapeHtml4*/(it)/*.indent(" ")*/ } - "# $path\n```$extension\n${code}\n```" - } - - val dataContext = e.dataContext - val virtualFiles = PlatformDataKeys.VIRTUAL_FILE_ARRAY.getData(dataContext) - log.debug("Found ${virtualFiles?.size ?: 0} virtual files") - progress.text = "Determining root directory..." - val folder = e.getSelectedFolder() - rootRef.set( - if (null != folder) { - log.debug("Using selected folder as root: {}",folder.toFile) - folder.toFile.toPath() - } else if (1 == virtualFiles?.size) { - log.debug("Using parent of single file as root") - e.getSelectedFile()?.parent?.toNioPath() - } else { - log.debug("Using module root as root directory") - getModuleRootForFile( - e.getSelectedFile()?.parent?.toFile ?: throw RuntimeException("No file selected") - ).toPath() - } - ) - progress.text = "Collecting files..." - - val root = rootRef.get() ?: throw RuntimeException("Root path not set") - if (!Files.exists(root)) { - throw IOException("Root directory does not exist: $root") - } - log.info("Using root directory: $root") - val files = getFiles(virtualFiles, root) - codeFiles.addAll(files) - log.debug("Collected ${codeFiles.size} code files") - progress.text = "Generating image..." - log.info("Starting image generation with ${codeFiles.size} files") - val imageActor = ImageAgent( - prompt = """ + log.info("Using root directory: $root") + val files = getFiles(virtualFiles, root) + codeFiles.addAll(files) + log.debug("Collected ${codeFiles.size} code files") + progress.text = "Generating image..." 
+ log.info("Starting image generation with ${codeFiles.size} files") + val imageActor = ImageGenerationAgent( + prompt = """ You are a technical drawing assistant. You will be composing an image about the following code: ${codeSummary()} Special instructions: ${dialog.getInstructions()} """.trimIndent(), - textModel = AppSettingsState.instance.smartChatClient, - imageModel = AppSettingsState.instance.mainImageModel.imageModel() - ).apply { setImageAPI(IdeaOpenAIClient.instance) } - log.debug("Sending request to image generation API") - val response = imageActor.answer(listOf(codeSummary(), dialog.getInstructions()),) - log.debug("Image generation completed successfully") - val imagePath = root.resolve(dialog.getFileName()) - write(response, imagePath) - VirtualFileManager.getInstance().findFileByNioPath(imagePath)?.refresh(false, false) - } catch (ex: Throwable) { - when (ex) { - is IOException -> log.error("IO error during image creation: ${ex.message}", ex) - is SecurityException -> log.error("Security error during image creation: ${ex.message}", ex) - is IllegalArgumentException -> log.error( - "Invalid argument during image creation: ${ex.message}", - ex - ) - - else -> log.error("Unexpected error during image creation", ex) - } - UITools.showErrorDialog("Failed to create image: ${ex.message}", "Error") - } + textModel = AppSettingsState.instance.smartChatClient, + imageModel = AppSettingsState.instance.imageModel?.model, + imageClient = AppSettingsState.instance.imageClient + ) + log.debug("Sending request to image generation API") + val response = imageActor.answer(listOf(codeSummary(), dialog.getInstructions())) + log.debug("Image generation completed successfully") + val imagePath = root.resolve(dialog.getFileName()) + write(response, imagePath) + VirtualFileManager.getInstance().findFileByNioPath(imagePath)?.refresh(false, false) + } catch (ex: Throwable) { + when (ex) { + is IOException -> log.error("IO error during image creation: ${ex.message}", ex) + 
is SecurityException -> log.error("Security error during image creation: ${ex.message}", ex) + is IllegalArgumentException -> log.error( + "Invalid argument during image creation: ${ex.message}", + ex + ) + + else -> log.error("Unexpected error during image creation", ex) } + UITools.showErrorDialog("Failed to create image: ${ex.message}", "Error") + } } - - private fun write( - code: ImageResponse, path: Path - ) = try { - log.debug("Creating parent directories for: {}",path) - path.parent?.toFile()?.mkdirs() - val format = path.toString().split(".").last() - log.debug("Writing image in format: $format") - - val bytes = ByteArrayOutputStream().use { outputStream -> - if (!ImageIO.write( - code.image, format, outputStream - ) - ) { - throw IOException("Unsupported or invalid image format: $format") - } - outputStream.toByteArray() - } - path.toFile().writeBytes(bytes) - path - } catch (e: Exception) { - log.error("Failed to write image to $path", e) - when (e) { - is IOException -> throw IOException("Failed to write image: ${e.message}", e) - is SecurityException -> throw SecurityException("Security error writing image: ${e.message}", e) - else -> throw RuntimeException("Unexpected error writing image: ${e.message}", e) - } + } + + private fun write( + code: ImageAndText, path: Path + ) = try { + log.debug("Creating parent directories for: {}", path) + path.parent?.toFile()?.mkdirs() + val format = path.toString().split(".").last() + log.debug("Writing image in format: $format") + + val bytes = ByteArrayOutputStream().use { outputStream -> + if (!ImageIO.write( + code.image, format, outputStream + ) + ) { + throw IOException("Unsupported or invalid image format: $format") + } + outputStream.toByteArray() } - - private fun getFiles( - virtualFiles: Array?, root: Path - ): MutableSet { - val codeFiles = mutableSetOf() - virtualFiles?.forEach { file -> - if (file.isDirectory) { - getFiles(file.children, root) - } else { - val relative = 
root.relativize(file.toNioPath()) - codeFiles.add(relative) - - } - } - return codeFiles - } - - override fun isEnabled(event: AnActionEvent): Boolean { - if (!super.isEnabled(event)) return false - event.getSelectedFile() ?: return false - return true + path.toFile().writeBytes(bytes) + path + } catch (e: Exception) { + log.error("Failed to write image to $path", e) + when (e) { + is IOException -> throw IOException("Failed to write image: ${e.message}", e) + is SecurityException -> throw SecurityException("Security error writing image: ${e.message}", e) + else -> throw RuntimeException("Unexpected error writing image: ${e.message}", e) } - - companion object { - private val log = LoggerFactory.getLogger(CreateImageAction::class.java) + } + + private fun getFiles( + virtualFiles: Array?, root: Path + ): MutableSet { + val codeFiles = mutableSetOf() + virtualFiles?.forEach { file -> + if (file.isDirectory) { + getFiles(file.children, root) + } else { + val relative = root.relativize(file.toNioPath()) + codeFiles.add(relative) + + } } + return codeFiles + } + + override fun isEnabled(event: AnActionEvent): Boolean { + if (!super.isEnabled(event)) return false + event.getSelectedFile() ?: return false + AppSettingsState.instance.imageModel ?: return false + return true + } + + companion object { + private val log = LoggerFactory.getLogger(CreateImageAction::class.java) + } } \ No newline at end of file diff --git a/intellij/src/main/kotlin/cognotik/actions/git/ReplicateCommitAction.kt b/intellij/src/main/kotlin/cognotik/actions/git/ReplicateCommitAction.kt index 3018ff222..85b2f3e7a 100644 --- a/intellij/src/main/kotlin/cognotik/actions/git/ReplicateCommitAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/git/ReplicateCommitAction.kt @@ -13,8 +13,8 @@ import com.intellij.openapi.vcs.VcsDataKeys import com.intellij.openapi.vcs.changes.Change import com.intellij.openapi.vfs.VirtualFile import com.simiacryptus.cognotik.CognotikAppServer -import 
com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.describe.Description diff --git a/intellij/src/main/kotlin/cognotik/actions/plan/PlanConfigDialog.kt b/intellij/src/main/kotlin/cognotik/actions/plan/PlanConfigDialog.kt index ed918deb8..41bfd5148 100644 --- a/intellij/src/main/kotlin/cognotik/actions/plan/PlanConfigDialog.kt +++ b/intellij/src/main/kotlin/cognotik/actions/plan/PlanConfigDialog.kt @@ -10,7 +10,7 @@ import com.intellij.ui.dsl.builder.Align import com.intellij.ui.dsl.builder.panel import com.simiacryptus.cognotik.chat.model.ChatModel import com.simiacryptus.cognotik.config.AppSettingsState -import com.simiacryptus.cognotik.models.LLMModel +import com.simiacryptus.cognotik.models.AIModel import com.simiacryptus.cognotik.plan.OrchestrationConfig import com.simiacryptus.cognotik.plan.TaskType import com.simiacryptus.cognotik.plan.TaskTypeConfig @@ -97,13 +97,20 @@ class PlanConfigDialog( settings.defaultModel?.model?.modelName ?: AppSettingsState.instance.smartModel?.model?.modelName toolTipText = "Default AI model for all tasks" } - private val parsingModelCombo = +private val parsingModelCombo = ComboBox(visibleModelsCache.distinctBy { it.modelName }.map { it.modelName }.toTypedArray()).apply { maximumSize = Dimension(CONFIG_COMBO_WIDTH, CONFIG_COMBO_HEIGHT) selectedItem = settings.parsingModel?.model?.modelName ?: AppSettingsState.instance.smartModel?.model?.modelName toolTipText = "AI model for parsing and understanding tasks" } + private val imageChatModelCombo = + ComboBox(visibleModelsCache.distinctBy { it.modelName }.map { it.modelName }.toTypedArray()).apply { + maximumSize = Dimension(CONFIG_COMBO_WIDTH, CONFIG_COMBO_HEIGHT) + selectedItem = + 
settings.imageChatModel?.model?.modelName ?: AppSettingsState.instance.imageChatModel?.model?.modelName + toolTipText = "Multimodal AI model for image-related tasks" + } private val temperatureSlider = JSlider(MIN_TEMP, MAX_TEMP, (settings.temperature * TEMPERATURE_SCALE).toInt()).apply { @@ -436,10 +443,11 @@ class PlanConfigDialog( settings.temperature = config.temperature.coerceIn(0.0, 1.0) settings.autoFix = config.autoFix settings.maxTaskHistoryChars = config.maxTaskHistoryChars - settings.maxTasksPerIteration = config.maxTasksPerIteration +settings.maxTasksPerIteration = config.maxTasksPerIteration settings.maxIterations = config.maxIterations settings.defaultModel = config.defaultModel settings.parsingModel = config.parsingModel + settings.imageChatModel = config.imageChatModel settings.cognitiveMode = config.cognitiveMode // Update UI components @@ -473,12 +481,18 @@ class PlanConfigDialog( } } - config.parsingModel?.model?.modelName?.let { modelName -> +config.parsingModel?.model?.modelName?.let { modelName -> visibleModelsCache.find { it.modelName == modelName }?.let { model -> settings.parsingModel = model.toApiChatModel() parsingModelCombo.selectedItem = modelName } } + config.imageChatModel?.model?.modelName?.let { modelName -> + visibleModelsCache.find { it.modelName == modelName }?.let { model -> + settings.imageChatModel = model.toApiChatModel() + imageChatModelCombo.selectedItem = modelName + } + } } catch (e: Exception) { log.error("Error loading configuration", e) @@ -551,10 +565,14 @@ class PlanConfigDialog( cell(globalModelCombo).align(Align.FILL) .comment("Default AI model for all tasks") } - row("Parsing Model:") { +row("Parsing Model:") { cell(parsingModelCombo).align(Align.FILL) .comment("AI model for parsing and understanding tasks") } + row("Image Chat Model:") { + cell(imageChatModelCombo).align(Align.FILL) + .comment("Multimodal AI model for image-related tasks") + } group("Task Configurations") { row { @@ -626,11 +644,16 @@ class 
PlanConfigDialog( val model = visibleModelsCache.find { it.modelName == selectedGlobalModel } settings.defaultModel = model?.toApiChatModel() } - val selectedParsingModel = parsingModelCombo.selectedItem as? String +val selectedParsingModel = parsingModelCombo.selectedItem as? String if (selectedParsingModel != null) { val model = visibleModelsCache.find { it.modelName == selectedParsingModel } settings.parsingModel = model?.toApiChatModel() } + val selectedImageChatModel = imageChatModelCombo.selectedItem as? String + if (selectedImageChatModel != null) { + val model = visibleModelsCache.find { it.modelName == selectedImageChatModel } + settings.imageChatModel = model?.toApiChatModel() + } val selectedCognitiveMode = cognitiveModeCombo.selectedItem as String settings.cognitiveMode = CognitiveModeStrategies.valueOf(selectedCognitiveMode) return settings @@ -663,7 +686,7 @@ class PlanConfigDialog( // Validation patterns private val CONFIG_NAME_PATTERN = Regex("^[a-zA-Z0-9_ -]+$") - fun isVisible(chatModel: LLMModel) = + fun isVisible(chatModel: AIModel) = ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings().apis.filter { it.key != null } .any { it.provider == chatModel.provider } } diff --git a/intellij/src/main/kotlin/cognotik/actions/plan/TaskTypeSelectionDialog.kt b/intellij/src/main/kotlin/cognotik/actions/plan/TaskTypeSelectionDialog.kt index 297d2de4a..c7592834a 100644 --- a/intellij/src/main/kotlin/cognotik/actions/plan/TaskTypeSelectionDialog.kt +++ b/intellij/src/main/kotlin/cognotik/actions/plan/TaskTypeSelectionDialog.kt @@ -128,22 +128,28 @@ class TaskTypeSelectionDialog(project: Project?) 
: DialogWrapper(project) { private fun getPackageGroup(taskType: TaskType<*, *>): String { return when { - taskType.name.contains("Reasoning") || - taskType.name in listOf( - "MultiPerspectiveAnalysis", "SocraticDialogue", "AnalogicalReasoning", - "CounterfactualAnalysis", "AbstractionLadder", "ConstraintSatisfaction", - "CausalInference", "DecompositionSynthesis", "NarrativeGeneration", - "AbductiveReasoning", "AdversarialReasoning", "ConstraintRelaxation", - "DialecticalReasoning", "LateralThinking", "NarrativeReasoning", - "ProbabilisticReasoning", "SystemsThinking", "TemporalReasoning", - "GameTheory", "FiniteStateMachine", "Brainstorming", - "ChainOfThought", "MetaCognitiveReflection", "GeneticOptimization", - "EthicalReasoning" - ) -> "Reasoning" - - taskType.name in listOf( + taskType.name in listOf( + "MultiPerspectiveAnalysis", "SocraticDialogue", "AnalogicalReasoning", + "CounterfactualAnalysis", "AbstractionLadder", "ConstraintSatisfaction", + "CausalInference", "DecompositionSynthesis", + "AbductiveReasoning", "AdversarialReasoning", "ConstraintRelaxation", + "DialecticalReasoning", "LateralThinking", + "ProbabilisticReasoning", "SystemsThinking", "TemporalReasoning", + "GameTheory", "FiniteStateMachine", "Brainstorming", + "ChainOfThought", "MetaCognitiveReflection", "GeneticOptimization", + "EthicalReasoning" + ) -> "Reasoning" + + taskType.name in listOf( + "NarrativeGeneration", "NarrativeReasoning", "ArticleGeneration", + "PersuasiveEssay", "TechnicalExplanation", "TutorialGeneration", + "BusinessProposal", "EmailCampaign", "InteractiveStory", + "ReportGeneration", "Scriptwriting", "JournalismReasoning" + ) -> "Writing" + + taskType.name in listOf( "Analysis", "FileModification", "FileSearch", - "WriteHtml", "GeneratePresentation" + "WriteHtml", "GeneratePresentation", "GenerateImage" ) -> "File Operations" taskType.name in listOf("VectorSearch", "KnowledgeIndexing") -> "Knowledge Management" diff --git 
a/intellij/src/main/kotlin/cognotik/actions/problems/AnalyzeProblemAction.kt b/intellij/src/main/kotlin/cognotik/actions/problems/AnalyzeProblemAction.kt index 0a6d522cf..3f2f34a21 100644 --- a/intellij/src/main/kotlin/cognotik/actions/problems/AnalyzeProblemAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/problems/AnalyzeProblemAction.kt @@ -18,8 +18,8 @@ import com.intellij.openapi.util.TextRange import com.intellij.openapi.vfs.VirtualFile import com.intellij.psi.PsiManager import com.simiacryptus.cognotik.CognotikAppServer -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.platform.Session diff --git a/intellij/src/main/kotlin/cognotik/actions/test/TestResultAutofixAction.kt b/intellij/src/main/kotlin/cognotik/actions/test/TestResultAutofixAction.kt index 8dc6e6cbe..7aa16f4a9 100644 --- a/intellij/src/main/kotlin/cognotik/actions/test/TestResultAutofixAction.kt +++ b/intellij/src/main/kotlin/cognotik/actions/test/TestResultAutofixAction.kt @@ -7,8 +7,8 @@ import com.intellij.openapi.actionSystem.AnActionEvent import com.intellij.openapi.actionSystem.PlatformDataKeys import com.intellij.openapi.vfs.VirtualFile import com.simiacryptus.cognotik.CognotikAppServer -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.config.AppSettingsState import com.simiacryptus.cognotik.platform.Session diff --git a/intellij/src/main/kotlin/com/simiacryptus/cognotik/SettingsWidgetFactory.kt 
b/intellij/src/main/kotlin/com/simiacryptus/cognotik/SettingsWidgetFactory.kt index b9f61a26c..c48e7d015 100644 --- a/intellij/src/main/kotlin/com/simiacryptus/cognotik/SettingsWidgetFactory.kt +++ b/intellij/src/main/kotlin/com/simiacryptus/cognotik/SettingsWidgetFactory.kt @@ -39,6 +39,7 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { private var statusBar: StatusBar? = null private var smartModelTree: Tree? = null private var fastModelTree: Tree? = null + private var imageChatModelTree: Tree? = null private val sessionsList = JBList() private val sessionsListModel = DefaultListModel() private fun getSmartModelTree(): Tree { @@ -54,10 +55,18 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { } return fastModelTree!! } + private fun getImageChatModelTree(): Tree { + if (imageChatModelTree == null) { + imageChatModelTree = createModelTree("Image Chat Model", AppSettingsState.instance.imageChatModel) + } + return imageChatModelTree!! + } + private fun recreateModelTrees() { smartModelTree = null fastModelTree = null + imageChatModelTree = null } private fun createModelTree(title: String, selectedModel: ApiChatModel?): Tree { @@ -106,7 +115,7 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { tree.selectionModel.selectionMode = TreeSelectionModel.SINGLE_TREE_SELECTION tree.isRootVisible = false tree.showsRootHandles = true - tree.addTreeSelectionListener { +tree.addTreeSelectionListener { val selectedPath = tree.selectionPath if (selectedPath != null && selectedPath.pathCount == 3) { val modelName = selectedPath.lastPathComponent.toString() @@ -123,6 +132,8 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { "Fast Model" -> AppSettingsState.instance.fastModel = ApiChatModel(chatModel, apiData) + "Image Chat Model" -> AppSettingsState.instance.imageChatModel = + ApiChatModel(chatModel, apiData) } statusBar?.updateWidget(ID()) } @@ -285,7 +296,7 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { } } - init { +init { 
AppSettingsState.onSettingsLoadedListeners.add { Thread { statusBar?.updateWidget(ID()) @@ -298,6 +309,9 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { AppSettingsState.instance.fastModel?.model.let { model -> setSelectedModel(getFastModelTree(), model?.modelName ?: "") } + AppSettingsState.instance.imageChatModel?.model.let { model -> + setSelectedModel(getImageChatModelTree(), model?.modelName ?: "") + } } }.start() } @@ -312,6 +326,11 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { setSelectedModel(getFastModelTree(), model?.modelName ?: "") } } + AppSettingsState.instance.imageChatModel?.model.let { model -> + SwingUtilities.invokeLater { + setSelectedModel(getImageChatModelTree(), model?.modelName ?: "") + } + } }.start() } @@ -368,7 +387,7 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { } } - override fun getPopup(): JBPopup { +override fun getPopup(): JBPopup { updateSessionsList() val panel = JPanel(BorderLayout()) panel.accessibleContext.accessibleDescription = getMessage("popup.description") @@ -382,12 +401,16 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { val fastModelPanel = JPanel(BorderLayout()) fastModelPanel.add(JScrollPane(getFastModelTree()), BorderLayout.CENTER) + val imageChatModelPanel = JPanel(BorderLayout()) + imageChatModelPanel.add(JScrollPane(getImageChatModelTree()), BorderLayout.CENTER) + val usagePanel = JPanel(BorderLayout()) usagePanel.add(UsageTable(ApplicationServices.fileApplicationServices(AppSettingsState.pluginHome).usageManager), BorderLayout.CENTER) tabbedPane.addTab(getMessage("tab.smartModel"), smartModelPanel) tabbedPane.addTab(getMessage("tab.fastModel"), fastModelPanel) + tabbedPane.addTab(getMessage("tab.imageChatModel"), imageChatModelPanel) tabbedPane.addTab(getMessage("tab.server"), createServerControlPanel()) tabbedPane.addTab(getMessage("tab.usage"), usagePanel) @@ -410,9 +433,10 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { return 
AppSettingsState.instance.smartModel?.model?.modelName ?: "Uninitialized" } - override fun getTooltipText() = """ +override fun getTooltipText() = """ Smart Model: ${AppSettingsState.instance.smartModel?.model?.modelName ?: "Not configured"}
Fast Model: ${AppSettingsState.instance.fastModel?.model?.modelName ?: "Not configured"}
+ Image Chat Model: ${AppSettingsState.instance.imageChatModel?.model?.modelName ?: "Not configured"}
Temperature: ${AppSettingsState.instance.temperature}
${ if (CognotikAppServer.isRunning()) { @@ -459,4 +483,4 @@ class SettingsWidgetFactory : StatusBarWidgetFactory { override fun canBeEnabledOn(statusBar: StatusBar): Boolean { return true } -} +} \ No newline at end of file diff --git a/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsComponent.kt b/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsComponent.kt index 3209135f8..5ad1557d2 100644 --- a/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsComponent.kt +++ b/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsComponent.kt @@ -18,7 +18,7 @@ import com.intellij.ui.components.JBTextField import com.intellij.ui.table.JBTable import com.simiacryptus.cognotik.diff.PatchProcessors import com.simiacryptus.cognotik.embedding.EmbeddingModel -import com.simiacryptus.cognotik.image.ImageModels +import com.simiacryptus.cognotik.image.ImageModel import com.simiacryptus.cognotik.models.APIProvider import com.simiacryptus.cognotik.platform.ApplicationServices.fileApplicationServices import com.simiacryptus.cognotik.util.LoggerFactory @@ -31,730 +31,784 @@ import javax.swing.table.DefaultTableCellRenderer import javax.swing.table.DefaultTableModel class AppSettingsComponent : Disposable { - @Name("Enable Diff Logging") - val diffLoggingEnabled = JBCheckBox() - - @Name("AWS Profile") - val awsProfile = JBTextField().apply { - toolTipText = "AWS Profile" - columns = 30 - } - - @Name("AWS Region") - val awsRegion = JBTextField().apply { - toolTipText = "AWS Region" - columns = 30 - } - - @Name("AWS Bucket") - val awsBucket = JBTextField().apply { - toolTipText = "AWS Bucket" - columns = 30 - } - - @Suppress("unused") - @Name("Store Metadata") - val storeMetadata = JTextArea().apply { - lineWrap = true - wrapStyleWord = true - } - - val executablesModel = DefaultListModel().apply { - AppSettingsState.instance.executables?.forEach { addElement(it) } - } - val executablesList = JBList(executablesModel) - 
- @Name("Executables") - val executablesPanel = JPanel(BorderLayout()).apply { - val scrollPane = JScrollPane(executablesList) - scrollPane.preferredSize = Dimension(300, 200) - add(scrollPane, BorderLayout.CENTER) - val buttonPanel = JPanel() - val addButton = JButton("Add") - val removeButton = JButton("Remove") - val editButton = JButton("Edit") - removeButton.isEnabled = false - editButton.isEnabled = false - - addButton.addActionListener { - val descriptor = FileChooserDescriptorFactory.createSingleFileDescriptor() - descriptor.title = "Select Executable" - try { - FileChooser.chooseFile(descriptor, null, null) { file -> - val executablePath = file.path - if (executablePath.isNotBlank() && !executablesModel.contains(executablePath)) { - executablesModel.addElement(executablePath) - AppSettingsState.instance.executables?.add(executablePath) - log.debug("Successfully added executable: $executablePath") - } else { - if (executablePath.isBlank()) { - log.warn("Attempted to add blank executable path") - } else { - log.warn("Executable already exists in list: $executablePath") - } - } - } - } catch (e: Exception) { - log.error("Failed to add executable: ${e.message}", e) - JOptionPane.showMessageDialog( - this, "Failed to add executable: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE - ) + @Name("Enable Diff Logging") + val diffLoggingEnabled = JBCheckBox() + + @Name("AWS Profile") + val awsProfile = JBTextField().apply { + toolTipText = "AWS Profile" + columns = 30 + } + + @Name("AWS Region") + val awsRegion = JBTextField().apply { + toolTipText = "AWS Region" + columns = 30 + } + + @Name("AWS Bucket") + val awsBucket = JBTextField().apply { + toolTipText = "AWS Bucket" + columns = 30 + } + + @Suppress("unused") + @Name("Store Metadata") + val storeMetadata = JTextArea().apply { + lineWrap = true + wrapStyleWord = true + } + + val executablesModel = DefaultListModel().apply { + AppSettingsState.instance.executables?.forEach { addElement(it) } + } + val 
executablesList = JBList(executablesModel) + + @Name("Executables") + val executablesPanel = JPanel(BorderLayout()).apply { + val scrollPane = JScrollPane(executablesList) + scrollPane.preferredSize = Dimension(300, 200) + add(scrollPane, BorderLayout.CENTER) + val buttonPanel = JPanel() + val addButton = JButton("Add") + val removeButton = JButton("Remove") + val editButton = JButton("Edit") + removeButton.isEnabled = false + editButton.isEnabled = false + + addButton.addActionListener { + val descriptor = FileChooserDescriptorFactory.createSingleFileDescriptor() + descriptor.title = "Select Executable" + try { + FileChooser.chooseFile(descriptor, null, null) { file -> + val executablePath = file.path + if (executablePath.isNotBlank() && !executablesModel.contains(executablePath)) { + executablesModel.addElement(executablePath) + AppSettingsState.instance.executables?.add(executablePath) + log.debug("Successfully added executable: $executablePath") + } else { + if (executablePath.isBlank()) { + log.warn("Attempted to add blank executable path") + } else { + log.warn("Executable already exists in list: $executablePath") } + } } - removeButton.addActionListener { - try { - val selectedIndices = executablesList.selectedIndices - if (selectedIndices.isEmpty()) { - log.warn("No executables selected for removal") - return@addActionListener - } - for (i in selectedIndices.reversed()) { - val removed = executablesModel.remove(i) - AppSettingsState.instance.executables?.remove(removed) - log.debug("Successfully removed executable: $removed") - } - } catch (e: Exception) { - log.error("Unexpected error removing executable: ${e.message}", e) - JOptionPane.showMessageDialog( - this, "Failed to remove executable: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE - ) - } + } catch (e: Exception) { + log.error("Failed to add executable: ${e.message}", e) + JOptionPane.showMessageDialog( + this, "Failed to add executable: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE + ) + } 
+ } + removeButton.addActionListener { + try { + val selectedIndices = executablesList.selectedIndices + if (selectedIndices.isEmpty()) { + log.warn("No executables selected for removal") + return@addActionListener } - editButton.addActionListener { - try { - val selectedIndex = executablesList.selectedIndex - if (selectedIndex == -1) { - log.warn("No executable selected for editing") - return@addActionListener - } - val currentValue = executablesModel.get(selectedIndex) - val newValue = JOptionPane.showInputDialog(this, "Edit executable path:", currentValue) - if (newValue != null && newValue.isNotBlank()) { - executablesModel.set(selectedIndex, newValue) - AppSettingsState.instance.executables?.remove(currentValue) - AppSettingsState.instance.executables?.add(newValue) - log.debug("Successfully updated executable from '$currentValue' to '$newValue'") - } else { - log.warn("Invalid new executable path provided: ${newValue ?: "null"}") - } - } catch (e: Exception) { - log.error("Unexpected error editing executable: ${e.message}", e) - JOptionPane.showMessageDialog( - this, "Failed to edit executable: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE - ) - } + for (i in selectedIndices.reversed()) { + val removed = executablesModel.remove(i) + AppSettingsState.instance.executables?.remove(removed) + log.debug("Successfully removed executable: $removed") } - executablesList.addListSelectionListener(object : ListSelectionListener { - override fun valueChanged(e: ListSelectionEvent?) 
{ - val hasSelection = executablesList.selectedIndex != -1 - removeButton.isEnabled = hasSelection - editButton.isEnabled = hasSelection - } - }) - buttonPanel.add(addButton) - buttonPanel.add(removeButton) - buttonPanel.add(editButton) - add(buttonPanel, BorderLayout.SOUTH) - - executablesList.selectionMode = ListSelectionModel.MULTIPLE_INTERVAL_SELECTION + } catch (e: Exception) { + log.error("Unexpected error removing executable: ${e.message}", e) + JOptionPane.showMessageDialog( + this, "Failed to remove executable: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE + ) + } + } + editButton.addActionListener { + try { + val selectedIndex = executablesList.selectedIndex + if (selectedIndex == -1) { + log.warn("No executable selected for editing") + return@addActionListener + } + val currentValue = executablesModel.get(selectedIndex) + val newValue = JOptionPane.showInputDialog(this, "Edit executable path:", currentValue) + if (newValue != null && newValue.isNotBlank()) { + executablesModel.set(selectedIndex, newValue) + AppSettingsState.instance.executables?.remove(currentValue) + AppSettingsState.instance.executables?.add(newValue) + log.debug("Successfully updated executable from '$currentValue' to '$newValue'") + } else { + log.warn("Invalid new executable path provided: ${newValue ?: "null"}") + } + } catch (e: Exception) { + log.error("Unexpected error editing executable: ${e.message}", e) + JOptionPane.showMessageDialog( + this, "Failed to edit executable: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE + ) + } } + executablesList.addListSelectionListener(object : ListSelectionListener { + override fun valueChanged(e: ListSelectionEvent?) 
{ + val hasSelection = executablesList.selectedIndex != -1 + removeButton.isEnabled = hasSelection + editButton.isEnabled = hasSelection + } + }) + buttonPanel.add(addButton) + buttonPanel.add(removeButton) + buttonPanel.add(editButton) + add(buttonPanel, BorderLayout.SOUTH) - @Name("Listening Port") - val listeningPort = JBTextField() + executablesList.selectionMode = ListSelectionModel.MULTIPLE_INTERVAL_SELECTION + } - @Name("Listening Endpoint") - val listeningEndpoint = JBTextField() + @Name("Listening Port") + val listeningPort = JBTextField() - @Name("Suppress Errors") - val suppressErrors = JBCheckBox() + @Name("Listening Endpoint") + val listeningEndpoint = JBTextField() - @Name("Use Scratches System Path") - val useScratchesSystemPath = JBCheckBox() + @Name("Suppress Errors") + val suppressErrors = JBCheckBox() - @Name("Model") - val smartModel = ComboBox() + @Name("Use Scratches System Path") + val useScratchesSystemPath = JBCheckBox() - @Name("Model") - val fastModel = ComboBox() + @Name("Model") + val smartModel = ComboBox() - @Name("Main Image Model") - val mainImageModel = ComboBox() +@Name("Model") + val fastModel = ComboBox() + @Name("Model") + val imageChatModel = ComboBox() - @Name("Embedding Model") - val embeddingModel = ComboBox() - @Name("Patch Processor") - val patchProcessor = ComboBox() + @Name("Main Image Model") + val mainImageModel = ComboBox() + @Name("Embedding Model") + val embeddingModel = ComboBox() - @Suppress("unused") - @Name("Enable API Log") - val apiLog = JBCheckBox() + @Name("Patch Processor") + val patchProcessor = ComboBox() - @Suppress("unused") - val openApiLog = JButton(object : AbstractAction("Open API Log") { - override fun actionPerformed(e: ActionEvent) { - AppSettingsState.auxiliaryLog?.let { - if (it.exists()) { - val project = ApplicationManager.getApplication().runReadAction { - ProjectManager.getInstance().openProjects.firstOrNull() - } - ApplicationManager.getApplication().invokeLater { - val virtualFile = 
LocalFileSystem.getInstance().refreshAndFindFileByIoFile(it) - val openFileDescriptor = OpenFileDescriptor(project, virtualFile!!, virtualFile.length.toInt()) - FileEditorManager.getInstance(project!!) - .openTextEditor(openFileDescriptor, true)?.document?.setReadOnly( - true - ) - } - } - } - } - }) - @Name("Developer Tools") - val devActions = JBCheckBox() - - @Suppress("unused") - @Name("Edit API Requests") - val editRequests = JBCheckBox() - - @Name("Disable Auto-Open URLs") - val disableAutoOpenUrls = JBCheckBox() - - @Name("Shell Command") - val shellCommand = JBTextField() - - @Name("Show Welcome Screen") - val showWelcomeScreen = JBCheckBox() - - @Name("Temperature") - val temperature = JBTextField() - - @Name("APIs") - val apis = JBTable(DefaultTableModel(arrayOf("Provider", "Name", "Key", "Base URL"), 0)).apply { - columnModel.getColumn(0).preferredWidth = 100 - columnModel.getColumn(1).preferredWidth = 150 - columnModel.getColumn(2).preferredWidth = 200 - columnModel.getColumn(3).preferredWidth = 200 - val keyColumnIndex = 2 - columnModel.getColumn(keyColumnIndex).cellRenderer = object : DefaultTableCellRenderer() { - override fun setValue(value: Any?) { - text = - if (value is String && value.isNotEmpty()) value.map { '*' }.joinToString("") else value?.toString() - ?: "" - } + @Suppress("unused") + @Name("Enable API Log") + val apiLog = JBCheckBox() + + @Suppress("unused") + val openApiLog = JButton(object : AbstractAction("Open API Log") { + override fun actionPerformed(e: ActionEvent) { + AppSettingsState.auxiliaryLog?.let { + if (it.exists()) { + val project = ApplicationManager.getApplication().runReadAction { + ProjectManager.getInstance().openProjects.firstOrNull() + } + ApplicationManager.getApplication().invokeLater { + val virtualFile = LocalFileSystem.getInstance().refreshAndFindFileByIoFile(it) + val openFileDescriptor = OpenFileDescriptor(project, virtualFile!!, virtualFile.length.toInt()) + FileEditorManager.getInstance(project!!) 
+ .openTextEditor(openFileDescriptor, true)?.document?.setReadOnly( + true + ) + } } + } } + }) + + @Name("Developer Tools") + val devActions = JBCheckBox() + + @Suppress("unused") + @Name("Edit API Requests") + val editRequests = JBCheckBox() + + @Name("Disable Auto-Open URLs") + val disableAutoOpenUrls = JBCheckBox() + + @Name("Shell Command") + val shellCommand = JBTextField() + + @Name("Show Welcome Screen") + val showWelcomeScreen = JBCheckBox() + + @Name("Temperature") + val temperature = JBTextField() + + @Name("APIs") + val apis = JBTable(DefaultTableModel(arrayOf("Provider", "Name", "Key", "Base URL"), 0)).apply { + columnModel.getColumn(0).preferredWidth = 100 + columnModel.getColumn(1).preferredWidth = 150 + columnModel.getColumn(2).preferredWidth = 200 + columnModel.getColumn(3).preferredWidth = 200 + val keyColumnIndex = 2 + columnModel.getColumn(keyColumnIndex).cellRenderer = object : DefaultTableCellRenderer() { + override fun setValue(value: Any?) { + text = + if (value is String && value.isNotEmpty()) value.map { '*' }.joinToString("") else value?.toString() + ?: "" + } + } + } + + @Name("API Management") + val apiManagementPanel = JPanel(BorderLayout()).apply { + val scrollPane = JScrollPane(apis) + scrollPane.preferredSize = Dimension(600, 300) + add(scrollPane, BorderLayout.CENTER) + + val buttonPanel = JPanel(FlowLayout(FlowLayout.LEFT)) + val addButton = JButton("Add API") + val removeButton = JButton("Remove") + val editButton = JButton("Edit") + + removeButton.isEnabled = false + editButton.isEnabled = false + + addButton.addActionListener { + val model = apis.model as DefaultTableModel + + // Create add dialog with all fields + val dialog = JDialog(null as Frame?, "Add API Configuration", true) + dialog.layout = GridBagLayout() + val gbc = GridBagConstraints() + + gbc.gridx = 0; gbc.gridy = 0; gbc.anchor = GridBagConstraints.WEST + dialog.add(JLabel("Provider Type:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; 
gbc.weightx = 1.0 + val providerCombo = ComboBox(APIProvider.values().map { it.name }.toTypedArray()) + dialog.add(providerCombo, gbc) + + gbc.gridx = 0; gbc.gridy = 1; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 + dialog.add(JLabel("Name:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val nameField = JBTextField(30) + dialog.add(nameField, gbc) + + gbc.gridx = 0; gbc.gridy = 2; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 + dialog.add(JLabel("API Key:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val keyField = JBTextField(30) + dialog.add(keyField, gbc) + + gbc.gridx = 0; gbc.gridy = 3; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 + dialog.add(JLabel("Base URL:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val urlField = JBTextField(30) + dialog.add(urlField, gbc) + + // Auto-populate name and base URL when provider changes + providerCombo.addActionListener { + val selectedProvider = APIProvider.valueOf(providerCombo.selectedItem as String) + urlField.text = selectedProvider.base + nameField.text = selectedProvider.name + } + + // Initialize with first provider's defaults + val initialProvider = APIProvider.values().first() + nameField.text = initialProvider.name + urlField.text = initialProvider.base + + gbc.gridx = 0; gbc.gridy = 4; gbc.gridwidth = 2; gbc.fill = GridBagConstraints.NONE + val buttonPanel = JPanel(FlowLayout()) + val okButton = JButton("OK") + val cancelButton = JButton("Cancel") + + okButton.addActionListener { + val provider = providerCombo.selectedItem as? 
String + val name = nameField.text + + if (provider.isNullOrBlank()) { + log.warn("Provider type is required") + JOptionPane.showMessageDialog( + dialog, "Provider type is required", "Validation Error", JOptionPane.WARNING_MESSAGE + ) + return@addActionListener + } + if (name.isBlank()) { + log.warn("API name is required") + JOptionPane.showMessageDialog( + dialog, "API name is required", "Validation Error", JOptionPane.WARNING_MESSAGE + ) + return@addActionListener + } - @Name("API Management") - val apiManagementPanel = JPanel(BorderLayout()).apply { - val scrollPane = JScrollPane(apis) - scrollPane.preferredSize = Dimension(600, 300) - add(scrollPane, BorderLayout.CENTER) - - val buttonPanel = JPanel(FlowLayout(FlowLayout.LEFT)) - val addButton = JButton("Add API") - val removeButton = JButton("Remove") - val editButton = JButton("Edit") - - removeButton.isEnabled = false - editButton.isEnabled = false - - addButton.addActionListener { - val model = apis.model as DefaultTableModel - - // Create add dialog with all fields - val dialog = JDialog(null as Frame?, "Add API Configuration", true) - dialog.layout = GridBagLayout() - val gbc = GridBagConstraints() - - gbc.gridx = 0; gbc.gridy = 0; gbc.anchor = GridBagConstraints.WEST - dialog.add(JLabel("Provider Type:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val providerCombo = ComboBox(APIProvider.values().map { it.name }.toTypedArray()) - dialog.add(providerCombo, gbc) - - gbc.gridx = 0; gbc.gridy = 1; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 - dialog.add(JLabel("Name:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val nameField = JBTextField(30) - dialog.add(nameField, gbc) - - gbc.gridx = 0; gbc.gridy = 2; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 - dialog.add(JLabel("API Key:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val keyField = JBTextField(30) - 
dialog.add(keyField, gbc) - - gbc.gridx = 0; gbc.gridy = 3; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 - dialog.add(JLabel("Base URL:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val urlField = JBTextField(30) - dialog.add(urlField, gbc) - - // Auto-populate name and base URL when provider changes - providerCombo.addActionListener { - val selectedProvider = APIProvider.valueOf(providerCombo.selectedItem as String) - urlField.text = selectedProvider.base ?: "" - nameField.text = selectedProvider.name - urlField.text = selectedProvider.base ?: "" - } + model.addRow( + arrayOf( + providerCombo.selectedItem, nameField.text, keyField.text, urlField.text + ) + ) + dialog.dispose() + } + cancelButton.addActionListener { dialog.dispose() } + + buttonPanel.add(okButton) + buttonPanel.add(cancelButton) + dialog.add(buttonPanel, gbc) + + dialog.pack() + dialog.setLocationRelativeTo(this) + dialog.isVisible = true + } - // Initialize with first provider's defaults - val initialProvider = APIProvider.values().first() - nameField.text = initialProvider.name - urlField.text = initialProvider.base - - gbc.gridx = 0; gbc.gridy = 4; gbc.gridwidth = 2; gbc.fill = GridBagConstraints.NONE - val buttonPanel = JPanel(FlowLayout()) - val okButton = JButton("OK") - val cancelButton = JButton("Cancel") - - okButton.addActionListener { - val provider = providerCombo.selectedItem as? 
String - val name = nameField.text - val key = keyField.text - val url = urlField.text - - if (provider.isNullOrBlank()) { - log.warn("Provider type is required") - JOptionPane.showMessageDialog( - dialog, "Provider type is required", "Validation Error", JOptionPane.WARNING_MESSAGE - ) - return@addActionListener - } - if (name.isBlank()) { - log.warn("API name is required") - JOptionPane.showMessageDialog( - dialog, "API name is required", "Validation Error", JOptionPane.WARNING_MESSAGE - ) - return@addActionListener - } - - model.addRow( - arrayOf( - providerCombo.selectedItem, nameField.text, keyField.text, urlField.text - ) - ) - dialog.dispose() - } - cancelButton.addActionListener { dialog.dispose() } - buttonPanel.add(okButton) - buttonPanel.add(cancelButton) - dialog.add(buttonPanel, gbc) + removeButton.addActionListener { + try { + val selectedRows = apis.selectedRows + if (selectedRows.isEmpty()) { + log.warn("No API configurations selected for removal") + return@addActionListener + } + val model = apis.model as DefaultTableModel + for (i in selectedRows.reversed()) { + val provider = model.getValueAt(i, 0) as? String + val name = model.getValueAt(i, 1) as? 
String + model.removeRow(i) + log.debug("Successfully removed API configuration: $provider - $name") + } + } catch (e: Exception) { + log.error("Unexpected error removing API configuration: ${e.message}", e) + JOptionPane.showMessageDialog( + this, "Failed to remove API configuration: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE + ) + } + } - dialog.pack() - dialog.setLocationRelativeTo(this) - dialog.isVisible = true + editButton.addActionListener { + val selectedRow = apis.selectedRow + if (selectedRow != -1) { + val model = apis.model as DefaultTableModel + val currentProvider = model.getValueAt(selectedRow, 0) as String + val currentName = model.getValueAt(selectedRow, 1) as String + val currentKey = model.getValueAt(selectedRow, 2) as String + val currentUrl = model.getValueAt(selectedRow, 3) as String + + // Create edit dialog + val dialog = JDialog(null as Frame?, "Edit API Configuration", true) + dialog.layout = GridBagLayout() + val gbc = GridBagConstraints() + + gbc.gridx = 0; gbc.gridy = 0; gbc.anchor = GridBagConstraints.WEST + dialog.add(JLabel("Provider Type:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val providerCombo = ComboBox(APIProvider.values().map { it.name }.toTypedArray()) + providerCombo.selectedItem = currentProvider + dialog.add(providerCombo, gbc) + + gbc.gridx = 0; gbc.gridy = 1; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 + dialog.add(JLabel("Name:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val nameField = JBTextField(currentName, 30) + dialog.add(nameField, gbc) + gbc.gridx = 0; gbc.gridy = 2; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 + dialog.add(JLabel("API Key:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val keyField = JBTextField(currentKey, 30) + dialog.add(keyField, gbc) + + gbc.gridx = 0; gbc.gridy = 3; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 + 
dialog.add(JLabel("Base URL:"), gbc) + gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 + val urlField = JBTextField(currentUrl, 30) + dialog.add(urlField, gbc) + // Auto-populate base URL when provider changes + providerCombo.addActionListener { + val selectedProvider = APIProvider.valueOf(providerCombo.selectedItem as String) + if (urlField.text == currentUrl || urlField.text.isBlank()) { + urlField.text = selectedProvider.base + } } + gbc.gridx = 0; gbc.gridy = 4; gbc.gridwidth = 2; gbc.fill = GridBagConstraints.NONE + val buttonPanel = JPanel(FlowLayout()) + val okButton = JButton("OK") + val cancelButton = JButton("Cancel") - removeButton.addActionListener { - try { - val selectedRows = apis.selectedRows - if (selectedRows.isEmpty()) { - log.warn("No API configurations selected for removal") - return@addActionListener - } - val model = apis.model as DefaultTableModel - for (i in selectedRows.reversed()) { - val provider = model.getValueAt(i, 0) as? String - val name = model.getValueAt(i, 1) as? String - model.removeRow(i) - log.debug("Successfully removed API configuration: $provider - $name") - } - } catch (e: Exception) { - log.error("Unexpected error removing API configuration: ${e.message}", e) - JOptionPane.showMessageDialog( - this, "Failed to remove API configuration: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE - ) - } - } + okButton.addActionListener { + val provider = providerCombo.selectedItem as? 
String + val name = nameField.text + val key = keyField.text + val url = urlField.text - editButton.addActionListener { - val selectedRow = apis.selectedRow - if (selectedRow != -1) { - val model = apis.model as DefaultTableModel - val currentProvider = model.getValueAt(selectedRow, 0) as String - val currentName = model.getValueAt(selectedRow, 1) as String - val currentKey = model.getValueAt(selectedRow, 2) as String - val currentUrl = model.getValueAt(selectedRow, 3) as String - - // Create edit dialog - val dialog = JDialog(null as Frame?, "Edit API Configuration", true) - dialog.layout = GridBagLayout() - val gbc = GridBagConstraints() - - gbc.gridx = 0; gbc.gridy = 0; gbc.anchor = GridBagConstraints.WEST - dialog.add(JLabel("Provider Type:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val providerCombo = ComboBox(APIProvider.values().map { it.name }.toTypedArray()) - providerCombo.selectedItem = currentProvider - dialog.add(providerCombo, gbc) - - gbc.gridx = 0; gbc.gridy = 1; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 - dialog.add(JLabel("Name:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val nameField = JBTextField(currentName, 30) - dialog.add(nameField, gbc) - gbc.gridx = 0; gbc.gridy = 2; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 - dialog.add(JLabel("API Key:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val keyField = JBTextField(currentKey, 30) - dialog.add(keyField, gbc) - - gbc.gridx = 0; gbc.gridy = 3; gbc.fill = GridBagConstraints.NONE; gbc.weightx = 0.0 - dialog.add(JLabel("Base URL:"), gbc) - gbc.gridx = 1; gbc.fill = GridBagConstraints.HORIZONTAL; gbc.weightx = 1.0 - val urlField = JBTextField(currentUrl, 30) - dialog.add(urlField, gbc) - // Auto-populate base URL when provider changes - providerCombo.addActionListener { - val selectedProvider = APIProvider.valueOf(providerCombo.selectedItem as String) 
- if (urlField.text == currentUrl || urlField.text.isBlank()) { - urlField.text = selectedProvider.base - } - } - - gbc.gridx = 0; gbc.gridy = 4; gbc.gridwidth = 2; gbc.fill = GridBagConstraints.NONE - val buttonPanel = JPanel(FlowLayout()) - val okButton = JButton("OK") - val cancelButton = JButton("Cancel") - - okButton.addActionListener { - val provider = providerCombo.selectedItem as? String - val name = nameField.text - val key = keyField.text - val url = urlField.text - - if (provider.isNullOrBlank()) { - log.warn("Provider type is required for editing") - JOptionPane.showMessageDialog( - dialog, "Provider type is required", "Validation Error", JOptionPane.WARNING_MESSAGE - ) - return@addActionListener - } - if (name.isBlank()) { - log.warn("API name is required for editing") - JOptionPane.showMessageDialog( - dialog, "API name is required", "Validation Error", JOptionPane.WARNING_MESSAGE - ) - return@addActionListener - } - - model.setValueAt(provider, selectedRow, 0) - model.setValueAt(name, selectedRow, 1) - model.setValueAt(key, selectedRow, 2) - model.setValueAt(url, selectedRow, 3) - log.debug("Updated API configuration: $provider - $name") - dialog.dispose() - } - cancelButton.addActionListener { dialog.dispose() } - - buttonPanel.add(okButton) - buttonPanel.add(cancelButton) - dialog.add(buttonPanel, gbc) - - dialog.pack() - dialog.setLocationRelativeTo(this) - dialog.isVisible = true - } + if (provider.isNullOrBlank()) { + log.warn("Provider type is required for editing") + JOptionPane.showMessageDialog( + dialog, "Provider type is required", "Validation Error", JOptionPane.WARNING_MESSAGE + ) + return@addActionListener + } + if (name.isBlank()) { + log.warn("API name is required for editing") + JOptionPane.showMessageDialog( + dialog, "API name is required", "Validation Error", JOptionPane.WARNING_MESSAGE + ) + return@addActionListener + } + + model.setValueAt(provider, selectedRow, 0) + model.setValueAt(name, selectedRow, 1) + model.setValueAt(key, 
selectedRow, 2) + model.setValueAt(url, selectedRow, 3) + log.debug("Updated API configuration: $provider - $name") + dialog.dispose() } + cancelButton.addActionListener { dialog.dispose() } - apis.selectionModel.addListSelectionListener { - val hasSelection = apis.selectedRow != -1 - removeButton.isEnabled = hasSelection - editButton.isEnabled = hasSelection - } + buttonPanel.add(okButton) + buttonPanel.add(cancelButton) + dialog.add(buttonPanel, gbc) - buttonPanel.add(addButton) - buttonPanel.add(removeButton) - buttonPanel.add(editButton) - add(buttonPanel, BorderLayout.SOUTH) + dialog.pack() + dialog.setLocationRelativeTo(this) + dialog.isVisible = true + } } - @Name("Editor Actions") - var usage = UsageTable(fileApplicationServices(AppSettingsState.Companion.pluginHome).usageManager) + apis.selectionModel.addListSelectionListener { + val hasSelection = apis.selectedRow != -1 + removeButton.isEnabled = hasSelection + editButton.isEnabled = hasSelection + } - init { - log.debug("Initializing AppSettingsComponent") - try { + buttonPanel.add(addButton) + buttonPanel.add(removeButton) + buttonPanel.add(editButton) + add(buttonPanel, BorderLayout.SOUTH) + } - diffLoggingEnabled.isSelected = AppSettingsState.instance.diffLoggingEnabled - awsProfile.text = AppSettingsState.instance.awsProfile ?: "" - awsRegion.text = AppSettingsState.instance.awsRegion ?: "" - awsBucket.text = AppSettingsState.instance.awsBucket ?: "" - disableAutoOpenUrls.isSelected = AppSettingsState.instance.disableAutoOpenUrls + @Name("Editor Actions") + var usage = UsageTable(fileApplicationServices(AppSettingsState.Companion.pluginHome).usageManager) - setExecutables(AppSettingsState.instance.executables ?: emptySet()) - } catch (e: Exception) { - log.error("Error initializing basic settings: ${e.message}", e) - } - try { - // Populate API table first - populateApiTable() - } catch (e: Exception) { - log.error("Error populating API table: ${e.message}", e) - } - val apis = - 
fileApplicationServices(AppSettingsState.Companion.pluginHome).userSettingsManager.getUserSettings().apis - try { + init { + log.debug("Initializing AppSettingsComponent") + try { - // Get all available models from APIs with valid keys - val availableChatModels = try { - apis.filter { api -> - api.key != null - }.flatMap { api -> - try { - api.provider?.getChatModels(api.key!!, api.baseUrl)?.filter { model -> - isVisible(model) - }?.map { it.name to it } ?: emptyList() - } catch (e: Exception) { - log.warn("Failed to get chat models for provider ${api.provider?.name}: ${e.message}") - emptyList() - } - }.toMap().toSortedMap(compareBy { it }) - } catch (e: Exception) { - log.error("Failed to load available models: ${e.message}", e) - emptyMap() - } - val availableEmbeddingModels = try { - apis.filter { api -> - api.key != null - }.flatMap { api -> - try { - val embeddingModels: List? = api.provider?.getEmbeddingModels(api.key!!, api.baseUrl) - embeddingModels?.filter { model -> - isVisible(model) - }?.map { it.modelName to it } ?: emptyList() - } catch (e: Exception) { - log.warn("Failed to get chat models for provider ${api.provider?.name}: ${e.message}") - emptyList() - } - }.toMap().toSortedMap(compareBy { it }) - } catch (e: Exception) { - log.error("Failed to load available models: ${e.message}", e) - emptyMap() - } - - availableChatModels.forEach { - this.smartModel.addItem(it.value.modelName) - this.fastModel.addItem(it.value.modelName) - } - availableEmbeddingModels.forEach { - this.embeddingModel.addItem(it.value.modelName) - } - log.debug("Loaded ${availableChatModels.size} available models") - } catch (e: Exception) { - log.error("Error loading models: ${e.message}", e) - } - try { - ImageModels.values.values.forEach { - this.mainImageModel.addItem(it.name) - } - PatchProcessors.values().forEach { - this.patchProcessor.addItem(it.name) - } - } catch (e: Exception) { - log.error("Error loading image and embedding models: ${e.message}", e) - } + 
diffLoggingEnabled.isSelected = AppSettingsState.instance.diffLoggingEnabled + awsProfile.text = AppSettingsState.instance.awsProfile ?: "" + awsRegion.text = AppSettingsState.instance.awsRegion ?: "" + awsBucket.text = AppSettingsState.instance.awsBucket ?: "" + disableAutoOpenUrls.isSelected = AppSettingsState.instance.disableAutoOpenUrls - - val smartModelItems = (0 until smartModel.itemCount).map { smartModel.getItemAt(it) }.filter { modelItem -> - val chatModel = apis.filter { it.key != null } - .mapNotNull { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == modelItem } - }.firstOrNull() - if (chatModel == null) { - false - } else { - val visible = isVisible(chatModel) - visible - } - }.sortedBy { modelItem -> - val model = - apis.filter { it.key != null } - .find { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) - ?.any { it.modelName == modelItem } == true - } - ?.let { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) - ?.find { it.modelName == modelItem } - }!! 
- "${model.provider?.name} - ${model.modelName}" - }.toList() - val fastModelItems = (0 until fastModel.itemCount).map { fastModel.getItemAt(it) }.filter { modelItem -> - val chatModel = apis.filter { it.key != null } - .mapNotNull { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == modelItem } - }.firstOrNull() - if (chatModel == null) { - false - } else { - val visible = isVisible(chatModel) - visible - } - }.sortedBy { modelItem -> - val model = - //ChatModel.values().entries.find { it.value.modelName == modelItem }?.value ?: return@sortedBy "" - apis.filter { it.key != null } - .find { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) - ?.any { it.modelName == modelItem } == true - } - ?.let { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) - ?.find { it.modelName == modelItem } - } - "${model?.provider?.name} - ${model?.modelName}" - }.toList() - smartModel.removeAllItems() - fastModel.removeAllItems() - smartModelItems.forEach { smartModel.addItem(it) } - fastModelItems.forEach { fastModel.addItem(it) } - this.smartModel.isEditable = true - this.fastModel.isEditable = true - this.smartModel.renderer = getModelRenderer() - this.fastModel.renderer = getModelRenderer() - this.mainImageModel.isEditable = true - this.mainImageModel.renderer = getImageModelRenderer() - this.embeddingModel.isEditable = true - this.embeddingModel.renderer = getEmbeddingModelRenderer() - this.patchProcessor.isEditable = false - this.patchProcessor.renderer = getPatchProcessorRenderer() - // Set current selections - AppSettingsState.instance.smartModel?.model?.let { model -> - this.smartModel.selectedItem = model.modelName - } - AppSettingsState.instance.fastModel?.model?.let { model -> - this.fastModel.selectedItem = model.modelName - } - AppSettingsState.instance.embeddingModel?.let { model -> - this.embeddingModel.selectedItem = model - } - 
AppSettingsState.instance.processor.let { processor -> - this.patchProcessor.selectedItem = processor.label - } - log.debug("AppSettingsComponent initialization completed") + setExecutables(AppSettingsState.instance.executables ?: emptySet()) + } catch (e: Exception) { + log.error("Error initializing basic settings: ${e.message}", e) } - - override fun dispose() { - log.debug("Disposing AppSettingsComponent") + try { + // Populate API table first + populateApiTable() + } catch (e: Exception) { + log.error("Error populating API table: ${e.message}", e) } - - private fun populateApiTable() { - try { - log.debug("Populating API table") - val model = apis.model as DefaultTableModel - model.rowCount = 0 - val userSettings = fileApplicationServices( - AppSettingsState.Companion.pluginHome - ).userSettingsManager.getUserSettings() - userSettings.apis.forEach { api -> - val providerName = api.provider?.name ?: "" - val name = api.name ?: api.provider?.name ?: "" - val key = api.key - val url = api.baseUrl - model.addRow(arrayOf(providerName, name, key, url)) - } - log.debug("Successfully populated API table with ${userSettings.apis.size} entries") - } catch (e: Exception) { - log.error("Failed to populate API table: ${e.message}", e) - JOptionPane.showMessageDialog( - null, "Failed to load API configurations: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE - ) - } + val apis = + fileApplicationServices(AppSettingsState.Companion.pluginHome).userSettingsManager.getUserSettings().apis + try { + + // Get all available models from APIs with valid keys + val availableChatModels = try { + apis.filter { api -> + api.key != null + }.flatMap { api -> + try { + api.provider?.getChatModels(api.key!!, api.baseUrl)?.filter { model -> + isVisible(model) + }?.map { it.name to it } ?: emptyList() + } catch (e: Exception) { + log.warn("Failed to get chat models for provider ${api.provider?.name}: ${e.message}") + emptyList() + } + }.toMap().toSortedMap(compareBy { it }) + } catch (e: 
Exception) { + log.error("Failed to load available models: ${e.message}", e) + emptyMap() + } + availableChatModels.forEach { + this.smartModel.addItem(it.value.modelName) + this.fastModel.addItem(it.value.modelName) + this.imageChatModel.addItem(it.value.modelName) + } + } catch (e: Exception) { + log.error("Error loading models: ${e.message}", e) } - - private fun getModelRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { - override fun customize( - list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean - ) { - text = value - if (value != null) { - val fileApplicationServices = fileApplicationServices(AppSettingsState.Companion.pluginHome) - val userSettings = fileApplicationServices.userSettingsManager.getUserSettings() - val model = userSettings.apis - .filter { it.key != null } - .find { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) - ?.any { it.modelName == value } == true - } - ?.let { apiData -> - apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == value } - } - text = "${model?.provider?.name} - $value" - } - } + try { + val availableImageModels = try { + apis.filter { api -> + api.key != null + }.flatMap { api -> + try { + val imageModels: List? 
= + api.provider?.getImageModels(api.key!!, api.baseUrl) + imageModels?.filter { model -> + isVisible(model) + }?.map { it.modelName to it } ?: emptyList() + } catch (e: Exception) { + log.warn("Failed to get chat models for provider ${api.provider?.name}: ${e.message}") + emptyList() + } + }.toMap().toSortedMap(compareBy { it }) + } catch (e: Exception) { + log.error("Failed to load available models: ${e.message}", e) + emptyMap() + } + availableImageModels.forEach { + this.mainImageModel.addItem(it.value.modelName) + } + } catch (e: Exception) { + log.error("Error loading models: ${e.message}", e) + } + try { + val availableEmbeddingModels = try { + apis.filter { api -> + api.key != null + }.flatMap { api -> + try { + val embeddingModels: List? = api.provider?.getEmbeddingModels(api.key!!, api.baseUrl) + embeddingModels?.filter { model -> + isVisible(model) + }?.map { it.modelName to it } ?: emptyList() + } catch (e: Exception) { + log.warn("Failed to get chat models for provider ${api.provider?.name}: ${e.message}") + emptyList() + } + }.toMap().toSortedMap(compareBy { it }) + } catch (e: Exception) { + log.error("Failed to load available models: ${e.message}", e) + emptyMap() + } + availableEmbeddingModels.forEach { + this.embeddingModel.addItem(it.value.modelName) + } + } catch (e: Exception) { + log.error("Error loading models: ${e.message}", e) + } + try { + PatchProcessors.values().forEach { + this.patchProcessor.addItem(it.name) + } + } catch (e: Exception) { + log.error("Error loading image and embedding models: ${e.message}", e) } - private fun getImageModelRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { - override fun customize( - list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean - ) { - text = value - } + val smartModelItems = (0 until smartModel.itemCount).map { smartModel.getItemAt(it) }.filter { modelItem -> + val chatModel = apis.filter { it.key != null }.firstNotNullOfOrNull { apiData -> + 
apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == modelItem } + } + if (chatModel == null) { + false + } else { + val visible = isVisible(chatModel) + visible + } + }.filterNotNull().sortedBy { modelItem -> + val model = + apis.filter { it.key != null } + .find { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.any { it.modelName == modelItem } == true + } + ?.let { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.find { it.modelName == modelItem } + }!! + "${model.provider?.name} - ${model.modelName}" + }.toList() +val fastModelItems = (0 until fastModel.itemCount).map { fastModel.getItemAt(it) }.filter { modelItem -> + val chatModel = apis.filter { it.key != null }.firstNotNullOfOrNull { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == modelItem } + } + if (chatModel == null) { + false + } else { + val visible = isVisible(chatModel) + visible + } + }.filterNotNull().sortedBy { modelItem -> + val model = + //ChatModel.values().entries.find { it.value.modelName == modelItem }?.value ?: return@sortedBy "" + apis.filter { it.key != null } + .find { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.any { it.modelName == modelItem } == true + } + ?.let { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.find { it.modelName == modelItem } + } + "${model?.provider?.name} - ${model?.modelName}" + }.toList() + val imageChatModelItems = (0 until imageChatModel.itemCount).map { imageChatModel.getItemAt(it) }.filter { modelItem -> + val chatModel = apis.filter { it.key != null }.firstNotNullOfOrNull { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == modelItem } + } + if (chatModel == null) { + false + } else { + val visible = isVisible(chatModel) + visible + } + }.filterNotNull().sortedBy { modelItem -> + val 
model = + apis.filter { it.key != null } + .find { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.any { it.modelName == modelItem } == true + } + ?.let { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.find { it.modelName == modelItem } + } + "${model?.provider?.name} - ${model?.modelName}" + }.toList() + smartModel.removeAllItems() + fastModel.removeAllItems() + imageChatModel.removeAllItems() + smartModelItems.forEach { smartModel.addItem(it) } + fastModelItems.forEach { fastModel.addItem(it) } + imageChatModelItems.forEach { imageChatModel.addItem(it) } + this.smartModel.isEditable = true + this.fastModel.isEditable = true + this.imageChatModel.isEditable = true + this.smartModel.renderer = getModelRenderer() + this.fastModel.renderer = getModelRenderer() + this.imageChatModel.renderer = getModelRenderer() + this.mainImageModel.isEditable = true + this.mainImageModel.renderer = getImageModelRenderer() + this.embeddingModel.isEditable = true + this.embeddingModel.renderer = getEmbeddingModelRenderer() + this.patchProcessor.isEditable = false + this.patchProcessor.renderer = getPatchProcessorRenderer() + // Set current selections + AppSettingsState.instance.smartModel?.model?.let { model -> + this.smartModel.selectedItem = model.modelName } - - private fun getEmbeddingModelRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { - override fun customize( - list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean - ) { - if (value != null) { - val model = EmbeddingModel.values()[value] - text = "${model?.provider?.name} - $value" - } else { - text = "None" - } - } +AppSettingsState.instance.fastModel?.model?.let { model -> + this.fastModel.selectedItem = model.modelName } - - private fun getPatchProcessorRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { - override fun customize( - list: JList, value: String?, index: Int, selected: Boolean, hasFocus: 
Boolean - ) { - if (value != null) { - try { - val processor = PatchProcessors.valueOf(value) - text = processor.label - } catch (e: IllegalArgumentException) { - text = value - } - } else { - text = "Fuzzy Mode (Balanced)" - } - } + AppSettingsState.instance.imageChatModel?.model?.let { model -> + this.imageChatModel.selectedItem = model.modelName + } + AppSettingsState.instance.embeddingModel?.let { model -> + this.embeddingModel.selectedItem = model + } + AppSettingsState.instance.processor.let { processor -> + this.patchProcessor.selectedItem = processor.label + } + log.debug("AppSettingsComponent initialization completed") + } + + override fun dispose() { + log.debug("Disposing AppSettingsComponent") + } + + private fun populateApiTable() { + try { + log.debug("Populating API table") + val model = apis.model as DefaultTableModel + model.rowCount = 0 + val userSettings = fileApplicationServices( + AppSettingsState.Companion.pluginHome + ).userSettingsManager.getUserSettings() + userSettings.apis.forEach { api -> + val providerName = api.provider?.name ?: "" + val name = api.name ?: api.provider?.name ?: "" + val key = api.key + val url = api.baseUrl + model.addRow(arrayOf(providerName, name, key, url)) + } + log.debug("Successfully populated API table with ${userSettings.apis.size} entries") + } catch (e: Exception) { + log.error("Failed to populate API table: ${e.message}", e) + JOptionPane.showMessageDialog( + null, "Failed to load API configurations: ${e.message}", "Error", JOptionPane.ERROR_MESSAGE + ) } + } + + private fun getModelRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { + override fun customize( + list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean + ) { + text = value + if (value != null) { + val fileApplicationServices = fileApplicationServices(AppSettingsState.Companion.pluginHome) + val userSettings = fileApplicationServices.userSettingsManager.getUserSettings() + val model = userSettings.apis + 
.filter { it.key != null } + .find { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) + ?.any { it.modelName == value } == true + } + ?.let { apiData -> + apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { it.modelName == value } + } + text = "${model?.provider?.name} - $value" + } + } + } + private fun getImageModelRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { + override fun customize( + list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean + ) { + text = value - fun getExecutables(): Set { - return try { - val model = - ((executablesPanel.getComponent(0) as? JScrollPane)?.viewport?.view as? JList<*>)?.model as? DefaultListModel - model?.elements()?.toList()?.toSet() ?: emptySet() - } catch (e: Exception) { - log.error("Failed to get executables list: ${e.message}", e) - emptySet() - } } + } + + private fun getEmbeddingModelRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { + override fun customize( + list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean + ) { + if (value != null) { + val model = EmbeddingModel.values()[value] + text = "${model?.provider?.name} - $value" + } else { + text = "None" + } + } + } - fun setExecutables(executables: Set) { + private fun getPatchProcessorRenderer(): ListCellRenderer = object : SimpleListCellRenderer() { + override fun customize( + list: JList, value: String?, index: Int, selected: Boolean, hasFocus: Boolean + ) { + if (value != null) { try { - val model = - ((executablesPanel.getComponent(0) as? JScrollPane)?.viewport?.view as? JList<*>)?.model as? 
DefaultListModel - model?.clear() - executables.forEach { model?.addElement(it) } - log.debug("Set ${executables.size} executables") - } catch (e: Exception) { - log.error("Failed to set executables: ${e.message}", e) + val processor = PatchProcessors.valueOf(value) + text = processor.label + } catch (e: IllegalArgumentException) { + text = value } + } else { + text = "Fuzzy Mode (Balanced)" + } } + } - companion object { - private val log = LoggerFactory.getLogger(AppSettingsComponent::class.java) + + fun getExecutables(): Set { + return try { + val model = + ((executablesPanel.getComponent(0) as? JScrollPane)?.viewport?.view as? JList<*>)?.model as? DefaultListModel + model?.elements()?.toList()?.toSet() ?: emptySet() + } catch (e: Exception) { + log.error("Failed to get executables list: ${e.message}", e) + emptySet() + } + } + + fun setExecutables(executables: Set) { + try { + val model = + ((executablesPanel.getComponent(0) as? JScrollPane)?.viewport?.view as? JList<*>)?.model as? DefaultListModel + model?.clear() + executables.forEach { model?.addElement(it) } + log.debug("Set ${executables.size} executables") + } catch (e: Exception) { + log.error("Failed to set executables: ${e.message}", e) } + } + + companion object { + private val log = LoggerFactory.getLogger(AppSettingsComponent::class.java) + } } \ No newline at end of file diff --git a/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsState.kt b/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsState.kt index dc8466143..463fa010c 100644 --- a/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsState.kt +++ b/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/AppSettingsState.kt @@ -17,15 +17,16 @@ import com.intellij.openapi.components.State import com.intellij.openapi.components.Storage import com.intellij.util.xmlb.XmlSerializerUtil import com.simiacryptus.cognotik.chat.model.ChatInterface +import 
com.simiacryptus.cognotik.config.AppSettingsState.Companion.log import com.simiacryptus.cognotik.diff.PatchProcessors import com.simiacryptus.cognotik.embedding.EmbeddingModel import com.simiacryptus.cognotik.image.ImageModel -import com.simiacryptus.cognotik.image.ImageModels import com.simiacryptus.cognotik.models.APIProvider import com.simiacryptus.cognotik.platform.ApplicationServices import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.file.UserSettingsManager import com.simiacryptus.cognotik.platform.model.ApiChatModel +import com.simiacryptus.cognotik.platform.model.ApiData import com.simiacryptus.cognotik.util.JsonUtil.fromJson import com.simiacryptus.cognotik.util.JsonUtil.toJson import com.simiacryptus.cognotik.util.LoggerFactory @@ -54,11 +55,12 @@ data class AppSettingsState( var temperature: Double = 0.1, var useScratchesSystemPath: Boolean = false, - /* Model Settings */ +/* Model Settings */ var smartModel: ApiChatModel? = null, var fastModel: ApiChatModel? = null, + var imageChatModel: ApiChatModel? = null, var transcriptionModel: String? = null, - var mainImageModel: String = "", + var imageModel: ApiImageModel? = null, /* Embedding Model Settings */ var embeddingModel: EmbeddingModel? 
= null, var processor: PatchProcessors = PatchProcessors.Fuzzy, @@ -80,7 +82,7 @@ data class AppSettingsState( var devActions: Boolean = false, var disableAutoOpenUrls: Boolean = false, var showWelcomeScreen: Boolean = true, -var greetedVersion: String = "", + var greetedVersion: String = "", var shellCommand: String = getDefaultShell(), var feedbackRequested: Boolean = false, var feedbackOptOut: Boolean = false, @@ -97,9 +99,18 @@ var greetedVersion: String = "", val smartChatClient: ChatInterface get() = smartModel?.instance() ?: throw IllegalStateException("Smart model not configured") - @get:JsonIgnore +@get:JsonIgnore val fastChatClient: ChatInterface get() = fastModel?.instance() ?: throw IllegalStateException("Fast model not configured") + @get:JsonIgnore + val imageChatClient: ChatInterface + get() = imageChatModel?.instance() ?: throw IllegalStateException("Image chat model not configured") + + + @get:JsonIgnore + val imageClient: com.simiacryptus.cognotik.image.ImageClientInterface? + get() = imageModel?.instance() + @get:JsonIgnore val embeddingClient: com.simiacryptus.cognotik.embedding.Embedder? 
get() = embeddingModel?.instance() @@ -187,11 +198,12 @@ var greetedVersion: String = "", if (sampleSize != other.sampleSize) return false if (channels != other.channels) return false if (temperature != other.temperature) return false - if (useScratchesSystemPath != other.useScratchesSystemPath) return false +if (useScratchesSystemPath != other.useScratchesSystemPath) return false if (smartModel != other.smartModel) return false if (fastModel != other.fastModel) return false + if (imageChatModel != other.imageChatModel) return false if (transcriptionModel != other.transcriptionModel) return false - if (mainImageModel != other.mainImageModel) return false + if (imageModel != other.imageModel) return false if (embeddingModel != other.embeddingModel) return false if (processor != other.processor) return false if (awsProfile != other.awsProfile) return false @@ -234,11 +246,12 @@ var greetedVersion: String = "", result = 31 * result + sampleSize result = 31 * result + channels result = 31 * result + temperature.hashCode() - result = 31 * result + useScratchesSystemPath.hashCode() +result = 31 * result + useScratchesSystemPath.hashCode() result = 31 * result + smartModel.hashCode() result = 31 * result + fastModel.hashCode() + result = 31 * result + (imageChatModel?.hashCode() ?: 0) result = 31 * result + (transcriptionModel?.hashCode() ?: 0) - result = 31 * result + mainImageModel.hashCode() + result = 31 * result + (imageModel?.hashCode() ?: 0) result = 31 * result + (embeddingModel?.hashCode() ?: 0) result = 31 * result + processor.hashCode() result = 31 * result + (awsProfile?.hashCode() ?: 0) @@ -300,19 +313,15 @@ var greetedVersion: String = "", } -fun String.imageModel(): ImageModel { - return ImageModels.values.values.toList().firstOrNull { - it.modelName == this || it.name == this - } ?: ImageModels.DallE3 -} fun ApiChatModel.instance(): ChatInterface? 
{ val usageManager = ApplicationServices.fileApplicationServices(AppSettingsState.Companion.pluginHome).usageManager val model = model if (model == null) { - throw RuntimeException("Model not configured for ${provider?.provider?.name}") + log.warn("Model not configured for ${provider?.provider?.name}") + return null } - return (model).instance( + return model.instance( key = provider?.key ?: throw IllegalArgumentException("API key is not set"), base = provider?.provider?.base ?: throw IllegalArgumentException("API base for ${provider?.provider?.name} is not set"), @@ -329,4 +338,27 @@ fun ApiChatModel.instance(): ChatInterface? { ) }, ) +} + +data class ApiImageModel( + val model: ImageModel, + val provider: ApiData? +) + +fun ApiImageModel.instance(): com.simiacryptus.cognotik.image.ImageClientInterface? { + val model = model + if (model == null) { + log.warn("Model not configured for ${provider?.provider?.name}") + return null + } + return provider?.provider?.getImageClient( + key = provider.key ?: throw IllegalArgumentException("API key is not set"), + base = provider.baseUrl ?: provider.provider?.base + ?: throw IllegalArgumentException("API base for ${provider.provider?.name} is not set"), + workPool = AppSettingsState.workPool, + scheduledPool = ApplicationServices.threadPoolManager.getScheduledPool( + AppSettingsState.currentSession, + UserSettingsManager.defaultUser + ), + ) } \ No newline at end of file diff --git a/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/StaticAppSettingsConfigurable.kt b/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/StaticAppSettingsConfigurable.kt index 27dfbcd41..50f045a22 100644 --- a/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/StaticAppSettingsConfigurable.kt +++ b/intellij/src/main/kotlin/com/simiacryptus/cognotik/config/StaticAppSettingsConfigurable.kt @@ -41,10 +41,14 @@ class StaticAppSettingsConfigurable : AppSettingsConfigurable() { add(JLabel("Smart Model:")) 
add(component.smartModel) }) - add(JPanel(FlowLayout(FlowLayout.LEFT)).apply { +add(JPanel(FlowLayout(FlowLayout.LEFT)).apply { add(JLabel("Fast Model:")) add(component.fastModel) }) + add(JPanel(FlowLayout(FlowLayout.LEFT)).apply { + add(JLabel("Image Chat Model:")) + add(component.imageChatModel) + }) add(JPanel(FlowLayout(FlowLayout.LEFT)).apply { add(JLabel("Image Model:")) add(component.mainImageModel) @@ -427,12 +431,13 @@ class StaticAppSettingsConfigurable : AppSettingsConfigurable() { component.awsBucket.text = settings.awsBucket ?: "" component.listeningPort.text = settings.listeningPort.toString() component.listeningEndpoint.text = settings.listeningEndpoint - component.suppressErrors.isSelected = settings.suppressErrors +component.suppressErrors.isSelected = settings.suppressErrors component.disableAutoOpenUrls.isSelected = settings.disableAutoOpenUrls settings.fastModel?.model?.let { component.fastModel.selectedItem = it.modelName } settings.smartModel?.model?.let { component.smartModel.selectedItem = it.modelName } + settings.imageChatModel?.model?.let { component.imageChatModel.selectedItem = it.modelName } + settings.imageModel?.model?.let { component.mainImageModel.selectedItem = it.modelName } component.devActions.isSelected = settings.devActions - component.mainImageModel.selectedItem = settings.mainImageModel component.temperature.text = settings.temperature.toString() component.embeddingModel.selectedItem = settings.embeddingModel component.shellCommand.text = settings.shellCommand @@ -454,19 +459,26 @@ class StaticAppSettingsConfigurable : AppSettingsConfigurable() { ).userSettingsManager.getUserSettings() log.debug("Current user has ${userSettings.apis.size} API configurations") - val fastModelName = component.fastModel.selectedItem as String? +val fastModelName = component.fastModel.selectedItem as String? val smartModelName = component.smartModel.selectedItem as String? 
- log.debug("Selected models - fast: $fastModelName, smart: $smartModelName") + val imageChatModelName = component.imageChatModel.selectedItem as String? + val imageModelName = component.mainImageModel.selectedItem as String? + log.debug("Selected models - fast: $fastModelName, smart: $smartModelName, imageChat: $imageChatModelName") - val fastChatModel = userSettings.apis.filter { it.key != null }.firstOrNull() - ?.let { apiData -> apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { model -> model.modelName == fastModelName } } + val chatModels = userSettings.apis.flatMap { apiData -> apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl) ?: emptyList() } + val imageModels = userSettings.apis.flatMap { apiData -> apiData.provider?.getImageModels(apiData.key!!, apiData.baseUrl) ?: emptyList() } + val fastChatModel = chatModels.find { model -> model.modelName == fastModelName || model.name == fastModelName } val fastApiData = userSettings.apis.find { it.provider == fastChatModel?.provider } - val smartChatModel = userSettings.apis.filter { it.key != null }.firstOrNull() - ?.let { apiData -> apiData.provider?.getChatModels(apiData.key!!, apiData.baseUrl)?.find { model -> model.modelName == smartModelName } } +val smartChatModel = chatModels.find { model -> model.modelName == smartModelName || model.name == smartModelName } val smartApiData = userSettings.apis.find { it.provider == smartChatModel?.provider } + val imageChatModel = chatModels.find { model -> model.modelName == imageChatModelName || model.name == imageChatModelName } + val imageChatApiData = userSettings.apis.find { it.provider == imageChatModel?.provider } + val imageModel = imageModels.find { model -> model.modelName == imageModelName || model.name == imageModelName } + val imageApiData = userSettings.apis.find { it.provider == imageModel?.provider } - settings.fastModel = ApiChatModel(fastChatModel, fastApiData) +settings.fastModel = ApiChatModel(fastChatModel, 
fastApiData) settings.diffLoggingEnabled = component.diffLoggingEnabled.isSelected + settings.imageChatModel = ApiChatModel(imageChatModel, imageChatApiData) settings.awsProfile = component.awsProfile.text.takeIf { it.isNotBlank() } settings.awsRegion = component.awsRegion.text.takeIf { it.isNotBlank() } settings.awsBucket = component.awsBucket.text.takeIf { it.isNotBlank() } @@ -476,6 +488,7 @@ class StaticAppSettingsConfigurable : AppSettingsConfigurable() { settings.listeningEndpoint = component.listeningEndpoint.text settings.suppressErrors = component.suppressErrors.isSelected settings.smartModel = ApiChatModel(smartChatModel, smartApiData) + settings.imageModel = imageModel?.let { ApiImageModel(it, imageApiData) } settings.devActions = component.devActions.isSelected settings.disableAutoOpenUrls = component.disableAutoOpenUrls.isSelected settings.temperature = component.temperature.text.safeDouble() @@ -486,12 +499,6 @@ class StaticAppSettingsConfigurable : AppSettingsConfigurable() { else -> null } } - settings.mainImageModel = component.mainImageModel.selectedItem.let { - when (it) { - is String -> it - else -> "" - } - } settings.shellCommand = component.shellCommand.text settings.showWelcomeScreen = component.showWelcomeScreen.isSelected settings.processor = component.patchProcessor.selectedItem?.let { diff --git a/intellij/src/main/resources/META-INF/plugin.xml b/intellij/src/main/resources/META-INF/plugin.xml index 6a76f9b27..95bd4d87b 100644 --- a/intellij/src/main/resources/META-INF/plugin.xml +++ b/intellij/src/main/resources/META-INF/plugin.xml @@ -155,6 +155,10 @@ text="✨ Create File from Description" description="Create a new file with appropriate content based on a natural language description, intelligently determining file type and location"> + + , ImageResponse>( - prompt = prompt, - name = name, - model = textModel, - temperature = temperature, -) { - override fun chatMessages(questions: List) = arrayOf( - ChatMessage( - role = 
ModelSchema.Role.system, - content = prompt.toContentList() - ), - ) + questions.map { - ChatMessage( - role = ModelSchema.Role.user, - content = it.toContentList() - ) - } - - inner class ImageResponseImpl( - override val text: String, - private val api: OpenAIClient - ) : ImageResponse { - private val _image: BufferedImage by lazy { render(text, api) } - override val image: BufferedImage get() = _image - } - - open fun render( - text: String, - api: OpenAIClient, - ): BufferedImage { - val url = (api as OpenAIClient).createImage( - ImageGenerationRequest( - prompt = text, - model = imageModel.modelName, - size = "${width}x$height" - ) - ).data.first().url - return ImageIO.read(URL(url)) - } - - override fun respond(input: List, vararg messages: ChatMessage): ImageResponse { - var text = response(*messages).choices.first().message?.content - ?: throw RuntimeException("No response") - while (imageModel.maxPrompt <= text.length && null != openAI) { - text = response( - *listOf( - messages.toList(), - listOf( - text.toChatMessage(), - "Please shorten the description".toChatMessage(), - ), - ).flatten().toTypedArray(), - model = imageModel - ).choices.first().message?.content ?: throw RuntimeException("No response") - } - return ImageResponseImpl(text, api = this.openAI ?: throw RuntimeException("No API")) - } - - override fun withModel(model: ChatInterface): ImageAgent = ImageAgent( - prompt = prompt, - name = name, - textModel = model, - imageModel = imageModel, - temperature = temperature, - width = width, - height = height, - openAI = openAI - ) - - fun setImageAPI(openAI: OpenAIClient): ImageAgent { - this.openAI = openAI - return this - } - -} - diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ImageResponse.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ImageResponse.kt deleted file mode 100644 index 9f0f55c8c..000000000 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ImageResponse.kt +++ /dev/null @@ -1,8 
+0,0 @@ -package com.simiacryptus.cognotik.actors - -import java.awt.image.BufferedImage - -interface ImageResponse { - val text: String - val image: BufferedImage -} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/BaseAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/BaseAgent.kt similarity index 95% rename from jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/BaseAgent.kt rename to jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/BaseAgent.kt index 97a32d338..560240889 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/BaseAgent.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/BaseAgent.kt @@ -1,4 +1,4 @@ -package com.simiacryptus.cognotik.actors +package com.simiacryptus.cognotik.agents import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.models.AIModel diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ChatAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ChatAgent.kt similarity index 96% rename from jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ChatAgent.kt rename to jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ChatAgent.kt index f3d73a160..6a88c453c 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ChatAgent.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ChatAgent.kt @@ -1,4 +1,4 @@ -package com.simiacryptus.cognotik.actors +package com.simiacryptus.cognotik.agents import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.models.ModelSchema diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/CodeAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/CodeAgent.kt similarity index 99% rename from jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/CodeAgent.kt rename to 
jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/CodeAgent.kt index 276be9344..845e0736a 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/CodeAgent.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/CodeAgent.kt @@ -1,4 +1,4 @@ -package com.simiacryptus.cognotik.actors +package com.simiacryptus.cognotik.agents import com.simiacryptus.cognotik.OutputInterceptor import com.simiacryptus.cognotik.chat.model.ChatInterface diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageAndText.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageAndText.kt new file mode 100644 index 000000000..1206537dd --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageAndText.kt @@ -0,0 +1,8 @@ +package com.simiacryptus.cognotik.agents + +import java.awt.image.BufferedImage + +data class ImageAndText( + val text: String, + val image: BufferedImage? = null, +) \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageGenerationAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageGenerationAgent.kt new file mode 100644 index 000000000..08c1e11d3 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageGenerationAgent.kt @@ -0,0 +1,99 @@ +package com.simiacryptus.cognotik.agents + +import com.simiacryptus.cognotik.chat.model.ChatInterface +import com.simiacryptus.cognotik.image.ImageClientInterface +import com.simiacryptus.cognotik.image.ImageModel +import com.simiacryptus.cognotik.models.ModelSchema +import com.simiacryptus.cognotik.models.ModelSchema.ChatMessage +import com.simiacryptus.cognotik.models.ModelSchema.ImageGenerationRequest +import com.simiacryptus.cognotik.util.toChatMessage +import com.simiacryptus.cognotik.util.toContentList +import okio.ByteString.Companion.decodeBase64 +import java.awt.image.BufferedImage +import java.net.URL +import javax.imageio.ImageIO + +open class 
ImageGenerationAgent( + prompt: String = "Transform the user request into an image generation prompt that the user will like", + name: String? = null, + textModel: ChatInterface, + var imageModel: ImageModel?, + val imageClient: ImageClientInterface?, + temperature: Double = 0.3, + val width: Int = 1024, + val height: Int = 1024, +) : BaseAgent, ImageAndText>( + prompt = prompt, + name = name, + model = textModel, + temperature = temperature, +) { + override fun chatMessages(questions: List) = arrayOf( + ChatMessage( + role = ModelSchema.Role.system, + content = prompt.toContentList() + ), + ) + questions.map { + ChatMessage( + role = ModelSchema.Role.user, + content = it.toContentList() + ) + } + + open fun render( + text: String, + api: ImageClientInterface, + ): BufferedImage { + val data = api.createImage( + ImageGenerationRequest( + prompt = text, + model = imageModel?.modelName ?: throw RuntimeException("No image model configured"), + size = "${width}x$height" + ) + ).data + val first = data.first() + return when { + first.url != null -> ImageIO.read(URL(first.url)) + first.b64_json != null -> ImageIO.read(first.b64_json.decodeBase64()?.toByteArray()?.inputStream()) + else -> throw RuntimeException("No image data returned") + } + } + + override fun respond(input: List, vararg messages: ChatMessage): ImageAndText { + var text = response(*messages).choices.first().message?.content + ?: throw RuntimeException("No response") + val maxPrompt = imageModel?.maxPrompt ?: Int.MAX_VALUE + while (maxPrompt <= text.length && null != imageClient) { + text = response( + *listOf( + messages.toList(), + listOf( + text.toChatMessage(), + "Please shorten the description".toChatMessage(), + ), + ).flatten().toTypedArray(), + model = imageModel!! 
+ ).choices.first().message?.content ?: throw RuntimeException("No response") + } + return ImageAndText( + text = text, + image = render( + text, + api = this.imageClient ?: throw RuntimeException("No image client configured") + ) + ) + } + + override fun withModel(model: ChatInterface): ImageGenerationAgent = ImageGenerationAgent( + prompt = prompt, + name = name, + textModel = model, + imageModel = imageModel, + imageClient = imageClient, + temperature = temperature, + width = width, + height = height, + ) + +} + diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageModificationAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageModificationAgent.kt new file mode 100644 index 000000000..8ddd8c430 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ImageModificationAgent.kt @@ -0,0 +1,79 @@ +package com.simiacryptus.cognotik.agents + +import com.simiacryptus.cognotik.chat.model.ChatInterface +import com.simiacryptus.cognotik.models.ModelSchema +import com.simiacryptus.cognotik.models.ModelSchema.ChatMessage +import com.simiacryptus.cognotik.models.ModelSchema.ContentPart +import com.simiacryptus.cognotik.util.toContentList +import java.awt.image.BufferedImage +import java.io.ByteArrayOutputStream +import java.util.* +import javax.imageio.ImageIO + +/** + * Agent that processes images using multimodal chat models. + * Takes an input image and text prompt, and returns modified image with description. + */ +open class ImageModificationAgent( + prompt: String = "Analyze and describe the image based on the user's request", + name: String? 
= null, + model: ChatInterface, + temperature: Double = 0.3, +) : BaseAgent, ImageAndText>( + prompt = prompt, + name = name, + model = model, + temperature = temperature, +) { + + override fun chatMessages(questions: List) = arrayOf( + ChatMessage( + role = ModelSchema.Role.system, + content = prompt.toContentList() + ), + ChatMessage( + role = ModelSchema.Role.user, + content = questions.flatMap { question -> + listOf( + ContentPart( + text = question.text, + image_url = question.image?.let { "data:image/png;base64,${it.encodeImageToBase64()}" }, + ) + ) + } + ) + ) + + override fun respond( + input: List, + vararg messages: ChatMessage + ): ImageAndText { + val choices = response(*messages).choices + val image = choices.firstOrNull { it.message?.image_url != null }?.let { it.message?.image } + if (image == null) { + log.info("No image returned in response, falling back to input image.") + } + val text = choices.firstOrNull()?.message?.content ?: "" + return ImageAndText(text = text, image = image ?: input.map { it.image }.firstOrNull()) + } + + override fun withModel(model: ChatInterface): ImageModificationAgent = ImageModificationAgent( + prompt = prompt, + name = name, + model = model, + temperature = temperature, + ) + + companion object { + private val log = org.slf4j.LoggerFactory.getLogger(ImageModificationAgent::class.java) + } +} + +/** + * Encodes a BufferedImage to a Base64 string in PNG format + */ +fun BufferedImage.encodeImageToBase64(): String { + val outputStream = ByteArrayOutputStream() + ImageIO.write(this, "png", outputStream) + return Base64.getEncoder().encodeToString(outputStream.toByteArray()) +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ParsedAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ParsedAgent.kt similarity index 99% rename from jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ParsedAgent.kt rename to 
jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ParsedAgent.kt index 5a64701e5..e2cc31647 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ParsedAgent.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ParsedAgent.kt @@ -1,4 +1,4 @@ -package com.simiacryptus.cognotik.actors +package com.simiacryptus.cognotik.agents import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.AbbrevWhitelistYamlDescriber diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ParsedResponse.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ParsedResponse.kt similarity index 93% rename from jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ParsedResponse.kt rename to jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ParsedResponse.kt index 3f88c5394..dfefb8b2c 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ParsedResponse.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ParsedResponse.kt @@ -1,4 +1,4 @@ -package com.simiacryptus.cognotik.actors +package com.simiacryptus.cognotik.agents abstract class ParsedResponse(val clazz: Class) { abstract val text: String diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ProxyAgent.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ProxyAgent.kt similarity index 99% rename from jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ProxyAgent.kt rename to jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ProxyAgent.kt index 31018ad5e..d75ca0441 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/actors/ProxyAgent.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/agents/ProxyAgent.kt @@ -1,4 +1,4 @@ -package com.simiacryptus.cognotik.actors +package com.simiacryptus.cognotik.agents import com.fasterxml.jackson.module.kotlin.isKotlinClass import com.google.gson.reflect.TypeToken diff --git 
a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/audio/AudioModels.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/audio/AudioModels.kt index 255475a29..ad24f39c9 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/audio/AudioModels.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/audio/AudioModels.kt @@ -7,9 +7,9 @@ import java.util.concurrent.atomic.AtomicReference @Suppress("unused") class AudioModels( - override val modelName: String, - val type: AudioModelType = AudioModelType.Transcription, - val provider: APIProvider = APIProvider.OpenAI, + override val modelName: String, + val type: AudioModelType = AudioModelType.Transcription, + override val provider: APIProvider = APIProvider.OpenAI, ) : AIModel { private val _api = AtomicReference(null) diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AnthropicChatClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AnthropicChatClient.kt index 82ec5bc8a..484d0d320 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AnthropicChatClient.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AnthropicChatClient.kt @@ -172,7 +172,8 @@ import java.util.concurrent.ConcurrentHashMap id = response.id, choices = listOf( ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = response.content?.joinToString("\n") { it.text ?: "" }), index = 0 + content = response.content?.joinToString("\n") { it.text ?: "" }, + ), index = 0 ) ), usage = ModelSchema.Usage( prompt_tokens = response.usage?.input_tokens?.toLong() ?: 0, diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AwsChatClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AwsChatClient.kt index a815ac9bc..f565c05db 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AwsChatClient.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/AwsChatClient.kt @@ -408,7 +408,6 @@ class AwsChatClient( ) = 
ModelSchema.ChatMessage( role = acc.role, content = listOf( ModelSchema.ContentPart( - type = "text", text = (acc.content?.plus(chatMessage.content ?: emptyList()) ?: chatMessage.content)?.joinToString( "\n" ) { it.text ?: "" }) @@ -443,7 +442,7 @@ class AwsChatClient( choices = listOf( ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = fromJson.generation ?: "" + content = fromJson.generation ?: "", ), index = 0 ) ), usage = ModelSchema.Usage( @@ -463,7 +462,7 @@ class AwsChatClient( choices = listOf( ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = fromJson.outputs?.firstOrNull()?.text ?: "" + content = fromJson.outputs?.firstOrNull()?.text ?: "", ), index = 0 ) ) @@ -478,7 +477,7 @@ class AwsChatClient( choices = listOf( ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = fromJson.results?.firstOrNull()?.outputText ?: "" + content = fromJson.results?.firstOrNull()?.outputText ?: "", ), index = 0 ) ) @@ -493,7 +492,7 @@ class AwsChatClient( choices = listOf( ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = fromJson.generations?.firstOrNull()?.text ?: "" + content = fromJson.generations?.firstOrNull()?.text ?: "", ), index = 0 ) ) @@ -508,7 +507,7 @@ class AwsChatClient( choices = fromJson.completions?.mapIndexed { index, completion -> ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = completion.data?.text ?: "" + content = completion.data?.text ?: "", ), index = index ) } ?: emptyList(), @@ -526,7 +525,7 @@ class AwsChatClient( choices = listOf( ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = fromJson.content?.firstOrNull()?.text ?: "" + content = fromJson.content?.firstOrNull()?.text ?: "", ), index = 0 ) ), usage = ModelSchema.Usage( diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiChatClient.kt 
b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiChatClient.kt index ef79b5a64..2174fece5 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiChatClient.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiChatClient.kt @@ -136,7 +136,8 @@ import java.util.concurrent.ConcurrentHashMap choices = fromJson.candidates?.mapIndexed { index, candidate -> ModelSchema.ChatChoice( message = ModelSchema.ChatMessageResponse( - content = candidate.content?.parts?.joinToString("\n") { it.text ?: "" }), index = index + content = candidate.content?.parts?.joinToString("\n") { it.text ?: "" }, + ), index = index ) } ?: emptyList(), usage = ModelSchema.Usage( diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiSdkChatClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiSdkChatClient.kt new file mode 100644 index 000000000..0bc789a5d --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/GeminiSdkChatClient.kt @@ -0,0 +1,242 @@ +package com.simiacryptus.cognotik.chat + +import com.google.common.util.concurrent.ListeningScheduledExecutorService +import com.google.genai.Client +import com.google.genai.types.* +import com.simiacryptus.cognotik.chat.model.ChatModel +import com.simiacryptus.cognotik.chat.model.GeminiModels +import com.simiacryptus.cognotik.models.APIProvider +import com.simiacryptus.cognotik.models.ModelSchema +import com.simiacryptus.cognotik.util.LoggerFactory +import okio.ByteString.Companion.decodeBase64 +import org.apache.hc.core5.http.HttpRequest +import org.slf4j.event.Level +import java.io.BufferedOutputStream +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.ExecutorService +import kotlin.jvm.optionals.getOrNull + +/** + * Gemini Chat Client using the official Google Gen AI Java SDK + */ +class GeminiSdkChatClient( + apiKey: String, + val apiBase: String = APIProvider.Gemini.base, + workPool: ExecutorService, + 
logLevel: Level = Level.INFO, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService, + private val useVertexAI: Boolean = false, + private val project: String? = null, + private val location: String? = null, +) : ChatClientBase( + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool +), ChatClientInterface { + + private val client: Client = buildClient(apiKey, useVertexAI, project, location) + + private fun buildClient( + apiKey: String, + useVertexAI: Boolean, + project: String?, + location: String? + ): Client { + val builder = Client.builder() + + if (useVertexAI) { + builder.vertexAI(true) + if (project != null && location != null) { + builder.project(project).location(location) + } else { + builder.apiKey(apiKey) + } + } else { + builder.apiKey(apiKey) + } + + return builder.build() + } + + override fun getModels(): List? { + // Check cache first + modelsCache[apiBase]?.let { return it } + val models = try { + client.models.list( + ListModelsConfig.builder().build() + ).mapNotNull { + val model = it.name().get() + val baseModelId = model.removePrefix("models/") + GeminiModels.values.values.find { + it.modelName == baseModelId || it.modelName == model + } ?: run { + // If not found in predefined models, create a dynamic one + log.debug("Creating basic ChatModel for unknown Gemini model: ${baseModelId}") + ChatModel( + name = model ?: baseModelId, + modelName = baseModelId, + maxTotalTokens = it.inputTokenLimit().get() + it.outputTokenLimit().get(), + maxOutTokens = it.outputTokenLimit().get(), + provider = APIProvider.Gemini, + inputTokenPricePerK = 0.0, // Default pricing - would need to be configured + outputTokenPricePerK = 0.0 + ) + } + }.toList() + } catch (e: Exception) { + log.warn("Failed to fetch models: ${e.message}") + null + } + // Cache the result + models?.let { modelsCache[apiBase] = it } + return models + } + + override fun chat( + chatRequest: ModelSchema.ChatRequest, + 
model: ChatModel, + logStreams: MutableList + ): ModelSchema.ChatResponse { + try { + val config = buildGenerateContentConfig(chatRequest) + val contents = convertToGeminiContents(chatRequest.messages) + + log(Level.DEBUG, "Sending request to Gemini SDK for model: ${model.modelName}", logStreams) + + val response = if (contents.size == 1) { + client.models.generateContent(model.modelName, contents[0], config) + } else { + // For multi-turn conversations, use the first content as prompt + // and pass config with system instruction if needed + client.models.generateContent(model.modelName, contents.last(), config) + } + + val chatResponse = convertFromGeminiResponse(response) + + if (chatResponse.usage != null && model is ChatModel) { + onUsage(model, chatResponse.usage.copy(cost = model.pricing(chatResponse.usage)), logStreams = logStreams) + } + + return chatResponse + } catch (e: Exception) { + log.error("Error during Gemini SDK chat request", e) + throw e + } + } + + private fun buildGenerateContentConfig(chatRequest: ModelSchema.ChatRequest): GenerateContentConfig? 
{ + val builder = GenerateContentConfig.builder() + + chatRequest.temperature?.let { builder.temperature(it.toFloat()) } + chatRequest.max_tokens?.let { builder.maxOutputTokens(it) } + + // Extract system instruction from messages + val systemMessages = chatRequest.messages.filter { it.role == ModelSchema.Role.system } + if (systemMessages.isNotEmpty()) { + val systemText = systemMessages.joinToString("\n") { + it.content?.joinToString("\n") { part -> part.text ?: "" } ?: "" + } + builder.systemInstruction(Content.fromParts(Part.fromText(systemText))) + } + + return builder.build() + } + + private fun convertToGeminiContents(messages: List): List { + return messages + .filter { it.role != ModelSchema.Role.system } + .map { message -> + val role = when (message.role) { + ModelSchema.Role.user -> "user" + ModelSchema.Role.assistant -> "model" + else -> "user" + } + + val parts = message.content?.map { contentPart -> + when { + contentPart.text != null -> Part.fromText(contentPart.text) + contentPart.image_url != null -> { + // Handle image URLs + val imageUrl = contentPart.image_url + if (imageUrl?.startsWith("data:") == true) { + // Base64 encoded image + val parts = imageUrl.split(",") + val mimeType = parts[0].substringAfter("data:").substringBefore(";") + val data = parts[1] + Part.fromBytes(data.decodeBase64()?.toByteArray(), mimeType) + } else if (imageUrl?.startsWith("gs://") == true) { + // GCS URI + Part.fromUri(imageUrl, "image/jpeg") + } else { + // Regular URL - convert to text description + Part.fromText("[Image: $imageUrl]") + } + } + + else -> Part.fromText("") + } + } ?: listOf(Part.fromText("")) + + Content.builder() + .role(role) + .parts(parts) + .build() + } + } + + private fun convertFromGeminiResponse(response: GenerateContentResponse): ModelSchema.ChatResponse { + val choices = response.candidates().orElse(emptyList()).mapIndexed { index, candidate -> + val content = candidate.content().orElse(null) + val text = 
content?.parts()?.orElse(emptyList()) + ?.mapNotNull { it.text().getOrNull() }?.joinToString("\n")?.let { + when (it) { + "" -> null + else -> it + } + } + + val chatMessageResponse = ModelSchema.ChatMessageResponse( + content = text, + ) + content?.parts()?.orElse(emptyList())?.forEach { part -> + part.inlineData()?.getOrNull()?.apply { + when (mimeType().getOrNull()) { + "image/png", "image/jpeg", "image/jpg", "image/gif" -> { + chatMessageResponse.image_data = this.data().getOrNull() + chatMessageResponse.image_mime_type = this.mimeType().getOrNull() + } + } + } + } + ModelSchema.ChatChoice( + message = chatMessageResponse, + index = index, + finish_reason = candidate.finishReason().orElse(null)?.toString() + ) + } + + val usage = response.usageMetadata().orElse(null)?.let { metadata -> + ModelSchema.Usage( + prompt_tokens = metadata.promptTokenCount().orElse(0).toLong(), + completion_tokens = metadata.candidatesTokenCount().orElse(0).toLong(), + total_tokens = metadata.totalTokenCount().orElse(0).toLong() + ) + } + + return ModelSchema.ChatResponse( + choices = choices, + usage = usage + ) + } + + override fun authorize(request: HttpRequest, apiProvider: APIProvider) { + TODO("Not yet implemented") + } + + companion object { + private val log = LoggerFactory.getLogger(GeminiSdkChatClient::class.java) + private val modelsCache = ConcurrentHashMap>() + } +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/ModelsLabChatClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/ModelsLabChatClient.kt index aa21e1403..713e87080 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/ModelsLabChatClient.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/ModelsLabChatClient.kt @@ -76,7 +76,7 @@ class ModelsLabChatClient( ModelSchema.ChatResponse( id = response.chat_id, choices = listOf( ModelSchema.ChatChoice( - message = ModelSchema.ChatMessageResponse(content = response.message), 
index = 0 + message = ModelSchema.ChatMessageResponse(content = response.message,), index = 0 ) ), usage = response.meta?.let { ModelSchema.Usage( diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/OllamaChatClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/OllamaChatClient.kt index 10efbd58c..949e15ad0 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/OllamaChatClient.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/OllamaChatClient.kt @@ -116,8 +116,8 @@ class OllamaChatClient( index = 0, message = ollamaResponse.message.let { message -> ChatMessageResponse( - role = message.role.let { Role.valueOf(it) }, - content = message.content + role = message.role.let { Role.valueOf(it) }, + content = message.content, ) }, finish_reason = if (ollamaResponse.done) "stop" else "length" diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/AnthropicModels.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/AnthropicModels.kt index 2d0b4e067..1018a1095 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/AnthropicModels.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/AnthropicModels.kt @@ -8,7 +8,7 @@ object AnthropicModels { modelName = "claude-opus-4-1-20250805", maxTotalTokens = 200000, maxOutTokens = 32000, - provider = APIProvider.Companion.Anthropic, + provider = APIProvider.Anthropic, inputTokenPricePerK = 15.0 / 1000.0, outputTokenPricePerK = 75.0 / 1000.0, ) @@ -17,7 +17,7 @@ object AnthropicModels { modelName = "claude-sonnet-4-20250514", maxTotalTokens = 200000, maxOutTokens = 64000, - provider = APIProvider.Companion.Anthropic, + provider = APIProvider.Anthropic, inputTokenPricePerK = 3.0 / 1000.0, outputTokenPricePerK = 15.0 / 1000.0, ) @@ -27,7 +27,7 @@ object AnthropicModels { modelName = "claude-sonnet-4-5-20250929", maxTotalTokens = 200000, maxOutTokens = 64000, - provider = APIProvider.Companion.Anthropic, + 
provider = APIProvider.Anthropic, inputTokenPricePerK = 3.0 / 1000.0, outputTokenPricePerK = 15.0 / 1000.0, ) @@ -37,7 +37,7 @@ object AnthropicModels { modelName = "claude-3-5-haiku-latest", maxTotalTokens = 200000, maxOutTokens = 8192, - provider = APIProvider.Companion.Anthropic, + provider = APIProvider.Anthropic, inputTokenPricePerK = 0.80 / 1000.0, outputTokenPricePerK = 4.0 / 1000.0, ) @@ -45,8 +45,8 @@ object AnthropicModels { name = "Claude45Haiku", modelName = "claude-haiku-4-5-20251001", maxTotalTokens = 200000, - maxOutTokens = 8192, - provider = APIProvider.Companion.Anthropic, + maxOutTokens = 64000, + provider = APIProvider.Anthropic, inputTokenPricePerK = 0.80 / 1000.0, outputTokenPricePerK = 4.0 / 1000.0, ) diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/ChatModel.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/ChatModel.kt index bc73d6b97..891deed56 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/ChatModel.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/ChatModel.kt @@ -1,35 +1,34 @@ package com.simiacryptus.cognotik.chat.model -import com.fasterxml.jackson.core.JsonGenerator -import com.fasterxml.jackson.core.JsonParser -import com.fasterxml.jackson.core.JsonToken -import com.fasterxml.jackson.databind.DeserializationContext -import com.fasterxml.jackson.databind.JsonDeserializer -import com.fasterxml.jackson.databind.JsonNode -import com.fasterxml.jackson.databind.SerializerProvider -import com.fasterxml.jackson.databind.annotation.JsonDeserialize -import com.fasterxml.jackson.databind.annotation.JsonSerialize -import com.fasterxml.jackson.databind.ser.std.StdSerializer -import com.google.common.util.concurrent.ListeningScheduledExecutorService -import com.simiacryptus.cognotik.chat.model.ChatModel.Companion.values -import com.simiacryptus.cognotik.models.APIProvider -import com.simiacryptus.cognotik.models.ModelSchema.Usage -import 
com.simiacryptus.cognotik.models.LLMModel -import org.slf4j.event.Level -import java.io.BufferedOutputStream -import java.util.concurrent.ExecutorService + import com.fasterxml.jackson.core.JsonGenerator + import com.fasterxml.jackson.core.JsonParser + import com.fasterxml.jackson.core.JsonToken + import com.fasterxml.jackson.databind.DeserializationContext + import com.fasterxml.jackson.databind.JsonDeserializer + import com.fasterxml.jackson.databind.JsonNode + import com.fasterxml.jackson.databind.SerializerProvider + import com.fasterxml.jackson.databind.annotation.JsonDeserialize + import com.fasterxml.jackson.databind.annotation.JsonSerialize + import com.fasterxml.jackson.databind.ser.std.StdSerializer + import com.google.common.util.concurrent.ListeningScheduledExecutorService + import com.simiacryptus.cognotik.models.APIProvider + import com.simiacryptus.cognotik.models.ModelSchema.Usage + import com.simiacryptus.cognotik.models.LLMModel + import org.slf4j.event.Level + import java.io.BufferedOutputStream + import java.util.concurrent.ExecutorService -@JsonDeserialize(using = ChatModelsDeserializer::class) -@JsonSerialize(using = ChatModelsSerializer::class) -open class ChatModel( - val name: String, - modelName: String, - maxTotalTokens: Int, + @JsonDeserialize(using = ChatModelsDeserializer::class) + @JsonSerialize(using = ChatModelsSerializer::class) + open class ChatModel( + val name: String = "", + modelName: String = name, + maxTotalTokens: Int = -1, maxOutTokens: Int = maxTotalTokens, - provider: APIProvider, - val inputTokenPricePerK: Double, - val outputTokenPricePerK: Double, -) : LLMModel( + provider: APIProvider? 
= null, + val inputTokenPricePerK: Double = 0.0, + val outputTokenPricePerK: Double = inputTokenPricePerK, + ) : LLMModel( modelName = modelName, maxTotalTokens = maxTotalTokens, maxOutTokens = maxOutTokens, @@ -79,31 +78,46 @@ open class ChatModel( } } -class ChatModelsSerializer : StdSerializer(ChatModel::class.java) { + class ChatModelsSerializer : StdSerializer(ChatModel::class.java) { override fun serialize(value: ChatModel, gen: JsonGenerator, provider: SerializerProvider) { - val modelKey = values().entries.find { it.value == value }?.key - gen.writeString(modelKey ?: value.modelName) + gen.writeStartObject() + gen.writeStringField("name", value.name) + gen.writeStringField("modelName", value.modelName) + gen.writeNumberField("maxTotalTokens", value.maxTotalTokens) + gen.writeNumberField("maxOutTokens", value.maxOutTokens) + value.provider?.let { gen.writeStringField("provider", it.name) } + gen.writeNumberField("inputTokenPricePerK", value.inputTokenPricePerK) + gen.writeNumberField("outputTokenPricePerK", value.outputTokenPricePerK) + gen.writeEndObject() } } -class ChatModelsDeserializer : JsonDeserializer() { + class ChatModelsDeserializer : JsonDeserializer() { override fun deserialize(p: JsonParser, ctxt: DeserializationContext): ChatModel { return when (p.currentToken) { - JsonToken.VALUE_STRING -> { - // Handle string format - val modelName = p.readValueAs(String::class.java) - values().entries.find { it.key == modelName || it.value.name == modelName || it.value.modelName == modelName }?.value - ?: throw IllegalArgumentException("Unknown model: $modelName") - } JsonToken.START_OBJECT -> { - // Handle object format - delegate to default deserialization + // Handle object format val node = p.readValueAsTree() - val modelName = node.get("modelName")?.asText() ?: node.get("name")?.asText() - ?: throw IllegalArgumentException("Object format must contain 'modelName' or 'name' field") - values().entries.find { it.key == modelName || it.value.name == 
modelName || it.value.modelName == modelName }?.value - ?: throw IllegalArgumentException("Unknown model: $modelName") + val name = node.get("name")?.asText() ?: "" + val modelName = node.get("modelName")?.asText() ?: name + val maxTotalTokens = node.get("maxTotalTokens")?.asInt() ?: -1 + val maxOutTokens = node.get("maxOutTokens")?.asInt() ?: maxTotalTokens + val providerName = node.get("provider")?.asText() + val provider = providerName?.let { APIProvider.valueOf(it) } + val inputTokenPricePerK = node.get("inputTokenPricePerK")?.asDouble() ?: 0.0 + val outputTokenPricePerK = node.get("outputTokenPricePerK")?.asDouble() ?: inputTokenPricePerK + + return ChatModel( + name = name, + modelName = modelName, + maxTotalTokens = maxTotalTokens, + maxOutTokens = maxOutTokens, + provider = provider, + inputTokenPricePerK = inputTokenPricePerK, + outputTokenPricePerK = outputTokenPricePerK + ) } - else -> throw IllegalArgumentException("ChatModel must be deserialized from either a string or an object") + else -> throw IllegalArgumentException("ChatModel must be deserialized from an object") } } } \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/GeminiModels.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/GeminiModels.kt index e6cc4fa0d..4a0717b03 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/GeminiModels.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/chat/model/GeminiModels.kt @@ -75,7 +75,7 @@ object GeminiModels { ) val GeminiFlash_20_Preview_Image_Generation = ChatModel( name = "GeminiFlash_20_Preview_Image_Generation", - modelName = "gemini-2.0-flash-preview-image-generation", + modelName = "gemini-2.0-flash-exp-image-generation", maxTotalTokens = 1048576, maxOutTokens = 8192, provider = APIProvider.Companion.Gemini, @@ -85,7 +85,7 @@ object GeminiModels { val GeminiPro_25 = ChatModel( name = "GeminiPro_25", - modelName = "gemini-2.5-pro", + modelName = 
"gemini-2.5-pro-preview-03-25", maxTotalTokens = 1048576, maxOutTokens = 65536, provider = APIProvider.Companion.Gemini, @@ -104,7 +104,7 @@ object GeminiModels { ) val GeminiFlash_25_Lite = ChatModel( name = "GeminiFlash_25_Lite", - modelName = "gemini-2.5-flash-lite", + modelName = "gemini-2.5-flash-lite-preview-06-17", maxTotalTokens = 1048576, maxOutTokens = 65536, provider = APIProvider.Companion.Gemini, @@ -113,7 +113,7 @@ object GeminiModels { ) val GeminiFlash_25_Live = ChatModel( name = "GeminiFlash_25_Live", - modelName = "gemini-live-2.5-flash-preview", + modelName = "gemini-2.5-flash-preview-05-20", maxTotalTokens = 1048576, maxOutTokens = 65536, provider = APIProvider.Companion.Gemini, diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/EmbeddingModel.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/EmbeddingModel.kt index eed19b2cc..3054153d0 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/EmbeddingModel.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/EmbeddingModel.kt @@ -26,7 +26,7 @@ interface Embedder { @JsonDeserialize(using = EmbeddingModelsDeserializer::class) @JsonSerialize(using = EmbeddingModelsSerializer::class) open class EmbeddingModel( - modelName: String? = null, + modelName: String = "", maxTokens: Int = 0, provider: APIProvider? 
= null, private val tokenPricePerK: Double = 0.0, @@ -36,7 +36,7 @@ open class EmbeddingModel( maxTotalTokens = maxTokens ) { private val log = LoggerFactory.getLogger(EmbeddingModel::class.java) - override fun toString() = modelName ?: "UnnamedEmbeddingModel" + override fun toString() = modelName override fun pricing(usage: ModelSchema.Usage) = usage.prompt_tokens * tokenPricePerK / 1000.0 .also { log.info("Calculated pricing for model: $modelName with prompt tokens: ${usage.prompt_tokens}, price: $it") } diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/PromptOptimization.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/PromptOptimization.kt index d4778419d..f3251d7db 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/PromptOptimization.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/embedding/PromptOptimization.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.embedding -import com.simiacryptus.cognotik.actors.ProxyAgent +import com.simiacryptus.cognotik.agents.ProxyAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.models.ModelSchema diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/GeminiImageClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/GeminiImageClient.kt new file mode 100644 index 000000000..416d1a3bb --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/GeminiImageClient.kt @@ -0,0 +1,156 @@ +package com.simiacryptus.cognotik.image + +import com.google.common.util.concurrent.ListeningScheduledExecutorService +import com.google.genai.Client +import com.google.genai.types.GenerateImagesConfig +import com.google.genai.types.GenerateImagesResponse +import com.simiacryptus.cognotik.HttpClientManager +import com.simiacryptus.cognotik.models.ModelSchema.* +import com.simiacryptus.cognotik.util.LoggerFactory +import 
org.slf4j.event.Level +import java.io.BufferedOutputStream +import java.lang.Exception +import java.lang.NumberFormatException +import java.util.Base64 +import java.util.concurrent.ExecutorService + +class GeminiImageClient( + apiKey: String, + workPool: ExecutorService, + logLevel: Level = Level.INFO, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService, + useVertexAI: Boolean = false, + project: String? = null, + location: String? = null, +) : HttpClientManager( + logLevel = logLevel, + logStreams = logStreams, + workPool = workPool, + scheduledPool = scheduledPool +), ImageClientInterface { + + private val client: Client = buildClient(apiKey, useVertexAI, project, location) + + private fun buildClient( + apiKey: String, + useVertexAI: Boolean, + project: String?, + location: String? + ): Client { + val builder = Client.builder() + + if (useVertexAI) { + builder.vertexAI(true) + if (project != null && location != null) { + builder.project(project).location(location) + } else { + builder.apiKey(apiKey) + } + } else { + builder.apiKey(apiKey) + } + + return builder.build() + } + + override fun createImage(request: ImageGenerationRequest): ImageGenerationResponse { + return withReliability { + withPerformanceLogging { + try { + val config = buildGenerateImagesConfig(request) + + log(Level.DEBUG, "Sending image generation request to Gemini SDK for model: ${request.model}", logStreams) + + val response: GenerateImagesResponse = client.models.generateImages(request.model, request.prompt, config) + + val imageData = response.generatedImages().orElse(emptyList()).mapNotNull { generatedImage -> + generatedImage.image().orElse(null)?.let { image -> + // Convert the image to base64 or URL format + val imageBytes = image.imageBytes().orElse(null) + val imageUrl = image.gcsUri().orElse(null) + + ImageObject( + url = imageUrl, + b64_json = imageBytes?.let { Base64.getEncoder().encodeToString(it) } + ) + } + } + + val model = 
GeminiImageModels.values.values.find { it.modelName.equals(request.model, true) } + val dims = request.size?.split("x") + if (model != null) { + onUsage( + model, Usage( + completion_tokens = imageData.size.toLong(), + cost = model.pricing( + width = dims?.get(0)?.toInt() ?: 1024, + height = dims?.get(1)?.toInt() ?: 1024 + ) * imageData.size + ) + ) + } + + ImageGenerationResponse( + created = System.currentTimeMillis() / 1000, + data = imageData + ) + } catch (e: Exception) { + log.error("Error during Gemini image generation", e) + throw e + } + } + } + } + + private fun buildGenerateImagesConfig(request: ImageGenerationRequest): GenerateImagesConfig? { + val builder = GenerateImagesConfig.builder() + + request.n?.let { builder.numberOfImages(it) } + + // Set output format based on response_format + when (request.response_format) { + "b64_json" -> builder.outputMimeType("image/jpeg") + "url" -> builder.outputMimeType("image/jpeg") + else -> builder.outputMimeType("image/jpeg") + } + + // Include safety attributes + builder.includeSafetyAttributes(true) + + // Parse size if provided + request.size?.let { size -> + val dims = size.split("x") + if (dims.size == 2) { + try { + val width = dims[0].toInt() + val height = dims[1].toInt() + // Note: Gemini SDK may have specific size constraints + // You may need to validate or adjust these values + } catch (e: NumberFormatException) { + log.warn("Invalid size format: $size") + } + } + } + + return builder.build() + } + + override fun getModels(): List? 
{ + return try { + GeminiImageModels.values.values.toList() + } catch (e: Exception) { + log.warn("Failed to fetch Gemini image models: ${e.message}") + listOf() + } + } + + fun onUsage(model: ImageModel, usage: Usage) { + // Override this method to track usage + } + + + companion object { + private val log = LoggerFactory.getLogger(GeminiImageClient::class.java) + } +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/GeminiImageModels.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/GeminiImageModels.kt new file mode 100644 index 000000000..e2f4c6515 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/GeminiImageModels.kt @@ -0,0 +1,63 @@ +package com.simiacryptus.cognotik.image + +import com.simiacryptus.cognotik.models.APIProvider + +object GeminiImageModels { + val Imagen3Generate = ImageModel( + name = "Imagen3Generate", modelName = "imagen-3.0-generate-002", maxPrompt = 2048, provider = APIProvider.Gemini, pricingFunction = { width, height -> + // Pricing based on number of images generated + // Standard pricing: $0.04 per image for standard quality + 0.04 + }) + + val Imagen4GeneratePreview = ImageModel( + name = "Imagen4GeneratePreview", + modelName = "imagen-4.0-generate-preview-06-06", + maxPrompt = 2048, + provider = APIProvider.Gemini, + pricingFunction = { width, height -> + // Preview pricing: $0.04 per image + 0.04 + }) + val Imagen4UltraGeneratePreview = ImageModel( + name = "Imagen4UltraGeneratePreview", + modelName = "imagen-4.0-ultra-generate-preview-06-06", + maxPrompt = 2048, + provider = APIProvider.Gemini, + pricingFunction = { width, height -> + // Ultra preview pricing: $0.08 per image + 0.08 + }) + val Imagen4Generate = ImageModel( + name = "Imagen4Generate", modelName = "imagen-4.0-generate-001", maxPrompt = 2048, provider = APIProvider.Gemini, pricingFunction = { width, height -> + // Standard Imagen 4.0 pricing: $0.05 per image + 0.05 + }) + val 
Imagen4UltraGenerate = ImageModel( + name = "Imagen4UltraGenerate", + modelName = "imagen-4.0-ultra-generate-001", + maxPrompt = 2048, + provider = APIProvider.Gemini, + pricingFunction = { width, height -> + // Ultra quality pricing: $0.10 per image + 0.10 + }) + val Imagen4Fast = ImageModel( + name = "Imagen4Fast", modelName = "imagen-4.0-fast-generate-001", maxPrompt = 2048, provider = APIProvider.Gemini, pricingFunction = { width, height -> + // Fast generation pricing: $0.03 per image + 0.03 + }) + + + val values: Map = mapOf( + "Imagen3Generate" to Imagen3Generate, + "Imagen4GeneratePreview" to Imagen4GeneratePreview, + "Imagen4UltraGeneratePreview" to Imagen4UltraGeneratePreview, + "Imagen4Generate" to Imagen4Generate, + "Imagen4UltraGenerate" to Imagen4UltraGenerate, + "Imagen4Fast" to Imagen4Fast + ) + + fun valueOf(name: String): ImageModel? = values[name] + fun entries(): Collection = values.values +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageClient.kt deleted file mode 100644 index d5ff25b27..000000000 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageClient.kt +++ /dev/null @@ -1,53 +0,0 @@ -package com.simiacryptus.cognotik.image - - import com.simiacryptus.cognotik.models.ModelSchema - import com.simiacryptus.cognotik.models.APIProvider -import com.simiacryptus.cognotik.OpenAIClient -import com.google.common.util.concurrent.MoreExecutors -import org.slf4j.event.Level -import java.util.concurrent.Executors - -class ImageClient( - apiKey: String, - private val apiBase: String -) { - private val client = OpenAIClient( - key = apiKey, - apiBase = apiBase, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), - logLevel = Level.INFO - ) - - fun generate( - request: ModelSchema.ImageGenerationRequest, - model: 
ImageModel - ): ModelSchema.ImageGenerationResponse { - return client.createImage(request) - } - - companion object { - private val clients = mutableMapOf() - - @Synchronized - fun getClient( - apiKey: String, - apiBase: String, - provider: APIProvider - ): ImageClient { - val key = "$provider:$apiBase" - return clients.getOrPut(key) { - ImageClient(apiKey, apiBase) - } - } - - fun generate( - request: ModelSchema.ImageGenerationRequest, - model: ImageModel, - apiKey: String - ): ModelSchema.ImageGenerationResponse { - val client = getClient(apiKey, model.provider?.base!!, model.provider) - return client.generate(request, model) - } - } -} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageClientInterface.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageClientInterface.kt new file mode 100644 index 000000000..0ca099d51 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageClientInterface.kt @@ -0,0 +1,8 @@ +package com.simiacryptus.cognotik.image + +import com.simiacryptus.cognotik.models.ModelSchema + +interface ImageClientInterface { + fun createImage(request: ModelSchema.ImageGenerationRequest): ModelSchema.ImageGenerationResponse + fun getModels(): List? 
+} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageModel.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageModel.kt new file mode 100644 index 000000000..b75cdc2f5 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageModel.kt @@ -0,0 +1,21 @@ +package com.simiacryptus.cognotik.image + +import com.simiacryptus.cognotik.models.AIModel +import com.simiacryptus.cognotik.models.APIProvider +import com.simiacryptus.cognotik.util.LoggerFactory + +class ImageModel( + val name: String, + override val modelName: String, + val maxPrompt: Int, + override val provider: APIProvider, + val quality: String = "standard", + val pricingFunction: (width: Int, height: Int) -> Double +) : AIModel { + + fun pricing(width: Int, height: Int): Double = pricingFunction(width, height) + + companion object { + private val log = LoggerFactory.getLogger(ImageModel::class.java) + } +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageModels.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageModels.kt deleted file mode 100644 index 1c4881ffd..000000000 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/ImageModels.kt +++ /dev/null @@ -1,90 +0,0 @@ -package com.simiacryptus.cognotik.image - -import com.simiacryptus.cognotik.models.APIProvider -import com.simiacryptus.cognotik.models.LLMModel -import com.simiacryptus.cognotik.models.ModelSchema -import com.simiacryptus.cognotik.models.ModelSchema.Usage -import com.simiacryptus.cognotik.util.LoggerFactory - -class ImageModel( - val name: String, - override val modelName: String, - val maxPrompt: Int, - provider: APIProvider, - val quality: String = "standard", - val pricingFunction: (width: Int, height: Int) -> Double -) : LLMModel( - modelName = modelName, provider = provider, maxTotalTokens = maxPrompt, maxOutTokens = 0 -) { - override fun pricing(usage: Usage): Double 
{ - return usage.cost ?: 0.0 - } - - fun pricing(width: Int, height: Int): Double = pricingFunction(width, height) - - fun generate( - prompt: String, width: Int = 1024, height: Int = 1024, count: Int = 1, quality: String = this.quality - ): ModelSchema.ImageGenerationResponse { - return ImageClient.generate( - request = ModelSchema.ImageGenerationRequest( - prompt = prompt, model = modelName, n = count, size = "${width}x${height}", quality = quality - ), model = this, apiKey = TODO() - ) - } - - companion object { - private val log = LoggerFactory.getLogger(ImageModel::class.java) - } -} - -object ImageModels { - val DallE2 = ImageModel( - name = "DallE2", - modelName = "dall-e-2", - maxPrompt = 1000, - provider = APIProvider.Companion.OpenAI, - pricingFunction = { width, height -> - when { - width == 1024 && height == 1024 -> 0.02 - width == 512 && height == 512 -> 0.018 - width == 256 && height == 256 -> 0.016 - else -> throw IllegalArgumentException("Unsupported image size: $width x $height") - } - }) - - val DallE3 = ImageModel( - name = "DallE3", - modelName = "dall-e-3", - maxPrompt = 1000, - provider = APIProvider.Companion.OpenAI, - pricingFunction = { width, height -> - when { - width == 1024 && height == 1024 -> 0.04 - width == 1024 && height == 1792 -> 0.08 - width == 1792 && height == 1024 -> 0.08 - else -> throw IllegalArgumentException("Unsupported image size: $width x $height") - } - }) - - val DallE3_HD = ImageModel( - name = "DallE3_HD", - modelName = "dall-e-3", - maxPrompt = 1000, - provider = APIProvider.Companion.OpenAI, - quality = "hd", - pricingFunction = { width, height -> - when { - width == 1024 && height == 1024 -> 0.08 - width == 1024 && height == 1792 -> 0.12 - width == 1792 && height == 1024 -> 0.12 - else -> throw IllegalArgumentException("Unsupported image size: $width x $height") - } - }) - - val values: Map = mapOf( - "DallE2" to DallE2, "DallE3" to DallE3, "DallE3_HD" to DallE3_HD - ) - - fun valueOf(name: String): ImageModel? 
= values[name] - fun entries(): Collection = values.values -} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/OpenAIImageClient.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/OpenAIImageClient.kt new file mode 100644 index 000000000..eb982d8f7 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/OpenAIImageClient.kt @@ -0,0 +1,104 @@ +package com.simiacryptus.cognotik.image + +import com.google.common.util.concurrent.ListeningScheduledExecutorService +import com.google.gson.Gson +import com.simiacryptus.cognotik.HttpClientManager +import com.simiacryptus.cognotik.exceptions.ErrorUtil.checkError +import com.simiacryptus.cognotik.models.AIModel +import com.simiacryptus.cognotik.models.APIProvider +import com.simiacryptus.cognotik.models.ModelSchema.* +import com.simiacryptus.cognotik.util.JsonUtil +import com.simiacryptus.cognotik.util.LoggerFactory +import org.apache.hc.client5.http.classic.methods.HttpGet +import org.apache.hc.client5.http.classic.methods.HttpPost +import org.apache.hc.core5.http.io.entity.EntityUtils +import org.apache.hc.core5.http.io.entity.StringEntity +import org.slf4j.Logger +import org.slf4j.event.Level +import java.io.BufferedOutputStream +import java.io.IOException +import java.util.concurrent.ExecutorService + +open class OpenAIImageClient( + protected var key: String, + protected val apiBase: String, + logLevel: Level = Level.TRACE, + logStreams: MutableList = mutableListOf(), + workPool: ExecutorService, + scheduledPool: ListeningScheduledExecutorService, +) : HttpClientManager( + logLevel = logLevel, + logStreams = logStreams, + workPool = workPool, + scheduledPool = scheduledPool +), ImageClientInterface { + + var user: Any? = null + var session: Any? 
= null + open val provider = APIProvider.OpenAI + + open fun onUsage(model: AIModel?, tokens: Usage) { + } + + @Throws(IOException::class, InterruptedException::class) + protected fun post(url: String, json: String, apiProvider: APIProvider): String { + val request = HttpPost(url) + request.addHeader("Content-Type", "application/json") + request.addHeader("Accept", "application/json") + log.info("Sending POST request to URL: $url with payload: $json") + apiProvider.authorize(request, key, apiBase) + request.entity = StringEntity(json, Charsets.UTF_8, false) + return post(request) + } + + protected fun post(request: HttpPost): String = withClient { EntityUtils.toString(it.execute(request).entity) } + + @Throws(IOException::class) + protected operator fun get(url: String?, apiProvider: APIProvider): String = withClient { + val request = HttpGet(url) + request.addHeader("Content-Type", "application/json") + request.addHeader("Accept", "application/json") + log.debug("Sending GET request to URL: $url") + apiProvider.authorize(request, key, apiBase) + EntityUtils.toString(it.execute(request).entity) + } + + override fun createImage(request: ImageGenerationRequest): ImageGenerationResponse = withReliability { + withPerformanceLogging { + val url = "${apiBase}/images/generations" + val httpRequest = HttpPost(url) + httpRequest.addHeader("Accept", "application/json") + httpRequest.addHeader("Content-Type", "application/json") + provider.authorize(httpRequest, key, apiBase) + val requestBody = Gson().toJson(request) + httpRequest.entity = StringEntity(requestBody, Charsets.UTF_8, false) + val response = post(httpRequest) + checkError(response) + log.info("Image creation response received") + val model = OpenAIImageModels.values.values.find { it.modelName.equals(request.model, true) } + val dims = request.size?.split("x") + onUsage( + model, Usage( + completion_tokens = 1, cost = model?.pricing( + width = dims?.get(0)?.toInt() ?: 0, + height = dims?.get(1)?.toInt() ?: 0 + ) 
+ ) + ) + JsonUtil.objectMapper().readValue(response, ImageGenerationResponse::class.java) + } + } + + override fun getModels(): List? { + return try { + OpenAIImageModels.values.values.toList() + } catch (e: Exception) { + log.error("Failed to fetch image models", e) + null + } + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(OpenAIImageClient::class.java) + } +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/OpenAIImageModels.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/OpenAIImageModels.kt new file mode 100644 index 000000000..292a32c03 --- /dev/null +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/image/OpenAIImageModels.kt @@ -0,0 +1,55 @@ +package com.simiacryptus.cognotik.image + +import com.simiacryptus.cognotik.models.APIProvider + +object OpenAIImageModels { + val DallE2 = ImageModel( + name = "DallE2", + modelName = "dall-e-2", + maxPrompt = 1000, + provider = APIProvider.OpenAI, + pricingFunction = { width, height -> + when { + width == 1024 && height == 1024 -> 0.02 + width == 512 && height == 512 -> 0.018 + width == 256 && height == 256 -> 0.016 + else -> throw IllegalArgumentException("Unsupported image size: $width x $height") + } + }) + + val DallE3 = ImageModel( + name = "DallE3", + modelName = "dall-e-3", + maxPrompt = 1000, + provider = APIProvider.OpenAI, + pricingFunction = { width, height -> + when { + width == 1024 && height == 1024 -> 0.04 + width == 1024 && height == 1792 -> 0.08 + width == 1792 && height == 1024 -> 0.08 + else -> throw IllegalArgumentException("Unsupported image size: $width x $height") + } + }) + + val DallE3_HD = ImageModel( + name = "DallE3_HD", + modelName = "dall-e-3", + maxPrompt = 1000, + provider = APIProvider.OpenAI, + quality = "hd", + pricingFunction = { width, height -> + when { + width == 1024 && height == 1024 -> 0.08 + width == 1024 && height == 1792 -> 0.12 + width == 1792 && height == 1024 -> 0.12 + 
else -> throw IllegalArgumentException("Unsupported image size: $width x $height") + } + }) + + val values: Map = mapOf( + "DallE2" to DallE2, "DallE3" to DallE3, "DallE3_HD" to DallE3_HD + ) + + fun valueOf(name: String): ImageModel? = values[name] + fun entries(): Collection = values.values +} \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/AIModel.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/AIModel.kt index bf191045b..3c98e2e0a 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/AIModel.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/AIModel.kt @@ -1,5 +1,6 @@ package com.simiacryptus.cognotik.models interface AIModel { - val modelName: String? + val modelName: String? + val provider: APIProvider? } \ No newline at end of file diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/APIProvider.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/APIProvider.kt index 9b31cff98..fd0c13310 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/APIProvider.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/APIProvider.kt @@ -7,18 +7,16 @@ import com.google.common.util.concurrent.MoreExecutors import com.simiacryptus.cognotik.audio.AudioModels import com.simiacryptus.cognotik.chat.* import com.simiacryptus.cognotik.chat.model.* -import com.simiacryptus.cognotik.embedding.EmbeddingClientInterface import com.simiacryptus.cognotik.embedding.EmbeddingModel import com.simiacryptus.cognotik.embedding.OllamaEmbeddingModels import com.simiacryptus.cognotik.embedding.OpenAIEmbeddingModels -import com.simiacryptus.cognotik.image.ImageModel -import com.simiacryptus.cognotik.image.ImageModels +import com.simiacryptus.cognotik.image.* import com.simiacryptus.cognotik.util.DynamicEnum import com.simiacryptus.cognotik.util.DynamicEnumDeserializer import com.simiacryptus.cognotik.util.DynamicEnumSerializer -import 
org.slf4j.Logger import com.simiacryptus.cognotik.util.LoggerFactory import org.apache.hc.core5.http.HttpRequest +import org.slf4j.Logger import org.slf4j.event.Level import java.io.BufferedOutputStream import java.util.concurrent.ExecutorService @@ -30,446 +28,504 @@ private val log: Logger = LoggerFactory.getLogger(APIProvider::class.java) @JsonSerialize(using = APIProviderSerializer::class) abstract class APIProvider private constructor(name: String, val base: String) : DynamicEnum(name) { - abstract fun getChatClient( + abstract fun getChatClient( + key: String, + base: String = this.base, + workPool: ExecutorService, + logLevel: Level = Level.INFO, + logStreams: MutableList = mutableListOf(), + scheduledPool: ListeningScheduledExecutorService + ): ChatClientInterface + + abstract fun getChatModels(key: String, baseUrl: String): List + open fun getEmbeddingModels(key: String, baseUrl: String): List = emptyList() + + open fun getTranscriptionModels(key: String, baseUrl: String): List = emptyList() + open fun getImageModels(key: String, baseUrl: String): List = emptyList() + + open fun authorize(request: HttpRequest, key: String, apiBase: String) { + request.addHeader("Authorization", "Bearer ${key}") + } + + open fun getEmbeddingClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level = Level.INFO, + logStreams: MutableList = mutableListOf(), + scheduledPool: ListeningScheduledExecutorService + ): com.simiacryptus.cognotik.embedding.EmbeddingClientInterface { + throw UnsupportedOperationException("${this.name} does not support embedding functionality") + } + + open fun getImageClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level = Level.INFO, + logStreams: MutableList = mutableListOf(), + scheduledPool: ListeningScheduledExecutorService + ): ImageClientInterface { + throw UnsupportedOperationException("${this.name} does not support image generation functionality") + } + + companion object { + val 
SearchAPI: APIProvider = object : APIProvider("SearchAPI", "https://api.searchapi.com") { + + override fun getChatModels(key: String, baseUrl: String): List = emptyList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = throw UnsupportedOperationException("SearchAPI does not support chat functionality") + } + + val Gemini: APIProvider = object : APIProvider("Gemini", "https://generativelanguage.googleapis.com") { + override fun authorize( + request: HttpRequest, + key: String, + apiBase: String + ) { + } + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: GeminiModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = if (false) GeminiChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) else GeminiSdkChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + + override fun getImageModels(key: String, baseUrl: String): List { + return GeminiImageModels.values.values.toList() + } + + override fun getImageClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ): ImageClientInterface = GeminiImageClient( + apiKey = key, + workPool = workPool, + logLevel = logLevel, + logStreams = 
logStreams, + scheduledPool = scheduledPool + ) + + } + val Ollama: APIProvider = object : APIProvider("Ollama", "http://localhost:11434") { + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: emptyList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = OllamaChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + scheduledPool = scheduledPool, + logLevel = logLevel, + logStreams = logStreams + ) + + override fun getEmbeddingModels(key: String, baseUrl: String): List { + return OllamaEmbeddingModels.values.values.toList() + } + + override fun getEmbeddingClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = com.simiacryptus.cognotik.embedding.OllamaEmbeddingClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + } + val OpenAI: APIProvider = object : APIProvider("OpenAI", "https://api.openai.com/v1") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: OpenAIModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: 
ListeningScheduledExecutorService + ) = OpenAIChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + scheduledPool = scheduledPool + ) + + + override fun getEmbeddingModels(key: String, baseUrl: String): List { + return OpenAIEmbeddingModels.values.values.toList() + } + + override fun getEmbeddingClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = com.simiacryptus.cognotik.embedding.OpenAIEmbeddingClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + + override fun getImageModels(key: String, baseUrl: String): List { + return OpenAIImageModels.values.values.toList() + } + + override fun getImageClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ): ImageClientInterface = OpenAIImageClient( + key = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + + override fun getTranscriptionModels( + key: String, + baseUrl: String + ): List { + return listOf( + AudioModels(modelName = "gpt-4o-transcribe", provider = this), + AudioModels(modelName = "gpt-4o-mini-transcribe", provider = this), + AudioModels(modelName = "whisper-1", provider = this) + ) + } + } + val Anthropic: APIProvider = object : APIProvider("Anthropic", "https://api.anthropic.com/v1") { + override fun authorize( + request: HttpRequest, + key: String, + apiBase: String + ) { + request.addHeader("x-api-key", key) + request.addHeader("anthropic-version", "2023-06-01") + } + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = 
MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: AnthropicModels.values.values.toList() + + override fun getChatClient( key: String, - base: String = this.base, + base: String, workPool: ExecutorService, - logLevel: Level = Level.INFO, - logStreams: MutableList = mutableListOf(), + logLevel: Level, + logStreams: MutableList, scheduledPool: ListeningScheduledExecutorService - ): ChatClientInterface + ) = AnthropicChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + } + val AWS: APIProvider = object : APIProvider("AWS", "https://api.openai.aws") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: AWSModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = AwsChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) - abstract fun getChatModels(key: String, baseUrl: String): List - open fun getEmbeddingModels(key: String, baseUrl: String): List = emptyList() + } + val Groq: APIProvider = object : APIProvider("Groq", "https://api.groq.com/openai/v1") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = 
mutableListOf() + ).getModels() ?: GroqModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = GroqChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + + override fun getTranscriptionModels( + key: String, + baseUrl: String + ): List { + return listOf( + AudioModels(modelName = "whisper-large-v3", provider = this), + AudioModels(modelName = "whisper-large-v3-turbo", provider = this), + ) + } + } + val Perplexity: APIProvider = object : APIProvider("Perplexity", "https://api.perplexity.ai") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: PerplexityModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = OpenAIChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + scheduledPool = scheduledPool + ) + } + val ModelsLab: APIProvider = object : APIProvider("ModelsLab", "https://modelslab.com/api/v6") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: ModelsLabModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: 
ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = ModelsLabChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + } + val Mistral: APIProvider = object : APIProvider("Mistral", "https://api.mistral.ai/v1") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: MistralModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = MistralChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + } + val DeepSeek: APIProvider = object : APIProvider("DeepSeek", "https://api.deepseek.com") { + + override fun getChatModels(key: String, baseUrl: String) = getChatClient( + key = key, + base = baseUrl, + workPool = MoreExecutors.newDirectExecutorService(), + scheduledPool = MoreExecutors.listeningDecorator(Executors.newScheduledThreadPool(1)), + logLevel = Level.INFO, + logStreams = mutableListOf() + ).getModels() ?: DeepSeekModels.values.values.toList() + + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = DeepSeekChatClient( + apiKey = key, + apiBase = base, + workPool = workPool, + logLevel = logLevel, + logStreams = logStreams, + scheduledPool = scheduledPool + ) + } + val Google: APIProvider = object : APIProvider("GoogleSearch", 
"c581d1409962d72e1") { - open fun getTranscriptionModels(key: String, baseUrl: String): List = emptyList() + override fun getChatModels(key: String, baseUrl: String): List = emptyList() - open fun authorize(request: HttpRequest, key: String, apiBase: String) { - request.addHeader("Authorization", "Bearer ${key}") + override fun getChatClient( + key: String, + base: String, + workPool: ExecutorService, + logLevel: Level, + logStreams: MutableList, + scheduledPool: ListeningScheduledExecutorService + ) = throw UnsupportedOperationException("Google Search API does not support chat functionality") } - open fun getEmbeddingClient( + val Github: APIProvider = object : APIProvider("Github", "https://api.github.com") { + + override fun getChatModels(key: String, baseUrl: String): List = emptyList() + + override fun getChatClient( key: String, base: String, workPool: ExecutorService, - logLevel: Level = Level.INFO, - logStreams: MutableList = mutableListOf(), + logLevel: Level, + logStreams: MutableList, scheduledPool: ListeningScheduledExecutorService - ): com.simiacryptus.cognotik.embedding.EmbeddingClientInterface { - throw UnsupportedOperationException("${this.name} does not support embedding functionality") + ) = throw UnsupportedOperationException("Github API does not support chat functionality") } - companion object { - val SearchAPI: APIProvider = object : APIProvider("SearchAPI", "https://api.searchapi.com") { - - override fun getChatModels(key: String, baseUrl: String): List = emptyList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = throw UnsupportedOperationException("SearchAPI does not support chat functionality") - } - - val Gemini: APIProvider = object : APIProvider("Gemini", "https://generativelanguage.googleapis.com") { - override fun authorize( - request: HttpRequest, - key: String, - apiBase: String - ) { - } 
- - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: GeminiModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = GeminiChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - } - val Ollama: APIProvider = object : APIProvider("Ollama", "http://localhost:11434") { - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: emptyList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = OllamaChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - scheduledPool = scheduledPool, - logLevel = logLevel, - logStreams = logStreams - ) - - override fun getEmbeddingModels(key: String, baseUrl: String): List { - return OllamaEmbeddingModels.values.values.toList() - } - - override fun getEmbeddingClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = com.simiacryptus.cognotik.embedding.OllamaEmbeddingClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = 
logStreams, - scheduledPool = scheduledPool - ) - } - val OpenAI: APIProvider = object : APIProvider("OpenAI", "https://api.openai.com/v1") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: OpenAIModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = OpenAIChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - scheduledPool = scheduledPool - ) - - - override fun getEmbeddingModels(key: String, baseUrl: String): List { - return OpenAIEmbeddingModels.values.values.toList() - } - - override fun getEmbeddingClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = com.simiacryptus.cognotik.embedding.OpenAIEmbeddingClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - fun getImageModels(key: String, baseUrl: String): List { - return ImageModels.values.values.toList() - } - - override fun getTranscriptionModels( - key: String, - baseUrl: String - ): List { - return listOf( - AudioModels(modelName = "gpt-4o-transcribe", provider = this,), - AudioModels(modelName = "gpt-4o-mini-transcribe", provider = this,), - AudioModels(modelName = "whisper-1", provider = this,) - ) - } - } - val Anthropic: APIProvider = object : APIProvider("Anthropic", "https://api.anthropic.com/v1") { - override fun authorize( - request: HttpRequest, - key: String, - apiBase: String - ) { - request.addHeader("x-api-key", key) - 
request.addHeader("anthropic-version", "2023-06-01") - } - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: AnthropicModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = AnthropicChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - } - val AWS: APIProvider = object : APIProvider("AWS", "https://api.openai.aws") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: AWSModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = AwsChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - - } - val Groq: APIProvider = object : APIProvider("Groq", "https://api.groq.com/openai/v1") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - 
).getModels() ?: GroqModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = GroqChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - override fun getTranscriptionModels( - key: String, - baseUrl: String - ): List { - return listOf( - AudioModels(modelName = "whisper-large-v3", provider = this,), - AudioModels(modelName = "whisper-large-v3-turbo", provider = this,), - ) - } - } - val Perplexity: APIProvider = object : APIProvider("Perplexity", "https://api.perplexity.ai") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: PerplexityModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = OpenAIChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - scheduledPool = scheduledPool - ) - } - val ModelsLab: APIProvider = object : APIProvider("ModelsLab", "https://modelslab.com/api/v6") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: ModelsLabModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - 
logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = ModelsLabChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - } - val Mistral: APIProvider = object : APIProvider("Mistral", "https://api.mistral.ai/v1") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: MistralModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = MistralChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - } - val DeepSeek: APIProvider = object : APIProvider("DeepSeek", "https://api.deepseek.com") { - - override fun getChatModels(key: String, baseUrl: String) = getChatClient( - key = key, - base = baseUrl, - workPool = MoreExecutors.newDirectExecutorService(), - scheduledPool = MoreExecutors.listeningDecorator( Executors.newScheduledThreadPool(1) ), - logLevel = Level.INFO, - logStreams = mutableListOf() - ).getModels() ?: DeepSeekModels.values.values.toList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = DeepSeekChatClient( - apiKey = key, - apiBase = base, - workPool = workPool, - logLevel = logLevel, - logStreams = logStreams, - scheduledPool = scheduledPool - ) - } - val Google: APIProvider = object : APIProvider("GoogleSearch", "c581d1409962d72e1") { 
- - override fun getChatModels(key: String, baseUrl: String): List = emptyList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = throw UnsupportedOperationException("Google Search API does not support chat functionality") - } - val Github: APIProvider = object : APIProvider("Github", "https://api.github.com") { - - override fun getChatModels(key: String, baseUrl: String): List = emptyList() - - override fun getChatClient( - key: String, - base: String, - workPool: ExecutorService, - logLevel: Level, - logStreams: MutableList, - scheduledPool: ListeningScheduledExecutorService - ) = throw UnsupportedOperationException("Github API does not support chat functionality") - } - - init { - log.info("Registering API providers") - register(APIProvider::class.java, Gemini) - register(APIProvider::class.java, OpenAI) - register(APIProvider::class.java, Anthropic) - register(APIProvider::class.java, AWS) - register(APIProvider::class.java, Groq) - register(APIProvider::class.java, Perplexity) - register(APIProvider::class.java, ModelsLab) - register(APIProvider::class.java, Mistral) - register(APIProvider::class.java, DeepSeek) - register(APIProvider::class.java, Google) - register(APIProvider::class.java, Github) - register(APIProvider::class.java, Ollama) - register(APIProvider::class.java, SearchAPI) - } - - @JvmStatic - fun valueOf(name: String): APIProvider = valueOf(APIProvider::class.java, name) - - @JvmStatic - fun values(): Collection { - log.debug("Retrieving all APIProvider values") - return values(APIProvider::class.java) - } + init { + log.info("Registering API providers") + register(APIProvider::class.java, Gemini) + register(APIProvider::class.java, OpenAI) + register(APIProvider::class.java, Anthropic) + register(APIProvider::class.java, AWS) + register(APIProvider::class.java, Groq) + 
register(APIProvider::class.java, Perplexity) + register(APIProvider::class.java, ModelsLab) + register(APIProvider::class.java, Mistral) + register(APIProvider::class.java, DeepSeek) + register(APIProvider::class.java, Google) + register(APIProvider::class.java, Github) + register(APIProvider::class.java, Ollama) + register(APIProvider::class.java, SearchAPI) + } + + @JvmStatic + fun valueOf(name: String): APIProvider = valueOf(APIProvider::class.java, name) + + @JvmStatic + fun values(): Collection { + log.debug("Retrieving all APIProvider values") + return values(APIProvider::class.java) } + } } class APIProviderSerializer : DynamicEnumSerializer(APIProvider::class.java) diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/LLMModel.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/LLMModel.kt index 7a0018c46..8ba39c18c 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/LLMModel.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/LLMModel.kt @@ -13,10 +13,10 @@ import com.simiacryptus.cognotik.models.ModelSchema.Usage @JsonDeserialize(using = LLMModelDeserializer::class) @JsonSerialize(using = LLMModelSerializer::class) open class LLMModel( - override val modelName: String?, - val provider: APIProvider?, - val maxTotalTokens: Int = -1, - val maxOutTokens: Int = maxTotalTokens, + override val modelName: String, + override val provider: APIProvider?, + val maxTotalTokens: Int = -1, + val maxOutTokens: Int = maxTotalTokens, ) : AIModel { open fun pricing(usage: Usage): Double = 0.0 } diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/ModelSchema.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/ModelSchema.kt index d292abec9..080bb9e43 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/ModelSchema.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/models/ModelSchema.kt @@ -1,5 +1,6 @@ package com.simiacryptus.cognotik.models +import 
com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.databind.node.ObjectNode import com.simiacryptus.cognotik.util.LoggerFactory import java.awt.image.BufferedImage @@ -222,31 +223,72 @@ interface ModelSchema { ) data class ContentPart( - val type: String, val text: String? = null, - val image_url: String? = null, + var image_url: String? = null, val input_audio: AudioInput? = null ) { + var image_data: ByteArray? + @JsonIgnore + get() { + return if (image_url != null && image_url!!.startsWith("data:image/")) { + val parts = image_url!!.split(",") + Base64.getDecoder().decode(parts[1]) + } else { + null + } + } + @JsonIgnore + set(value) { + if (value != null) { + val base64Data = Base64.getEncoder().encodeToString(value) + image_url = "data:image/jpeg;base64,$base64Data" + } else { + image_url = null + } + } + var image: BufferedImage? + @JsonIgnore + get() { + val data = image_data + return if (data != null) { + ImageIO.read(data.inputStream()) + } else { + null + } + } + @JsonIgnore + set(value) { + if (value != null) { + val output = ByteArrayOutputStream() + ImageIO.write(value, "jpg", output) + val base64Data = Base64.getEncoder().encodeToString(output.toByteArray()) + image_url = "data:image/jpeg;base64,$base64Data" + } else { + image_url = null + } + } + + companion object { private val log = LoggerFactory.getLogger(ContentPart::class.java) fun text(content: String): ContentPart { log.info("Creating text ContentPart") - return ContentPart(type = "text", text = content) + return ContentPart(text = content) } fun jpg(img: BufferedImage): ContentPart { log.info("Creating jpg ContentPart") - return ContentPart(type = "image_url", image_url = "data:image/jpeg;base64," + toBase64(img, "jpg")) + return ContentPart(image_url = "data:image/jpeg;base64," + toBase64(img, "jpg")) } fun png(img: BufferedImage): ContentPart { log.info("Creating png ContentPart") - return ContentPart(type = "image_url", image_url = "data:image/png;base64," + 
toBase64(img, "png")) + return ContentPart(image_url = "data:image/png;base64," + toBase64(img, "png")) } fun audio(data: String, format: String): ContentPart { log.info("Creating audio ContentPart") - return ContentPart(type = "input_audio", input_audio = AudioInput(data, format)) + return ContentPart(input_audio = AudioInput(data, format)) } fun toBase64(image: BufferedImage, fmt: String): String { @@ -265,10 +307,54 @@ interface ModelSchema { ) data class ChatMessageResponse( - val role: Role? = null, - val content: String? = null, - val function_call: FunctionCall? = null, - ) + val role: Role? = null, + val content: String? = null, + val function_call: FunctionCall? = null, + var image_url: String? = null, + var image_mime_type: String? = null, + ) { + var image: BufferedImage? + @JsonIgnore + get() { + return if (image_url != null && image_url!!.startsWith("data:image/")) { + val parts = image_url!!.split(",") + val data = Base64.getDecoder().decode(parts[1]) + ImageIO.read(data.inputStream()) + } else { + null + } + } + @JsonIgnore + set(value) { + if (value != null) { + val output = ByteArrayOutputStream() + ImageIO.write(value, "jpg", output) + val base64Data = Base64.getEncoder().encodeToString(output.toByteArray()) + image_url = "data:image/jpeg;base64,$base64Data" + } else { + image_url = null + } + } + var image_data: ByteArray? + @JsonIgnore + get() { + return if (image_url != null && image_url!!.startsWith("data:image/")) { + val parts = image_url!!.split(",") + Base64.getDecoder().decode(parts[1]) + } else { + null + } + } + @JsonIgnore + set(value) { + if (value != null) { + val base64Data = Base64.getEncoder().encodeToString(value) + image_url = "data:image/jpeg;base64,$base64Data" + } else { + image_url = null + } + } + } enum class Role { assistant, user, system @@ -281,7 +367,6 @@ interface ModelSchema { data class GroqChatMessage( val role: Role? = null, - val content: String? = null, val function_call: FunctionCall? 
= null, ) @@ -374,7 +459,8 @@ interface ModelSchema { ) data class ImageObject( - val url: String + val url: String? = null, + val b64_json: String? = null ) data class ImageGenerationResponse( diff --git a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/util/util.kt b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/util/util.kt index 979393c7f..b667e240a 100644 --- a/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/util/util.kt +++ b/jo-penai/src/main/kotlin/com/simiacryptus/cognotik/util/util.kt @@ -2,6 +2,6 @@ package com.simiacryptus.cognotik.util import com.simiacryptus.cognotik.models.ModelSchema -fun String.toContentList() = listOf(this).map { ModelSchema.ContentPart(text = it, type = "text") } +fun String.toContentList() = listOf(this).map { ModelSchema.ContentPart(text = it) } fun String.toChatMessage(role: ModelSchema.Role = ModelSchema.Role.user) = ModelSchema.ChatMessage(role = role, content = toContentList()) \ No newline at end of file diff --git a/webapp/src/store/slices/messageSlice.ts b/webapp/src/store/slices/messageSlice.ts index 053b6369d..4db00df18 100644 --- a/webapp/src/store/slices/messageSlice.ts +++ b/webapp/src/store/slices/messageSlice.ts @@ -30,7 +30,7 @@ mermaid.initialize({ ALLOWED_TAGS: ['div', 'span', 'p', 'br', 'b', 'i', 'em', 'strong', 'a', 'ul', 'ol', 'li', 'code', 'pre', 'table', 'tr', 'td', 'th', 'thead', 'tbody', 'button', 'input', 'label', 'select', 'option', 'textarea', 'code', 'pre', 'div', 'section', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'img', 'figure', 'figcaption',], ALLOWED_ATTR: ['class', 'href', 'target', 'data-tab', 'data-for-tab', 'style', 'type', 'value', 'id', 'name', - 'data-message-id', 'data-id', 'data-message-action', 'data-action', 'data-ref-id', 'data-version', 'role', 'message-id'], + 'data-message-id', 'data-id', 'data-message-action', 'data-action', 'data-ref-id', 'data-version', 'role', 'message-id', 'src'], }); }; diff --git 
a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/code/CodingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/code/CodingTask.kt index e3ecf0da2..3659573e0 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/code/CodingTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/code/CodingTask.kt @@ -1,7 +1,7 @@ package com.simiacryptus.cognotik.apps.code -import com.simiacryptus.cognotik.actors.CodeAgent -import com.simiacryptus.cognotik.actors.CodeAgent.CodeResult +import com.simiacryptus.cognotik.agents.CodeAgent +import com.simiacryptus.cognotik.agents.CodeAgent.CodeResult import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.interpreter.CodeRuntime diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/CmdPatchApp.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/CmdPatchApp.kt index 959b2dd6d..be226755f 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/CmdPatchApp.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/CmdPatchApp.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.apps.general -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.diff.PatchProcessor import com.simiacryptus.cognotik.util.* diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/PatchApp.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/PatchApp.kt index 07f98dcab..05c2a4690 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/PatchApp.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/general/PatchApp.kt @@ -1,8 +1,8 @@ package com.simiacryptus.cognotik.apps.general -import com.simiacryptus.cognotik.actors.ChatAgent -import 
com.simiacryptus.cognotik.actors.ParsedAgent -import com.simiacryptus.cognotik.actors.ParsedResponse +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedResponse import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.diff.PatchProcessor diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/DocumentParsingModel.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/DocumentParsingModel.kt index 844d1460b..3bab15922 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/DocumentParsingModel.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/DocumentParsingModel.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.apps.parse -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.embedding.EmbeddingModel diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/LogPatternGenerator.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/LogPatternGenerator.kt index f093d28d6..3d5cd7fe5 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/LogPatternGenerator.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/apps/parse/LogPatternGenerator.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.apps.parse -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/AbstractTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/AbstractTask.kt index 
466c4a2a3..d9988f220 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/AbstractTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/AbstractTask.kt @@ -8,58 +8,57 @@ import java.io.File import java.nio.file.Path abstract class AbstractTask( - val orchestrationConfig: OrchestrationConfig, - val executionConfig: T? + val orchestrationConfig: OrchestrationConfig, + val executionConfig: T? ) { - var state: TaskState? = TaskState.Pending - protected val codeFiles = mutableMapOf() + var state: TaskState? = TaskState.Pending - protected open val root: Path - get() = orchestrationConfig.absoluteWorkingDir?.let { File(it).toPath() } - ?: throw IllegalStateException("Working directory not set") + protected open val root: Path + get() = orchestrationConfig.absoluteWorkingDir?.let { File(it).toPath() } + ?: throw IllegalStateException("Working directory not set") - open val typeConfig: U? - get() = executionConfig?.task_type - ?.let { task_type -> orchestrationConfig.taskSettings.values.firstOrNull { it.task_type == task_type } as? U } + open val typeConfig: U? + get() = executionConfig?.task_type + ?.let { task_type -> orchestrationConfig.taskSettings.values.firstOrNull { it.task_type == task_type } as? U } - enum class TaskState { - Pending, - InProgress, - Completed, - } + enum class TaskState { + Pending, + InProgress, + Completed, + } - open fun getPriorCode(executionState: ExecutionState?) = - executionConfig?.task_dependencies?.joinToString("\n\n\n") { dependency -> - "# $dependency\n\n${executionState?.taskResult[dependency] ?: ""}" - } ?: "" + open fun getPriorCode(executionState: ExecutionState?) 
= + executionConfig?.task_dependencies?.joinToString("\n\n\n") { dependency -> + "# $dependency\n\n${executionState?.taskResult[dependency] ?: ""}" + } ?: "" - protected open fun acceptButtonFooter(ui: SocketManager, fn: () -> Unit): String { - val footerTask = ui.newTask(false) - lateinit var textHandle: StringBuilder - @Suppress("AssignedValueIsNeverRead") - textHandle = footerTask.complete(ui.hrefLink("Accept", classname = "href-link cmd-button") { - try { - textHandle.set("""
Accepted
""") - footerTask.complete() - } catch (e: Throwable) { - log.warn("Error", e) - } - fn() - })!! - return footerTask.placeholder - } + protected open fun acceptButtonFooter(ui: SocketManager, fn: () -> Unit): String { + val footerTask = ui.newTask(false) + lateinit var textHandle: StringBuilder + @Suppress("AssignedValueIsNeverRead") + textHandle = footerTask.complete(ui.hrefLink("Accept", classname = "href-link cmd-button") { + try { + textHandle.set("""
Accepted
""") + footerTask.complete() + } catch (e: Throwable) { + log.warn("Error", e) + } + fn() + })!! + return footerTask.placeholder + } - abstract fun promptSegment(): String + abstract fun promptSegment(): String - abstract fun run( - agent: TaskOrchestrator, - messages: List = listOf(), - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig, - ) + abstract fun run( + agent: TaskOrchestrator, + messages: List = listOf(), + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig, + ) - companion object { - val log = LoggerFactory.getLogger(AbstractTask::class.java) - } + companion object { + val log = LoggerFactory.getLogger(AbstractTask::class.java) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ApiChatModelDeserializer.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ApiChatModelDeserializer.kt new file mode 100644 index 000000000..0bfc1dee2 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ApiChatModelDeserializer.kt @@ -0,0 +1,35 @@ +package com.simiacryptus.cognotik.plan + +import com.fasterxml.jackson.core.JsonParser +import com.fasterxml.jackson.databind.DeserializationContext +import com.fasterxml.jackson.databind.JsonDeserializer +import com.simiacryptus.cognotik.platform.ApplicationServices +import com.simiacryptus.cognotik.platform.model.ApiChatModel + +/** + * Custom deserializer for ApiChatModel that resolves the model from its name + */ +class ApiChatModelDeserializer : JsonDeserializer() { + override fun deserialize(p: JsonParser, ctxt: DeserializationContext): ApiChatModel? 
{ + val modelName = p.readValueAs(String::class.java) ?: return null + val userSettings = ApplicationServices + .fileApplicationServices() + .userSettingsManager + .getUserSettings() + val model = userSettings.apis.flatMap { + it.provider?.getChatModels(it.key ?: "", it.baseUrl) ?: listOf() + }.firstOrNull { + it.modelName == modelName || it.name == modelName + } + if (model == null) { + throw IllegalStateException("No API model found for model $modelName") + } + val apiData = userSettings.apis.firstOrNull { + it.provider == model.provider + } + if (apiData == null) { + throw IllegalStateException("No API data found for model $modelName") + } + return ApiChatModel(model, apiData) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ApiChatModelSerializer.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ApiChatModelSerializer.kt new file mode 100644 index 000000000..42ca950d8 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ApiChatModelSerializer.kt @@ -0,0 +1,19 @@ +package com.simiacryptus.cognotik.plan + +import com.fasterxml.jackson.core.JsonGenerator +import com.fasterxml.jackson.databind.JsonSerializer +import com.fasterxml.jackson.databind.SerializerProvider +import com.simiacryptus.cognotik.platform.model.ApiChatModel + +/** + * Custom serializer for ApiChatModel that only serializes the model name + */ +class ApiChatModelSerializer : JsonSerializer() { + override fun serialize(value: ApiChatModel?, gen: JsonGenerator, serializers: SerializerProvider) { + if (value == null) { + gen.writeNull() + } else { + gen.writeString(value.model?.modelName ?: value.model?.name) + } + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ExecutionState.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ExecutionState.kt index 3ec351f5e..17563ad6c 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ExecutionState.kt +++ 
b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/ExecutionState.kt @@ -4,13 +4,13 @@ import com.simiacryptus.cognotik.webui.session.SessionTask import java.util.concurrent.Future data class ExecutionState( - val subTasks: Map, - val tasksByDescription: MutableMap = subTasks.entries.toTypedArray() - .associate { (it.value.task_description ?: it.key) to it.value }.toMutableMap(), - val taskIdProcessingQueue: MutableList = com.simiacryptus.cognotik.plan.PlanUtil.executionOrder(subTasks) - .toMutableList(), - val taskResult: MutableMap = mutableMapOf(), - val completedTasks: MutableList = mutableListOf(), - val taskFutures: MutableMap> = mutableMapOf(), - val uitaskMap: MutableMap = mutableMapOf() + val subTasks: Map, + val tasksByDescription: MutableMap = subTasks.entries.toTypedArray() + .associate { (it.value.task_description ?: it.key) to it.value }.toMutableMap(), + val taskIdProcessingQueue: MutableList = PlanUtil.executionOrder(subTasks) + .toMutableList(), + val taskResult: MutableMap = mutableMapOf(), + val completedTasks: MutableList = mutableListOf(), + val taskFutures: MutableMap> = mutableMapOf(), + val uitaskMap: MutableMap = mutableMapOf() ) \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/OrchestrationConfig.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/OrchestrationConfig.kt index 6605a5d8f..cfee0a562 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/OrchestrationConfig.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/OrchestrationConfig.kt @@ -1,15 +1,9 @@ package com.simiacryptus.cognotik.plan import com.fasterxml.jackson.annotation.JsonIgnore -import com.fasterxml.jackson.core.JsonGenerator -import com.fasterxml.jackson.core.JsonParser -import com.fasterxml.jackson.databind.DeserializationContext -import com.fasterxml.jackson.databind.JsonDeserializer -import com.fasterxml.jackson.databind.JsonSerializer -import 
com.fasterxml.jackson.databind.SerializerProvider import com.fasterxml.jackson.databind.annotation.JsonDeserialize import com.fasterxml.jackson.databind.annotation.JsonSerialize -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.describe.TypeDescriber @@ -27,154 +21,164 @@ import java.io.File class OrchestrationConfig( - @JsonSerialize(using = ApiChatModelSerializer::class) - @JsonDeserialize(using = ApiChatModelDeserializer::class) - var defaultModel: ApiChatModel? = null, - @JsonSerialize(using = ApiChatModelSerializer::class) - @JsonDeserialize(using = ApiChatModelDeserializer::class) - var parsingModel: ApiChatModel? = null, - var cognitiveMode: CognitiveModeStrategies? = null, - val shellCmd: List = listOf(if (isWindows) "powershell" else "bash"), - var temperature: Double = 0.2, - val budget: Double = 2.0, - val taskSettings: MutableMap = TaskType.values().filter { - false // Do not auto-enable any tasks - }.associateWith { taskType -> - taskType.newSettings()?.let { - it.name = taskType.description - it - } ?: throw IllegalStateException("No default config for task type ${taskType.name}") - }.mapKeys { it.key.name }.toMutableMap(), - var autoFix: Boolean = false, - val env: Map? = mapOf(), - val workingDir: String? = ".", - val language: String? = if (isWindows) "powershell" else "bash", - var maxTaskHistoryChars: Int = 10000, - var maxTasksPerIteration: Int = 1, - var maxIterations: Int = 10, + @JsonSerialize(using = ApiChatModelSerializer::class) + @JsonDeserialize(using = ApiChatModelDeserializer::class) + var defaultModel: ApiChatModel? = null, + @JsonSerialize(using = ApiChatModelSerializer::class) + @JsonDeserialize(using = ApiChatModelDeserializer::class) + var parsingModel: ApiChatModel? 
= null, + @JsonSerialize(using = ApiChatModelSerializer::class) + @JsonDeserialize(using = ApiChatModelDeserializer::class) + var imageChatModel: ApiChatModel? = null, + var cognitiveMode: CognitiveModeStrategies? = null, + val shellCmd: List = listOf(if (isWindows) "powershell" else "bash"), + var temperature: Double = 0.2, + val budget: Double = 2.0, + val taskSettings: MutableMap = TaskType.values().filter { + false // Do not auto-enable any tasks + }.associateWith { taskType -> + taskType.newSettings()?.let { + it.name = taskType.description + it + } ?: throw IllegalStateException("No default config for task type ${taskType.name}") + }.mapKeys { it.key.name }.toMutableMap(), + var autoFix: Boolean = false, + val env: Map? = mapOf(), + val workingDir: String? = ".", + val language: String? = if (isWindows) "powershell" else "bash", + var maxTaskHistoryChars: Int = 10000, + var maxTasksPerIteration: Int = 1, + var maxIterations: Int = 10, ) { - @get:JsonIgnore - var processor: PatchProcessor = PatchProcessors.Fuzzy + @get:JsonIgnore + var processor: PatchProcessor = PatchProcessors.Fuzzy - @get:JsonIgnore - val defaultChatter get() = instance(defaultModel ?: throw IllegalStateException("Default model not set")) + @get:JsonIgnore + val defaultChatter get() = instance(defaultModel ?: throw IllegalStateException("Default model not set")) - @get:JsonIgnore - val parsingChatter - get() = instance(parsingModel ?: defaultModel ?: throw IllegalStateException("Parsing model not set")) + @get:JsonIgnore + val parsingChatter + get() = instance(parsingModel ?: defaultModel ?: throw IllegalStateException("Parsing model not set")) - @JsonIgnore - fun instance(model: ApiChatModel) = instanceFn?.let { it(model) } - ?: throw IllegalStateException("Instance function not set") - - @get:JsonIgnore - val absoluteWorkingDir - get() = when { - this.workingDir == null -> null//throw IllegalStateException("Working directory not set") - this.workingDir.startsWith("~") -> File( - 
this.workingDir.replaceFirst( - "~", - System.getProperty("user.home") - ) - ).absolutePath + @get:JsonIgnore + val imageChatChatter + get() = instance(imageChatModel ?: defaultModel ?: throw IllegalStateException("Image chat model not set")) - else -> File(this.workingDir).absolutePath - } - fun getTaskSettings(taskType: TaskType<*, *>): TaskTypeConfig = - taskSettings[taskType.name] ?: taskType.newSettings() ?.also { - it.name = taskType.description - taskSettings[taskType.name] = it - } ?: throw IllegalStateException("No default config for task type ${taskType.name}") + @JsonIgnore + fun instance(model: ApiChatModel) = instanceFn?.let { it(model) } + ?: throw IllegalStateException("Instance function not set") - fun planningActor( - describer: TypeDescriber, - task: SessionTask - ): ParsedAgent { - val availableTaskTypes = TaskType.Companion.getAvailableTaskTypes(this) - return planningActor( - taskDescriptions = availableTaskTypes.joinToString("\n") { taskType -> - val impl = TaskType.Companion.getImpl(this, taskType) - "* ${impl.promptSegment()}" - }, - model = defaultChatter.getChildClient(task), - parsingModel = parsingChatter.getChildClient(task), - temperature = temperature, - describer = describer, - availableTaskTypes = availableTaskTypes + @get:JsonIgnore + val absoluteWorkingDir + get() = when { + this.workingDir == null -> null//throw IllegalStateException("Working directory not set") + this.workingDir.startsWith("~") -> File( + this.workingDir.replaceFirst( + "~", + System.getProperty("user.home") ) + ).absolutePath + + else -> File(this.workingDir).absolutePath } - @JsonIgnore - fun copy( - model: ApiChatModel? = this.defaultModel, - parsingModel: ApiChatModel? = this.parsingModel, - shellCmd: List = this.shellCmd, - temperature: Double = this.temperature, - budget: Double = this.budget, - taskSettings: MutableMap = this.taskSettings, - autoFix: Boolean = this.autoFix, - env: Map? = this.env, - workingDir: String? 
= this.workingDir, - language: String? = this.language, - cognitiveMode: CognitiveModeStrategies? = this.cognitiveMode, - maxTaskHistoryChars: Int = this.maxTaskHistoryChars, - maxTasksPerIteration: Int = this.maxTasksPerIteration, - maxIterations: Int = this.maxIterations, - ): OrchestrationConfig = OrchestrationConfig( - defaultModel = model, - parsingModel = parsingModel, - shellCmd = shellCmd, - temperature = temperature, - budget = budget, - taskSettings = taskSettings, - autoFix = autoFix, - env = env, - workingDir = workingDir, - language = language, - maxTaskHistoryChars = maxTaskHistoryChars, - maxTasksPerIteration = maxTasksPerIteration, - maxIterations = maxIterations, - cognitiveMode = cognitiveMode, + fun getTaskSettings(taskType: TaskType<*, *>): TaskTypeConfig = + taskSettings[taskType.name] ?: taskType.newSettings()?.also { + it.name = taskType.description + taskSettings[taskType.name] = it + } ?: throw IllegalStateException("No default config for task type ${taskType.name}") + + fun planningActor( + describer: TypeDescriber, + task: SessionTask + ): ParsedAgent { + val availableTaskTypes = TaskType.getAvailableTaskTypes(this) + return planningActor( + taskDescriptions = availableTaskTypes.joinToString("\n") { taskType -> + val impl = TaskType.getImpl(this, taskType) + "* ${impl.promptSegment()}" + }, + model = defaultChatter.getChildClient(task), + parsingModel = parsingChatter.getChildClient(task), + temperature = temperature, + describer = describer, + availableTaskTypes = availableTaskTypes ) + } + @JsonIgnore + fun copy( + model: ApiChatModel? = this.defaultModel, + parsingModel: ApiChatModel? = this.parsingModel, + imageChatModel: ApiChatModel? = this.imageChatModel, + shellCmd: List = this.shellCmd, + temperature: Double = this.temperature, + budget: Double = this.budget, + taskSettings: MutableMap = this.taskSettings, + autoFix: Boolean = this.autoFix, + env: Map? = this.env, + workingDir: String? = this.workingDir, + language: String? 
= this.language, + cognitiveMode: CognitiveModeStrategies? = this.cognitiveMode, + maxTaskHistoryChars: Int = this.maxTaskHistoryChars, + maxTasksPerIteration: Int = this.maxTasksPerIteration, + maxIterations: Int = this.maxIterations, + ): OrchestrationConfig = OrchestrationConfig( + defaultModel = model, + parsingModel = parsingModel, + imageChatModel = imageChatModel, + shellCmd = shellCmd, + temperature = temperature, + budget = budget, + taskSettings = taskSettings, + autoFix = autoFix, + env = env, + workingDir = workingDir, + language = language, + maxTaskHistoryChars = maxTaskHistoryChars, + maxTasksPerIteration = maxTasksPerIteration, + maxIterations = maxIterations, + cognitiveMode = cognitiveMode, + ) - data class TaskBreakdownResult( - @Description("A map where each task ID is associated with its corresponding PlanTask object. Crucial for defining task relationships and information flow.") - val tasksByID: Map? = null, - ) - companion object { - var exampleInstance = TaskBreakdownResult( - tasksByID = mapOf( - "1" to SelfHealingTaskExecutionConfigData( - task_description = "Task 1", task_dependencies = listOf(), commands = listOf( - SelfHealingTask.CommandWithWorkingDir( - command = listOf("echo", "Hello, World!"), workingDir = "." - ) - ) - ), "2" to FileModificationTaskExecutionConfigData( - task_description = "Task 2", - task_dependencies = listOf("1"), - related_files = listOf("input2.txt"), - files = listOf("output2.txt"), - ) - ), + data class TaskBreakdownResult( + @Description("A map where each task ID is associated with its corresponding PlanTask object. Crucial for defining task relationships and information flow.") + val tasksByID: Map? 
= null, + ) + + companion object { + var exampleInstance = TaskBreakdownResult( + tasksByID = mapOf( + "1" to SelfHealingTaskExecutionConfigData( + task_description = "Task 1", task_dependencies = listOf(), commands = listOf( + SelfHealingTask.CommandWithWorkingDir( + command = listOf("echo", "Hello, World!"), workingDir = "." + ) + ) + ), "2" to FileModificationTaskExecutionConfigData( + task_description = "Task 2", + task_dependencies = listOf("1"), + related_files = listOf("input2.txt"), + files = listOf("output2.txt"), ) + ), + ) - fun planningActor( - taskDescriptions: String, - model: ChatInterface, - parsingModel: ChatInterface, - temperature: Double, - describer: TypeDescriber, - availableTaskTypes: List> - ): ParsedAgent = ParsedAgent( - name = "TaskBreakdown", - resultClass = TaskBreakdownResult::class.java, - exampleInstance = exampleInstance, - prompt = """ + fun planningActor( + taskDescriptions: String, + model: ChatInterface, + parsingModel: ChatInterface, + temperature: Double, + describer: TypeDescriber, + availableTaskTypes: List> + ): ParsedAgent = ParsedAgent( + name = "TaskBreakdown", + resultClass = TaskBreakdownResult::class.java, + exampleInstance = exampleInstance, + prompt = """ Given a user request, identify and list smaller, actionable tasks that can be directly implemented in code. (Do not repeat or ask for the JSON content since the platform already handles reading the software graph.) For each task: @@ -186,107 +190,67 @@ class OrchestrationConfig( """.trimIndent() + "\n " + taskDescriptions + """ (Remember: the JSON file content is already loaded by the platform.) 
""".trimIndent(), - model = model, - parsingChatter = parsingModel, - temperature = temperature, - describer = describer, - parserPrompt = ("\nTask Subtype Schema:\n\n" + availableTaskTypes.joinToString("\n\n") { taskType -> - "\n${taskType.name}:\n ${ - describer.describe(taskType.executionConfigClass).lineSequence() - .map { - when { - it.isBlank() -> { - when { - it.length < " ".length -> " " - else -> it - } - } + model = model, + parsingChatter = parsingModel, + temperature = temperature, + describer = describer, + parserPrompt = ("\nTask Subtype Schema:\n\n" + availableTaskTypes.joinToString("\n\n") { taskType -> + "\n${taskType.name}:\n ${ + describer.describe(taskType.executionConfigClass).lineSequence() + .map { + when { + it.isBlank() -> { + when { + it.length < " ".length -> " " + else -> it + } + } - else -> " " + it - } - } - .joinToString("\n") - }\n".trim() - } + "\n") - ) - - @JsonIgnore - var instanceFn: ((ApiChatModel) -> ChatInterface)? = null - } + else -> " " + it + } + } + .joinToString("\n") + }\n".trim() + } + "\n") + ) - /** - * Get all available task configurations for a given task type - */ - fun getTaskConfigs(taskType: TaskType<*, *>): List { - return taskSettings.filter { it.value.task_type == taskType.name }.values.toList() - } + @JsonIgnore + var instanceFn: ((ApiChatModel) -> ChatInterface)? = null + } - /** - * Get a specific task configuration by task type and name - */ - fun getTaskConfig(taskType: TaskType<*, *>, configName: String?): TaskTypeConfig? 
{ - val configs = getTaskConfigs(taskType) - return if (configName != null) { - configs.firstOrNull { it.name == configName } - } else { - configs.firstOrNull() - } - } + /** + * Get all available task configurations for a given task type + */ + fun getTaskConfigs(taskType: TaskType<*, *>): List { + return taskSettings.filter { it.value.task_type == taskType.name }.values.toList() + } - fun addTaskConfig(taskType: TaskType<*, *>, newConfig: TaskTypeConfig) { - val configs = getTaskConfigs(taskType) - if (configs.any { it.name == newConfig.name }) { - throw IllegalArgumentException("A configuration with the name '${newConfig.name}' already exists for task type '${taskType.name}'") - } - taskSettings[newConfig.task_type!!] = newConfig + /** + * Get a specific task configuration by task type and name + */ + fun getTaskConfig(taskType: TaskType<*, *>, configName: String?): TaskTypeConfig? { + val configs = getTaskConfigs(taskType) + return if (configName != null) { + configs.firstOrNull { it.name == configName } + } else { + configs.firstOrNull() } + } - fun removeTaskConfig(taskType: TaskType<*, *>, selectedConfig: String) { - val configs = getTaskConfigs(taskType) - val configToRemove = configs.firstOrNull { it.name == selectedConfig } - if (configToRemove != null) { - taskSettings.remove(configToRemove.task_type) - } + fun addTaskConfig(taskType: TaskType<*, *>, newConfig: TaskTypeConfig) { + val configs = getTaskConfigs(taskType) + if (configs.any { it.name == newConfig.name }) { + throw IllegalArgumentException("A configuration with the name '${newConfig.name}' already exists for task type '${taskType.name}'") } -} + taskSettings[newConfig.task_type!!] 
= newConfig + } -/** - * Custom serializer for ApiChatModel that only serializes the model name - */ -class ApiChatModelSerializer : JsonSerializer() { - override fun serialize(value: ApiChatModel?, gen: JsonGenerator, serializers: SerializerProvider) { - if (value == null) { - gen.writeNull() - } else { - gen.writeString(value.model?.modelName ?: value.model?.name) - } + fun removeTaskConfig(taskType: TaskType<*, *>, selectedConfig: String) { + val configs = getTaskConfigs(taskType) + val configToRemove = configs.firstOrNull { it.name == selectedConfig } + if (configToRemove != null) { + taskSettings.remove(configToRemove.task_type) } + } } -/** - * Custom deserializer for ApiChatModel that resolves the model from its name - */ -class ApiChatModelDeserializer : JsonDeserializer() { - override fun deserialize(p: JsonParser, ctxt: DeserializationContext): ApiChatModel? { - val modelName = p.readValueAs(String::class.java) ?: return null - val userSettings = com.simiacryptus.cognotik.platform.ApplicationServices - .fileApplicationServices() - .userSettingsManager - .getUserSettings() - val model = userSettings.apis.flatMap { - it.provider?.getChatModels(it.key ?: "", it.baseUrl) ?: listOf() - }.firstOrNull { - it.modelName == modelName || it.name == modelName - } - if (model == null) { - throw IllegalStateException("No API model found for model $modelName") - } - val apiData = userSettings.apis.firstOrNull { - it.provider == model.provider - } - if (apiData == null) { - throw IllegalStateException("No API data found for model $modelName") - } - return ApiChatModel(model, apiData) - } -} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/PlanUtil.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/PlanUtil.kt index 18658c6ab..993fa9f64 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/PlanUtil.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/PlanUtil.kt @@ -9,142 +9,142 @@ import 
java.util.concurrent.ConcurrentHashMap object PlanUtil { - fun diagram( - taskMap: Map - ) = "## Sub-Plan Task Dependency Graph\n${TRIPLE_TILDE}mermaid\n${ - buildMermaidGraph( - taskMap - ) - }\n${TRIPLE_TILDE}".renderMarkdown + fun diagram( + taskMap: Map + ) = "## Sub-Plan Task Dependency Graph\n${TRIPLE_TILDE}mermaid\n${ + buildMermaidGraph( + taskMap + ) + }\n${TRIPLE_TILDE}".renderMarkdown - fun executionOrder(tasks: Map): List { - val taskIds: MutableList = mutableListOf() - val taskMap = tasks.toMutableMap() - while (taskMap.isNotEmpty()) { - val nextTasks = - taskMap.filter { (_, task) -> - task.task_dependencies?.filter { entry -> - entry in tasks.keys - }?.all { taskIds.contains(it) } ?: true - } - if (nextTasks.isEmpty()) { - throw RuntimeException("Circular dependency detected in task breakdown") - } - taskIds.addAll(nextTasks.keys) - nextTasks.keys.forEach { taskMap.remove(it) } + fun executionOrder(tasks: Map): List { + val taskIds: MutableList = mutableListOf() + val taskMap = tasks.toMutableMap() + while (taskMap.isNotEmpty()) { + val nextTasks = + taskMap.filter { (_, task) -> + task.task_dependencies?.filter { entry -> + entry in tasks.keys + }?.all { taskIds.contains(it) } ?: true } - return taskIds + if (nextTasks.isEmpty()) { + throw RuntimeException("Circular dependency detected in task breakdown") + } + taskIds.addAll(nextTasks.keys) + nextTasks.keys.forEach { taskMap.remove(it) } } + return taskIds + } - val isWindows = System.getProperty("os.name").lowercase(Locale.getDefault()).contains("windows") - private fun sanitizeForMermaid(input: String) = input - .replace(" ", "_") - .replace("\"", "\\\"") - .replace("[", "\\[") - .replace("]", "\\]") - .replace("(", "\\(") - .replace(")", "\\)") - .let { "`$it`" } + val isWindows = System.getProperty("os.name").lowercase(Locale.getDefault()).contains("windows") + private fun sanitizeForMermaid(input: String) = input + .replace(" ", "_") + .replace("\"", "\\\"") + .replace("[", "\\[") + 
.replace("]", "\\]") + .replace("(", "\\(") + .replace(")", "\\)") + .let { "`$it`" } - private fun escapeMermaidCharacters(input: String) = input - .replace("\"", "\\\"") - .let { '"' + it + '"' } + private fun escapeMermaidCharacters(input: String) = input + .replace("\"", "\\\"") + .let { '"' + it + '"' } - private val mermaidGraphCache = ConcurrentHashMap() - private val mermaidExceptionCache = ConcurrentHashMap() + private val mermaidGraphCache = ConcurrentHashMap() + private val mermaidExceptionCache = ConcurrentHashMap() - fun buildMermaidGraph(subTasks: Map): String { + fun buildMermaidGraph(subTasks: Map): String { - val cacheKey = JsonUtil.toJson(subTasks) + val cacheKey = JsonUtil.toJson(subTasks) - mermaidGraphCache[cacheKey]?.let { return it } - mermaidExceptionCache[cacheKey]?.let { throw it } - try { - val graphBuilder = StringBuilder("graph TD;\n") - subTasks.forEach { (taskId, task) -> - val sanitizedTaskId = sanitizeForMermaid(taskId) - val taskType = task.task_type ?: "Unknown" - val escapedDescription = - escapeMermaidCharacters(task.task_description ?: "") - val style = when (task.state) { - TaskState.Completed -> ":::completed" - TaskState.InProgress -> ":::inProgress" - else -> ":::$taskType" - } - graphBuilder.append(" ${sanitizedTaskId}[$escapedDescription]$style;\n") - task.task_dependencies?.forEach { dependency -> - val sanitizedDependency = sanitizeForMermaid(dependency) - graphBuilder.append(" $sanitizedDependency --> ${sanitizedTaskId};\n") - } - } - graphBuilder.append(" classDef default fill:#f9f9f9,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef NewFile fill:lightblue,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef EditFile fill:lightgreen,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef Documentation fill:lightyellow,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef Inquiry fill:orange,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef 
TaskPlanning fill:lightgrey,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef completed fill:#90EE90,stroke:#333,stroke-width:2px;\n") - graphBuilder.append(" classDef inProgress fill:#FFA500,stroke:#333,stroke-width:2px;\n") - val graph = graphBuilder.toString() - mermaidGraphCache[cacheKey] = graph - return graph - } catch (e: Exception) { - mermaidExceptionCache[cacheKey] = e - throw e + mermaidGraphCache[cacheKey]?.let { return it } + mermaidExceptionCache[cacheKey]?.let { throw it } + try { + val graphBuilder = StringBuilder("graph TD;\n") + subTasks.forEach { (taskId, task) -> + val sanitizedTaskId = sanitizeForMermaid(taskId) + val taskType = task.task_type ?: "Unknown" + val escapedDescription = + escapeMermaidCharacters(task.task_description ?: "") + val style = when (task.state) { + TaskState.Completed -> ":::completed" + TaskState.InProgress -> ":::inProgress" + else -> ":::$taskType" + } + graphBuilder.append(" ${sanitizedTaskId}[$escapedDescription]$style;\n") + task.task_dependencies?.forEach { dependency -> + val sanitizedDependency = sanitizeForMermaid(dependency) + graphBuilder.append(" $sanitizedDependency --> ${sanitizedTaskId};\n") } + } + graphBuilder.append(" classDef default fill:#f9f9f9,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef NewFile fill:lightblue,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef EditFile fill:lightgreen,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef Documentation fill:lightyellow,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef Inquiry fill:orange,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef TaskPlanning fill:lightgrey,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef completed fill:#90EE90,stroke:#333,stroke-width:2px;\n") + graphBuilder.append(" classDef inProgress fill:#FFA500,stroke:#333,stroke-width:2px;\n") + val graph = graphBuilder.toString() + mermaidGraphCache[cacheKey] = graph 
+ return graph + } catch (e: Exception) { + mermaidExceptionCache[cacheKey] = e + throw e } + } - fun filterPlan( - retries: Int = 3, - fn: () -> Map? - ): Map? { - val tasksByID = fn() ?: emptyMap() - tasksByID.forEach { - it.value.task_dependencies = it.value.task_dependencies?.filter { it in tasksByID.keys }?.toMutableList() - it.value.state = TaskState.Pending - } - try { - executionOrder(tasksByID) - } catch (e: RuntimeException) { - if (retries <= 0) { - log.warn("Error filtering plan: " + JsonUtil.toJson(fn() ?: emptyMap()), e) - throw e - } else { - log.info("Circular dependency detected in task breakdown") - return filterPlan(retries - 1, fn) - } - } - return if (tasksByID.size == (fn() ?: emptyMap()).size) { - fn() ?: emptyMap() - } else filterPlan { - tasksByID - } + fun filterPlan( + retries: Int = 3, + fn: () -> Map? + ): Map? { + val tasksByID = fn() ?: emptyMap() + tasksByID.forEach { + it.value.task_dependencies = it.value.task_dependencies?.filter { it in tasksByID.keys }?.toMutableList() + it.value.state = TaskState.Pending } + try { + executionOrder(tasksByID) + } catch (e: RuntimeException) { + if (retries <= 0) { + log.warn("Error filtering plan: " + JsonUtil.toJson(fn() ?: emptyMap()), e) + throw e + } else { + log.info("Circular dependency detected in task breakdown") + return filterPlan(retries - 1, fn) + } + } + return if (tasksByID.size == (fn() ?: emptyMap()).size) { + fn() ?: emptyMap() + } else filterPlan { + tasksByID + } + } - fun getAllDependencies( - subPlanTask: TaskExecutionConfig, - subTasks: Map, - visited: MutableSet - ): List { - val dependencies = subPlanTask.task_dependencies?.toMutableList() ?: mutableListOf() - subPlanTask.task_dependencies?.forEach { dep -> - if (dep in visited) return@forEach - val subTask = subTasks[dep] - if (subTask != null) { - visited.add(dep) - dependencies.addAll( - getAllDependencies( - subTask, - subTasks, - visited - ) - ) - } - } - return dependencies + fun getAllDependencies( + subPlanTask: 
TaskExecutionConfig, + subTasks: Map, + visited: MutableSet + ): List { + val dependencies = subPlanTask.task_dependencies?.toMutableList() ?: mutableListOf() + subPlanTask.task_dependencies?.forEach { dep -> + if (dep in visited) return@forEach + val subTask = subTasks[dep] + if (subTask != null) { + visited.add(dep) + dependencies.addAll( + getAllDependencies( + subTask, + subTasks, + visited + ) + ) + } } + return dependencies + } - val log = LoggerFactory.getLogger(PlanUtil::class.java) + val log = LoggerFactory.getLogger(PlanUtil::class.java) } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskContextYamlDescriber.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskContextYamlDescriber.kt index 09ee83fe5..8fb954d96 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskContextYamlDescriber.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskContextYamlDescriber.kt @@ -3,17 +3,17 @@ package com.simiacryptus.cognotik.plan import com.simiacryptus.cognotik.describe.AbbrevWhitelistYamlDescriber class TaskContextYamlDescriber( - val orchestrationConfig : OrchestrationConfig + val orchestrationConfig: OrchestrationConfig ) : AbbrevWhitelistYamlDescriber( - "com.simiacryptus", "aicoder.actions" + "com.simiacryptus", "aicoder.actions" ) { - override val includeMethods: Boolean get() = false + override val includeMethods: Boolean get() = false - override fun getEnumValues(clazz: Class<*>): List { - return if (clazz == TaskType::class.java) { - orchestrationConfig.taskSettings.keys.toList() - } else { - super.getEnumValues(clazz) - } + override fun getEnumValues(clazz: Class<*>): List { + return if (clazz == TaskType::class.java) { + orchestrationConfig.taskSettings.keys.toList() + } else { + super.getEnumValues(clazz) } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskExecutionConfig.kt 
b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskExecutionConfig.kt index 0fe789754..5a599a03f 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskExecutionConfig.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskExecutionConfig.kt @@ -15,34 +15,34 @@ open class TaskExecutionConfig( @Description("A brief user-facing description of the task") open var task_description: String? = null, @Description("A list of IDs of tasks that must be completed before this task can be executed. This defines upstream dependencies ensuring proper task order and information flow.") - var task_dependencies: MutableList? = null, + var task_dependencies: MutableList? = null, @Description("Ignore.") - var state: AbstractTask.TaskState? = null + var state: AbstractTask.TaskState? = null ) { - class PlanTaskTypeIdResolver : TypeIdResolverBase() { - override fun idFromValue(value: Any) = when (value) { - is TaskExecutionConfig -> if (value.task_type != null) { - value.task_type - } else { - throw IllegalArgumentException("Unknown task type") - } + class PlanTaskTypeIdResolver : TypeIdResolverBase() { + override fun idFromValue(value: Any) = when (value) { + is TaskExecutionConfig -> if (value.task_type != null) { + value.task_type + } else { + throw IllegalArgumentException("Unknown task type") + } - else -> { - throw IllegalArgumentException("Unexpected value type: ${value.javaClass}") - } - } + else -> { + throw IllegalArgumentException("Unexpected value type: ${value.javaClass}") + } + } - override fun idFromValueAndType(value: Any, suggestedType: Class<*>) = idFromValue(value) + override fun idFromValueAndType(value: Any, suggestedType: Class<*>) = idFromValue(value) - override fun typeFromId(context: DatabindContext, id: String): JavaType { - val taskType = TaskType.valueOf(id.replace(" ", "")) - val subType = context.constructType(taskType.executionConfigClass) - return subType - } + override fun typeFromId(context: DatabindContext, id: 
String): JavaType { + val taskType = TaskType.valueOf(id.replace(" ", "")) + val subType = context.constructType(taskType.executionConfigClass) + return subType + } - override fun getMechanism(): JsonTypeInfo.Id { - return JsonTypeInfo.Id.CUSTOM - } + override fun getMechanism(): JsonTypeInfo.Id { + return JsonTypeInfo.Id.CUSTOM } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskOrchestrator.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskOrchestrator.kt index 352871684..a873a557b 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskOrchestrator.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskOrchestrator.kt @@ -18,207 +18,207 @@ import java.util.concurrent.Future import java.util.concurrent.TimeUnit class TaskOrchestrator( - val user: User?, - val session: Session, - val dataStorage: StorageInterface, - val root: Path + val user: User?, + val session: Session, + val dataStorage: StorageInterface, + val root: Path ) { - val pool: ExecutorService by lazy { ApplicationServices.threadPoolManager.getPool(session, user) } + val pool: ExecutorService by lazy { ApplicationServices.threadPoolManager.getPool(session, user) } - val files: Array by lazy { - FileSelectionUtils.expandFileList(root.toFile()) - } + val files: Array by lazy { + FileSelectionUtils.expandFileList(root.toFile()) + } - val codeFiles: Map - get() = files - .filter { it.exists() && it.isFile } - .filter { !it.name.startsWith(".") } - .associate { file -> - root.relativize(file.toPath()) to try { - file.inputStream().bufferedReader().use { it.readText() } - } catch (e: Exception) { - log.warn("Error reading file", e) - "" - } - } + val codeFiles: Map + get() = files + .filter { it.exists() && it.isFile } + .filter { !it.name.startsWith(".") } + .associate { file -> + root.relativize(file.toPath()) to try { + file.inputStream().bufferedReader().use { it.readText() } + } catch (e: Exception) { + 
log.warn("Error reading file", e) + "" + } + } - var executionState: ExecutionState? = null + var executionState: ExecutionState? = null - fun executePlan( - plan: Map, - task: SessionTask, - userMessage: String, - orchestrationConfig: OrchestrationConfig, - ): ExecutionState { - val tabs = TabbedDisplay(task) - val planProcessingState = newState(plan) - this.executionState = planProcessingState - try { - val diagramTask = task.ui.newTask(false).apply { tabs["Plan"] = (placeholder) } - executePlan( - diagramBuffer = diagramTask.add( - "## Task Dependency Graph\n${TRIPLE_TILDE}mermaid\n${buildMermaidGraph(planProcessingState.subTasks)}\n$TRIPLE_TILDE".renderMarkdown, - additionalClasses = "flow-chart" - ), - subTasks = planProcessingState.subTasks, - task = diagramTask, - executionState = planProcessingState, - taskIdProcessingQueue = planProcessingState.taskIdProcessingQueue, - pool = pool, - userMessage = userMessage, - plan = plan, - tabs = tabs, - orchestrationConfig = orchestrationConfig - ) - } catch (e: Throwable) { - log.warn("Error during incremental code generation process", e) - task.error(e) - } - return planProcessingState + fun executePlan( + plan: Map, + task: SessionTask, + userMessage: String, + orchestrationConfig: OrchestrationConfig, + ): ExecutionState { + val tabs = TabbedDisplay(task) + val planProcessingState = newState(plan) + this.executionState = planProcessingState + try { + val diagramTask = task.ui.newTask(false).apply { tabs["Plan"] = (placeholder) } + executePlan( + diagramBuffer = diagramTask.add( + "## Task Dependency Graph\n${TRIPLE_TILDE}mermaid\n${buildMermaidGraph(planProcessingState.subTasks)}\n$TRIPLE_TILDE".renderMarkdown, + additionalClasses = "flow-chart" + ), + subTasks = planProcessingState.subTasks, + task = diagramTask, + executionState = planProcessingState, + taskIdProcessingQueue = planProcessingState.taskIdProcessingQueue, + pool = pool, + userMessage = userMessage, + plan = plan, + tabs = tabs, + 
orchestrationConfig = orchestrationConfig + ) + } catch (e: Throwable) { + log.warn("Error during incremental code generation process", e) + task.error(e) } + return planProcessingState + } - private fun newState(plan: Map) = - ExecutionState( - subTasks = (filterPlan { plan }?.entries?.toTypedArray>() - ?.associate { it.key to it.value } ?: mapOf()).toMutableMap() - ) + private fun newState(plan: Map) = + ExecutionState( + subTasks = (filterPlan { plan }?.entries?.toTypedArray>() + ?.associate { it.key to it.value } ?: mapOf()).toMutableMap() + ) - fun executePlan( - diagramBuffer: StringBuilder?, - subTasks: Map, - task: SessionTask, - executionState: ExecutionState, - taskIdProcessingQueue: MutableList, - pool: ExecutorService, - userMessage: String, - plan: Map, - tabs: TabbedDisplay, - orchestrationConfig: OrchestrationConfig, - ) { - val sessionTask = task.ui.newTask(false).apply { tabs["Session"] = placeholder } - val taskTabs = object : TabbedDisplay(sessionTask, additionalClasses = "task-tabs") { - override fun renderTabButtons(): String { - diagramBuffer?.set( - "## Task Dependency Graph\n${TRIPLE_TILDE}mermaid\n${buildMermaidGraph(subTasks)}\n$TRIPLE_TILDE".renderMarkdown - ) - task.complete() - return buildString { - append("
\n") - super.tabs.withIndex().forEach { (idx, t) -> - val (taskId, taskV) = t - val subTask = executionState.tasksByDescription[taskId] - if (null == subTask) { - log.warn("Task tab not found: $taskId") - } - val isChecked = if (taskId in taskIdProcessingQueue) "checked" else "" - val style = when (subTask?.state) { - AbstractTask.TaskState.Completed -> " style='text-decoration: line-through;'" - null -> " style='opacity: 20%;'" - AbstractTask.TaskState.Pending -> " style='opacity: 30%;'" - else -> "" - } - append("\n") - } - append("
") - } + fun executePlan( + diagramBuffer: StringBuilder?, + subTasks: Map, + task: SessionTask, + executionState: ExecutionState, + taskIdProcessingQueue: MutableList, + pool: ExecutorService, + userMessage: String, + plan: Map, + tabs: TabbedDisplay, + orchestrationConfig: OrchestrationConfig, + ) { + val sessionTask = task.ui.newTask(false).apply { tabs["Session"] = placeholder } + val taskTabs = object : TabbedDisplay(sessionTask, additionalClasses = "task-tabs") { + override fun renderTabButtons(): String { + diagramBuffer?.set( + "## Task Dependency Graph\n${TRIPLE_TILDE}mermaid\n${buildMermaidGraph(subTasks)}\n$TRIPLE_TILDE".renderMarkdown + ) + task.complete() + return buildString { + append("
\n") + super.tabs.withIndex().forEach { (idx, t) -> + val (taskId, _) = t + val subTask = executionState.tasksByDescription[taskId] + if (null == subTask) { + log.warn("Task tab not found: $taskId") + } + val isChecked = if (taskId in taskIdProcessingQueue) "checked" else "" + val style = when (subTask?.state) { + AbstractTask.TaskState.Completed -> " style='text-decoration: line-through;'" + null -> " style='opacity: 20%;'" + AbstractTask.TaskState.Pending -> " style='opacity: 30%;'" + else -> "" } + append("\n") + } + append("
") } - taskIdProcessingQueue.forEach { taskId -> - val newTask = task.ui.newTask(false) - executionState.uitaskMap[taskId] = newTask - val subtask: TaskExecutionConfig? = executionState.subTasks[taskId] - val description = subtask?.task_description - log.debug("Creating task tab: $taskId ${System.identityHashCode(subtask)} $description") - taskTabs[description ?: taskId] = newTask.placeholder + } + } + taskIdProcessingQueue.forEach { taskId -> + val newTask = task.ui.newTask(false) + executionState.uitaskMap[taskId] = newTask + val subtask: TaskExecutionConfig? = executionState.subTasks[taskId] + val description = subtask?.task_description + log.debug("Creating task tab: $taskId ${System.identityHashCode(subtask)} $description") + taskTabs[description ?: taskId] = newTask.placeholder + } + Thread.sleep(100) + while (taskIdProcessingQueue.isNotEmpty()) { + val taskId = taskIdProcessingQueue.removeAt(0) + val subTask = executionState.subTasks[taskId] ?: throw RuntimeException("Task not found: $taskId") + executionState.taskFutures[taskId] = pool.submit { + subTask.state = AbstractTask.TaskState.Pending + log.debug("Awaiting dependencies: ${subTask.task_dependencies?.joinToString(", ") ?: ""}") + subTask.task_dependencies + ?.associate { it to executionState.taskFutures[it] } + ?.forEach { (id, future) -> + try { + future?.get() ?: log.warn("Dependency not found: $id") + } catch (e: Throwable) { + log.warn("Error", e) + } + } + subTask.state = AbstractTask.TaskState.InProgress + taskTabs.update() + log.debug("Running task: ${System.identityHashCode(subTask)} ${subTask.task_description}") + val task1 = executionState.uitaskMap.get(taskId) ?: task.ui.newTask(false).apply { + taskTabs[taskId] = placeholder } - Thread.sleep(100) - while (taskIdProcessingQueue.isNotEmpty()) { - val taskId = taskIdProcessingQueue.removeAt(0) - val subTask = executionState.subTasks[taskId] ?: throw RuntimeException("Task not found: $taskId") - executionState.taskFutures[taskId] = pool.submit 
{ - subTask.state = AbstractTask.TaskState.Pending - log.debug("Awaiting dependencies: ${subTask.task_dependencies?.joinToString(", ") ?: ""}") - subTask.task_dependencies - ?.associate { it to executionState.taskFutures[it] } - ?.forEach { (id, future) -> - try { - future?.get() ?: log.warn("Dependency not found: $id") - } catch (e: Throwable) { - log.warn("Error", e) - } - } - subTask.state = AbstractTask.TaskState.InProgress - taskTabs.update() - log.debug("Running task: ${System.identityHashCode(subTask)} ${subTask.task_description}") - val task1 = executionState.uitaskMap.get(taskId) ?: task.ui.newTask(false).apply { - taskTabs[taskId] = placeholder - } - try { - val dependencies = subTask.task_dependencies?.toMutableSet() ?: mutableSetOf() - dependencies += getAllDependencies( - subPlanTask = subTask, - subTasks = executionState.subTasks, - visited = mutableSetOf() - ) + try { + val dependencies = subTask.task_dependencies?.toMutableSet() ?: mutableSetOf() + dependencies += getAllDependencies( + subPlanTask = subTask, + subTasks = executionState.subTasks, + visited = mutableSetOf() + ) - task1.add( - """ + task1.add( + """ ## Task `""".trimIndent() + taskId + "`" + (subTask.task_description ?: "") + "\n" + - TRIPLE_TILDE + "json" + JsonUtil.toJson(data = subTask) + "\n" + TRIPLE_TILDE + - "\n### Dependencies:" + dependencies.joinToString("\n") { "* $it" }.renderMarkdown - ) - val impl = getImpl(orchestrationConfig, subTask) - val messages = listOf( - userMessage, - JsonUtil.toJson(plan), - impl.getPriorCode(executionState) - ) - impl.run( - agent = this, - messages = messages, - task = task1, - resultFn = { executionState.taskResult[taskId] = it }, - orchestrationConfig = orchestrationConfig - ) - } catch (e: Throwable) { - log.warn("Error during task execution", e) - task1.error(e) - } finally { - executionState.completedTasks.add(element = taskId) - subTask.state = AbstractTask.TaskState.Completed - log.debug("Completed task: $taskId 
${System.identityHashCode(subTask)}") - taskTabs.update() - } - } + TRIPLE_TILDE + "json" + JsonUtil.toJson(data = subTask) + "\n" + TRIPLE_TILDE + + "\n### Dependencies:" + dependencies.joinToString("\n") { "* $it" }.renderMarkdown + ) + val impl = getImpl(orchestrationConfig, subTask) + val messages = listOf( + userMessage, + JsonUtil.toJson(plan), + impl.getPriorCode(executionState) + ) + impl.run( + agent = this, + messages = messages, + task = task1, + resultFn = { executionState.taskResult[taskId] = it }, + orchestrationConfig = orchestrationConfig + ) + } catch (e: Throwable) { + log.warn("Error during task execution", e) + task1.error(e) + } finally { + executionState.completedTasks.add(element = taskId) + subTask.state = AbstractTask.TaskState.Completed + log.debug("Completed task: $taskId ${System.identityHashCode(subTask)}") + taskTabs.update() } - await(executionState.taskFutures) + } } + await(executionState.taskFutures) + } - fun await(futures: MutableMap>) { - val start = System.currentTimeMillis() - while (futures.values.count { it.isDone } < futures.size && (System.currentTimeMillis() - start) < TimeUnit.MINUTES.toMillis( - 2 - )) { - Thread.sleep(1000) - } + fun await(futures: MutableMap>) { + val start = System.currentTimeMillis() + while (futures.values.count { it.isDone } < futures.size && (System.currentTimeMillis() - start) < TimeUnit.MINUTES.toMillis( + 2 + )) { + Thread.sleep(1000) } + } - fun copy( - user: User? = this.user, - session: Session = this.session, - dataStorage: StorageInterface = this.dataStorage, - root: Path = this.root - ) = TaskOrchestrator( - user = user, - session = session, - dataStorage = dataStorage, - root = root - ) + fun copy( + user: User? 
= this.user, + session: Session = this.session, + dataStorage: StorageInterface = this.dataStorage, + root: Path = this.root + ) = TaskOrchestrator( + user = user, + session = session, + dataStorage = dataStorage, + root = root + ) - companion object { - private val log = LoggerFactory.getLogger(TaskOrchestrator::class.java) - } + companion object { + private val log = LoggerFactory.getLogger(TaskOrchestrator::class.java) + } } const val TRIPLE_TILDE = "```" \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskType.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskType.kt index 65e9184c6..f11835ff3 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskType.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskType.kt @@ -21,6 +21,7 @@ import com.simiacryptus.cognotik.plan.tools.reasoning.* import com.simiacryptus.cognotik.plan.tools.reasoning.ChainOfThoughtTask.Companion.ChainOfThought import com.simiacryptus.cognotik.plan.tools.session.RunShellCommandTask import com.simiacryptus.cognotik.plan.tools.session.SeleniumSessionTask +import com.simiacryptus.cognotik.plan.tools.writing.* import com.simiacryptus.cognotik.util.DynamicEnum import com.simiacryptus.cognotik.util.DynamicEnumDeserializer import com.simiacryptus.cognotik.util.DynamicEnumSerializer @@ -28,173 +29,206 @@ import com.simiacryptus.cognotik.util.DynamicEnumSerializer @JsonDeserialize(using = TaskTypeDeserializer::class) @JsonSerialize(using = TaskTypeSerializer::class) class TaskType( - name: String, - val executionConfigClass: Class, - val taskSettingsClass: Class, - val description: String? = null, - val tooltipHtml: String? = null, + name: String, + val executionConfigClass: Class, + val taskSettingsClass: Class, + val description: String? = null, + val tooltipHtml: String? 
= null, ) : DynamicEnum>(name) { - companion object { + companion object { - init { - registerConstructor(ChainOfThought) { settings, task -> - ChainOfThoughtTask(settings, task) - } - registerConstructor(Analysis) { settings, task -> - AnalysisTask(settings, task) - } - registerConstructor(CrawlerAgentTask.CrawlerAgent) { settings, task -> - CrawlerAgentTask(settings, task) - } - registerConstructor(FileModification) { settings, task -> - FileModificationTask(settings, task) - } - registerConstructor(FileSearch) { settings, task -> - FileSearchTask(settings, task) - } - registerConstructor(KnowledgeIndexing) { settings, task -> - KnowledgeIndexingTask(settings, task) - } - registerConstructor(GitHubSearchTask.GitHubSearch) { settings, task -> - GitHubSearchTask(settings, task) - } - registerConstructor(RunShellCommandTask.RunShellCommand) { settings, task -> - RunShellCommandTask(settings, task) - } - registerConstructor(RunCodeTask.RunCode) { settings, task -> - RunCodeTask(settings, task) - } - registerConstructor(SeleniumSessionTask.SeleniumSession) { settings, task -> - SeleniumSessionTask(settings, task) - } - registerConstructor(SelfHealingTask.SelfHealing) { settings, task -> - SelfHealingTask(settings, task) - } - registerConstructor(VectorSearch) { settings, task -> - VectorSearchTask(settings, task) - } - registerConstructor(MCPToolTask.MCPTool) { settings, task -> - MCPToolTask(settings, task) - } - registerConstructor(WriteHtmlTask.WriteHtml) { settings, task -> - WriteHtmlTask(settings, task) - } - registerConstructor(GeneratePresentationTask.GeneratePresentation) { settings, task -> - GeneratePresentationTask(settings, task) - } - registerConstructor(MetaCognitiveReflectionTask.MetaCognitiveReflection) { settings, task -> - MetaCognitiveReflectionTask(settings, task) - } - registerConstructor(CausalInferenceTask.CausalInference) { settings, task -> - CausalInferenceTask(settings, task) - } - 
registerConstructor(AbstractionLadderTask.AbstractionLadder) { settings, task -> - AbstractionLadderTask(settings, task) - } - registerConstructor(CounterfactualAnalysisTask.CounterfactualAnalysis) { settings, task -> - CounterfactualAnalysisTask(settings, task) - } - registerConstructor(AnalogicalReasoningTask.AnalogicalReasoning) { settings, task -> - AnalogicalReasoningTask(settings, task) - } - registerConstructor(SocraticDialogueTask.SocraticDialogue) { settings, task -> - SocraticDialogueTask(settings, task) - } - registerConstructor(MultiPerspectiveAnalysisTask.MultiPerspectiveAnalysis) { settings, task -> - MultiPerspectiveAnalysisTask(settings, task) - } - registerConstructor(ConstraintSatisfactionTask.ConstraintSatisfaction) { settings, task -> - ConstraintSatisfactionTask(settings, task) - } - registerConstructor(DecompositionSynthesisTask.DecompositionSynthesis) { settings, task -> - DecompositionSynthesisTask(settings, task) - } - registerConstructor(BrainstormingTask.Brainstorming) { settings, task -> - BrainstormingTask(settings, task) - } - registerConstructor(FiniteStateMachineTask.FiniteStateMachine) { settings, task -> - FiniteStateMachineTask(settings, task) - } - registerConstructor(GameTheoryTask.GameTheory) { settings, task -> - GameTheoryTask(settings, task) - } - registerConstructor(AbductiveReasoningTask.AbductiveReasoning) { settings, task -> - AbductiveReasoningTask(settings, task) - } - registerConstructor(AdversarialReasoningTask.AdversarialReasoning) { settings, task -> - AdversarialReasoningTask(settings, task) - } - registerConstructor(ConstraintRelaxationTask.ConstraintRelaxation) { settings, task -> - ConstraintRelaxationTask(settings, task) - } - registerConstructor(DialecticalReasoningTask.DialecticalReasoning) { settings, task -> - DialecticalReasoningTask(settings, task) - } - registerConstructor(LateralThinkingTask.LateralThinking) { settings, task -> - LateralThinkingTask(settings, task) - } - 
registerConstructor(ProbabilisticReasoningTask.ProbabilisticReasoning) { settings, task -> - ProbabilisticReasoningTask(settings, task) - } - registerConstructor(SystemsThinkingTask.SystemsThinking) { settings, task -> - SystemsThinkingTask(settings, task) - } - registerConstructor(TemporalReasoningTask.TemporalReasoning) { settings, task -> - TemporalReasoningTask(settings, task) - } - registerConstructor(NarrativeReasoningTask.NarrativeReasoning) { settings, task -> - NarrativeReasoningTask(settings, task) - } - registerConstructor(NarrativeGenerationTask.NarrativeGeneration) { settings, task -> - NarrativeGenerationTask(settings, task) - } - registerConstructor(GeneticOptimizationTask.GeneticOptimization) { settings, task -> - GeneticOptimizationTask(settings, task) - } - registerConstructor(SubPlanning) { settings, task -> - SubPlanningTask(settings, task) - } - registerConstructor(EthicalReasoningTask.EthicalReasoning) { settings, task -> - EthicalReasoningTask(settings, task) - } - } + init { + registerConstructor(ChainOfThought) { settings, task -> + ChainOfThoughtTask(settings, task) + } + registerConstructor(Analysis) { settings, task -> + AnalysisTask(settings, task) + } + registerConstructor(CrawlerAgentTask.CrawlerAgent) { settings, task -> + CrawlerAgentTask(settings, task) + } + registerConstructor(FileModification) { settings, task -> + FileModificationTask(settings, task) + } + registerConstructor(FileSearch) { settings, task -> + FileSearchTask(settings, task) + } + registerConstructor(KnowledgeIndexing) { settings, task -> + KnowledgeIndexingTask(settings, task) + } + registerConstructor(GitHubSearchTask.GitHubSearch) { settings, task -> + GitHubSearchTask(settings, task) + } + registerConstructor(RunShellCommandTask.RunShellCommand) { settings, task -> + RunShellCommandTask(settings, task) + } + registerConstructor(RunCodeTask.RunCode) { settings, task -> + RunCodeTask(settings, task) + } + registerConstructor(SeleniumSessionTask.SeleniumSession) 
{ settings, task -> + SeleniumSessionTask(settings, task) + } + registerConstructor(SelfHealingTask.SelfHealing) { settings, task -> + SelfHealingTask(settings, task) + } + registerConstructor(VectorSearch) { settings, task -> + VectorSearchTask(settings, task) + } + registerConstructor(MCPToolTask.MCPTool) { settings, task -> + MCPToolTask(settings, task) + } + registerConstructor(WriteHtmlTask.WriteHtml) { settings, task -> + WriteHtmlTask(settings, task) + } + registerConstructor(GeneratePresentationTask.GeneratePresentation) { settings, task -> + GeneratePresentationTask(settings, task) + } + registerConstructor(MetaCognitiveReflectionTask.MetaCognitiveReflection) { settings, task -> + MetaCognitiveReflectionTask(settings, task) + } + registerConstructor(CausalInferenceTask.CausalInference) { settings, task -> + CausalInferenceTask(settings, task) + } + registerConstructor(AbstractionLadderTask.AbstractionLadder) { settings, task -> + AbstractionLadderTask(settings, task) + } + registerConstructor(CounterfactualAnalysisTask.CounterfactualAnalysis) { settings, task -> + CounterfactualAnalysisTask(settings, task) + } + registerConstructor(AnalogicalReasoningTask.AnalogicalReasoning) { settings, task -> + AnalogicalReasoningTask(settings, task) + } + registerConstructor(SocraticDialogueTask.SocraticDialogue) { settings, task -> + SocraticDialogueTask(settings, task) + } + registerConstructor(MultiPerspectiveAnalysisTask.MultiPerspectiveAnalysis) { settings, task -> + MultiPerspectiveAnalysisTask(settings, task) + } + registerConstructor(ConstraintSatisfactionTask.ConstraintSatisfaction) { settings, task -> + ConstraintSatisfactionTask(settings, task) + } + registerConstructor(DecompositionSynthesisTask.DecompositionSynthesis) { settings, task -> + DecompositionSynthesisTask(settings, task) + } + registerConstructor(BrainstormingTask.Brainstorming) { settings, task -> + BrainstormingTask(settings, task) + } + 
registerConstructor(FiniteStateMachineTask.FiniteStateMachine) { settings, task -> + FiniteStateMachineTask(settings, task) + } + registerConstructor(GameTheoryTask.GameTheory) { settings, task -> + GameTheoryTask(settings, task) + } + registerConstructor(AbductiveReasoningTask.AbductiveReasoning) { settings, task -> + AbductiveReasoningTask(settings, task) + } + registerConstructor(AdversarialReasoningTask.AdversarialReasoning) { settings, task -> + AdversarialReasoningTask(settings, task) + } + registerConstructor(ConstraintRelaxationTask.ConstraintRelaxation) { settings, task -> + ConstraintRelaxationTask(settings, task) + } + registerConstructor(DialecticalReasoningTask.DialecticalReasoning) { settings, task -> + DialecticalReasoningTask(settings, task) + } + registerConstructor(LateralThinkingTask.LateralThinking) { settings, task -> + LateralThinkingTask(settings, task) + } + registerConstructor(ProbabilisticReasoningTask.ProbabilisticReasoning) { settings, task -> + ProbabilisticReasoningTask(settings, task) + } + registerConstructor(SystemsThinkingTask.SystemsThinking) { settings, task -> + SystemsThinkingTask(settings, task) + } + registerConstructor(TemporalReasoningTask.TemporalReasoning) { settings, task -> + TemporalReasoningTask(settings, task) + } + registerConstructor(NarrativeReasoningTask.NarrativeReasoning) { settings, task -> + NarrativeReasoningTask(settings, task) + } + registerConstructor(NarrativeGenerationTask.NarrativeGeneration) { settings, task -> + NarrativeGenerationTask(settings, task) + } + registerConstructor(GeneticOptimizationTask.GeneticOptimization) { settings, task -> + GeneticOptimizationTask(settings, task) + } + registerConstructor(SubPlanning) { settings, task -> + SubPlanningTask(settings, task) + } + registerConstructor(EthicalReasoningTask.EthicalReasoning) { settings, task -> + EthicalReasoningTask(settings, task) + } + registerConstructor(ArticleGenerationTask.ArticleGeneration) { settings, task -> + 
ArticleGenerationTask(settings, task) + } + registerConstructor(PersuasiveEssayTask.PersuasiveEssay) { settings, task -> + PersuasiveEssayTask(settings, task) + } + registerConstructor(BusinessProposalTask.BusinessProposal) { settings, task -> + BusinessProposalTask(settings, task) + } + registerConstructor(EmailCampaignTask.EmailCampaign) { settings, task -> + EmailCampaignTask(settings, task) + } + registerConstructor(InteractiveStoryTask.InteractiveStory) { settings, task -> + InteractiveStoryTask(settings, task) + } + registerConstructor(JournalismReasoningTask.JournalismReasoning) { settings, task -> + JournalismReasoningTask(settings, task) + } + registerConstructor(TechnicalExplanationTask.TechnicalExplanation) { settings, task -> + TechnicalExplanationTask(settings, task) + } + registerConstructor(TutorialGenerationTask.TutorialGeneration) { settings, task -> + TutorialGenerationTask(settings, task) + } + registerConstructor(ReportGenerationTask.ReportGeneration) { settings, task -> + ReportGenerationTask(settings, task) + } + registerConstructor(ScriptwritingTask.Scriptwriting) { settings, task -> + ScriptwritingTask(settings, task) + } + registerConstructor(GenerateImageTask.GenerateImage) { settings, task -> + GenerateImageTask(settings, task) + } + } - fun registerConstructor( - taskType: TaskType, constructor: (OrchestrationConfig, T?) -> AbstractTask - ) { - taskConstructors[taskType] = { settings: OrchestrationConfig, task: TaskExecutionConfig? -> - constructor(settings, task as T?) as AbstractTask - } - register(taskType) - } + fun registerConstructor( + taskType: TaskType, constructor: (OrchestrationConfig, T?) -> AbstractTask + ) { + taskConstructors[taskType] = { settings: OrchestrationConfig, task: TaskExecutionConfig? -> + constructor(settings, task as T?) 
as AbstractTask + } + register(taskType) + } - fun values() = values(TaskType::class.java) + fun values() = values(TaskType::class.java) - fun getImpl( - orchestrationConfig: OrchestrationConfig, planTask: TaskExecutionConfig?, strict: Boolean = true - ) = getImpl( - orchestrationConfig = orchestrationConfig, - taskType = planTask?.task_type?.let { valueOf(it) } ?: throw RuntimeException("Task type not specified"), - planTask = planTask) + fun getImpl( + orchestrationConfig: OrchestrationConfig, planTask: TaskExecutionConfig?, strict: Boolean = true + ) = getImpl( + orchestrationConfig = orchestrationConfig, + taskType = planTask?.task_type?.let { valueOf(it) } ?: throw RuntimeException("Task type not specified"), + planTask = planTask) - fun getImpl( - orchestrationConfig: OrchestrationConfig, taskType: TaskType<*, *>, planTask: TaskExecutionConfig? = null - ): AbstractTask { - val constructor = taskConstructors[taskType] - if (constructor == null) { - throw RuntimeException("Unknown task type: ${taskType.name}") - } - return constructor(orchestrationConfig, planTask) - } + fun getImpl( + orchestrationConfig: OrchestrationConfig, taskType: TaskType<*, *>, planTask: TaskExecutionConfig? 
= null + ): AbstractTask { + val constructor = taskConstructors[taskType] + if (constructor == null) { + throw RuntimeException("Unknown task type: ${taskType.name}") + } + return constructor(orchestrationConfig, planTask) + } - fun getAvailableTaskTypes(orchestrationConfig: OrchestrationConfig) = - orchestrationConfig.taskSettings.mapNotNull { x -> valueOf(x.value.task_type ?: return@mapNotNull null) } + fun getAvailableTaskTypes(orchestrationConfig: OrchestrationConfig) = + orchestrationConfig.taskSettings.mapNotNull { x -> valueOf(x.value.task_type ?: return@mapNotNull null) } - fun valueOf(name: String): TaskType<*, *> = valueOf(TaskType::class.java, name) + fun valueOf(name: String): TaskType<*, *> = valueOf(TaskType::class.java, name) - private fun register(taskType: TaskType<*, *>) = register(TaskType::class.java, taskType) - } + private fun register(taskType: TaskType<*, *>) = register(TaskType::class.java, taskType) + } } @@ -202,5 +236,4 @@ class TaskTypeSerializer : DynamicEnumSerializer>(TaskType::class class TaskTypeDeserializer : DynamicEnumDeserializer>(TaskType::class.java) -private val taskConstructors = - mutableMapOf, (OrchestrationConfig, TaskExecutionConfig?) -> AbstractTask>() \ No newline at end of file +private val taskConstructors = mutableMapOf, (OrchestrationConfig, TaskExecutionConfig?) 
-> AbstractTask>() diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskTypeConfig.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskTypeConfig.kt index 5ad2dd22f..e2bc1e936 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskTypeConfig.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/TaskTypeConfig.kt @@ -10,44 +10,43 @@ import com.simiacryptus.cognotik.platform.model.ApiChatModel @JsonTypeIdResolver(TaskTypeConfig.PlanTaskTypeIdResolver::class) @JsonTypeInfo(use = JsonTypeInfo.Id.CUSTOM, property = "task_type", include = JsonTypeInfo.As.EXISTING_PROPERTY, visible = true) open class TaskTypeConfig( - var task_type: String? = null, - name: String? = null, - var model: ApiChatModel? = null + var task_type: String? = null, + name: String? = null, + var model: ApiChatModel? = null ) { - var name: String? = name - get() = field ?: task_type - set(value) { field = value } + var name: String? = name + get() = field ?: task_type - class PlanTaskTypeIdResolver : TypeIdResolverBase() { - override fun idFromValue(value: Any): String? { - return when (value) { - is TaskTypeConfig -> value.task_type ?: return null - else -> throw IllegalArgumentException("Unexpected value type: ${value.javaClass}") - } - } + class PlanTaskTypeIdResolver : TypeIdResolverBase() { + override fun idFromValue(value: Any): String? { + return when (value) { + is TaskTypeConfig -> value.task_type ?: return null + else -> throw IllegalArgumentException("Unexpected value type: ${value.javaClass}") + } + } - override fun idFromValueAndType(value: Any, suggestedType: Class<*>): String? { - return idFromValue(value) - } + override fun idFromValueAndType(value: Any, suggestedType: Class<*>): String? 
{ + return idFromValue(value) + } - override fun typeFromId(context: DatabindContext, id: String): JavaType { - val taskType = TaskType.valueOf(id.replace(" ", "")) - val subType = context.constructType(taskType.taskSettingsClass) - return subType - } + override fun typeFromId(context: DatabindContext, id: String): JavaType { + val taskType = TaskType.valueOf(id.replace(" ", "")) + val subType = context.constructType(taskType.taskSettingsClass) + return subType + } - override fun getMechanism(): JsonTypeInfo.Id { - return JsonTypeInfo.Id.CUSTOM - } + override fun getMechanism(): JsonTypeInfo.Id { + return JsonTypeInfo.Id.CUSTOM } + } } fun TaskType<*, *>.newSettings(): TaskTypeConfig? = - taskSettingsClass.declaredConstructors.firstOrNull { it.parameters.isEmpty() }?.let { - it.isAccessible = true - val defaultConfig = it.newInstance() as TaskTypeConfig - defaultConfig.task_type = name - defaultConfig.name = null - defaultConfig.model = null - defaultConfig - } + taskSettingsClass.declaredConstructors.firstOrNull { it.parameters.isEmpty() }?.let { + it.isAccessible = true + val defaultConfig = it.newInstance() as TaskTypeConfig + defaultConfig.task_type = name + defaultConfig.name = null + defaultConfig.model = null + defaultConfig + } diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/AdaptivePlanningMode.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/AdaptivePlanningMode.kt index 2425cd18e..1bf7bf89a 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/AdaptivePlanningMode.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/AdaptivePlanningMode.kt @@ -1,7 +1,7 @@ package com.simiacryptus.cognotik.plan.cognitive -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.ParsedAgent import 
com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* @@ -13,6 +13,7 @@ import com.simiacryptus.cognotik.util.MarkdownUtil.renderMarkdown import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.util.* import java.util.concurrent.Future import java.util.concurrent.atomic.AtomicReference @@ -36,7 +37,8 @@ open class AdaptivePlanningMode( private val executionRecords = mutableListOf() private val reasoningState = AtomicReference(null) private var isRunning = false - private val expansionExpressionPattern = Regex("""\{([^|}{]+(?:\|[^|}{\n<>()\[\]]+)+)}""") + private var transcriptStream: FileOutputStream? = null + private val expansionExpressionPattern = Regex("""\{([^|}{]+(?:\|[^|}{\n<>()\[\]]+)+)}""") override fun initialize() { log.debug("Initializing AutoPlanMode") @@ -58,6 +60,7 @@ open class AdaptivePlanningMode( private fun startAutoPlanChat(userMessage: String) { log.debug("Starting auto plan chat with initial message: $userMessage") task.echo(renderMarkdown(userMessage)) + transcriptStream = transcript(task) val continueLoop = true val tabbedDisplay = TabbedDisplay(task) @@ -81,6 +84,7 @@ open class AdaptivePlanningMode( log.debug("Initialized thinking status") initialStatus.initialPrompt = userMessage reasoningState.set(initialStatus) + writeToTranscript("# Auto Plan Chat Session\n\n## Initial Prompt\n\n$userMessage\n\n") var iteration = 0 while (iteration++ < maxIterations && continueLoop) { @@ -88,6 +92,7 @@ open class AdaptivePlanningMode( task.complete() val currentThinkingStatus = reasoningState.get() ?: throw IllegalStateException("ThinkingStatus is null at iteration $iteration") + writeToTranscript("## Iteration $iteration\n\n") val task = task.linkedTask("Iteration $iteration") val ui = task.ui @@ -140,22 +145,23 @@ open class 
AdaptivePlanningMode( val taskResults = mutableListOf>>() for ((index, currentTask: TaskData) in nextTask.withIndex()) { val currentTaskId = "task_${index + 1}" + writeToTranscript("### Task $currentTaskId\n\n") log.debug("Executing task $currentTaskId") val taskExecutionTask = ui.newTask(false) val taskConfig = currentTask.task.tasks?.get(index) val taskDescription = taskConfig?.task_description ?: "No description provided for this task item." taskExecutionTask.add("\n```json\n${taskConfig?.toJson()}\n```\n".renderMarkdown) + writeToTranscript("**Description:** $taskDescription\n\n```json\n${JsonUtil.toJson(taskConfig)}\n```\n\n") taskExecutionTask.verbose( - renderMarkdown( - """ -Executing task: `$currentTaskId` - $taskDescription + + """ + Executing task: `$currentTaskId` - $taskDescription Full TaskData JSON: ```json ${JsonUtil.toJson(taskConfig)} ``` -""".trimIndent(), tabs = false - ) +""".trimIndent().renderMarkdown ) iterationTabbedDisplay["Task Execution $currentTaskId"] = taskExecutionTask.placeholder @@ -184,6 +190,7 @@ ${JsonUtil.toJson(taskConfig)} val completedTasks = taskResults.map { (task, future) -> val result = future.get() log.debug("Task completed: ${task.task_description}") + writeToTranscript("**Result:**\n\n$result\n\n") ExecutionRecord( time = Date(), iteration = iteration, @@ -197,6 +204,7 @@ ${JsonUtil.toJson(taskConfig)} ui.newTask(false).apply { iterationTabbedDisplay["Thinking Status"] = placeholder } try { log.debug("Updating thinking status") + writeToTranscript("### Updated Thinking Status\n\n") val updatedStatus = updateThinking(currentThinkingStatus, completedTasks, task) reasoningState.set(updatedStatus) log.debug("Updated thinking status") @@ -209,6 +217,7 @@ ${JsonUtil.toJson(taskConfig)} }" ) ) + writeToTranscript("```json\n${JsonUtil.toJson(updatedStatus)}\n```\n\n") } catch (e: Exception) { log.error("Error updating thinking status", e) thinkingStatusTask.error(e) @@ -233,6 +242,11 @@ ${JsonUtil.toJson(taskConfig)} } ?: 
"null" }") ) + writeToTranscript("\n## Summary\n\nAuto Plan Chat completed.\n\n") + transcriptStream?.flush() + transcriptStream?.close() + transcriptStream = null + task.complete() task.complete() } } @@ -268,7 +282,7 @@ ${JsonUtil.toJson(taskConfig)} reasoningState: ReasoningState, task: SessionTask ): List? { - initDescriber(this.orchestrationConfig, this.describer) + Tasks.initDescriber(orchestrationConfig, describer) val parsedActor = ParsedAgent( name = "TaskChooser", resultClass = Tasks::class.java, @@ -685,9 +699,23 @@ ${JsonUtil.toJson(taskConfig)} val description: String? = null ) - data class Tasks( - val tasks: MutableList? = null - ) + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun writeToTranscript(content: String) { + transcriptStream?.write(content.toByteArray()) + } + companion object : CognitiveModeStrategy { override val inputCnt = 1 @@ -698,11 +726,5 @@ ${JsonUtil.toJson(taskConfig)} user: User? 
) = AdaptivePlanningMode(task, orchestrationConfig, session, user) - fun initDescriber(orchestrationConfig: OrchestrationConfig, describer: TaskContextYamlDescriber) { - describer.clearSubTypes(TaskExecutionConfig::class.java) - TaskType.getAvailableTaskTypes(orchestrationConfig).forEach { taskType -> - describer.registerSubType(TaskExecutionConfig::class.java, taskType.executionConfigClass) - } - } } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/CognitiveMode.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/CognitiveMode.kt index 46204d99e..83ed43e54 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/CognitiveMode.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/CognitiveMode.kt @@ -5,7 +5,6 @@ import com.simiacryptus.cognotik.plan.OrchestrationConfig import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.model.User import com.simiacryptus.cognotik.webui.session.SessionTask -import com.simiacryptus.cognotik.webui.session.SocketManager /** * The CognitiveMode interface defines the “cognitive” strategy @@ -13,87 +12,87 @@ import com.simiacryptus.cognotik.webui.session.SocketManager * thought updates. */ interface CognitiveMode { - val task: SessionTask - val orchestrationConfig: OrchestrationConfig - val session: Session - val user: User? + val task: SessionTask + val orchestrationConfig: OrchestrationConfig + val session: Session + val user: User? - /** - * Initialize the internal cognitive state. - */ - fun initialize() + /** + * Initialize the internal cognitive state. + */ + fun initialize() - /** - * Handle a user message and trigger the appropriate planning or execution. - */ - fun handleUserMessage(userMessage: String, task: SessionTask) + /** + * Handle a user message and trigger the appropriate planning or execution. 
+ */ + fun handleUserMessage(userMessage: String, task: SessionTask) - /** - * Get the context data accumulated during execution. - * This is useful for sub-planning tasks to collect results. - */ - fun contextData(): List + /** + * Get the context data accumulated during execution. + * This is useful for sub-planning tasks to collect results. + */ + fun contextData(): List } interface CognitiveModeStrategy { - val inputCnt: Int + val inputCnt: Int - fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? - ): CognitiveMode + fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? + ): CognitiveMode } enum class CognitiveModeStrategies : CognitiveModeStrategy { - Chat { - override val inputCnt: Int get() = ConversationalMode.inputCnt + Chat { + override val inputCnt: Int get() = ConversationalMode.inputCnt - override fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? - ): CognitiveMode { - return ConversationalMode(task, orchestrationConfig, session, user) - } - }, - Adaptive { - override val inputCnt: Int get() = AdaptivePlanningMode.inputCnt + override fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? + ): CognitiveMode { + return ConversationalMode(task, orchestrationConfig, session, user) + } + }, + Adaptive { + override val inputCnt: Int get() = AdaptivePlanningMode.inputCnt - override fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? 
- ): CognitiveMode { - return AdaptivePlanningMode(task, orchestrationConfig, session, user) - } - }, - Waterfall { - override val inputCnt: Int get() = WaterfallMode.inputCnt + override fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? + ): CognitiveMode { + return AdaptivePlanningMode(task, orchestrationConfig, session, user) + } + }, + Waterfall { + override val inputCnt: Int get() = WaterfallMode.inputCnt - override fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? - ): CognitiveMode { - return WaterfallMode(task, orchestrationConfig, session, user) - } - }, - Hierarchical { - override val inputCnt: Int get() = HierarchicalPlanningMode.inputCnt + override fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? + ): CognitiveMode { + return WaterfallMode(task, orchestrationConfig, session, user) + } + }, + Hierarchical { + override val inputCnt: Int get() = HierarchicalPlanningMode.inputCnt - override fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? - ): CognitiveMode { - return HierarchicalPlanningMode(task, orchestrationConfig, session, user) - } - }, - ; + override fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? 
+ ): CognitiveMode { + return HierarchicalPlanningMode(task, orchestrationConfig, session, user) + } + }, + ; } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/ConversationalMode.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/ConversationalMode.kt index 3a4b9f59d..6a9b954d7 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/ConversationalMode.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/ConversationalMode.kt @@ -1,7 +1,7 @@ package com.simiacryptus.cognotik.plan.cognitive -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.models.ModelSchema @@ -18,6 +18,7 @@ import com.simiacryptus.cognotik.util.toContentList import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.lang.Thread.sleep import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.ConcurrentLinkedQueue @@ -43,6 +44,7 @@ open class ConversationalMode( private val messagesLock = Any() private val messages get() = messageMaps.computeIfAbsent(session) { ConcurrentLinkedQueue() } private val messageBuffer = ConcurrentLinkedQueue() + private var transcriptStream: FileOutputStream? = null private var isProcessing = false private val systemPrompt = "Given the following input, choose ONE task to execute and describe it in detail." 
private val aggregateTopics = ConcurrentHashMap>() @@ -57,12 +59,13 @@ open class ConversationalMode( log.debug( "ConversationalMode initialized with task types: ${enabledTasks.joinToString(", ") { it.name }}", RuntimeException() ) + transcriptStream = transcript(task) log.debug( "Task configurations: ${ - orchestrationConfig.taskSettings.values.joinToString(", ") { - "${it.task_type}${it.name?.let { name -> ":$name" } ?: ""}" - } - }") + orchestrationConfig.taskSettings.values.joinToString(", ") { + "${it.task_type}${it.name?.let { name -> ":$name" } ?: ""}" + } + }") } override fun handleUserMessage(userMessage: String, task: SessionTask) { @@ -78,6 +81,7 @@ open class ConversationalMode( } task.echo(userMessage.renderMarkdown()) + writeToTranscript("## User\n\n$userMessage\n\n") this.task.ui.pool.submit { try { while (!Thread.interrupted()) { @@ -114,6 +118,7 @@ open class ConversationalMode( // Extract topics from the aggregated response if (useExpansionSyntax && aggregateResponse.isNotEmpty()) { try { + writeToTranscript("## Assistant\n\n${aggregateResponse}\n\n") val model = orchestrationConfig.defaultChatter.getChildClient(task) val topics = extractTopics(aggregateResponse.toString(), model) topics.topics?.forEach { (topicType, entities) -> @@ -127,6 +132,7 @@ open class ConversationalMode( "* `{${it.key}}` - ${it.value.joinToString(", ") { "`$it`" }}" } task.complete(topicsText.renderMarkdown(), additionalClasses = "topics") + writeToTranscript("### Topics\n\n$topicsText\n\n") } } catch (e: Exception) { log.error("Error in topic extraction", e) @@ -171,13 +177,15 @@ open class ConversationalMode( private fun executeTask(userMessage: String, task: SessionTask, aggregateResponse: StringBuilder) { val describer = TaskContextYamlDescriber(orchestrationConfig) val availableTaskTypes = TaskType.getAvailableTaskTypes(orchestrationConfig) + Tasks.initDescriber(orchestrationConfig, describer) val parsedActor = ParsedAgent( name = "TaskChooser", - resultClass = 
AdaptivePlanningMode.Tasks::class.java, - exampleInstance = AdaptivePlanningMode.Tasks( + resultClass = Tasks::class.java, + exampleInstance = Tasks( listOfNotNull(availableTaskTypes.firstOrNull()?.let { - TaskType.getImpl(orchestrationConfig, it).executionConfig - }).toMutableList()), + TaskType.getImpl(orchestrationConfig, it).executionConfig + }).toMutableList() + ), prompt = buildString { append(systemPrompt) append("Available task types:\n") @@ -198,10 +206,10 @@ open class ConversationalMode( temperature = orchestrationConfig.temperature, describer = describer, parserPrompt = ("Task Subtype Schema:\n" + availableTaskTypes.joinToString("\n\n") { taskType -> - "${taskType.name}:\n ${ - describer.describe(taskType.executionConfigClass).trim().trimIndent().indent(" ") - }".trim() - }) + "${taskType.name}:\n ${ + describer.describe(taskType.executionConfigClass).trim().trimIndent().indent(" ") + }".trim() + }) ) // Use the expanded userMessage for task selection val input = getConversationContext() + listOf( @@ -344,6 +352,28 @@ open class ConversationalMode( ) + /** + * Creates and initializes a transcript file for the conversation + */ + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + /** + * Writes content to the transcript file if available + */ + private fun writeToTranscript(content: String) { + transcriptStream?.write(content.toByteArray()) + transcriptStream?.flush() + } + /** * Gets the current conversation context as a list of messages */ @@ -374,4 +404,4 @@ open class ConversationalMode( private val messageMaps = ConcurrentHashMap>() private val log = LoggerFactory.getLogger(ConversationalMode::class.java) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/DependencyGraphMode.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/DependencyGraphMode.kt index 688373361..386049ffc 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/DependencyGraphMode.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/DependencyGraphMode.kt @@ -1,11 +1,7 @@ package com.simiacryptus.cognotik.apps.graph -import com.simiacryptus.cognotik.actors.ParsedAgent -import com.simiacryptus.cognotik.plan.TaskOrchestrator -import com.simiacryptus.cognotik.plan.ExecutionState -import com.simiacryptus.cognotik.plan.OrchestrationConfig -import com.simiacryptus.cognotik.plan.TaskContextYamlDescriber -import com.simiacryptus.cognotik.plan.TaskExecutionConfig +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.plan.cognitive.CognitiveMode import com.simiacryptus.cognotik.plan.cognitive.CognitiveModeStrategy import com.simiacryptus.cognotik.platform.Session @@ -23,92 +19,92 @@ import java.io.File * a plan task, and executes the resulting plan. 
*/ open class DependencyGraphMode( - override val task: SessionTask, - override val orchestrationConfig: OrchestrationConfig, - override val session: Session, - override val user: User?, - private val graphFile: String, + override val task: SessionTask, + override val orchestrationConfig: OrchestrationConfig, + override val session: Session, + override val user: User?, + private val graphFile: String, ) : CognitiveMode { - private val log = LoggerFactory.getLogger(DependencyGraphMode::class.java) + private val log = LoggerFactory.getLogger(DependencyGraphMode::class.java) - data class ExtraTaskDependencies( - val dependencies: Map> = emptyMap() - ) + data class ExtraTaskDependencies( + val dependencies: Map> = emptyMap() + ) - override fun initialize() { - log.debug("Initializing GraphOrderedPlanMode with graph file: $graphFile") - } + override fun initialize() { + log.debug("Initializing GraphOrderedPlanMode with graph file: $graphFile") + } - override fun handleUserMessage(userMessage: String, task: SessionTask) { - log.debug("Handling user message: $userMessage") - execute(userMessage, task) - } + override fun handleUserMessage(userMessage: String, task: SessionTask) { + log.debug("Handling user message: $userMessage") + execute(userMessage, task) + } - override fun contextData(): List = emptyList() + override fun contextData(): List = emptyList() - private fun execute(userMessage: String, task: SessionTask) { - try { - task.add("Reading graph file: $graphFile") - val graphFileContent = readGraphFile(orchestrationConfig) - val softwareGraph = JsonUtil.fromJson( - graphFileContent, SoftwareNodeType.SoftwareGraph::class.java - ) - log.debug("Successfully read graph file. 
Size: ${graphFileContent.length} characters; ${softwareGraph.nodes.size} nodes.") - task.add("Successfully loaded graph with ${softwareGraph.nodes.size} nodes") - val orderedNodes = orderGraphNodes(softwareGraph.nodes) - task.add("Ordered ${orderedNodes.size} nodes by priority") - val cumulativeTasks = transformNodesToPlan(orderedNodes, orchestrationConfig, userMessage, graphFile, task) - addDependencies(cumulativeTasks, graphFileContent, userMessage, task) - val plan = com.simiacryptus.cognotik.plan.PlanUtil.filterPlan { cumulativeTasks } ?: emptyMap() - log.info("Ordered plan built successfully. Proceeding to execute DAG.") - task.add("Plan generated successfully with ${plan.size} tasks") - task.add("Starting plan execution...") - task.add(buildPlanSummary(plan).let(::renderMarkdown)) - task.add( - buildExecutionSummary( - TaskOrchestrator( - user = user, - session = session, - dataStorage = this.task.ui.dataStorage!!, - root = orchestrationConfig.absoluteWorkingDir?.let { File(it).toPath() } - ?: this.task.ui.dataStorage?.getSessionDir( - user, - session - )?.toPath() ?: File(".").toPath() - ).executePlan( - plan = plan, - task = task, - userMessage = userMessage, - orchestrationConfig = orchestrationConfig, - )).let(::renderMarkdown)) - task.add("Plan execution completed") - } catch (e: Exception) { - task.error(e) - task.add("Error during ordered planning: ${e.message}") - log.error("Error during ordered planning: ${e.message}", e) - } + private fun execute(userMessage: String, task: SessionTask) { + try { + task.add("Reading graph file: $graphFile") + val graphFileContent = readGraphFile(orchestrationConfig) + val softwareGraph = JsonUtil.fromJson( + graphFileContent, SoftwareNodeType.SoftwareGraph::class.java + ) + log.debug("Successfully read graph file. 
Size: ${graphFileContent.length} characters; ${softwareGraph.nodes.size} nodes.") + task.add("Successfully loaded graph with ${softwareGraph.nodes.size} nodes") + val orderedNodes = orderGraphNodes(softwareGraph.nodes) + task.add("Ordered ${orderedNodes.size} nodes by priority") + val cumulativeTasks = transformNodesToPlan(orderedNodes, orchestrationConfig, userMessage, graphFile, task) + addDependencies(cumulativeTasks, graphFileContent, userMessage, task) + val plan = PlanUtil.filterPlan { cumulativeTasks } ?: emptyMap() + log.info("Ordered plan built successfully. Proceeding to execute DAG.") + task.add("Plan generated successfully with ${plan.size} tasks") + task.add("Starting plan execution...") + task.add(buildPlanSummary(plan).let(::renderMarkdown)) + task.add( + buildExecutionSummary( + TaskOrchestrator( + user = user, + session = session, + dataStorage = this.task.ui.dataStorage!!, + root = orchestrationConfig.absoluteWorkingDir?.let { File(it).toPath() } + ?: this.task.ui.dataStorage?.getSessionDir( + user, + session + )?.toPath() ?: File(".").toPath() + ).executePlan( + plan = plan, + task = task, + userMessage = userMessage, + orchestrationConfig = orchestrationConfig, + )).let(::renderMarkdown)) + task.add("Plan execution completed") + } catch (e: Exception) { + task.error(e) + task.add("Error during ordered planning: ${e.message}") + log.error("Error during ordered planning: ${e.message}", e) } + } - private fun addDependencies( - cumulativeTasks: MutableMap, - graphFileContent: String, - userMessage: String, - task: SessionTask - ) { - log.debug("Starting dependency analysis for ${cumulativeTasks.size} tasks") + private fun addDependencies( + cumulativeTasks: MutableMap, + graphFileContent: String, + userMessage: String, + task: SessionTask + ) { + log.debug("Starting dependency analysis for ${cumulativeTasks.size} tasks") - if (cumulativeTasks.isEmpty()) { - log.warn("No tasks provided for dependency analysis") - return - } - try { - val 
existingDependencies = cumulativeTasks.mapValues { - it.value.task_dependencies?.toSet() ?: emptySet() - } + if (cumulativeTasks.isEmpty()) { + log.warn("No tasks provided for dependency analysis") + return + } + try { + val existingDependencies = cumulativeTasks.mapValues { + it.value.task_dependencies?.toSet() ?: emptySet() + } - ParsedAgent( - resultClass = ExtraTaskDependencies::class.java, - prompt = """ + ParsedAgent( + resultClass = ExtraTaskDependencies::class.java, + prompt = """ Analyze the current plan context and the provided software graph to identify missing task dependencies. Consider: 1. Code file dependencies from the graph @@ -118,217 +114,217 @@ open class DependencyGraphMode( Only suggest new dependencies that are not already present. Ensure all suggested task IDs exist in the current plan. """.trimIndent(), - model = orchestrationConfig.defaultChatter.getChildClient(task), - parsingChatter = orchestrationConfig.parsingChatter, - ).answer( - contextData() + - listOf( - "You are a software planning assistant. Your goal is to analyze the current plan context and the provided software graph, then focus on generating or refining an instruction (patch/subplan) for the specific node provided.", - "Current aggregated plan so far (if any):\n```json\n${JsonUtil.toJson(cumulativeTasks)}\n```", - "Complete Software Graph from file `$graphFile` is given below:\n```json\n$graphFileContent\n```", - "User Instruction/Query: $userMessage\nPlease evaluate the context and provide your suggested changes or instructions to improve the software plan." - ), - ).obj.dependencies.forEach { (taskToEdit, newUpstreams) -> + model = orchestrationConfig.defaultChatter.getChildClient(task), + parsingChatter = orchestrationConfig.parsingChatter, + ).answer( + contextData() + + listOf( + "You are a software planning assistant. 
Your goal is to analyze the current plan context and the provided software graph, then focus on generating or refining an instruction (patch/subplan) for the specific node provided.", + "Current aggregated plan so far (if any):\n```json\n${JsonUtil.toJson(cumulativeTasks)}\n```", + "Complete Software Graph from file `$graphFile` is given below:\n```json\n$graphFileContent\n```", + "User Instruction/Query: $userMessage\nPlease evaluate the context and provide your suggested changes or instructions to improve the software plan." + ), + ).obj.dependencies.forEach { (taskToEdit, newUpstreams) -> - val task = cumulativeTasks[taskToEdit] - if (task == null) { - log.warn("Attempted to add dependencies to non-existent task: $taskToEdit") - return@forEach - } + val task = cumulativeTasks[taskToEdit] + if (task == null) { + log.warn("Attempted to add dependencies to non-existent task: $taskToEdit") + return@forEach + } - if (task.task_dependencies == null) { - task.task_dependencies = mutableListOf() - } + if (task.task_dependencies == null) { + task.task_dependencies = mutableListOf() + } - val validNewDependencies = newUpstreams.filter { upstreamId -> - if (!cumulativeTasks.containsKey(upstreamId)) { - log.warn("Skipping invalid dependency $upstreamId for task $taskToEdit") - false - } else if (wouldCreateCycle(taskToEdit, upstreamId, cumulativeTasks)) { - log.warn("Skipping cyclic dependency $upstreamId for task $taskToEdit") - false - } else { - true - } - } - task.task_dependencies?.addAll(validNewDependencies) - if (validNewDependencies.isNotEmpty()) { - log.debug("Added ${validNewDependencies.size} dependencies to task $taskToEdit: ${validNewDependencies.joinToString()}") - } - } + val validNewDependencies = newUpstreams.filter { upstreamId -> + if (!cumulativeTasks.containsKey(upstreamId)) { + log.warn("Skipping invalid dependency $upstreamId for task $taskToEdit") + false + } else if (wouldCreateCycle(taskToEdit, upstreamId, cumulativeTasks)) { + log.warn("Skipping 
cyclic dependency $upstreamId for task $taskToEdit") + false + } else { + true + } + } + task.task_dependencies?.addAll(validNewDependencies) + if (validNewDependencies.isNotEmpty()) { + log.debug("Added ${validNewDependencies.size} dependencies to task $taskToEdit: ${validNewDependencies.joinToString()}") + } + } - val newDependencies = cumulativeTasks.mapValues { - (it.value.task_dependencies?.toSet() ?: emptySet()) - (existingDependencies[it.key] ?: emptySet()) - }.filterValues { it.isNotEmpty() } - if (newDependencies.isNotEmpty()) { - log.info("Added new dependencies to ${newDependencies.size} tasks") - newDependencies.forEach { (taskId, deps) -> - log.debug("Task $taskId: Added dependencies: ${deps.joinToString()}") - } - } else { - log.debug("No new dependencies were added") - } - } catch (e: Exception) { - log.error("Error during dependency analysis", e) - throw RuntimeException("Failed to analyze and add dependencies", e) + val newDependencies = cumulativeTasks.mapValues { + (it.value.task_dependencies?.toSet() ?: emptySet()) - (existingDependencies[it.key] ?: emptySet()) + }.filterValues { it.isNotEmpty() } + if (newDependencies.isNotEmpty()) { + log.info("Added new dependencies to ${newDependencies.size} tasks") + newDependencies.forEach { (taskId, deps) -> + log.debug("Task $taskId: Added dependencies: ${deps.joinToString()}") } + } else { + log.debug("No new dependencies were added") + } + } catch (e: Exception) { + log.error("Error during dependency analysis", e) + throw RuntimeException("Failed to analyze and add dependencies", e) } + } - /** - * Check if adding a dependency would create a cycle in the task graph - */ - private fun wouldCreateCycle( - taskId: String, - newDependencyId: String, - tasks: Map, - visited: MutableSet = mutableSetOf() - ): Boolean { - if (taskId == newDependencyId) return true - if (!visited.add(newDependencyId)) return false - return tasks[newDependencyId]?.task_dependencies?.any { dependencyId -> - 
wouldCreateCycle(taskId, dependencyId, tasks, visited) - } ?: false - } + /** + * Check if adding a dependency would create a cycle in the task graph + */ + private fun wouldCreateCycle( + taskId: String, + newDependencyId: String, + tasks: Map, + visited: MutableSet = mutableSetOf() + ): Boolean { + if (taskId == newDependencyId) return true + if (!visited.add(newDependencyId)) return false + return tasks[newDependencyId]?.task_dependencies?.any { dependencyId -> + wouldCreateCycle(taskId, dependencyId, tasks, visited) + } ?: false + } - /** - * Read and return the content of the graph file. - */ - private fun readGraphFile(orchestrationConfig: OrchestrationConfig): String { - val workingDirectory = orchestrationConfig.absoluteWorkingDir ?: "." - val file = File(workingDirectory).resolve(graphFile) - if (!file.exists()) { - log.error("Graph file does not exist at: ${file.absolutePath}") - throw IllegalArgumentException("Graph file does not exist at: ${file.absolutePath}") - } - log.debug("Reading graph file from: ${file.absolutePath}") - return file.readText() + /** + * Read and return the content of the graph file. + */ + private fun readGraphFile(orchestrationConfig: OrchestrationConfig): String { + val workingDirectory = orchestrationConfig.absoluteWorkingDir ?: "." + val file = File(workingDirectory).resolve(graphFile) + if (!file.exists()) { + log.error("Graph file does not exist at: ${file.absolutePath}") + throw IllegalArgumentException("Graph file does not exist at: ${file.absolutePath}") } + log.debug("Reading graph file from: ${file.absolutePath}") + return file.readText() + } - /** - * Order nodes first by defined priorities and then by remaining nodes. 
- */ - private fun orderGraphNodes(nodes: Collection>): List> { - val priorityOrder = listOf("SpecificationDocument", "CodeFile", "TestCodeFile") - val ordered = mutableListOf>() - for (priority in priorityOrder) { - val filtered = nodes.filter { it.type == priority } - log.debug("Found ${filtered.size} nodes for priority '$priority'.") - ordered.addAll(filtered) - } - val remaining = nodes.filter { it.type !in priorityOrder } - log.debug("Appending ${remaining.size} remaining nodes.") - ordered.addAll(remaining) - return ordered + /** + * Order nodes first by defined priorities and then by remaining nodes. + */ + private fun orderGraphNodes(nodes: Collection>): List> { + val priorityOrder = listOf("SpecificationDocument", "CodeFile", "TestCodeFile") + val ordered = mutableListOf>() + for (priority in priorityOrder) { + val filtered = nodes.filter { it.type == priority } + log.debug("Found ${filtered.size} nodes for priority '$priority'.") + ordered.addAll(filtered) } + val remaining = nodes.filter { it.type !in priorityOrder } + log.debug("Appending ${remaining.size} remaining nodes.") + ordered.addAll(remaining) + return ordered + } - /** - * Transform each node into plan patches. - */ - private fun transformNodesToPlan( - nodes: List>, - orchestrationConfig: OrchestrationConfig, - userMessage: String, - graphFile: String, - task: SessionTask - ): MutableMap { - val tasks = mutableMapOf() - nodes.forEach { - tasks.putAll( - getNodePlan( - orchestrationConfig = orchestrationConfig, - tasks = tasks, - graphFile = graphFile, - graphTxt = readGraphFile(orchestrationConfig), - node = it, - userMessage = userMessage, - task - ) ?: emptyMap() - ) - } - return tasks + /** + * Transform each node into plan patches. 
+ */ + private fun transformNodesToPlan( + nodes: List>, + orchestrationConfig: OrchestrationConfig, + userMessage: String, + graphFile: String, + task: SessionTask + ): MutableMap { + val tasks = mutableMapOf() + nodes.forEach { + tasks.putAll( + getNodePlan( + orchestrationConfig = orchestrationConfig, + tasks = tasks, + graphFile = graphFile, + graphTxt = readGraphFile(orchestrationConfig), + node = it, + userMessage = userMessage, + task + ) ?: emptyMap() + ) } + return tasks + } - private fun getNodePlan( - orchestrationConfig: OrchestrationConfig, - tasks: MutableMap, - graphFile: String, - graphTxt: String, - node: SoftwareNodeType.NodeBase<*>, - userMessage: String, - task: SessionTask - ): Map? { - val maxRetries = 3 - val retryDelayMillis = 1000L - var attempt = 0 - fun combine(node: SoftwareNodeType.NodeBase<*>, key: String) = when { - key.startsWith(node.id.toString(), false) -> key - else -> "${node.id}_$key" + private fun getNodePlan( + orchestrationConfig: OrchestrationConfig, + tasks: MutableMap, + graphFile: String, + graphTxt: String, + node: SoftwareNodeType.NodeBase<*>, + userMessage: String, + task: SessionTask + ): Map? { + val maxRetries = 3 + val retryDelayMillis = 1000L + var attempt = 0 + fun combine(node: SoftwareNodeType.NodeBase<*>, key: String) = when { + key.startsWith(node.id.toString(), false) -> key + else -> "${node.id}_$key" + } + while (true) { + try { + return orchestrationConfig.planningActor(TaskContextYamlDescriber(orchestrationConfig), task).answer( + contextData() + + listOf( + "You are a software planning assistant. 
Your goal is to analyze the current plan context and the provided software graph, then focus on generating or refining an instruction (patch/subplan) for the specific node provided.", + "Current aggregated plan so far (if any):\n```json\n${JsonUtil.toJson(tasks)}\n```", + "Complete Software Graph from file `$graphFile` is given below:\n```json\n$graphTxt\n```", + "Details of the focused node with ID `${node.id}`:\n```json\n${JsonUtil.toJson(node)}\n```", + "User Instruction/Query: $userMessage\nPlease evaluate the context and provide your suggested changes or instructions to improve the software plan." + ).filter { it.isNotBlank() }, + ).obj.tasksByID?.mapKeys { combine(node, it.key) }?.mapValues { + it.value.task_dependencies = it.value.task_dependencies?.map { combine(node, it) }?.toMutableList() + it.value } - while (true) { - try { - return orchestrationConfig.planningActor(TaskContextYamlDescriber(orchestrationConfig), task).answer( - contextData() + - listOf( - "You are a software planning assistant. Your goal is to analyze the current plan context and the provided software graph, then focus on generating or refining an instruction (patch/subplan) for the specific node provided.", - "Current aggregated plan so far (if any):\n```json\n${JsonUtil.toJson(tasks)}\n```", - "Complete Software Graph from file `$graphFile` is given below:\n```json\n$graphTxt\n```", - "Details of the focused node with ID `${node.id}`:\n```json\n${JsonUtil.toJson(node)}\n```", - "User Instruction/Query: $userMessage\nPlease evaluate the context and provide your suggested changes or instructions to improve the software plan." 
- ).filter { it.isNotBlank() }, - ).obj.tasksByID?.mapKeys { combine(node, it.key) }?.mapValues { - it.value.task_dependencies = it.value.task_dependencies?.map { combine(node, it) }?.toMutableList(); - it.value - } - } catch (e: Exception) { - if (attempt++ >= maxRetries) { - throw e - } - Thread.sleep(retryDelayMillis) - } + } catch (e: Exception) { + if (attempt++ >= maxRetries) { + throw e } + Thread.sleep(retryDelayMillis) + } } + } - /** - * Build a plan summary string for UI display. - */ - private fun buildPlanSummary(plan: Map): String = buildString { - appendLine("# Graph-Based Planning Result") - appendLine() - appendLine("## Generated Plan (DAG)") - appendLine("```json") - appendLine(JsonUtil.toJson(plan)) - appendLine("```") - } + /** + * Build a plan summary string for UI display. + */ + private fun buildPlanSummary(plan: Map): String = buildString { + appendLine("# Graph-Based Planning Result") + appendLine() + appendLine("## Generated Plan (DAG)") + appendLine("```json") + appendLine(JsonUtil.toJson(plan)) + appendLine("```") + } - /** - * Build an execution summary string for UI display. - */ - private fun buildExecutionSummary(state: ExecutionState): String = buildString { - appendLine("## Plan Execution Summary") - appendLine("- Completed Tasks: ${state.completedTasks.size}") - appendLine("- Failed Tasks: ${state.subTasks.size - state.completedTasks.size}") - appendLine() - appendLine("### Task Results:") - state.taskResult.forEach { (taskId, result) -> - appendLine("#### $taskId") - appendLine("```") - appendLine(result.take(500)) - appendLine("```") - } + /** + * Build an execution summary string for UI display. 
+ */ + private fun buildExecutionSummary(state: ExecutionState): String = buildString { + appendLine("## Plan Execution Summary") + appendLine("- Completed Tasks: ${state.completedTasks.size}") + appendLine("- Failed Tasks: ${state.subTasks.size - state.completedTasks.size}") + appendLine() + appendLine("### Task Results:") + state.taskResult.forEach { (taskId, result) -> + appendLine("#### $taskId") + appendLine("```") + appendLine(result.take(500)) + appendLine("```") } + } - companion object : CognitiveModeStrategy { + companion object : CognitiveModeStrategy { - override val inputCnt = 1 - var graphFile: String = "software_graph.json" + override val inputCnt = 1 + var graphFile: String = "software_graph.json" - override fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? - ): CognitiveMode { - return DependencyGraphMode(task, orchestrationConfig, session, user, graphFile) - } + override fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? 
+ ): CognitiveMode { + return DependencyGraphMode(task, orchestrationConfig, session, user, graphFile) } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/HierarchicalPlanningMode.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/HierarchicalPlanningMode.kt index cff607d9c..98d3fd61f 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/HierarchicalPlanningMode.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/HierarchicalPlanningMode.kt @@ -1,23 +1,25 @@ package com.simiacryptus.cognotik.plan.cognitive -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description -import com.simiacryptus.cognotik.plan.TaskOrchestrator import com.simiacryptus.cognotik.plan.OrchestrationConfig import com.simiacryptus.cognotik.plan.TaskContextYamlDescriber +import com.simiacryptus.cognotik.plan.TaskOrchestrator import com.simiacryptus.cognotik.plan.TaskType -import com.simiacryptus.cognotik.plan.cognitive.AdaptivePlanningMode.Companion.initDescriber -import com.simiacryptus.cognotik.plan.cognitive.AdaptivePlanningMode.Tasks import com.simiacryptus.cognotik.platform.ApplicationServices import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.model.User -import com.simiacryptus.cognotik.util.* +import com.simiacryptus.cognotik.util.FixedConcurrencyProcessor +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.set +import com.simiacryptus.cognotik.util.toJson import com.simiacryptus.cognotik.webui.session.SessionTask import 
com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.util.concurrent.* import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.atomic.AtomicInteger @@ -45,10 +47,15 @@ open class HierarchicalPlanningMode( private var periodicUpdateFuture: ScheduledFuture<*>? = null private val sessionLog = StringBuilder() private var sessionLogTask: SessionTask? = null + private var transcriptStream: FileOutputStream? = null + private var transcriptTask: SessionTask? = null + fun logToSession(message: String) { log.info(message) sessionLog.append(message).append("\n") sessionLogTask?.complete(message.renderMarkdown()) + transcriptStream?.write("$message\n".toByteArray()) + transcriptStream?.flush() } val processor: FixedConcurrencyProcessor = FixedConcurrencyProcessor(task.ui.pool, maxConcurrency) @@ -60,6 +67,9 @@ open class HierarchicalPlanningMode( goalIdCounter.set(1) taskIdCounter.set(1) stopRequested.set(false) + transcriptStream?.close() + transcriptStream = null + transcriptTask = null } override fun handleUserMessage(userMessage: String, task: SessionTask) { @@ -81,6 +91,13 @@ open class HierarchicalPlanningMode( private fun startGoalOrientedSession(userMessage: String, task: SessionTask) { task.echo("User: $userMessage".renderMarkdown()) + // Initialize transcript + transcriptTask = task.linkedTask("Transcript") + transcriptStream = transcript(transcriptTask!!) 
+ logToSession("# Goal-Oriented Planning Session Transcript\n") + logToSession("**User Request:** $userMessage\n") + logToSession("**Started:** ${java.time.LocalDateTime.now()}\n\n") + val stopLinkRef = AtomicReference() val stopLink = task.add(this.task.ui.hrefLink("Stop Goal-Oriented Processing") { @@ -162,6 +179,15 @@ open class HierarchicalPlanningMode( tasksSummaryTask.add(taskSummary(tasksTask).renderMarkdown()) handleStop(iteration, task, stopLink) sessionLogTask?.complete(sessionLog.toString().renderMarkdown()) + // Finalize transcript + logToSession("\n---\n") + logToSession("**Completed:** ${java.time.LocalDateTime.now()}") + logToSession("\n## Final Statistics") + logToSession("- Total Goals: ${goalTree.size}") + logToSession("- Total Tasks: ${taskMap.size}") + logToSession("- Iterations: $iteration") + transcriptStream?.close() + transcriptStream = null } private fun nextIteration( @@ -252,6 +278,7 @@ open class HierarchicalPlanningMode( task: SessionTask ) { logToSession("Decomposing goal: ${goal.description} (ID: ${goal.id})") + logToSession("\n### Goal Decomposition: ${goal.id}\n") // Create a goal tab for this goal goalTasks[goal.id] = task task.add("# Goal: ${goal.description}\n\nID: ${goal.id}".renderMarkdown()) @@ -296,6 +323,7 @@ open class HierarchicalPlanningMode( } else { val subgoalsList = StringBuilder("## Subgoals:\n") val tasksList = StringBuilder("## Tasks:\n") + logToSession("\n#### Generated Subgoals and Tasks for Goal ${goal.id}:") subgoals.forEach { subgoal -> if (!goalTree.containsKey(subgoal.id)) { @@ -482,6 +510,9 @@ open class HierarchicalPlanningMode( ): String? 
{ return try { log.info("Started execution of Task ID ${id} (${t.description}) in processor.") + logToSession("\n### Task Execution: ${t.id}\n") + logToSession("**Description:** ${t.description}") + logToSession("**Status:** ${t.status}") task.add("Starting execution of task: ${t.description}".renderMarkdown()) task.verbose("Task Details:\n```json\n${t.toJson()}\n```\n".renderMarkdown()) val answer = actor.answer( @@ -516,18 +547,21 @@ open class HierarchicalPlanningMode( val acquired = semaphore.tryAcquire(5, TimeUnit.MINUTES) if (!acquired) { logToSession("Task ID ${t.id} timed out after 5 minutes") + logToSession("**Result:** TIMEOUT") t.status = TaskStatus.FAILED t.result = "Task execution timed out" task.add("Task execution timed out after 5 minutes".renderMarkdown()) } logToSession("Task ID ${t.id} complete") val result = t.result + logToSession("**Result:** ${result?.take(200)?.replace("\n", " ")}${if ((result?.length ?: 0) > 200) "..." else ""}") log.info("Completed execution of Task ID ${id} (${t.description}) in processor.") result } catch (e: Exception) { log.error( "Task ID ${id} (${t.description}) execution failed in processor.submit lambda", e ) + logToSession("**Result:** FAILED - ${e.message}") taskMap[id]?.apply { status = TaskStatus.FAILED result = "Execution Error: ${e.message}" @@ -537,12 +571,25 @@ open class HierarchicalPlanningMode( } } + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf".renderMarkdown() + ) + return markdownTranscript + } + private fun getParsedActor( task: Task, chatInterface: ChatInterface ): ParsedAgent { val availableTaskTypes = TaskType.getAvailableTaskTypes(orchestrationConfig) - initDescriber(this.orchestrationConfig, this.describer) + Tasks.initDescriber(orchestrationConfig, describer) return ParsedAgent( name = "TaskTypeChooser", resultClass = Tasks::class.java, // Parse directly into TaskConfigBase @@ -937,7 +984,7 @@ open class HierarchicalPlanningMode( else -> "Deps: $it" } } - nodeSb.append("- " + ("""$statusEmoji **${goal.description ?: "N/A"} (ID: ${goal.id})**""").let { it -> + nodeSb.append("- " + ("""$statusEmoji **${goal.description ?: "N/A"} (ID: ${goal.id})**""").let { goalTasks[goal.id]?.ui?.linkToSession( it ) ?: it diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/Tasks.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/Tasks.kt new file mode 100644 index 000000000..d524413ef --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/Tasks.kt @@ -0,0 +1,32 @@ +package com.simiacryptus.cognotik.plan.cognitive + +import com.simiacryptus.cognotik.plan.OrchestrationConfig +import com.simiacryptus.cognotik.plan.TaskContextYamlDescriber +import com.simiacryptus.cognotik.plan.TaskExecutionConfig +import com.simiacryptus.cognotik.plan.TaskType +import com.simiacryptus.cognotik.util.ValidatedObject + +data class Tasks( + val tasks: MutableList? = null +) : ValidatedObject { + override fun validate(): String? 
{ + val errors = mutableListOf() + if (tasks == null || tasks.isEmpty()) { + errors.add("Tasks list cannot be null or empty.") + } else { + tasks.forEachIndexed { index, task -> + if (task is ValidatedObject) task.validate()?.let { errors.add(it) } + } + } + return errors.ifEmpty { null }?.joinToString("; ") + } + + companion object { + fun initDescriber(orchestrationConfig: OrchestrationConfig, describer: TaskContextYamlDescriber) { + describer.clearSubTypes(TaskExecutionConfig::class.java) + TaskType.getAvailableTaskTypes(orchestrationConfig).forEach { taskType -> + describer.registerSubType(TaskExecutionConfig::class.java, taskType.executionConfigClass) + } + } + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/WaterfallMode.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/WaterfallMode.kt index 676d95e81..df56e978b 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/WaterfallMode.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/cognitive/WaterfallMode.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.plan.cognitive -import com.simiacryptus.cognotik.actors.ParsedResponse +import com.simiacryptus.cognotik.agents.ParsedResponse import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.TypeDescriber import com.simiacryptus.cognotik.models.ModelSchema @@ -15,202 +15,242 @@ import com.simiacryptus.cognotik.util.JsonUtil import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.webui.session.SessionTask import java.io.File +import java.io.FileOutputStream import java.nio.file.Path /** * A cognitive mode that implements the traditional plan-ahead strategy. */ open class WaterfallMode( - override val task: SessionTask, - override val orchestrationConfig: OrchestrationConfig, - override val session: Session, - override val user: User? 
+ override val task: SessionTask, + override val orchestrationConfig: OrchestrationConfig, + override val session: Session, + override val user: User? ) : CognitiveMode { - private val log = LoggerFactory.getLogger(WaterfallMode::class.java) + private val log = LoggerFactory.getLogger(WaterfallMode::class.java) + private var transcriptStream: FileOutputStream? = null - override fun initialize() { - log.debug("Initializing PlanAheadMode") - } + override fun initialize() { + log.debug("Initializing PlanAheadMode") + transcriptStream = transcript(task) + } - override fun contextData(): List = emptyList() + override fun contextData(): List = emptyList() - override fun handleUserMessage(userMessage: String, task: SessionTask) { - log.debug("Handling user message: $userMessage") - execute(userMessage, task) + override fun handleUserMessage(userMessage: String, task: SessionTask) { + log.debug("Handling user message: $userMessage") + transcriptStream?.let { stream -> + stream.write("## User Message\n\n$userMessage\n\n".toByteArray()) + stream.flush() } + execute(userMessage, task) + } - private fun execute(userMessage: String, task: SessionTask) { - try { - val coordinator = TaskOrchestrator( - user = user, - session = session, - dataStorage = this.task.ui.dataStorage!!, - root = orchestrationConfig.absoluteWorkingDir?.let { File(it).toPath() } - ?: this.task.ui.dataStorage?.getSessionDir( - user, - session - )?.toPath() ?: File(".").toPath() - ) + private fun execute(userMessage: String, task: SessionTask) { + try { + val coordinator = TaskOrchestrator( + user = user, + session = session, + dataStorage = this.task.ui.dataStorage!!, + root = orchestrationConfig.absoluteWorkingDir?.let { File(it).toPath() } + ?: this.task.ui.dataStorage?.getSessionDir( + user, + session + )?.toPath() ?: File(".").toPath() + ) - val plan = initialPlan( - codeFiles = coordinator.codeFiles, - files = coordinator.files, - root = coordinator.root, - task = task, - userMessage = userMessage, - 
orchestrationConfig = orchestrationConfig, - contextFn = { contextData() }, - describer = TaskContextYamlDescriber(orchestrationConfig) - ) - coordinator.executePlan( - plan = plan.plan, - task = task, - userMessage = userMessage, - orchestrationConfig = orchestrationConfig, - // Use the budgeted and task-specific client - ) - } catch (e: Throwable) { - task.error(e) // Report error on the current task - log.error("Error in execute", e) - } + val describer = TaskContextYamlDescriber(orchestrationConfig) + Tasks.initDescriber(orchestrationConfig, describer) + val plan = initialPlan( + codeFiles = coordinator.codeFiles, + files = coordinator.files, + root = coordinator.root, + task = task, + userMessage = userMessage, + orchestrationConfig = orchestrationConfig, + contextFn = { contextData() }, + describer = describer + ) + transcriptStream?.let { stream -> + stream.write("## Generated Plan\n\n${plan.planText}\n\n".toByteArray()) + stream.flush() + } + + coordinator.executePlan( + plan = plan.plan, + task = task, + userMessage = userMessage, + orchestrationConfig = orchestrationConfig, + // Use the budgeted and task-specific client + ) + } catch (e: Throwable) { + task.error(e) // Report error on the current task + log.error("Error in execute", e) + transcriptStream?.let { stream -> + stream.write("## Error\n\n```\n${e.message}\n${e.stackTraceToString()}\n```\n\n".toByteArray()) + stream.flush() + } + } finally { + transcriptStream?.close() } + } - open fun initialPlan( - codeFiles: Map, - files: Array, - root: Path, - task: SessionTask, - userMessage: String, - orchestrationConfig: OrchestrationConfig, - contextFn: () -> List = { emptyList() }, - describer: TypeDescriber - ): TaskBreakdownWithPrompt { - val toInput = inputFn(codeFiles, files, root) - task.echo(userMessage.renderMarkdown()) - return if (!orchestrationConfig.autoFix) - Discussable( - task = task, - heading = "", - userMessage = { userMessage }, - initialResponse = { - newPlan( - orchestrationConfig, - 
toInput(userMessage) + contextFn(), - describer, - task - ) - }, - outputFn = { - try { - render( - withPrompt = TaskBreakdownWithPrompt( - prompt = userMessage, - plan = it.obj, - planText = it.text - ) - ) - } catch (e: Throwable) { - log.warn("Error rendering task breakdown", e) - task.error(e) - e.message ?: e.javaClass.simpleName - } - }, - reviseResponse = { userMessages: List> -> - newPlan( - orchestrationConfig, - userMessages.map { it.first }, - describer, - task - ) - }, - ).call().let { - TaskBreakdownWithPrompt( - prompt = userMessage, - plan = filterPlan { it?.obj } ?: emptyMap(), - planText = it?.text ?: "(no plan generated)" - ) - } - else { - newPlan( - orchestrationConfig, - toInput(userMessage) + contextFn(), - describer, - task - ).let { - TaskBreakdownWithPrompt( - prompt = userMessage, - plan = filterPlan { it.obj } ?: emptyMap(), - planText = it.text - ) - } - } + open fun initialPlan( + codeFiles: Map, + files: Array, + root: Path, + task: SessionTask, + userMessage: String, + orchestrationConfig: OrchestrationConfig, + contextFn: () -> List = { emptyList() }, + describer: TypeDescriber + ): TaskBreakdownWithPrompt { + val toInput = inputFn(codeFiles, files, root) + task.echo(userMessage.renderMarkdown()) + return if (!orchestrationConfig.autoFix) + Discussable( + task = task, + heading = "", + userMessage = { userMessage }, + initialResponse = { + newPlan( + orchestrationConfig, + toInput(userMessage) + contextFn(), + describer, + task + ) + }, + outputFn = { + try { + render( + withPrompt = TaskBreakdownWithPrompt( + prompt = userMessage, + plan = it.obj, + planText = it.text + ) + ) + } catch (e: Throwable) { + log.warn("Error rendering task breakdown", e) + task.error(e) + e.message ?: e.javaClass.simpleName + } + }, + reviseResponse = { userMessages: List> -> + newPlan( + orchestrationConfig, + userMessages.map { it.first }, + describer, + task + ) + }, + ).call().let { + TaskBreakdownWithPrompt( + prompt = userMessage, + plan = 
filterPlan { it?.obj } ?: emptyMap(), + planText = it?.text ?: "(no plan generated)" + ) + } + else { + newPlan( + orchestrationConfig, + toInput(userMessage) + contextFn(), + describer, + task + ).let { + TaskBreakdownWithPrompt( + prompt = userMessage, + plan = filterPlan { it.obj } ?: emptyMap(), + planText = it.text + ) + } } + } - data class TaskBreakdownWithPrompt( - val prompt: String, - val plan: Map, - val planText: String - ) + data class TaskBreakdownWithPrompt( + val prompt: String, + val plan: Map, + val planText: String + ) - fun render( - withPrompt: TaskBreakdownWithPrompt - ) = AgentPatterns.displayMapInTabs( - mapOf( - "Text" to withPrompt.planText.renderMarkdown(), - "JSON" to "${TRIPLE_TILDE}json\n${JsonUtil.toJson(withPrompt)}\n${TRIPLE_TILDE}".renderMarkdown(), - "Diagram" to (("```mermaid\n" + buildMermaidGraph( - (filterPlan { - withPrompt.plan - } ?: emptyMap()).toMutableMap() - ) + "\n```\n").renderMarkdown()) - ) + fun render( + withPrompt: TaskBreakdownWithPrompt + ) = AgentPatterns.displayMapInTabs( + mapOf( + "Text" to withPrompt.planText.renderMarkdown(), + "JSON" to "${TRIPLE_TILDE}json\n${JsonUtil.toJson(withPrompt)}\n${TRIPLE_TILDE}".renderMarkdown(), + "Diagram" to (("```mermaid\n" + buildMermaidGraph( + (filterPlan { + withPrompt.plan + } ?: emptyMap()).toMutableMap() + ) + "\n```\n").renderMarkdown()) ) + ) - open fun newPlan( - orchestrationConfig: OrchestrationConfig, - inStrings: List, - describer: TypeDescriber, - task: SessionTask - ): ParsedResponse> { - orchestrationConfig.absoluteWorkingDir?.apply { File(this).mkdirs() } - val planningActor = orchestrationConfig.planningActor(describer, task) - return planningActor.respond( - messages = planningActor.chatMessages(inStrings), - input = inStrings, - ).map(Map::class.java) { - it.tasksByID ?: emptyMap() - } as ParsedResponse> - } + open fun newPlan( + orchestrationConfig: OrchestrationConfig, + inStrings: List, + describer: TypeDescriber, + task: SessionTask + ): 
ParsedResponse> { + orchestrationConfig.absoluteWorkingDir?.apply { File(this).mkdirs() } + val planningActor = orchestrationConfig.planningActor(describer, task) + return planningActor.respond( + messages = planningActor.chatMessages(inStrings), + input = inStrings, + ).map(Map::class.java) { + it.tasksByID ?: emptyMap() + } as ParsedResponse> + } - open fun inputFn( - codeFiles: Map, - files: Array, - root: Path - ) = { str: String -> - listOf( - if (!codeFiles.all { it.key.toFile().isFile } || codeFiles.size > 2) "Files:\n${ - codeFiles.keys.joinToString( - "\n" - ) { "* $it" } - } " else { - files.joinToString("\n\n") { - val path = root.relativize(it.toPath()) - "## $path\n\n${(codeFiles[path] ?: "").let { "$TRIPLE_TILDE\n${it}\n$TRIPLE_TILDE" }}" - } - }, - str + open fun inputFn( + codeFiles: Map, + files: Array, + root: Path + ) = { str: String -> + listOf( + if (!codeFiles.all { it.key.toFile().isFile } || codeFiles.size > 2) "Files:\n${ + codeFiles.keys.joinToString( + "\n" + ) { "* $it" } + } " else { + files.joinToString("\n\n") { + val path = root.relativize(it.toPath()) + "## $path\n\n${(codeFiles[path] ?: "").let { "$TRIPLE_TILDE\n${it}\n$TRIPLE_TILDE" }}" + } + }, + str + ) + } + + /** + * Creates a transcript file for logging the session's interactions. + * The transcript is written in Markdown format and includes links to HTML and PDF versions. + * + * @param task The session task used to create the file + * @return FileOutputStream for writing to the transcript, or null if creation failed + */ + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } - companion object : CognitiveModeStrategy { - override val inputCnt = 1 - override fun getCognitiveMode( - task: SessionTask, - orchestrationConfig: OrchestrationConfig, - session: Session, - user: User? - ) = WaterfallMode(task, orchestrationConfig, session, user) - } + companion object : CognitiveModeStrategy { + override val inputCnt = 1 + override fun getCognitiveMode( + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + session: Session, + user: User? + ) = WaterfallMode(task, orchestrationConfig, session, user) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.kt index 1fb2dc2cc..468d1f560 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.plan.tools -import com.simiacryptus.cognotik.actors.CodeAgent +import com.simiacryptus.cognotik.agents.CodeAgent import com.simiacryptus.cognotik.apps.code.CodingTask import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.interpreter.CodeRuntime @@ -13,166 +13,192 @@ import com.simiacryptus.cognotik.util.oneAtATime import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.util.concurrent.Semaphore import java.util.concurrent.atomic.AtomicInteger import kotlin.reflect.KClass class RunCodeTask( - orchestrationConfig: OrchestrationConfig, - planTask: RunCodeTaskExecutionConfigData?, + orchestrationConfig: 
OrchestrationConfig, + planTask: RunCodeTaskExecutionConfigData?, ) : AbstractTask(orchestrationConfig, planTask) { - class RunCodeTaskTypeConfig( - task_type : String = RunCode.name, - val codeRuntime: CodeRuntimes? = null, - model: ApiChatModel? = null, - name: String? = task_type, - ) : TaskTypeConfig( - task_type = task_type, - name = name, - model = model, - ) + class RunCodeTaskTypeConfig( + task_type: String = RunCode.name, + val codeRuntime: CodeRuntimes? = null, + model: ApiChatModel? = null, + name: String? = task_type, + ) : TaskTypeConfig( + task_type = task_type, + name = name, + model = model, + ) - class RunCodeTaskExecutionConfigData( - @Description("The task or goal to be accomplished") - val goal: String? = null, - @Description("The relative file path of the working directory") - val workingDir: String? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null - ) : TaskExecutionConfig( - task_type = RunCode.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) + class RunCodeTaskExecutionConfigData( + @Description("The task or goal to be accomplished") + val goal: String? = null, + @Description("The relative file path of the working directory") + val workingDir: String? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null + ) : TaskExecutionConfig( + task_type = RunCode.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) - override fun promptSegment() = """ + override fun promptSegment() = """ RunCode - Use a code interpreter to solve and complete the user's request. 
* Do not directly write code (yet) * Include detailed technical requirements for the needed solution """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val autoRunCounter = AtomicInteger(0) - val semaphore = Semaphore(0) - val typeConfig = typeConfig ?: throw RuntimeException() - val model = (typeConfig.model?.let { orchestrationConfig.instance(it) } - ?: orchestrationConfig.defaultChatter).getChildClient(task) + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val autoRunCounter = AtomicInteger(0) + val transcript = transcript(task) + val semaphore = Semaphore(0) + val typeConfig = typeConfig ?: throw RuntimeException() + val model = (typeConfig.model?.let { orchestrationConfig.instance(it) } + ?: orchestrationConfig.defaultChatter).getChildClient(task) // val taskSettings = this.orchestrationConfig.getTaskSettings(TaskType.RunCodeTask) - val taskSettings = typeConfig as? 
RunCodeTaskTypeConfig - val runtime = taskSettings?.codeRuntime ?: CodeRuntimes.GroovyRuntime // Kotlin has issues running within IntelliJ - val defs = mapOf( - "env" to (orchestrationConfig.env ?: emptyMap()), - "workingDir" to ( - orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } - ?: orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } - ?: File(".").absolutePath - ), - ) - val codeRuntime = CodeRuntimes.getRuntime(runtime, defs) - - val codingAgent = object : CodingTask( - dataStorage = agent.dataStorage, - session = agent.session, - user = agent.user, - ui = task.ui, - interpreter = codeRuntime::class as KClass, - symbols = mapOf( - "env" to (orchestrationConfig.env ?: emptyMap()), - "workingDir" to ( - orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } - ?: orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } - ?: File(".").absolutePath - ), - "language" to runtime.name.lowercase().replace("runtime", ""), + val taskSettings = typeConfig as? 
RunCodeTaskTypeConfig + val runtime = taskSettings?.codeRuntime ?: CodeRuntimes.GroovyRuntime // Kotlin has issues running within IntelliJ + val defs = mapOf( + "env" to (orchestrationConfig.env ?: emptyMap()), + "workingDir" to ( + orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } + ?: orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } + ?: File(".").absolutePath + ), + ) + val codeRuntime = CodeRuntimes.getRuntime(runtime, defs) + + val codingAgent = object : CodingTask( + dataStorage = agent.dataStorage, + session = agent.session, + user = agent.user, + ui = task.ui, + interpreter = codeRuntime::class as KClass, + symbols = mapOf( + "env" to (orchestrationConfig.env ?: emptyMap()), + "workingDir" to ( + orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } + ?: orchestrationConfig.absoluteWorkingDir?.let { File(it).absolutePath } + ?: File(".").absolutePath ), - temperature = orchestrationConfig.temperature, - details = """ + "language" to runtime.name.lowercase().replace("runtime", ""), + ), + temperature = orchestrationConfig.temperature, + details = """ Code a solution using ${runtime.name} to the user's request. """.trimIndent(), - model = model, - mainTask = task, - retryable = false, - ) { - override fun displayFeedback( - task: SessionTask, - request: CodeAgent.CodeRequest, - response: CodeAgent.CodeResult - ) { - val formText = StringBuilder() - var formHandle: StringBuilder? = null - if (!orchestrationConfig.autoFix) formHandle = task.add( - "
\n${ - if (!super.canPlay) "" else super.playButton(task, request, response, formText) { formHandle!! } - }\n${ - ui.hrefLink("Continue", "href-link play-button") { - response.let { - "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n## Output\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" - }.apply { resultFn(this) } - semaphore.release() - } - }\n
\n${ - super.ui.textInput(oneAtATime { feedback: String -> - super.responseAction(task, "Revising...", formHandle!!, formText) { - super.feedback(task, feedback, request, response) - } - }) - }", additionalClasses = "reply-message" - ) else if (autoRunCounter.incrementAndGet() <= 1) { - responseAction(task, "Running...", formHandle, formText) { - execute(task, response, request) - } - } - formText.append(formHandle.toString()) - formHandle.toString() - task.complete() + model = model, + mainTask = task, + retryable = false, + ) { + override fun displayFeedback( + task: SessionTask, + request: CodeAgent.CodeRequest, + response: CodeAgent.CodeResult + ) { + val formText = StringBuilder() + transcript?.write("## Code Request\n```${runtime.name.lowercase().replace("runtime", "")}\n${request.messages}\n```\n\n".toByteArray()) + transcript?.write("## Execution Result\n".toByteArray()) + transcript?.write("**Result Value:**\n```\n${response.result.resultValue}\n```\n\n".toByteArray()) + transcript?.write("**Output:**\n```\n${response.result.resultOutput}\n```\n\n".toByteArray()) + var formHandle: StringBuilder? = null + if (!orchestrationConfig.autoFix) formHandle = task.add( + "
\n${ + if (!super.canPlay) "" else super.playButton(task, request, response, formText) { formHandle!! } + }\n${ + ui.hrefLink("Continue", "href-link play-button") { + response.let { + transcript?.write("## User Action: Continue\n\n".toByteArray()) + "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n## Output\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" + }.apply { resultFn(this) } + semaphore.release() } + }\n
\n${ + super.ui.textInput(oneAtATime { feedback: String -> + super.responseAction(task, "Revising...", formHandle!!, formText) { + transcript?.write("## User Feedback\n$feedback\n\n".toByteArray()) + super.feedback(task, feedback, request, response) + } + }) + }", additionalClasses = "reply-message" + ) else if (autoRunCounter.incrementAndGet() <= 1) { + responseAction(task, "Running...", formHandle, formText) { + execute(task, response, request) + } + } + formText.append(formHandle.toString()) + formHandle.toString() + task.complete() + } - override fun execute( - task: SessionTask, - response: CodeAgent.CodeResult - ): String { - val result = super.execute(task, response) - if (orchestrationConfig.autoFix) { - response.let { - "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" - }.apply { resultFn(this) } - semaphore.release() - } - return result - } + override fun execute( + task: SessionTask, + response: CodeAgent.CodeResult + ): String { + val result = super.execute(task, response) + if (orchestrationConfig.autoFix) { + transcript?.write("## Auto-fix Execution\n\n".toByteArray()) + response.let { + "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" + }.apply { resultFn(this) } + semaphore.release() } - codingAgent.start( - codingAgent.codeRequest( - messages.map { it to ModelSchema.Role.user } + listOf( - (this.executionConfig?.goal ?: "") to ModelSchema.Role.user, - ) - ) + return result + } + } + codingAgent.start( + codingAgent.codeRequest( + messages.map { it to ModelSchema.Role.user } + listOf( + (this.executionConfig?.goal ?: "") to ModelSchema.Role.user, ) - try { - semaphore.acquire() - } catch (e: Throwable) { - log.warn("Error", e) - 
} + ) + ) + try { + semaphore.acquire() + } catch (e: Throwable) { + transcript?.write("## Error\n```\n${e.message}\n${e.stackTraceToString()}\n```\n\n".toByteArray()) + log.warn("Error", e) + } finally { + transcript?.write("\n## Task Completed\n".toByteArray()) + transcript?.close() } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } - companion object { - private val log = LoggerFactory.getLogger(RunCodeTask::class.java) - val RunCode = TaskType( - "RunCode", - RunCodeTaskExecutionConfigData::class.java, - RunCodeTaskTypeConfig::class.java, - "Execute code snippets safely", - """ + companion object { + private val log = LoggerFactory.getLogger(RunCodeTask::class.java) + val RunCode = TaskType( + "RunCode", + RunCodeTaskExecutionConfigData::class.java, + RunCodeTaskTypeConfig::class.java, + "Execute code snippets safely", + """ Executes code snippets in a controlled environment.
  • Safe code execution handling
  • @@ -182,7 +208,7 @@ class RunCodeTask(
  • Interactive result review
""" - ) + ) - } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.md deleted file mode 100644 index 0c7410729..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/RunCodeTask.md +++ /dev/null @@ -1,145 +0,0 @@ -# RunCodeTask - -## Overview - -The `RunCodeTask` is a specialized task implementation that executes code through an interpreter to solve and complete user requests. It leverages a code runtime environment to dynamically execute code and provide interactive feedback. - -## Purpose - -This task is designed to: -- Execute code in various runtime environments (Kotlin, Python, etc.) -- Provide interactive code execution with feedback mechanisms -- Support automatic fixing of code issues when enabled -- Integrate with the task orchestration system for complex workflows - -## Configuration - -### RunCodeTaskSettings - -Settings that control the behavior of the RunCodeTask: - -| Parameter | Type | Default | Description | -|-----------|------|---------|-------------| -| `task_type` | String | `TaskType.RunCodeTask.name` | The type identifier for this task | -| `codeRuntime` | CodeRuntimes? | `null` | The runtime environment to use (e.g., KotlinRuntime, PythonRuntime) | -| `enabled` | Boolean | `true` | Whether this task type is enabled | -| `model` | ApiChatModel? | `null` | The AI model to use for code generation | - -### RunCodeTaskConfigData - -Configuration data specific to each task instance: - -| Parameter | Type | Description | -|-----------|------|-------------| -| `goal` | String? | The task or goal to be accomplished | -| `workingDir` | String? | The relative file path of the working directory | -| `task_description` | String? | Description of what this task does | -| `task_dependencies` | List? | List of task IDs that must complete before this task | -| `state` | TaskState? 
| Current state of the task execution | - -## Key Features - -### 1. **Multiple Runtime Support** -- Supports various code runtimes through the `CodeRuntimes` enum -- Default runtime is Kotlin if not specified -- Runtime environment includes working directory and environment variables - -### 2. **Interactive Execution** -- Provides interactive feedback during code execution -- Users can continue, revise, or provide feedback on code results -- Supports both manual and automatic execution modes - -### 3. **Auto-Fix Capability** -- When `autoFix` is enabled in orchestration config, automatically attempts to fix code issues -- Limits automatic retries to prevent infinite loops -- Provides detailed output including code, results, and console output - -### 4. **Integration with CodingAgent** -- Extends the `CodingAgent` class for sophisticated code generation -- Passes environment variables and working directory to the runtime -- Supports temperature control for AI model responses - -## Usage Example - -```kotlin -// Create task settings -val settings = RunCodeTaskSettings( - codeRuntime = CodeRuntimes.PythonRuntime, - enabled = true, - model = ApiChatModel.GPT4 -) - -// Create task configuration -val config = RunCodeTaskConfigData( - goal = "Calculate the fibonacci sequence up to n=10", - workingDir = "./workspace", - task_description = "Generate and execute fibonacci calculation" -) - -// Initialize and run the task -val task = RunCodeTask(orchestrationConfig, config) -task.run(agent, messages, sessionTask, resultCallback, orchestrationConfig) -``` - -## Execution Flow - -1. **Initialization**: Sets up the code runtime with environment variables and working directory -2. **Code Generation**: Uses AI model to generate code based on the goal and messages -3. **Execution**: Runs the generated code in the specified runtime -4. 
**Feedback Loop**: - - If auto-fix is disabled: Presents results and waits for user interaction - - If auto-fix is enabled: Automatically attempts to fix issues (limited to 1 retry) -5. **Result Handling**: Formats and returns the execution results including code, output, and any errors - -## Output Format - -The task produces formatted output containing: -- **Command**: The generated code -- **Result**: The return value of the code execution -- **Output**: Console output from the execution - -Example output format: -``` -## Command -``` -[generated code] -``` -## Result -``` -[execution result] -``` -## Output -``` -[console output] -``` -``` - -## Error Handling - -- Uses semaphore-based synchronization for execution control -- Catches and logs exceptions during execution -- Provides feedback mechanisms for error correction -- Limits automatic retries to prevent infinite loops - -## Dependencies - -- `CodeAgent`: For code generation and execution -- `CodingAgent`: Extended class providing code interaction capabilities -- `CodeRuntimes`: Runtime environment management -- `SessionTask`: UI and session management -- `TaskOrchestrator`: Parent orchestration system - -## Best Practices - -1. **Runtime Selection**: Choose the appropriate runtime based on the task requirements -2. **Working Directory**: Ensure the working directory has appropriate permissions -3. **Environment Variables**: Pass necessary environment variables through the orchestration config -4. **Auto-Fix Usage**: Use auto-fix cautiously for tasks that might have side effects -5. 
**Error Handling**: Always handle potential execution failures in dependent tasks - -## Limitations - -- Auto-fix is limited to one retry to prevent infinite loops -- Runtime must be supported by the CodeRuntimes implementation -- Execution is synchronous and blocks until completion or user interaction -- Security considerations depend on the underlying runtime implementation \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.kt index 7329dc023..48cefd56b 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.kt @@ -11,58 +11,59 @@ import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.util.concurrent.Semaphore import kotlin.io.path.exists class SelfHealingTask( - orchestrationConfig: OrchestrationConfig, planTask: SelfHealingTaskExecutionConfigData? - ) : AbstractTask(orchestrationConfig, planTask) { - class SelfHealingTaskTypeConfig( - task_type: String? = null, - model: ApiChatModel? = null, - @Description("List of command executables that can be used for auto-fixing") var commandAutoFixCommands: MutableList? = mutableListOf(), - name: String? = task_type, - ) : TaskTypeConfig(task_type, name, model), ValidatedObject { - override fun validate(): String? { - if (commandAutoFixCommands.isNullOrEmpty()) { - return "commandAutoFixCommands must not be null or empty" - } - return ValidatedObject.validateFields(this) - } + orchestrationConfig: OrchestrationConfig, planTask: SelfHealingTaskExecutionConfigData? +) : AbstractTask(orchestrationConfig, planTask) { + class SelfHealingTaskTypeConfig( + task_type: String? 
= null, + model: ApiChatModel? = null, + @Description("List of command executables that can be used for auto-fixing") var commandAutoFixCommands: MutableList? = mutableListOf(), + name: String? = task_type, + ) : TaskTypeConfig(task_type, name, model), ValidatedObject { + override fun validate(): String? { + if (commandAutoFixCommands.isNullOrEmpty()) { + return "commandAutoFixCommands must not be null or empty" + } + return ValidatedObject.validateFields(this) } + } - class SelfHealingTaskExecutionConfigData( - @Description("The commands to be executed with their respective working directories") val commands: List? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null - ) : ValidatedObject, TaskExecutionConfig( - task_type = SelfHealing.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) { - override fun validate(): String? { - if (commands.isNullOrEmpty()) { - return "commands must not be null or empty" - } - return ValidatedObject.validateFields(this) - } + class SelfHealingTaskExecutionConfigData( + @Description("The commands to be executed with their respective working directories") val commands: List? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null + ) : ValidatedObject, TaskExecutionConfig( + task_type = SelfHealing.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) { + override fun validate(): String? { + if (commands.isNullOrEmpty()) { + return "commands must not be null or empty" + } + return ValidatedObject.validateFields(this) } + } - data class CommandWithWorkingDir( - @Description("The command to be executed") val command: List = emptyList(), - @Description("The relative path of the working directory") val workingDir: String? = null - ) : ValidatedObject { - override fun validate(): String? 
{ - if (command.isEmpty()) { - return "command must not be empty" - } - return null - } + data class CommandWithWorkingDir( + @Description("The command to be executed") val command: List = emptyList(), + @Description("The relative path of the working directory") val workingDir: String? = null + ) : ValidatedObject { + override fun validate(): String? { + if (command.isEmpty()) { + return "command must not be empty" + } + return null } + } - override fun promptSegment() = (""" + override fun promptSegment() = (""" SelfHealing - Run a command and automatically fix any issues that arise * Specify the commands to be executed along with their working directories * Each command's working directory should be specified relative to the root directory @@ -71,89 +72,118 @@ class SelfHealingTask( * Available commands: """.trimIndent() + typeConfig.commandAutoFixCommands?.joinToString("\n") { " * ${File(it).name}" }).trim() - override val typeConfig: SelfHealingTaskTypeConfig - get() = super.typeConfig as SelfHealingTaskTypeConfig + override val typeConfig: SelfHealingTaskTypeConfig + get() = super.typeConfig as SelfHealingTaskTypeConfig - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val semaphore = Semaphore(0) - Retryable(task = task) { - val task = task.ui.newTask() - agent.pool.submit { - val model = (typeConfig.model?.let { orchestrationConfig.instance(it) } - ?: orchestrationConfig.defaultChatter).getChildClient(task) - CmdPatchApp( - root = agent.root, - settings = PatchApp.Settings( - commands = this.executionConfig?.commands?.map { commandWithDir -> - val alias = commandWithDir.command.firstOrNull() - val cmds = executionConfig.commands.map { - val cmd = it.command.firstOrNull() - typeConfig.commandAutoFixCommands?.firstOrNull { it.endsWith(cmd ?: "") } ?: cmd - }.map { File(it!!) 
}.associateBy { it.name }.filterKeys { it.startsWith(alias ?: "") } - PatchApp.CommandSettings( - executable = when { - cmds.isNotEmpty() -> cmds.entries.firstOrNull()?.value - alias.isNullOrBlank() -> null - root.resolve(alias).exists() -> root.resolve(alias).toFile().absoluteFile - File(alias).exists() -> File(alias).absoluteFile - else -> null - } ?: throw IllegalArgumentException("Command not found: $alias"), - arguments = commandWithDir.command.drop(1).joinToString(" "), - workingDirectory = (commandWithDir.workingDir?.let { agent.root.toFile().resolve(it) } - ?: agent.root.toFile()).apply { mkdirs() }, - additionalInstructions = "" - ) - } ?: emptyList(), - autoFix = orchestrationConfig.autoFix, - includeLineNumbers = false, - ), - files = agent.files, - model = model, - parsingModel = orchestrationConfig.parsingChatter, - processor = orchestrationConfig.processor, - ).run( - task = task, model = model - ).apply { - when { - this.exitCode == 0 -> { - resultFn("All Commands completed") - semaphore.release() - } + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val semaphore = Semaphore(0) + Retryable(task = task) { + val markdownTranscript = transcript(task) + val task = task.ui.newTask() + agent.pool.submit { + val model = (typeConfig.model?.let { orchestrationConfig.instance(it) } + ?: orchestrationConfig.defaultChatter).getChildClient(task) + CmdPatchApp( + root = agent.root, + settings = PatchApp.Settings( + commands = this.executionConfig?.commands?.map { commandWithDir -> + val alias = commandWithDir.command.firstOrNull() + val cmds = executionConfig.commands.map { + val cmd = it.command.firstOrNull() + typeConfig.commandAutoFixCommands?.firstOrNull { it.endsWith(cmd ?: "") } ?: cmd + }.map { File(it!!) 
}.associateBy { it.name }.filterKeys { it.startsWith(alias ?: "") } + PatchApp.CommandSettings( + executable = when { + cmds.isNotEmpty() -> cmds.entries.firstOrNull()?.value + alias.isNullOrBlank() -> null + root.resolve(alias).exists() -> root.resolve(alias).toFile().absoluteFile + File(alias).exists() -> File(alias).absoluteFile + else -> null + } ?: throw IllegalArgumentException("Command not found: $alias"), + arguments = commandWithDir.command.drop(1).joinToString(" "), + workingDirectory = (commandWithDir.workingDir?.let { agent.root.toFile().resolve(it) } + ?: agent.root.toFile()).apply { mkdirs() }, + additionalInstructions = "" + ) + } ?: emptyList(), + autoFix = orchestrationConfig.autoFix, + includeLineNumbers = false, + ), + files = agent.files, + model = model, + parsingModel = orchestrationConfig.parsingChatter, + processor = orchestrationConfig.processor, + ).also { app -> + markdownTranscript?.let { transcript -> + transcript.write("# Self-Healing Task Execution\n\n".toByteArray()) + transcript.write("## Commands\n".toByteArray()) + } + }.run( + task = task, model = model + ).apply { + when { + this.exitCode == 0 -> { + resultFn("All Commands completed") + semaphore.release() + markdownTranscript?.let { transcript -> + transcript.write("\n## Result\n".toByteArray()) + transcript.write("All commands completed successfully (exit code: 0)\n".toByteArray()) + transcript.close() + } + } - else -> { - task.add( - task.ui.hrefLink("Ignore Error", "href-link cmd-button") { - resultFn("Error: ${this.exitCode}") - semaphore.release() - } - ) - } - } + else -> { + task.add( + task.ui.hrefLink("Ignore Error", "href-link cmd-button") { + resultFn("Error: ${this.exitCode}") + semaphore.release() + markdownTranscript?.let { transcript -> + transcript.write("\n## Result\n".toByteArray()) + transcript.write("Command failed with exit code: ${this.exitCode}\n".toByteArray()) + transcript.close() + } } + ) } - task.placeholder - } - try { - semaphore.acquire() - } 
catch (e: Throwable) { - log.warn("Error", e) + } } + } + task.placeholder + } + try { + semaphore.acquire() + } catch (e: Throwable) { + log.warn("Error", e) } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } - companion object { - private val log = LoggerFactory.getLogger(SelfHealingTask::class.java) - val SelfHealing = TaskType( - "SelfHealing", - SelfHealingTask.SelfHealingTaskExecutionConfigData::class.java, - SelfHealingTask.SelfHealingTaskTypeConfig::class.java, - "Run a command and automatically fix any issues that arise", - """ + companion object { + private val log = LoggerFactory.getLogger(SelfHealingTask::class.java) + val SelfHealing = TaskType( + "SelfHealing", + SelfHealingTaskExecutionConfigData::class.java, + SelfHealingTaskTypeConfig::class.java, + "Run a command and automatically fix any issues that arise", + """ Executes a command and automatically fixes any issues that arise.
  • Specify commands and working directories
  • @@ -162,7 +192,7 @@ class SelfHealingTask(
  • Output diff formatting
""" - ) + ) - } -} \ No newline at end of file + } +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.md deleted file mode 100644 index 5a5af906f..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SelfHealingTask.md +++ /dev/null @@ -1,121 +0,0 @@ -# SelfHealingTask - -## Overview - -The `SelfHealingTask` is a specialized task implementation that executes commands with automatic error recovery capabilities. It runs specified commands and attempts to automatically fix any issues that arise during execution, making it ideal for build processes, test runs, or any command-line operations that may encounter recoverable errors. - -## Key Features - -- **Automatic Error Recovery**: Attempts to fix issues that arise during command execution -- **Multiple Command Support**: Can execute multiple commands with different working directories -- **Configurable Working Directories**: Each command can have its own working directory relative to the root -- **Command Aliasing**: Maps command aliases to actual executable paths -- **Interactive Error Handling**: Provides options to ignore errors when auto-fix fails - -## Configuration - -### Task Settings (`SelfHealingTaskSettings`) - -| Field | Type | Description | -|-------|------|-------------| -| `task_type` | String? | The type identifier for the task | -| `enabled` | Boolean | Whether the task is enabled (default: false) | -| `model` | ApiChatModel? | The AI model to use for auto-fixing | -| `commandAutoFixCommands` | List? | List of command executables that can be used for auto-fixing | - -### Task Configuration (`SelfHealingTaskConfigData`) - -| Field | Type | Description | -|-------|------|-------------| -| `commands` | List? | Commands to execute with their working directories | -| `task_description` | String? | Description of the task | -| `task_dependencies` | List? 
| List of task dependencies | -| `state` | TaskState? | Current state of the task | - -### Command Configuration (`CommandWithWorkingDir`) - -| Field | Type | Description | -|-------|------|-------------| -| `command` | List | Command and its arguments as a list of strings | -| `workingDir` | String? | Relative path of the working directory from root | - -## Usage Example - -```kotlin -val taskConfig = SelfHealingTaskConfigData( - commands = listOf( - CommandWithWorkingDir( - command = listOf("npm", "test"), - workingDir = "frontend" - ), - CommandWithWorkingDir( - command = listOf("gradle", "build"), - workingDir = "backend" - ) - ), - task_description = "Run tests and build the project" - ) - -val settings = SelfHealingTaskSettings( - enabled = true, - model = ApiChatModel.GPT_4, - commandAutoFixCommands = mutableListOf( - "/usr/bin/npm", - "/usr/local/bin/gradle" - ) - ) -``` - -## How It Works - -1. **Command Resolution**: The task resolves command aliases to actual executable paths using the configured `commandAutoFixCommands` list -2. **Execution**: Commands are executed in their specified working directories -3. **Error Detection**: If a command fails (non-zero exit code), the auto-fix mechanism is triggered -4. **Auto-Fix Attempt**: Uses the configured AI model to analyze the error and attempt fixes -5. **Result Handling**: - - If successful (exit code 0): Reports completion - - If failed: Provides an option to ignore the error and continue - -## Command Resolution Logic - -The task uses a sophisticated command resolution mechanism: - -1. Extracts the first element of the command as an alias -2. Searches for matching executables in `commandAutoFixCommands` -3. Falls back to checking: - - Relative path from root directory - - Absolute file path - +4. 
Throws an error if no valid executable is found - -## Integration with CmdPatchApp - -The task delegates execution to `CmdPatchApp` with the following configuration: - -- **Root Directory**: The agent's root directory -- **Auto-Fix**: Enabled based on orchestration configuration -- **Model**: Uses either the task-specific model or the default chatter model -- **Parsing Model**: Uses the orchestration's parsing chatter model - -## Error Handling - -- **Retryable Wrapper**: The entire execution is wrapped in a `Retryable` block for resilience -- **Semaphore Control**: Uses semaphores to manage asynchronous execution flow -- **Interactive Recovery**: When auto-fix fails, provides an "Ignore Error" button for manual intervention - -## Best Practices - -1. **Command Configuration**: Always provide full paths in `commandAutoFixCommands` for reliable execution -2. **Working Directories**: Ensure working directories exist or can be created -3. **Model Selection**: Choose an appropriate AI model based on the complexity of potential errors -4. **Dependencies**: Properly configure task dependencies to ensure correct execution order -5. **Error Messages**: The task will attempt to fix compilation errors, missing dependencies, and configuration issues automatically - -## Limitations - -- Requires proper configuration of available commands in `commandAutoFixCommands` -- Auto-fix capability depends on the AI model's understanding of the error context -- May not be able to fix all types of errors (e.g., hardware failures, network issues) - -## Logging - -The task uses SLF4J logging through `LoggerFactory` for debugging and error tracking. Monitor logs for detailed execution information and troubleshooting. 
\ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SubPlanningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SubPlanningTask.kt index 2d8b57aa2..a6a977068 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SubPlanningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/SubPlanningTask.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.plan.tools -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* @@ -11,6 +11,7 @@ import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient +import java.io.FileOutputStream class SubPlanningTask( orchestrationConfig: OrchestrationConfig, planTask: SubPlanningTaskExecutionConfigData? 
@@ -51,14 +52,14 @@ class SubPlanningTask( if (planning_goal.isNullOrBlank() && task_description.isNullOrBlank()) { return "Either planning_goal or task_description must be specified for SubPlanningTask" } - + // Validate context items if present context?.forEachIndexed { index, ctx -> if (ctx.isBlank()) { return "Context item at index $index is blank" } } - + return ValidatedObject.validateFields(this) } } @@ -85,6 +86,7 @@ class SubPlanningTask( agent: TaskOrchestrator, messages: List, task: SessionTask, resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { log.info("Starting SubPlanningTask with goal: ${executionConfig?.planning_goal}") + val transcript = transcript(task) try { val typeConfig = this.typeConfig ?: throw RuntimeException() @@ -152,6 +154,7 @@ class SubPlanningTask( appendLine("---") appendLine() } + transcript?.write(planningInfo.toByteArray()) planningTask.add(planningInfo.renderMarkdown) // Execute the sub-plan using the cognitive mode @@ -161,6 +164,11 @@ class SubPlanningTask( log.debug("Executing sub-plan with ${contextMessages.size} context messages") // Handle the user message through the cognitive mode + transcript?.write("\n\n## Execution\n\n".toByteArray()) + transcript?.write("**Planning Goal:**\n\n".toByteArray()) + transcript?.write(planningGoal.toByteArray()) + transcript?.write("\n\n".toByteArray()) + cognitiveInstance.handleUserMessage(planningGoal, executionTask) // Collect results from the cognitive mode's context @@ -173,14 +181,21 @@ class SubPlanningTask( tabs["Summary"] = summaryTask.placeholder val summary = createSummary(results, planningGoal, summaryTask, orchestrationConfig) + transcript?.write("\n\n## Summary\n\n".toByteArray()) + transcript?.write(summary.toByteArray()) + transcript?.write("\n\n".toByteArray()) summaryTask.add(summary.renderMarkdown) tabs.update() resultFn(summary) } catch (e: Exception) { log.error("Error executing SubPlanningTask", e) + transcript?.write("\n\n## 
Error\n\n".toByteArray()) + transcript?.write("```\n${e.message}\n${e.stackTraceToString()}\n```\n".toByteArray()) task.error(e) resultFn("Error in sub-planning: ${e.message}") + } finally { + transcript?.close() } } @@ -267,6 +282,20 @@ class SubPlanningTask( } } + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + companion object { private val log = LoggerFactory.getLogger(SubPlanningTask::class.java) diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/doc_std.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/doc_std.md deleted file mode 100644 index baf017141..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/doc_std.md +++ /dev/null @@ -1,169 +0,0 @@ -### **Documentation Standards for Task Types** - -#### 1. Introduction - -The goal of these standards is to create consistent, comprehensive, and user-friendly documentation for every `TaskType` in the Cognitive Task Planning Framework. Good documentation is essential for users to understand what a task does, for planners to know when to use it, and for administrators to configure it correctly. - -This document provides a template and best practices for documenting tasks, leveraging the information already present in the source code (such as `@Description` annotations) as a single source of truth. - -#### 2. Core Principles - -* **Audience-First:** Documentation should be written with two primary audiences in mind: - * **End-Users:** Need to know what the task does, when to use it, and what kind of goal or prompt will invoke it. - * **Administrators/Developers:** Need to know how to configure the task at the system level (`OrchestrationConfig`) and understand its specific parameters. 
-* **Source-Driven:** Documentation should derive directly from the code wherever possible. The `shortDescription`, `longDescription`, and `@Description` annotations in the task's definition are the source of truth. -* **Example-Oriented:** Abstract descriptions are not enough. Every task must be documented with clear, practical examples of its configuration and usage. -* **Discoverable:** Documentation should make it easy to find related tasks or alternatives, helping users build more effective plans. - -#### 3. Documentation Template (Markdown) - -Each `TaskType` should have a dedicated section or page in the user manual that follows this structure. - -```markdown -## Task: - -**Category:** -**Summary:** - -### Description - - - -### When to Use - - -* Use this task when you need to execute a script and see its output. -* This is the best choice for running build commands like `mvn install` or `npm run build` and automatically fixing any compilation errors. -* Use this task to analyze a complex problem by breaking it down into a series of questions and answers. - -### Execution Configuration (`ExecutionConfigData`) - -These parameters are specified when the task is added to a plan. - -| Parameter | Type | Required | Default | Description | -|:-------------------|:--------------------|:---------|:--------|:-------------------------------------------------------| -| `` | `` | Yes | `null` | | -| `` | `List` | No | `[]` | | -| `` | `Boolean` | No | `false` | | - -**Example (Plan Snippet):** -```json -{ - "task_type": "", - "task_description": "A human-readable description of this step.", - "task_dependencies": ["previous_task_id"], - "": "Example Value", - "": ["value1", "value2"] -} -``` - -### Type Configuration (`TypeConfig`) - -These parameters are set by an administrator in the global `OrchestrationConfig` to define the task's default behavior. 
- -| Parameter | Type | Required | Default | Description | -|:-------------------|:--------------------|:---------|:--------|:-------------------------------------------------------| -| `` | `ApiChatModel` | No | `null` | | -| `` | `List` | No | `[]` | | - -**Example (`OrchestrationConfig` Snippet):** -```json -{ - "taskSettings": { - "": { - "task_type": "", - "": { "model": "gpt-4" }, - "": ["/usr/bin/mvn", "/usr/bin/npm"] - } - } -} -``` - -### Output - - - -The task returns a string containing the standard output and standard error from the executed command. This output can be used as input for a subsequent `AnalysisTask` to check for specific keywords or summarize the result. - -### Related Tasks - -* **``:** -* **``:** -``` - -#### 4. Example: Applying the Standard to `SelfHealingTask` - -Here is how the `SelfHealingTask` documentation would look using the template. - ---- - -## Task: SelfHealing - -**Category:** Code & Execution -**Summary:** Run a command and automatically fix any issues that arise. - -### Description - -The `SelfHealingTask` is a powerful tool for executing shell commands that might fail, such as build scripts, linters, or test runners. When a command fails (i.e., returns a non-zero exit code), the task captures the output, analyzes the error, and attempts to generate and apply a code patch to fix the underlying issue. It then re-runs the command to verify the fix. - -This is ideal for automating CI/CD pipelines, code maintenance, and complex build processes. Key features include: -* Execution of one or more shell commands in specified working directories. -* Automatic error analysis and patch generation using an AI model. -* Interactive mode for approving or revising suggested fixes. -* Support for a configurable list of allowed command executables. - -### When to Use - -* Use this task to run a build process (e.g., `mvn install`) and automatically fix compilation errors. 
-* Use it to run a test suite (e.g., `npm test`) and have the AI attempt to fix failing tests. -* This is the right choice for any script or command where failures are possible and you want to attempt an automated recovery. - -### Execution Configuration (`SelfHealingTaskExecutionConfigData`) - -These parameters are specified when the task is added to a plan. - -| Parameter | Type | Required | Default | Description | -|:---|:---|:---|:---|:---| -| `commands` | `List` | Yes | `null` | The commands to be executed with their respective working directories. | -| `task_description` | `String` | No | `null` | A human-readable description of the task's purpose. | -| `task_dependencies` | `List` | No | `[]` | A list of task IDs that must be completed before this one starts. | - -**Example (Plan Snippet):** -```json -{ - "task_type": "SelfHealing", - "task_description": "Build the Java project and fix any errors.", - "commands": [ - { - "command": ["mvn", "clean", "install"], - "workingDir": "backend/java-app" - } - ] -} -``` - -### Type Configuration (`SelfHealingTaskTypeConfig`) - -These parameters are set by an administrator in the global `OrchestrationConfig` to define the task's default behavior. - -| Parameter | Type | Required | Default | Description | -|:---|:---|:---|:---|:---| -| `model` | `ApiChatModel` | No | `null` | The AI model to use for generating fixes. Defaults to the system's `defaultChatter`. | -| `commandAutoFixCommands` | `MutableList` | No | `[]` | List of command executables that can be used for auto-fixing. This acts as a security whitelist. | - -**Example (`OrchestrationConfig` Snippet):** -```json -{ - "taskSettings": { - "SelfHealing": { - "task_type": "SelfHealing", - "model": { "model": "gpt-4-turbo" }, - "commandAutoFixCommands": ["/usr/bin/mvn", "/usr/local/bin/npm", "/usr/bin/git"] - } - } -} -``` - -### Output - -If all commands execute successfully (or are successfully patched), the task returns the string `"All Commands completed"`. 
If a command fails and cannot be fixed, or if the user chooses to ignore the error, it returns an error message like `"Error: "`. diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AbstractFileTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AbstractFileTask.kt index 00d222c14..234fac7ba 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AbstractFileTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AbstractFileTask.kt @@ -10,115 +10,133 @@ import com.simiacryptus.cognotik.plan.TaskTypeConfig import com.simiacryptus.cognotik.plan.tools.file.AbstractFileTask.FileTaskExecutionConfig import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.webui.session.SessionTask import java.io.File +import java.io.FileOutputStream import java.nio.file.FileSystems +import java.nio.file.Path abstract class AbstractFileTask( - orchestrationConfig: OrchestrationConfig, - planTask: T? + orchestrationConfig: OrchestrationConfig, + planTask: T? ) : AbstractTask(orchestrationConfig, planTask) { -abstract class FileTaskExecutionConfig( - task_type: String? = null, - task_description: String? = null, - @Description("REQUIRED: The files to be generated as output for the task (relative paths)") val files: List? = null, - @Description("Additional files used to inform the change, including relevant files created by previous tasks") val related_files: List? = null, - @Description("Whether to extract text content from non-text files (PDF, HTML, etc.)") val extractContent: Boolean = false, - task_dependencies: List? = null, - state: TaskState? 
= TaskState.Pending, - ) : TaskExecutionConfig( - task_type = task_type, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) + protected val codeFiles = mutableMapOf() - protected fun getInputFileCode(): String { - val strings = (executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf()) - val flatMap = strings - .flatMap { pattern: String -> - val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") - (FileSelectionUtils.filteredWalk(root.toFile()) { - //path -> matcher.matches(root.relativize(path.toPath())) && !FileSelectionUtils.isLLMIgnored(path.toPath()) - when { - FileSelectionUtils.isLLMIgnored(it.toPath()) -> false - matcher.matches(root.relativize(it.toPath())) -> true - it.isDirectory -> true - else -> false - } - }) - } - val filter = flatMap.filter { file -> - file.isFile && file.exists() - } - return filter - .distinct() - .sortedBy { it } - .joinToString("\n\n") { relativePath -> - val file = root.toFile().resolve(relativePath) - try { - val content = if (executionConfig?.extractContent == true && !isTextFile(file)) { - extractDocumentContent(file) - } else { - codeFiles[file.toPath()] ?: file.readText() - } - "# $relativePath\n\n$TRIPLE_TILDE\n$content\n$TRIPLE_TILDE" - } catch (e: Throwable) { - log.warn("Error reading file: $relativePath", e) - "" - } - } - } + abstract class FileTaskExecutionConfig( + task_type: String? = null, + task_description: String? = null, + @Description("REQUIRED: The files to be generated as output for the task (relative paths)") val files: List? = null, + @Description("Additional files used to inform the change, including relevant files created by previous tasks") val related_files: List? = null, + @Description("Whether to extract text content from non-text files (PDF, HTML, etc.)") val extractContent: Boolean = false, + task_dependencies: List? = null, + state: TaskState? 
= TaskState.Pending, + ) : TaskExecutionConfig( + task_type = task_type, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) - private fun isTextFile(file: java.io.File): Boolean { - val textExtensions = setOf( - "txt", - "md", - "kt", - "java", - "js", - "ts", - "py", - "rb", - "go", - "rs", - "c", - "cpp", - "h", - "hpp", - "css", - "html", - "xml", - "json", - "yaml", - "yml", - "properties", - "gradle", - "maven" - ) - return textExtensions.contains(file.extension.lowercase()) + protected fun getInputFileCode(): String { + val strings = (executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf()) + val flatMap = strings + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + //path -> matcher.matches(root.relativize(path.toPath())) && !FileSelectionUtils.isLLMIgnored(path.toPath()) + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + } + val filter = flatMap.filter { file -> + file.isFile && file.exists() } + return filter + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (executionConfig?.extractContent == true && !isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n$TRIPLE_TILDE\n$content\n$TRIPLE_TILDE" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + 
"properties", + "gradle", + "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + protected fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + return markdownTranscript + } - companion object { - private val log = LoggerFactory.getLogger(AbstractFileTask::class.java) - const val TRIPLE_TILDE = "```" - fun extractDocumentContent(file: File): String { - return try { - file.getReader().use { reader -> - when (reader) { - is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) - else -> reader.getText() - } - } - } catch (e: Exception) { - log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) - try { - file.readText() - } catch (e2: Exception) { - "Error reading file: ${e2.message}" - } - } + companion object { + private val log = LoggerFactory.getLogger(AbstractFileTask::class.java) + const val TRIPLE_TILDE = "```" + + fun extractDocumentContent(file: File): String { + return try { + file.getReader().use { reader -> + when (reader) { + is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" } + } } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AnalysisTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AnalysisTask.kt index 4c06566c2..83a29bfd1 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AnalysisTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/AnalysisTask.kt @@ -1,6 +1,6 @@ package 
com.simiacryptus.cognotik.plan.tools.file -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.input.PaginatedDocumentReader @@ -9,233 +9,203 @@ import com.simiacryptus.cognotik.models.ModelSchema import com.simiacryptus.cognotik.models.ModelSchema.Role import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.plan.tools.file.AbstractFileTask.Companion.TRIPLE_TILDE -import com.simiacryptus.cognotik.plan.tools.file.FileSearchTask.Companion.getAvailableFiles import com.simiacryptus.cognotik.platform.model.ApiChatModel import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.nio.file.FileSystems +import java.nio.file.Path import java.util.concurrent.Semaphore import java.util.concurrent.atomic.AtomicReference - class AnalysisTask( - orchestrationConfig: OrchestrationConfig, - planTask: AnalysisTaskExecutionConfigData? +class AnalysisTask( + orchestrationConfig: OrchestrationConfig, + planTask: AnalysisTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - - class AnalysisTaskTypeConfig( - @Description("Enable non-interactive mode to skip user feedback and iteration") - val non_interactive: Boolean = true, - task_type: String? = Analysis.name, - name: String? = null - ) : TaskTypeConfig( - task_type = task_type, - name = name - ), ValidatedObject - - class AnalysisTaskExecutionConfigData( - @Description("The specific questions or topics to be addressed in the inquiry") - val inquiry_questions: List? = null, - @Description("The goal or purpose of the inquiry") - val inquiry_goal: String? = null, - @Description("The specific files (or file patterns, e.g. 
**/*.kt) to be used as input for the task") - val input_files: List? = null, - @Description("Whether to extract text content from non-text files (PDF, HTML, etc.)") - val extractContent: Boolean = false, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null, - ) : TaskExecutionConfig( - task_type = Analysis.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ), ValidatedObject - - override fun promptSegment(): String { - val availableFiles = getAvailableFiles(root) - return (if (!orchestrationConfig.autoFix) """ - Analysis - Directly answer questions or provide insights using the LLM. Reading files is optional and can be included if relevant to the inquiry. - * Specify the questions and the goal of the inquiry. - * Optionally, list input files (supports glob patterns) to be examined when answering the questions. - * User response/feedback and iteration are supported. - * The primary characteristic of this task is that it does not produce side effects; the LLM is used to directly process the inquiry and respond. - """ else """ - Analysis - Directly answer questions or provide a report using the LLM. Reading files is optional and can be included if relevant to the inquiry. - * Specify the questions and the goal of the inquiry. - * Optionally, list input files (supports glob patterns) to be examined when answering the questions. - * The primary characteristic of this task is that it does not produce side effects; the LLM is used to directly process the inquiry and respond. 
- """) + """ - Available files: - ${availableFiles.joinToString("\n") { " - $it" }} - """ - } - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - - val toInput = { it: String -> - messages + listOf( - getInputFileCode(), - it, - ).filter { it.isNotBlank() } - } + protected val codeFiles = mutableMapOf() + + class AnalysisTaskTypeConfig( + @Description("Enable non-interactive mode to skip user feedback and iteration") + val non_interactive: Boolean = true, + task_type: String? = Analysis.name, + name: String? = null + ) : TaskTypeConfig( + task_type = task_type, + name = name + ), ValidatedObject + + class AnalysisTaskExecutionConfigData( + @Description("The specific questions or topics to be addressed in the inquiry") + val inquiry_questions: List? = null, + @Description("The goal or purpose of the inquiry") + val inquiry_goal: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + ) : TaskExecutionConfig( + task_type = Analysis.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject - val taskConfig: AnalysisTaskExecutionConfigData? = this.executionConfig - val typeConfig = typeConfig ?: throw RuntimeException() - val insightActor = ChatAgent( - name = "Insight", - prompt = """ + override fun promptSegment() = (if (!orchestrationConfig.autoFix) """ + Analysis - Directly answer questions or provide insights using the LLM. Reading files is optional and can be included if relevant to the inquiry. + * Specify the questions and the goal of the inquiry. + * Optionally, list input files (supports glob patterns) to be examined when answering the questions. 
+ * User response/feedback and iteration are supported. + * The primary characteristic of this task is that it does not produce side effects; the LLM is used to directly process the inquiry and respond. + """ else """ + Analysis - Directly answer questions or provide a report using the LLM. Reading files is optional and can be included if relevant to the inquiry. + * Specify the questions and the goal of the inquiry. + * Optionally, list input files (supports glob patterns) to be examined when answering the questions. + * The primary characteristic of this task is that it does not produce side effects; the LLM is used to directly process the inquiry and respond. + """) + """ + Available files: + ${getAvailableFiles(root).joinToString("\n") { " - $it" }} + """ + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val transcript = transcript(task) + + val toInput = { it: String -> + messages + listOf( + getInputFileCode(), + it, + ).filter { it.isNotBlank() } + } + + val taskConfig: AnalysisTaskExecutionConfigData? = this.executionConfig + val typeConfig = typeConfig ?: throw RuntimeException() + val insightActor = ChatAgent( + name = "Insight", + prompt = """ Create code for a new file that fulfills the specified requirements and context. Given a detailed user request, break it down into smaller, actionable tasks suitable for software development. Compile comprehensive information and insights on the specified topic. Provide a comprehensive overview, including key concepts, relevant technologies, best practices, and any potential challenges or considerations. Ensure the information is accurate, up-to-date, and well-organized to facilitate easy understanding. 
""".trimIndent(), - model = (typeConfig.model?.let { this.orchestrationConfig.instance(it) } - ?: this.orchestrationConfig.defaultChatter).getChildClient(task), - temperature = this.orchestrationConfig.temperature, - ) - val inquiryResult = if (orchestrationConfig.autoFix || typeConfig.non_interactive) - insightActor.answer( - toInput( - "Expand ${taskConfig?.task_description ?: ""}\nQuestions: ${ - taskConfig?.inquiry_questions?.joinToString( - "\n" - ) - }\nGoal: ${taskConfig?.inquiry_goal}\n${JsonUtil.toJson(data = this)}" - ), - ).apply { - task.add(MarkdownUtil.renderMarkdown(this, ui = task.ui)) - } - else - Discussable( - task = task, - userMessage = { - "Expand ${taskConfig?.task_description ?: ""}\nQuestions: ${ - taskConfig?.inquiry_questions?.joinToString( - "\n" - ) - }\nGoal: ${taskConfig?.inquiry_goal}\n${this.executionConfig?.toJson()}" - }, - heading = "", - initialResponse = { it: String -> insightActor.answer(toInput(it)) }, - outputFn = { design: String -> - MarkdownUtil.renderMarkdown(design) - }, - reviseResponse = { usermessages: List> -> - val inStr = "Expand ${taskConfig?.task_description ?: ""}\nQuestions: ${ - taskConfig?.inquiry_questions?.joinToString("\n") - }\nGoal: ${taskConfig?.inquiry_goal}\n${this.executionConfig?.toJson()}" - val messages = usermessages.map { ModelSchema.ChatMessage(it.second, it.first.toContentList()) } - .toTypedArray() - insightActor.respond( - messages = messages, - input = toInput(inStr), - ) - }, - atomicRef = AtomicReference(), - semaphore = Semaphore(0), - ).call() - resultFn(inquiryResult ?: "(no response)") - } - - private fun getInputFileCode(): String { - val strings = executionConfig?.input_files ?: listOf() - val flatMap = strings - .flatMap { pattern: String -> - val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") - (FileSelectionUtils.filteredWalk(root.toFile()) { - //path -> matcher.matches(root.relativize(path.toPath())) && !FileSelectionUtils.isLLMIgnored(path.toPath()) - when 
{ - FileSelectionUtils.isLLMIgnored(it.toPath()) -> false - matcher.matches(root.relativize(it.toPath())) -> true - it.isDirectory -> true - else -> false - } - }) - } - val filter = flatMap.filter { file -> - file.isFile && file.exists() - } - return filter - .distinct() - .sortedBy { it } - .joinToString("\n\n") { relativePath -> - val file = root.toFile().resolve(relativePath) - try { - val content = if (executionConfig?.extractContent == true && !isTextFile(file)) { - extractDocumentContent(file) - } else { - codeFiles[file.toPath()] ?: file.readText() - } - "# $relativePath\n\n$TRIPLE_TILDE\n$content\n$TRIPLE_TILDE" - } catch (e: Throwable) { - log.warn("Error reading file: $relativePath", e) - "" - } - } - } + model = (typeConfig.model?.let { this.orchestrationConfig.instance(it) } + ?: this.orchestrationConfig.defaultChatter).getChildClient(task), + temperature = this.orchestrationConfig.temperature, + ) + val inquiryResult = if (orchestrationConfig.autoFix || typeConfig.non_interactive) { + val input = toInput( + "Expand ${taskConfig?.task_description ?: ""}\nQuestions: ${ + taskConfig?.inquiry_questions?.joinToString( + "\n" + ) + }\nGoal: ${taskConfig?.inquiry_goal}\n${JsonUtil.toJson(data = this)}" + ) + transcript?.write("# Analysis Request\n\n${input.joinToString("\n\n")}\n\n".toByteArray()) + insightActor.answer(input) + } else + Discussable( + task = task, + userMessage = { + "Expand ${taskConfig?.task_description ?: ""}\nQuestions: ${ + taskConfig?.inquiry_questions?.joinToString( + "\n" + ) + }\nGoal: ${taskConfig?.inquiry_goal}\n${this.executionConfig?.toJson()}" + }, + heading = "", + initialResponse = { it: String -> + transcript?.write("# Initial Request\n\n$it\n\n".toByteArray()) + insightActor.answer(toInput(it)).also { response -> + transcript?.write("# Initial Response\n\n$response\n\n".toByteArray()) + } + }, + outputFn = { design: String -> + MarkdownUtil.renderMarkdown(design) + }, + reviseResponse = { usermessages: List> -> + val inStr 
= "Expand ${taskConfig?.task_description ?: ""}\nQuestions: ${ + taskConfig?.inquiry_questions?.joinToString("\n") + }\nGoal: ${taskConfig?.inquiry_goal}\n${this.executionConfig?.toJson()}" + val messages = usermessages.map { ModelSchema.ChatMessage(it.second, it.first.toContentList()) } + .toTypedArray() + transcript?.write("# Revision Request\n\n${usermessages.joinToString("\n") { "${it.second}: ${it.first}" }}\n\n".toByteArray()) + insightActor.respond( + messages = messages, + input = toInput(inStr), + ).also { response -> + transcript?.write("# Revision Response\n\n$response\n\n".toByteArray()) + } + }, + atomicRef = AtomicReference(), + semaphore = Semaphore(0), + ).call() + transcript?.close() + resultFn(inquiryResult ?: "(no response)") + } - private fun isTextFile(file: File): Boolean { - val textExtensions = setOf( - "txt", - "md", - "kt", - "java", - "js", - "ts", - "py", - "rb", - "go", - "rs", - "c", - "cpp", - "h", - "hpp", - "css", - "html", - "xml", - "json", - "yaml", - "yml", - "properties", - "gradle", - "maven" + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + - private fun extractDocumentContent(file: File) = try { - file.getReader().use { reader -> - when (reader) { - is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) - else -> reader.getText() - } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false } - } catch (e: Exception) { - log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) - try { - file.readText() - } catch (e2: Exception) { - "Error reading file: ${e2.message}" + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() } + "# $relativePath\n\n$TRIPLE_TILDE\n$content\n$TRIPLE_TILDE" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } } - companion object { -private val log = LoggerFactory.getLogger(AnalysisTask::class.java) - val Analysis = TaskType( - "Analysis", - AnalysisTaskExecutionConfigData::class.java, - AnalysisTaskTypeConfig::class.java, - "Directly answer questions or provide insights using the LLM, optionally referencing files, with optional user feedback and iteration.", - """ + companion object { + private val log = LoggerFactory.getLogger(AnalysisTask::class.java) + val Analysis = TaskType( + 
"Analysis", + AnalysisTaskExecutionConfigData::class.java, + AnalysisTaskTypeConfig::class.java, + "Directly answer questions or provide insights using the LLM, optionally referencing files, with optional user feedback and iteration.", + """ Provides direct answers and insights using the LLM, optionally referencing project files.
  • Primarily processes and responds to user inquiries using the language model, without producing side effects or modifying files
  • @@ -247,7 +217,64 @@ private val log = LoggerFactory.getLogger(AnalysisTask::class.java)
  • Ideal for technical Q&A, code reviews, and architectural analysis without making changes
""" - ) + ) + fun getAvailableFiles( + path: Path, + treatDocumentsAsText: Boolean = false, + ): List { + return try { + listOf(FileSelectionUtils.filteredWalkAsciiTree(path.toFile(), 20, treatDocumentsAsText = treatDocumentsAsText)) + } catch (e: Exception) { + log.error("Error listing available files", e) + listOf("Error listing files: ${e.message}") + } + } + + private val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" + ) + + fun isTextFile(file: File): Boolean { + return textExtensions.contains(file.extension.lowercase()) + } + + fun extractDocumentContent(file: File) = try { + file.getReader().use { reader -> + when (reader) { + is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" + } } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.kt index 519a25880..14617be66 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.kt @@ -1,136 +1,144 @@ package com.simiacryptus.cognotik.plan.tools.file - import com.simiacryptus.cognotik.actors.ChatAgent - import com.simiacryptus.cognotik.chat.model.ChatInterface - import com.simiacryptus.cognotik.describe.Description - import com.simiacryptus.cognotik.plan.OrchestrationConfig - import com.simiacryptus.cognotik.plan.TaskOrchestrator - import com.simiacryptus.cognotik.plan.TaskType 
- import com.simiacryptus.cognotik.plan.TaskTypeConfig - import com.simiacryptus.cognotik.plan.tools.file.FileModificationTask.FileModificationTaskExecutionConfigData - import com.simiacryptus.cognotik.plan.tools.file.FileSearchTask.Companion.getAvailableFiles - import com.simiacryptus.cognotik.platform.model.ApiChatModel - import com.simiacryptus.cognotik.util.AddApplyFileDiffLinks - import com.simiacryptus.cognotik.util.LoggerFactory - import com.simiacryptus.cognotik.util.MarkdownUtil.renderMarkdown - import com.simiacryptus.cognotik.util.Retryable +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.chat.model.ChatInterface +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.OrchestrationConfig +import com.simiacryptus.cognotik.plan.TaskOrchestrator +import com.simiacryptus.cognotik.plan.TaskType +import com.simiacryptus.cognotik.plan.TaskTypeConfig +import com.simiacryptus.cognotik.plan.tools.file.FileModificationTask.FileModificationTaskExecutionConfigData +import com.simiacryptus.cognotik.platform.model.ApiChatModel +import com.simiacryptus.cognotik.util.AddApplyFileDiffLinks +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.MarkdownUtil.renderMarkdown +import com.simiacryptus.cognotik.util.Retryable import com.simiacryptus.cognotik.util.ValidatedObject - import com.simiacryptus.cognotik.webui.session.SessionTask - import com.simiacryptus.cognotik.webui.session.getChildClient - import java.io.File - import java.util.concurrent.Semaphore - import java.util.concurrent.TimeUnit +import com.simiacryptus.cognotik.webui.session.SessionTask +import com.simiacryptus.cognotik.webui.session.getChildClient +import java.io.File +import java.util.concurrent.Semaphore +import java.util.concurrent.TimeUnit - class FileModificationTask( - orchestrationConfig: OrchestrationConfig, - planTask: FileModificationTaskExecutionConfigData? 
- ) : AbstractFileTask(orchestrationConfig, planTask) { - class FileModificationTaskExecutionConfigData( - files: List? = null, - related_files: List? = null, - extractContent: Boolean = false, - @Description("Specific modifications to be made to the files") - val modifications: Any? = null, - @Description("Whether to include git diff with HEAD") - val includeGitDiff: Boolean = false, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null - ) : FileTaskExecutionConfig( - task_type = FileModification.name, - task_description = task_description, - task_dependencies = task_dependencies, - related_files = related_files, - files = files, - extractContent = extractContent, - state = state - ), ValidatedObject { - override fun validate(): String? { - if (files.isNullOrEmpty() && related_files.isNullOrEmpty()) { - return "At least one file must be specified in either 'files' or 'related_files'" - } - return ValidatedObject.validateFields(this) - } +class FileModificationTask( + orchestrationConfig: OrchestrationConfig, + planTask: FileModificationTaskExecutionConfigData? +) : AbstractFileTask(orchestrationConfig, planTask) { + class FileModificationTaskExecutionConfigData( + files: List? = null, + related_files: List? = null, + extractContent: Boolean = false, + @Description("Specific modifications to be made to the files") + val modifications: Any? = null, + @Description("Whether to include git diff with HEAD") + val includeGitDiff: Boolean = false, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null + ) : FileTaskExecutionConfig( + task_type = FileModification.name, + task_description = task_description, + task_dependencies = task_dependencies, + related_files = related_files, + files = files, + extractContent = extractContent, + state = state + ), ValidatedObject { + override fun validate(): String? 
{ + if (files.isNullOrEmpty() && related_files.isNullOrEmpty()) { + return "At least one file must be specified in either 'files' or 'related_files'" + } + return ValidatedObject.validateFields(this) } + } - private fun getGitDiff(filePath: String): String? { - return try { - val process = ProcessBuilder("git", "diff", "HEAD", "--", File(filePath).name) - .directory(File(filePath).parentFile) - .start() - if (process.waitFor(10, TimeUnit.SECONDS)) { - process.inputStream.bufferedReader().readText() - } else { - process.destroy() - log.warn("Git diff command timed out for file: $filePath") - null - } - } catch (e: Exception) { - log.warn("Failed to get git diff for file: $filePath", e) - null - } + private fun getGitDiff(filePath: String): String? { + return try { + val process = ProcessBuilder("git", "diff", "HEAD", "--", File(filePath).name) + .directory(File(filePath).parentFile) + .start() + if (process.waitFor(10, TimeUnit.SECONDS)) { + process.inputStream.bufferedReader().readText() + } else { + process.destroy() + log.warn("Git diff command timed out for file: $filePath") + null + } + } catch (e: Exception) { + log.warn("Failed to get git diff for file: $filePath", e) + null } + } - private fun getInputFileWithDiff(): String { - if (!executionConfig?.includeGitDiff!!) return getInputFileCode() - val fileContent = getInputFileCode() - val gitDiffs = (executionConfig?.related_files ?: listOf()) - .mapNotNull { file -> - getGitDiff(file)?.let { diff -> - "Git diff for $file:\n$diff" - } - } - .joinToString("\n\n") - return if (gitDiffs.isNotBlank()) { - """ + private fun getInputFileWithDiff(): String { + if (!executionConfig?.includeGitDiff!!) 
return getInputFileCode() + val fileContent = getInputFileCode() + val gitDiffs = (executionConfig?.related_files ?: listOf()) + .mapNotNull { file -> + getGitDiff(file)?.let { diff -> + "Git diff for $file:\n$diff" + } + } + .joinToString("\n\n") + return if (gitDiffs.isNotBlank()) { + """ Current file content: $fileContent Git changes: $gitDiffs """.trimIndent() - } else { - fileContent - } + } else { + fileContent } + } - override fun promptSegment() = """ + override fun promptSegment() = """ FileModification - Modify existing files or create new files * For each file, specify the relative file path and the goal of the modification or creation * List input files/tasks to be examined when designing the modifications or new files Available files: -${getAvailableFiles(root).joinToString("\n") { " - $it" }} +${ + AnalysisTask.getAvailableFiles( + root + ).joinToString("\n") { " - $it" } + } """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val defaultFile = if (((executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf())).isEmpty()) { - task.complete("CONFIGURATION ERROR: No input files specified") - resultFn("CONFIGURATION ERROR: No input files specified") - return - } else if (((executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf())).distinct().size == 1) { - ((executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf())).first() - } else if ((executionConfig?.files ?: listOf()).distinct().size == 1) { - (executionConfig?.files ?: listOf()).first() - } else { - null - } - val semaphore = Semaphore(0) - val completionNotes = mutableListOf() - Retryable(task = task) { - val task = task.ui.newTask(false) - val typeConfig = typeConfig ?: throw RuntimeException() - task.ui.pool.submit { - val chatInterface = (typeConfig.model?.let { 
this.orchestrationConfig.instance(it) } - ?: this.orchestrationConfig.defaultChatter).getChildClient(task) - val chatAgent = ChatAgent( - name = "FileModification", - prompt = """ + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val defaultFile = if (((executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf())).isEmpty()) { + task.complete("CONFIGURATION ERROR: No input files specified") + resultFn("CONFIGURATION ERROR: No input files specified") + return + } else if (((executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf())).distinct().size == 1) { + ((executionConfig?.related_files ?: listOf()) + (executionConfig?.files ?: listOf())).first() + } else if ((executionConfig?.files ?: listOf()).distinct().size == 1) { + (executionConfig?.files ?: listOf()).first() + } else { + null + } + + val semaphore = Semaphore(0) + val completionNotes = mutableListOf() + // Initialize transcript for this task + val transcript = transcript(task) + transcript?.let { stream -> + stream.write("# File Modification Task Transcript\n\n".toByteArray()) + Retryable(task = task) { + val task = task.ui.newTask(false) + val typeConfig = typeConfig ?: throw RuntimeException() + task.ui.pool.submit { + val chatInterface = (typeConfig.model?.let { this.orchestrationConfig.instance(it) } + ?: this.orchestrationConfig.defaultChatter).getChildClient(task) + val chatAgent = ChatAgent( + name = "FileModification", + prompt = """ Generate precise code modifications and new files based on requirements: For modifying existing files: - Write efficient, readable, and maintainable code changes @@ -181,87 +189,102 @@ ${getAvailableFiles(root).joinToString("\n") { " - $it" }} } ${TRIPLE_TILDE} """.trimIndent(), - model = chatInterface, - temperature = this.orchestrationConfig.temperature, - ) - val codeResult = chatAgent.answer( - (messages 
+ listOf( - agent.executionState?.tasksByDescription?.filter { - executionConfig?.task_dependencies?.contains(it.key) == true && it.value is FileModificationTaskExecutionConfigData - }?.entries?.joinToString("\n\n") { - (it.value as FileModificationTaskExecutionConfigData).files?.joinToString("\n") { - val file = root.resolve(it).toFile() - if (file.exists()) { - val relativePath = root.relativize(file.toPath()) - "## $relativePath\n\n${(codeFiles[file.toPath()] ?: file.readText()).let { "$TRIPLE_TILDE\n${it}\n$TRIPLE_TILDE" }}" - } else { - "File not found: $it" - } - } ?: "" - } ?: "", - getInputFileWithDiff(), - executionConfig?.task_description ?: "", - )).filter { it.isNotBlank() } - ) - if (orchestrationConfig.autoFix) { - val markdown = renderMarkdown(codeResult, ui = task.ui) { - AddApplyFileDiffLinks.instrumentFileDiffs( - task.ui, - root = agent.root, - response = it, - handle = { newCodeMap -> - newCodeMap.forEach { (path, newCode) -> - completionNotes += ("$path Updated") - } - }, - shouldAutoApply = { orchestrationConfig.autoFix }, - model = chatInterface, - defaultFile = defaultFile, - orchestrationConfig.processor - ) + "\n\n## Auto-applied changes" - } - task.complete(markdown) - semaphore.release() - } else { - task.complete(renderMarkdown(codeResult, ui = task.ui) { - AddApplyFileDiffLinks.instrumentFileDiffs( - task.ui, - root = agent.root, - response = it, - handle = { newCodeMap -> - newCodeMap.forEach { (path, newCode) -> - completionNotes += ("$path Updated") - } - }, - model = chatInterface, - defaultFile = defaultFile, - processor = orchestrationConfig.processor, - ) + acceptButtonFooter(task.ui) { - task.complete() - semaphore.release() - } - }) - } + model = chatInterface, + temperature = this.orchestrationConfig.temperature, + ) + val codeResult = chatAgent.answer( + (messages + listOf( + agent.executionState?.tasksByDescription?.filter { + executionConfig?.task_dependencies?.contains(it.key) == true && it.value is 
FileModificationTaskExecutionConfigData + }?.entries?.joinToString("\n\n") { + (it.value as FileModificationTaskExecutionConfigData).files?.joinToString("\n") { + val file = root.resolve(it).toFile() + if (file.exists()) { + val relativePath = root.relativize(file.toPath()) + "## $relativePath\n\n${(codeFiles[file.toPath()] ?: file.readText()).let { "$TRIPLE_TILDE\n${it}\n$TRIPLE_TILDE" }}" + } else { + "File not found: $it" + } + } ?: "" + } ?: "", + getInputFileWithDiff(), + executionConfig?.task_description ?: "", + )).filter { it.isNotBlank() } + ) + // Write to transcript + transcript?.write("\n## AI Response\n\n".toByteArray()) + transcript?.write(codeResult.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + if (orchestrationConfig.autoFix) { + val markdown = renderMarkdown(codeResult, ui = task.ui) { + AddApplyFileDiffLinks.instrumentFileDiffs( + task.ui, + root = agent.root, + response = it, + handle = { newCodeMap -> + newCodeMap.forEach { (path, _) -> + completionNotes += ("$path Updated") + } + }, + shouldAutoApply = { orchestrationConfig.autoFix }, + model = chatInterface, + defaultFile = defaultFile, + orchestrationConfig.processor + ) + "\n\n## Auto-applied changes" } - task.placeholder - } - try { - semaphore.acquire() - resultFn(completionNotes.joinToString("\n")) - } catch (e: Throwable) { - log.warn("Error", e) + // Log auto-applied changes to transcript + transcript?.write("## Auto-Applied Changes\n\n".toByteArray()) + transcript?.write(completionNotes.joinToString("\n").toByteArray()) + task.complete(markdown) + semaphore.release() + } else { + task.complete(renderMarkdown(codeResult, ui = task.ui) { + AddApplyFileDiffLinks.instrumentFileDiffs( + task.ui, + root = agent.root, + response = it, + handle = { newCodeMap -> + newCodeMap.forEach { (path, _) -> + completionNotes += ("$path Updated") + } + }, + model = chatInterface, + defaultFile = defaultFile, + processor = orchestrationConfig.processor, + ) + acceptButtonFooter(task.ui) { + 
task.complete() + semaphore.release() + } + }) + } + transcript?.flush() } + task.placeholder + } } - companion object { - private val log = LoggerFactory.getLogger(FileModificationTask::class.java) + try { + semaphore.acquire() + // Write final completion notes to transcript + transcript?.write("\n## Completion Notes\n\n".toByteArray()) + transcript?.write(completionNotes.joinToString("\n").toByteArray()) + transcript?.close() + resultFn(completionNotes.joinToString("\n")) + } catch (e: Throwable) { + log.warn("Error", e) + } + } + + companion object { + private val log = LoggerFactory.getLogger(FileModificationTask::class.java) - val FileModification = TaskType( - "FileModification", - FileModificationTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Create new files or modify existing code with AI-powered assistance", - """ + val FileModification = TaskType( + "FileModification", + FileModificationTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Create new files or modify existing code with AI-powered assistance", + """ Creates or modifies source files with AI assistance while maintaining code quality.
  • Shows proposed changes in diff format for easy review
  • @@ -274,6 +297,6 @@ ${getAvailableFiles(root).joinToString("\n") { " - $it" }}
  • Preserves existing code formatting and structure
""" - ) - } + ) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.md deleted file mode 100644 index 114818350..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileModificationTask.md +++ /dev/null @@ -1,245 +0,0 @@ -# FileModificationTask - -## Overview - -The `FileModificationTask` is a specialized task implementation for creating new files or modifying existing code with AI-powered assistance. It extends `AbstractFileTask` and provides intelligent code generation and modification capabilities while maintaining code quality and project standards. - -## Purpose - -This task enables automated or semi-automated code modifications through AI assistance, supporting both file creation and modification workflows with proper diff generation, Git integration, and approval mechanisms. - -## Key Features - -- **AI-Powered Code Generation**: Uses ChatAgent to generate precise code modifications based on requirements -- **Diff Format Support**: Presents changes in standard diff format for easy review -- **Git Integration**: Optionally includes Git diffs with HEAD for context -- **Dual Mode Operation**: Supports both automated application and manual approval workflows -- **Multi-File Operations**: Handles complex operations across multiple files -- **Code Quality Maintenance**: Preserves coding standards and project conventions -- **Comprehensive Documentation**: Provides clear rationale for all changes - -## Configuration - -### FileModificationTaskConfigData - -The task configuration extends `FileTaskConfigBase` with the following parameters: - -| Parameter | Type | Description | Default | -|-----------|------|-------------|---------| -| `files` | `List?` | List of files to be modified | `null` | -| `related_files` | `List?` | Additional files for context | `null` | -| 
`extractContent` | `Boolean` | Whether to extract file content | `false` | -| `modifications` | `Any?` | Specific modifications to be made | `null` | -| `includeGitDiff` | `Boolean` | Include Git diff with HEAD | `false` | -| `task_description` | `String?` | Description of the modification task | `null` | -| `task_dependencies` | `List?` | List of dependent task descriptions | `null` | -| `state` | `TaskState?` | Current task state | `null` | - -## Core Functionality - -### 1. Git Diff Integration - -The task can retrieve Git diffs for files to provide additional context: - -```kotlin -private fun getGitDiff(filePath: String): String? -``` - -- Executes `git diff HEAD` for specified files -- Includes 10-second timeout for Git operations -- Handles errors gracefully with logging - -### 2. Input File Processing - -```kotlin -private fun getInputFileWithDiff(): String -``` - -- Combines file content with Git diffs when enabled -- Formats input for AI processing -- Provides comprehensive context for modifications - -### 3. AI-Powered Modification - -The task uses a ChatAgent with a detailed prompt that instructs the AI to: - -#### For Existing Files: -- Write efficient, readable, and maintainable code changes -- Ensure smooth integration with existing code -- Follow project coding standards -- Consider dependencies and side effects -- Provide clear context and rationale - -#### For New Files: -- Choose appropriate file locations and names -- Structure code according to project conventions -- Include necessary imports and dependencies -- Add comprehensive documentation -- Avoid duplication of existing functionality - -### 4. 
Response Format - -The AI generates responses in specific formats: - -- **Existing Files**: Uses diff code blocks with file path headers -- **New Files**: Uses language-specific code blocks with file path headers -- **Context Lines**: Includes 2 lines before and after changes in diffs -- **Separation**: Code blocks separated by blank lines - -## Execution Flow - -### 1. Validation Phase -```kotlin -// Checks for input files -if (files.isEmpty()) { - return "CONFIGURATION ERROR: No input files specified" -} -``` - -### 2. Processing Phase -- Creates new task in task manager -- Initializes ChatInterface with configured model -- Constructs ChatAgent with specialized prompt -- Processes input files and dependencies - -### 3. Generation Phase -- AI generates code modifications -- Formats response according to specifications -- Includes rationale and documentation - -### 4. Application Phase - -#### Auto-Fix Mode: -```kotlin -if (orchestrationConfig.autoFix) { - // Automatically applies changes - // Updates completion notes - // Marks task as complete -} -``` - -#### Manual Approval Mode: -- Presents changes with diff visualization -- Provides accept/reject buttons -- Waits for user confirmation - -### 5. 
Completion Phase -- Updates file system with approved changes -- Generates completion notes with file links -- Releases semaphore for synchronization - -## Integration Points - -### Dependencies -- `ChatAgent`: For AI-powered code generation -- `AddApplyFileDiffLinks`: For diff visualization and application -- `MarkdownUtil`: For rendering markdown output -- `Retryable`: For error recovery -- `SessionTask`: For task management - -### File System Operations -- Reads existing files from configured root directory -- Writes modified files back to file system -- Maintains relative path structure -- Handles file creation and modification - -### Git Integration -- Optional Git diff retrieval -- Provides version control context -- Helps understand recent changes - -## Error Handling - -### Configuration Errors -- Validates presence of input files -- Returns clear error messages -- Prevents execution with invalid configuration - -### Git Operation Errors -- Timeouts for Git commands (10 seconds) -- Graceful fallback when Git unavailable -- Warning logs for debugging - -### Execution Errors -- Wrapped in Retryable for automatic recovery -- Comprehensive error logging -- Semaphore-based synchronization - -## Usage Example - -```kotlin -// Configuration -val config = FileModificationTaskConfigData( - files = listOf("src/main/kotlin/MyClass.kt"), - related_files = listOf("src/test/kotlin/MyClassTest.kt"), - includeGitDiff = true, - task_description = "Add error handling to MyClass", - modifications = "Add try-catch blocks for IO operations" -) - -// Create and execute task -val task = FileModificationTask(orchestrationConfig, config) -task.run( - agent = orchestrator, - messages = listOf("Previous context"), - task = sessionTask, - resultFn = { result -> println("Completed: $result") }, - orchestrationConfig = config -) -``` - -## Output Format - -### Diff Format (Existing Files) -```diff -### src/utils/existingFile.js - function existingFunction() { -- return 'old result'; -+ 
return 'new result'; - } -``` - -### New File Format -```javascript -### src/utils/newFile.js -function newFunction() { - return 'new functionality'; -} -``` - -## Best Practices - -1. **File Selection**: Carefully specify input files to provide appropriate context -2. **Git Integration**: Enable `includeGitDiff` when working with recently modified files -3. **Dependencies**: List task dependencies for complex multi-step operations -4. **Auto-Fix Mode**: Use cautiously in production environments -5. **Description Clarity**: Provide clear task descriptions for better AI understanding - -## Performance Considerations - -- **Semaphore Synchronization**: Ensures proper task completion before proceeding -- **Timeout Management**: 10-second timeout for Git operations prevents hanging -- **Retryable Wrapper**: Automatic retry on transient failures -- **Async Execution**: Task runs in thread pool for non-blocking operation - -## Logging - -Uses SLF4J logging through `LoggerFactory`: -- `WARN`: Git operation failures and timeouts -- `WARN`: General execution errors -- Provides debugging information for troubleshooting - -## Limitations - -1. Git diff timeout fixed at 10 seconds -2. Requires file system access for modifications -3. AI model limitations affect code quality -4. No built-in rollback mechanism -5. 
Limited to text-based file modifications - -## See Also - -- [`AbstractFileTask`](./AbstractFileTask.md) - Base class for file operations -- [`FileSearchTask`](./FileSearchTask.md) - Related file search functionality -- [`TaskOrchestrator`](../../TaskOrchestrator.md) - Task orchestration system -- [`ChatAgent`](../../../actors/ChatAgent.md) - AI chat interface \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.kt index 26ee2804d..4452fac79 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.kt @@ -8,341 +8,351 @@ import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask +import java.io.FileOutputStream import java.nio.file.FileSystems import java.nio.file.Files -import java.nio.file.Path import java.util.regex.Pattern import kotlin.math.max class FileSearchTask( - orchestrationConfig: OrchestrationConfig, - planTask: SearchTaskExecutionConfigData? - ) : AbstractTask(orchestrationConfig, planTask) { - // SearchTaskConfigData remains the same - class SearchTaskExecutionConfigData( - @Description("The search pattern (substring or regex) to look for in the files") - val search_pattern: String = "", - @Description("Whether the search pattern is a regex (true) or a substring (false)") - val is_regex: Boolean = false, - @Description("The number of context lines to include before and after each match") - val context_lines: Int = 2, - @Description("The specific files (or file patterns) to be searched") - val input_files: List? 
= null, - @Description("Whether to extract and search text content from non-text files (PDF, HTML, etc.)") - val extractContent: Boolean = false, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null, - ) : ValidatedObject, TaskExecutionConfig( - task_type = FileSearch.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) { - override fun validate(): String? { - if (search_pattern.isBlank()) { - return "search_pattern cannot be blank" - } - if (context_lines < 0) { - return "context_lines must be non-negative" - } - // Delegate to parent validation for nested objects - return ValidatedObject.validateFields(this) - } + orchestrationConfig: OrchestrationConfig, + planTask: SearchTaskExecutionConfigData? +) : AbstractTask(orchestrationConfig, planTask) { + // SearchTaskConfigData remains the same + class SearchTaskExecutionConfigData( + @Description("The search pattern (substring or regex) to look for in the files") + val search_pattern: String = "", + @Description("Whether the search pattern is a regex (true) or a substring (false)") + val is_regex: Boolean = false, + @Description("The number of context lines to include before and after each match") + val context_lines: Int = 2, + @Description("The specific files (or file patterns) to be searched") + val input_files: List? = null, + @Description("Whether to extract and search text content from non-text files (PDF, HTML, etc.)") + val extractContent: Boolean = false, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + ) : ValidatedObject, TaskExecutionConfig( + task_type = FileSearch.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) { + override fun validate(): String? 
{ + if (search_pattern.isBlank()) { + return "search_pattern cannot be blank" + } + if (context_lines < 0) { + return "context_lines must be non-negative" + } + // Delegate to parent validation for nested objects + return ValidatedObject.validateFields(this) } - // promptSegment remains the same + } + // promptSegment remains the same - override fun promptSegment() = """ + override fun promptSegment() = """ FileSearch - Search for patterns in files and provide results with context * Specify the search pattern (substring or regex) * Specify whether the pattern is a regex or a substring * Specify the number of context lines to include * List files (incl glob patterns) to be searched Available files: -${getAvailableFiles(root).joinToString("\n") { " - $it" }} +${ + AnalysisTask.getAvailableFiles( + root + ).joinToString("\n") { " - $it" } + } """.trimIndent() - // run remains the same + // run remains the same - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val searchResults = performSearch() - val formattedResults = formatSearchResults(searchResults) - task.add(MarkdownUtil.renderMarkdown(formattedResults, ui = task.ui)) - resultFn(formattedResults) - } + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val searchResults = performSearch() + val formattedResults = formatSearchResults(searchResults) + val transcript = transcript(task) + transcript?.write(formattedResults.toByteArray()) + transcript?.close() + task.add(MarkdownUtil.renderMarkdown(formattedResults, ui = task.ui)) + resultFn(formattedResults) + } - // Temporary holder for a raw match within a file - private data class RawMatch(val lineNumber: Int, val lineContent: String) // lineNumber is 1-based + // Temporary holder for a raw match within a file + private data class 
RawMatch(val lineNumber: Int, val lineContent: String) // lineNumber is 1-based - // Represents a block of context that might contain multiple matches - private data class DisplayBlock( - val file: String, - val contextLines: List, // The actual lines of the combined context - val firstLineNumberInFile: Int, // 1-based line number in the original file for contextLines[0] - val matches: List // Original matches that fall into this block - ) + // Represents a block of context that might contain multiple matches + private data class DisplayBlock( + val file: String, + val contextLines: List, // The actual lines of the combined context + val firstLineNumberInFile: Int, // 1-based line number in the original file for contextLines[0] + val matches: List // Original matches that fall into this block + ) - // Info about each original match within a DisplayBlock - private data class MatchInBlock( - val originalLineNumber: Int, // 1-based line number in the file - val indexInDisplayBlockContext: Int // 0-based index within DisplayBlock.contextLines - ) + // Info about each original match within a DisplayBlock + private data class MatchInBlock( + val originalLineNumber: Int, // 1-based line number in the file + val indexInDisplayBlockContext: Int // 0-based index within DisplayBlock.contextLines + ) - private fun performSearch(): List { - val currentConfig = executionConfig - if (currentConfig == null) { - log.warn("FileSearchTask taskConfig is null. Cannot perform search.") - return emptyList() - } + private fun performSearch(): List { + val currentConfig = executionConfig + if (currentConfig == null) { + log.warn("FileSearchTask taskConfig is null. 
Cannot perform search.") + return emptyList() + } - val pattern = if (currentConfig.is_regex) { - Pattern.compile(currentConfig.search_pattern) - } else { - Pattern.compile(Pattern.quote(currentConfig.search_pattern)) - } + val pattern = if (currentConfig.is_regex) { + Pattern.compile(currentConfig.search_pattern) + } else { + Pattern.compile(Pattern.quote(currentConfig.search_pattern)) + } - return (currentConfig.input_files ?: emptyList()) - .flatMap { filePattern -> - val matcher = FileSystems.getDefault().getPathMatcher("glob:$filePattern") - FileSelectionUtils.filteredWalk(root.toFile()) { path -> - matcher.matches(root.relativize(path.toPath())) && !FileSelectionUtils.isLLMIgnored(path.toPath()) - }.map { it.toPath() }.flatMap { path -> - try { - val fileContentLines = if (currentConfig.extractContent && !isTextFile(path.toFile())) { - extractDocumentContent(path.toFile()).lines() - } else { - Files.readAllLines(path) - } - val relativePath = root.relativize(path).toString() + return (currentConfig.input_files ?: emptyList()) + .flatMap { filePattern -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$filePattern") + FileSelectionUtils.filteredWalk(root.toFile()) { path -> + matcher.matches(root.relativize(path.toPath())) && !FileSelectionUtils.isLLMIgnored(path.toPath()) + }.map { it.toPath() }.flatMap { path -> + try { + val fileContentLines = if (currentConfig.extractContent && !isTextFile(path.toFile())) { + extractDocumentContent(path.toFile()).lines() + } else { + Files.readAllLines(path) + } + val relativePath = root.relativize(path).toString() - // 1. Find all individual raw matches (line number and content) - val rawMatches = fileContentLines.mapIndexedNotNull { index, line -> - if (pattern.matcher(line).find()) { - RawMatch(lineNumber = index + 1, lineContent = line) // 1-based line number - } else null - } - if (rawMatches.isEmpty()) return@flatMap emptyList() - // 2. 
Group raw matches into DisplayBlocks - val combinedBlocks = mutableListOf() - var currentBlockAggregatedMatches = mutableListOf() - var currentBlockContextStartLineInFile = 0 // 1-based - var currentBlockContextEndLineInFile = 0 // 1-based - val contextLinesCount = currentConfig.context_lines - for (match in rawMatches) { // rawMatches are already sorted by line number - val matchIdealContextStart = (match.lineNumber - contextLinesCount).coerceAtLeast(1) - val matchIdealContextEnd = - (match.lineNumber + contextLinesCount).coerceAtMost(fileContentLines.size) - if (currentBlockAggregatedMatches.isEmpty() || matchIdealContextStart > currentBlockContextEndLineInFile + 1) { - // Finalize previous block if it exists - if (currentBlockAggregatedMatches.isNotEmpty()) { - val actualContext = fileContentLines.subList( - (currentBlockContextStartLineInFile - 1).coerceAtLeast(0), // to 0-based index - currentBlockContextEndLineInFile.coerceAtMost(fileContentLines.size) // exclusive end - ) - combinedBlocks.add( - DisplayBlock( - file = relativePath, - contextLines = actualContext, - firstLineNumberInFile = currentBlockContextStartLineInFile, - matches = currentBlockAggregatedMatches.map { aggMatch -> - MatchInBlock( - originalLineNumber = aggMatch.lineNumber, - indexInDisplayBlockContext = aggMatch.lineNumber - currentBlockContextStartLineInFile - ) - } - )) - } - // Start a new block - currentBlockAggregatedMatches = mutableListOf(match) - currentBlockContextStartLineInFile = matchIdealContextStart - currentBlockContextEndLineInFile = matchIdealContextEnd - } else { - // Merge with current block - currentBlockAggregatedMatches.add(match) - // currentBlockContextStartLineInFile remains the earliest start (already set) - currentBlockContextEndLineInFile = - max(currentBlockContextEndLineInFile, matchIdealContextEnd) - } - } - // Add the last processed block - if (currentBlockAggregatedMatches.isNotEmpty()) { - val actualContext = fileContentLines.subList( - 
(currentBlockContextStartLineInFile - 1).coerceAtLeast(0), - currentBlockContextEndLineInFile.coerceAtMost(fileContentLines.size) - ) - combinedBlocks.add( - DisplayBlock( - file = relativePath, - contextLines = actualContext, - firstLineNumberInFile = currentBlockContextStartLineInFile, - matches = currentBlockAggregatedMatches.map { aggMatch -> - MatchInBlock( - originalLineNumber = aggMatch.lineNumber, - indexInDisplayBlockContext = aggMatch.lineNumber - currentBlockContextStartLineInFile - ) - } - )) - } - combinedBlocks // Return list of blocks for this file - } catch (e: Exception) { - log.warn("Error processing file ${root.relativize(path)} for search: ${e.message}", e) - emptyList() - } + // 1. Find all individual raw matches (line number and content) + val rawMatches = fileContentLines.mapIndexedNotNull { index, line -> + if (pattern.matcher(line).find()) { + RawMatch(lineNumber = index + 1, lineContent = line) // 1-based line number + } else null + } + if (rawMatches.isEmpty()) return@flatMap emptyList() + // 2. 
Group raw matches into DisplayBlocks + val combinedBlocks = mutableListOf() + var currentBlockAggregatedMatches = mutableListOf() + var currentBlockContextStartLineInFile = 0 // 1-based + var currentBlockContextEndLineInFile = 0 // 1-based + val contextLinesCount = currentConfig.context_lines + for (match in rawMatches) { // rawMatches are already sorted by line number + val matchIdealContextStart = (match.lineNumber - contextLinesCount).coerceAtLeast(1) + val matchIdealContextEnd = + (match.lineNumber + contextLinesCount).coerceAtMost(fileContentLines.size) + if (currentBlockAggregatedMatches.isEmpty() || matchIdealContextStart > currentBlockContextEndLineInFile + 1) { + // Finalize previous block if it exists + if (currentBlockAggregatedMatches.isNotEmpty()) { + val actualContext = fileContentLines.subList( + (currentBlockContextStartLineInFile - 1).coerceAtLeast(0), // to 0-based index + currentBlockContextEndLineInFile.coerceAtMost(fileContentLines.size) // exclusive end + ) + combinedBlocks.add( + DisplayBlock( + file = relativePath, + contextLines = actualContext, + firstLineNumberInFile = currentBlockContextStartLineInFile, + matches = currentBlockAggregatedMatches.map { aggMatch -> + MatchInBlock( + originalLineNumber = aggMatch.lineNumber, + indexInDisplayBlockContext = aggMatch.lineNumber - currentBlockContextStartLineInFile + ) + } + )) } + // Start a new block + currentBlockAggregatedMatches = mutableListOf(match) + currentBlockContextStartLineInFile = matchIdealContextStart + currentBlockContextEndLineInFile = matchIdealContextEnd + } else { + // Merge with current block + currentBlockAggregatedMatches.add(match) + // currentBlockContextStartLineInFile remains the earliest start (already set) + currentBlockContextEndLineInFile = + max(currentBlockContextEndLineInFile, matchIdealContextEnd) + } } - } - - private fun isTextFile(file: java.io.File): Boolean { - val textExtensions = setOf( - "txt", - "md", - "kt", - "java", - "js", - "ts", - "py", - "rb", 
- "go", - "rs", - "c", - "cpp", - "h", - "hpp", - "css", - "html", - "xml", - "json", - "yaml", - "yml", - "properties", - "gradle", - "maven" - ) - return textExtensions.contains(file.extension.lowercase()) - } + // Add the last processed block + if (currentBlockAggregatedMatches.isNotEmpty()) { + val actualContext = fileContentLines.subList( + (currentBlockContextStartLineInFile - 1).coerceAtLeast(0), + currentBlockContextEndLineInFile.coerceAtMost(fileContentLines.size) + ) + combinedBlocks.add( + DisplayBlock( + file = relativePath, + contextLines = actualContext, + firstLineNumberInFile = currentBlockContextStartLineInFile, + matches = currentBlockAggregatedMatches.map { aggMatch -> + MatchInBlock( + originalLineNumber = aggMatch.lineNumber, + indexInDisplayBlockContext = aggMatch.lineNumber - currentBlockContextStartLineInFile + ) + } + )) + } + combinedBlocks // Return list of blocks for this file + } catch (e: Exception) { + log.warn("Error processing file ${root.relativize(path)} for search: ${e.message}", e) + emptyList() + } + } + } + } + private fun isTextFile(file: java.io.File): Boolean { + val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } - private fun formatSearchResults(results: List, maxLength: Int = 500000): String { - if (results.isEmpty()) { - return "# Search Results\n\nNo matches found." - } - val sb = StringBuilder() - val truncationMessage = "\n\n... (results truncated due to length limit)" - // Effective max length for content, allowing space for truncation message if needed. - // If maxLength is too small to even hold the truncation message, effectiveMaxLength might be 0 or negative. 
- val effectiveMaxLength = if (maxLength > truncationMessage.length) maxLength - truncationMessage.length else 0 + private fun formatSearchResults(results: List, maxLength: Int = 500000): String { + if (results.isEmpty()) { + return "# Search Results\n\nNo matches found." + } - var outputTruncated = false + val sb = StringBuilder() + val truncationMessage = "\n\n... (results truncated due to length limit)" + // Effective max length for content, allowing space for truncation message if needed. + // If maxLength is too small to even hold the truncation message, effectiveMaxLength might be 0 or negative. + val effectiveMaxLength = if (maxLength > truncationMessage.length) maxLength - truncationMessage.length else 0 - // Helper to append string segments, checking against effectiveMaxLength - fun StringBuilder.appendCheckingLength(str: String): Boolean { - if (this.length + str.length > effectiveMaxLength && effectiveMaxLength > 0) { // Check effectiveMaxLength > 0 to avoid issues if it's 0 - val remainingSpace = effectiveMaxLength - this.length - if (remainingSpace > 0) { - this.append(str.take(remainingSpace)) - } - outputTruncated = true - return false // Signal to stop further appends to main content - } else if (effectiveMaxLength <= 0 && maxLength > 0) { // Not enough space for content + truncation message - // This case means we can only fit a small part of the content or just the truncation message - outputTruncated = true - return false - } - this.append(str) - return true // Signal to continue - } + var outputTruncated = false - // Handle extremely small maxLength - if (maxLength <= 0) return "" - if (maxLength < 20 && results.isNotEmpty()) { // Arbitrary small number, too small for meaningful output - return truncationMessage.trimStart().take(maxLength) // Show a part of truncation message if possible + // Helper to append string segments, checking against effectiveMaxLength + fun StringBuilder.appendCheckingLength(str: String): Boolean { + if (this.length + 
str.length > effectiveMaxLength && effectiveMaxLength > 0) { // Check effectiveMaxLength > 0 to avoid issues if it's 0 + val remainingSpace = effectiveMaxLength - this.length + if (remainingSpace > 0) { + this.append(str.take(remainingSpace)) } + outputTruncated = true + return false // Signal to stop further appends to main content + } else if (effectiveMaxLength <= 0 && maxLength > 0) { // Not enough space for content + truncation message + // This case means we can only fit a small part of the content or just the truncation message + outputTruncated = true + return false + } + this.append(str) + return true // Signal to continue + } - if (!sb.appendCheckingLength("# Search Results\n\n")) { - if (outputTruncated) { // Append truncation message if space allows, within original maxLength - val finalMsg = truncationMessage.trimStart() - sb.clear() // Clear partially added header - sb.append(finalMsg.take(maxLength)) - } - return sb.toString() - } + // Handle extremely small maxLength + if (maxLength <= 0) return "" + if (maxLength < 20 && results.isNotEmpty()) { // Arbitrary small number, too small for meaningful output + return truncationMessage.trimStart().take(maxLength) // Show a part of truncation message if possible + } - val totalMatches = results.sumOf { it.matches.size } - val filesWithMatches = results.distinctBy { it.file }.size // Correctly counts files based on DisplayBlock.file - val summary = "Found $totalMatches match(es) in $filesWithMatches file(s).\n\n" - if (!sb.appendCheckingLength(summary)) { - if (outputTruncated) { // Append truncation message, ensuring total length <= maxLength - val spaceForMessage = maxLength - sb.length - if (spaceForMessage > 0) sb.append(truncationMessage.take(spaceForMessage)) - } - return sb.toString().take(maxLength) // Ensure final length constraint - } + if (!sb.appendCheckingLength("# Search Results\n\n")) { + if (outputTruncated) { // Append truncation message if space allows, within original maxLength + val 
finalMsg = truncationMessage.trimStart() + sb.clear() // Clear partially added header + sb.append(finalMsg.take(maxLength)) + } + return sb.toString() + } - results.groupBy { it.file }.forEach { (file, fileBlocks) -> // fileBlocks is List - if (outputTruncated) return@forEach + val totalMatches = results.sumOf { it.matches.size } + val filesWithMatches = results.distinctBy { it.file }.size // Correctly counts files based on DisplayBlock.file + val summary = "Found $totalMatches match(es) in $filesWithMatches file(s).\n\n" + if (!sb.appendCheckingLength(summary)) { + if (outputTruncated) { // Append truncation message, ensuring total length <= maxLength + val spaceForMessage = maxLength - sb.length + if (spaceForMessage > 0) sb.append(truncationMessage.take(spaceForMessage)) + } + return sb.toString().take(maxLength) // Ensure final length constraint + } - val fileHeader = "## $file\n\n" - if (!sb.appendCheckingLength(fileHeader)) return@forEach + results.groupBy { it.file }.forEach { (file, fileBlocks) -> // fileBlocks is List + if (outputTruncated) return@forEach - fileBlocks.forEach { block -> // Iterate over each DisplayBlock - if (outputTruncated) return@forEach + val fileHeader = "## $file\n\n" + if (!sb.appendCheckingLength(fileHeader)) return@forEach - val blockEndLine = block.firstLineNumberInFile + block.contextLines.size - 1 - val resultHeader = "### Lines ${block.firstLineNumberInFile} - $blockEndLine\n\n" + fileBlocks.forEach { block -> // Iterate over each DisplayBlock + if (outputTruncated) return@forEach - val contextBlockString = buildString { - appendLine("```") - block.contextLines.forEachIndexed { indexInBlock, lineContent -> - val actualLineNumber = block.firstLineNumberInFile + indexInBlock - // Check if this line is one of the actual matches - val isMatchedLine = block.matches.any { it.indexInDisplayBlockContext == indexInBlock } - val prefix = if (isMatchedLine) ">" else " " - appendLine("$prefix ${actualLineNumber.toString().padStart(5)}: 
$lineContent") - } - appendLine("```") - appendLine() // Extra newline after the block - } - val fullResultBlock = resultHeader + contextBlockString - if (!sb.appendCheckingLength(fullResultBlock)) return@forEach - } - } + val blockEndLine = block.firstLineNumberInFile + block.contextLines.size - 1 + val resultHeader = "### Lines ${block.firstLineNumberInFile} - $blockEndLine\n\n" - if (outputTruncated) { - val spaceForMessage = maxLength - sb.length - if (spaceForMessage > 0) { - sb.append(truncationMessage.take(spaceForMessage)) - } + val contextBlockString = buildString { + appendLine("```") + block.contextLines.forEachIndexed { indexInBlock, lineContent -> + val actualLineNumber = block.firstLineNumberInFile + indexInBlock + // Check if this line is one of the actual matches + val isMatchedLine = block.matches.any { it.indexInDisplayBlockContext == indexInBlock } + val prefix = if (isMatchedLine) ">" else " " + appendLine("$prefix ${actualLineNumber.toString().padStart(5)}: $lineContent") + } + appendLine("```") + appendLine() // Extra newline after the block } + val fullResultBlock = resultHeader + contextBlockString + if (!sb.appendCheckingLength(fullResultBlock)) return@forEach + } + } - return sb.toString().take(maxLength) // Final safeguard + if (outputTruncated) { + val spaceForMessage = maxLength - sb.length + if (spaceForMessage > 0) { + sb.append(truncationMessage.take(spaceForMessage)) + } } - companion object { - private val log = LoggerFactory.getLogger(FileSearchTask::class.java) - fun getAvailableFiles( - path: Path, - treatDocumentsAsText: Boolean = false, - ): List { - return try { - listOf(FileSelectionUtils.filteredWalkAsciiTree(path.toFile(), 20, treatDocumentsAsText = treatDocumentsAsText)) - } catch (e: Exception) { - log.error("Error listing available files", e) - listOf("Error listing files: ${e.message}") - } - } + return sb.toString().take(maxLength) // Final safeguard + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("search_transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing search transcript to $link html pdf" + ) + return markdownTranscript + } - val FileSearch = TaskType( - "FileSearch", - com.simiacryptus.cognotik.plan.tools.file.FileSearchTask.SearchTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Search project files using patterns with contextual results", - """ + + companion object { + private val log = LoggerFactory.getLogger(FileSearchTask::class.java) + + val FileSearch = TaskType( + "FileSearch", + SearchTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Search project files using patterns with contextual results", + """ Performs pattern-based searches across project files with context.
  • Supports both substring and regex search patterns
  • @@ -352,6 +362,6 @@ ${getAvailableFiles(root).joinToString("\n") { " - $it" }}
  • Provides organized, readable output format
""" - ) - } -} \ No newline at end of file + ) + } +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.md deleted file mode 100644 index af3453388..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/FileSearchTask.md +++ /dev/null @@ -1,199 +0,0 @@ -# FileSearchTask - -## Overview - -The `FileSearchTask` is a specialized task implementation that performs pattern-based searches across project files with contextual results. It supports both substring and regex search patterns, provides configurable context lines around matches, and presents results in an organized, readable format. - -## Purpose - -This task enables users to: -- Search for specific patterns or text across multiple files in a project -- Use either simple substring matching or complex regex patterns -- View search results with surrounding context for better understanding -- Extract and search content from non-text files (PDF, HTML, etc.) when needed -- Filter searches to specific files or file patterns using glob syntax - -## Configuration - -### SearchTaskConfigData - -The task is configured using the `SearchTaskConfigData` class with the following parameters: - -| Parameter | Type | Default | Description | -|-----------|------|---------|-------------| -| `search_pattern` | String | "" | The search pattern (substring or regex) to look for in the files | -| `is_regex` | Boolean | false | Whether the search pattern is a regex (true) or a substring (false) | -| `context_lines` | Int | 2 | The number of context lines to include before and after each match | -| `input_files` | List? | null | The specific files (or file patterns) to be searched | -| `extractContent` | Boolean | false | Whether to extract and search text content from non-text files (PDF, HTML, etc.) | -| `task_description` | String? 
| null | Optional description of the task | -| `task_dependencies` | List? | null | Optional list of task dependencies | -| `state` | TaskState? | null | Current state of the task | - -## Features - -### Search Capabilities - -1. **Pattern Types** - - **Substring Search**: Simple text matching (default) - - **Regex Search**: Complex pattern matching using Java regex syntax - -2. **File Selection** - - Supports glob patterns for file selection (e.g., `*.kt`, `src/**/*.java`) - - Automatically filters out LLM-ignored files - - Respects project file selection utilities - -3. **Context Display** - - Shows configurable number of lines before and after each match - - Groups nearby matches into combined context blocks to avoid redundancy - - Preserves line numbers from original files - -4. **Content Extraction** - - Can extract searchable text from non-text files (PDF, HTML, etc.) - - Automatically detects text file types based on extension - -### Output Format - -The search results are formatted as markdown with: -- Summary of total matches and files -- Results grouped by file -- Line numbers for easy reference -- Visual indicators (>) for matched lines -- Code blocks for context display - -## Implementation Details - -### Key Components - -1. **Data Structures** - - `RawMatch`: Represents a single match with line number and content - - `DisplayBlock`: Groups related matches with their context - - `MatchInBlock`: Maps matches to their position within a display block - -2. **Search Process** - ``` - 1. Parse search pattern (substring or regex) - 2. Iterate through specified files/patterns - 3. Read file content (or extract if needed) - 4. Find all matches in each file - 5. Group matches into context blocks - 6. Format results as markdown - ``` - -3. 
**Context Grouping Algorithm** - - Merges overlapping or adjacent context windows - - Minimizes redundant display of lines - - Preserves all match locations - -### Text File Detection - -The following extensions are recognized as text files: -- Programming: `kt`, `java`, `js`, `ts`, `py`, `rb`, `go`, `rs`, `c`, `cpp`, `h`, `hpp` -- Web: `css`, `html`, `xml`, `json` -- Configuration: `yaml`, `yml`, `properties`, `gradle`, `maven` -- Documentation: `txt`, `md` - -## Usage Examples - -### Basic Substring Search -```kotlin -SearchTaskConfigData( - search_pattern = "TODO", - is_regex = false, - context_lines = 2, - input_files = listOf("src/**/*.kt") -) -``` - -### Regex Pattern Search -```kotlin -SearchTaskConfigData( - search_pattern = "\\bclass\\s+\\w+Task\\b", - is_regex = true, - context_lines = 3, - input_files = listOf("**/*.kt", "**/*.java") -) -``` - -### Search with Content Extraction -```kotlin -SearchTaskConfigData( - search_pattern = "configuration", - is_regex = false, - context_lines = 1, - input_files = listOf("docs/**/*"), - extractContent = true -) -``` - -## Output Example - -```markdown -# Search Results - -Found 3 match(es) in 2 file(s). 
- -## src/main/kotlin/Example.kt - -### Lines 10 - 14 - -``` - 10: class ExampleClass { -> 11: // TODO: Implement this method - 12: fun doSomething() { - 13: println("Not implemented") - 14: } -``` - -### Lines 25 - 29 - -``` - 25: fun anotherMethod() { - 26: val result = calculate() -> 27: // TODO: Add error handling - 28: return result - 29: } -``` - -## src/test/kotlin/ExampleTest.kt - -### Lines 5 - 9 - -``` - 5: class ExampleTest { - 6: @Test -> 7: fun testSomething() { -> 8: // TODO: Write actual test - 9: assertTrue(true) -``` -``` - -## Error Handling - -The task handles various error scenarios: -- Invalid regex patterns are caught and reported -- File reading errors are logged with warnings -- Missing or inaccessible files are skipped -- Results are truncated if they exceed the maximum length limit - -## Performance Considerations - -1. **File Walking**: Uses filtered walk to avoid processing ignored files -2. **Memory Usage**: Processes files line-by-line when possible -3. **Result Truncation**: Limits output size to prevent memory issues (default 500KB) -4. 
**Context Grouping**: Reduces redundant display by merging overlapping contexts - -## Integration - -The `FileSearchTask` integrates with: -- `TaskOrchestrator`: For task execution and coordination -- `FileSelectionUtils`: For file filtering and selection -- `AbstractFileTask`: For content extraction capabilities -- `MarkdownUtil`: For rendering formatted results - -## Limitations - -- Binary files are not searchable unless content extraction is enabled -- Large files may impact performance -- Complex regex patterns may be slow on large codebases -- Context display is limited to prevent excessive output size \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GenerateImageTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GenerateImageTask.kt new file mode 100644 index 000000000..b1b5b5385 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GenerateImageTask.kt @@ -0,0 +1,183 @@ +package com.simiacryptus.cognotik.plan.tools.file + +import com.simiacryptus.cognotik.agents.ImageAndText +import com.simiacryptus.cognotik.agents.ImageModificationAgent +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.OrchestrationConfig +import com.simiacryptus.cognotik.plan.TaskOrchestrator +import com.simiacryptus.cognotik.plan.TaskType +import com.simiacryptus.cognotik.plan.TaskTypeConfig +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.MarkdownUtil +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import com.simiacryptus.cognotik.webui.session.SocketManager +import org.slf4j.Logger +import java.util.* +import javax.imageio.ImageIO + +class GenerateImageTask( + orchestrationConfig: OrchestrationConfig, + planTask: GenerateImageTaskExecutionConfigData? 
+) : AbstractFileTask(orchestrationConfig, planTask) { + + class GenerateImageTaskExecutionConfigData( + @Description("The image file to be created (relative path, must end with .png, .jpg, or .jpeg)") + files: List? = null, + @Description("Additional files for context (e.g., reference images, style guides)") + related_files: List? = null, + @Description("Detailed description of the image to generate including subject, style, composition, colors, mood, and any specific requirements") + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : ValidatedObject, FileTaskExecutionConfig( + task_type = GenerateImage.name, + task_description = task_description, + files = files, + related_files = related_files, + task_dependencies = task_dependencies, + state = state + ) { + override fun validate(): String? { + // Validate that at least one file is specified + if (files.isNullOrEmpty()) { + return "GenerateImageTask requires at least one file to be specified" + } + + // Validate that the file has a valid image extension + val imageFile = files.first() + if (!imageFile.matches(Regex(".*\\.(png|jpg|jpeg)$", RegexOption.IGNORE_CASE))) { + return "GenerateImageTask file must have .png, .jpg, or .jpeg extension: $imageFile" + } + return ValidatedObject.validateFields(this) + } + } + + override fun promptSegment(): String { + return """ +GenerateImage - Create images using AI image generation models + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val imageFiles = executionConfig?.files ?: emptyList() + if (imageFiles.isEmpty()) { + resultFn("CONFIGURATION ERROR: No image file specified") + return + } + + val imageFile = imageFiles.first() + if (!imageFile.matches(Regex(".*\\.(png|jpg|jpeg)$", RegexOption.IGNORE_CASE))) { + resultFn("CONFIGURATION ERROR: File must have .png, .jpg, 
or .jpeg extension: $imageFile") + return + } + + task.add(MarkdownUtil.renderMarkdown("## Generating Image: `$imageFile`", ui = task.ui)) + + val contextFiles = getInputFileCode() + val priorCode = getPriorCode(agent.executionState) + + // Build the image generation prompt + val imagePrompt = buildString { + append(executionConfig?.task_description ?: "Generate an image") + + if (contextFiles.isNotEmpty()) { + append("\n\nContext from related files:\n") + append(contextFiles) + } + + if (priorCode.isNotEmpty()) { + append("\n\nPrevious task results:\n") + append(priorCode) + } + } + + task.add(MarkdownUtil.renderMarkdown("### Image Generation Prompt", ui = task.ui)) + task.add(MarkdownUtil.renderMarkdown("```\n$imagePrompt\n```", ui = task.ui)) + + try { + // Generate the image + task.add(MarkdownUtil.renderMarkdown("### Generating image...", ui = task.ui)) + + // Use the image generation agent + val imageAgent = ImageModificationAgent( + prompt = "Transform the user request into an image", + name = "ImageGenerator", + model = orchestrationConfig.imageChatChatter, + ) + + val result = imageAgent.answer(listOf(ImageAndText(imagePrompt))) + val generatedImage = result.image + val optimizedPrompt = result.text + + task.add(MarkdownUtil.renderMarkdown("### Optimized Prompt Used", ui = task.ui)) + task.add(MarkdownUtil.renderMarkdown("```\n$optimizedPrompt\n```", ui = task.ui)) + + // Display the generated image + task.add(MarkdownUtil.renderMarkdown("### Generated Image Preview", ui = task.ui)) + val filename = "preview_" + UUID.randomUUID() + ".png" + ImageIO.write(generatedImage, "png", task.resolve(filename)!!) 
+ val previewLink = task.linkTo(filename) + task.add("""""") + + // Save the image + val outputPath = root.resolve(imageFile) + outputPath.toFile().parentFile?.mkdirs() + + val format = when { + imageFile.endsWith(".png", ignoreCase = true) -> "png" + imageFile.endsWith(".jpg", ignoreCase = true) -> "jpg" + imageFile.endsWith(".jpeg", ignoreCase = true) -> "jpeg" + else -> "png" + } + + ImageIO.write(generatedImage, format, outputPath.toFile()) + + val summary = "Successfully generated and saved image to $imageFile." + task.complete(summary) + task.add(""" created""") + resultFn(summary) + + } catch (e: Exception) { + log.error("Error generating image", e) + task.error(e) + resultFn("ERROR: ${e.message}") + } + } + + override fun acceptButtonFooter(ui: SocketManager, fn: () -> Unit): String { + val acceptLink = ui.hrefLink("Accept and Save Image") { + fn() + } + return """ + | + |--- + | + |$acceptLink + """.trimMargin() + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(GenerateImageTask::class.java) + val GenerateImage = TaskType( + "GenerateImage", + GenerateImageTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate images using AI image generation models", + """ + Creates images from text descriptions using AI models like DALL-E. +
    +
  • Generates high-quality images from detailed prompts
  • +
  • Context-aware generation using related files
  • +
  • Integration with previous task results
  • +
+ """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GeneratePresentationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GeneratePresentationTask.kt index c4047546d..6f92ea821 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GeneratePresentationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/GeneratePresentationTask.kt @@ -1,6 +1,8 @@ package com.simiacryptus.cognotik.plan.tools.file -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ImageAndText +import com.simiacryptus.cognotik.agents.ImageModificationAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.OrchestrationConfig import com.simiacryptus.cognotik.plan.TaskOrchestrator @@ -12,6 +14,8 @@ import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.SocketManager import org.slf4j.Logger +import java.io.FileOutputStream +import javax.imageio.ImageIO class GeneratePresentationTask( orchestrationConfig: OrchestrationConfig, @@ -25,6 +29,16 @@ class GeneratePresentationTask( related_files: List? = null, @Description("Detailed description of the presentation including topic, key points, target audience, and desired style") task_description: String? 
= null, + @Description("Whether to generate images for key slides") + val generate_images: Boolean = false, + @Description("Image generation model to use (e.g., 'DallE3', 'DallE2')") + val image_model: String = "DallE3", + @Description("Width of generated images in pixels") + val image_width: Int = 1024, + @Description("Height of generated images in pixels") + val image_height: Int = 1024, + @Description("Maximum number of images to generate (1-10)") + val max_images: Int = 5, task_dependencies: List? = null, state: TaskState? = TaskState.Pending, ) : ValidatedObject, FileTaskExecutionConfig( @@ -40,20 +54,29 @@ class GeneratePresentationTask( if (files.isNullOrEmpty()) { return "GeneratePresentationTask requires at least one file to be specified" } - + // Validate that the file has .html extension val htmlFile = files.first() if (!htmlFile.endsWith(".html", ignoreCase = true)) { return "GeneratePresentationTask file must have .html extension: $htmlFile" } - + if (image_width < 256 || image_width > 2048) { + return "Image width must be between 256 and 2048, got: $image_width" + } + if (image_height < 256 || image_height > 2048) { + return "Image height must be between 256 and 2048, got: $image_height" + } + if (max_images < 1 || max_images > 10) { + return "Max images must be between 1 and 10, got: $max_images" + } + return ValidatedObject.validateFields(this) } } override fun promptSegment(): String { return """ -GeneratePresentation - Create a Reveal.js presentation with custom styling + GeneratePresentation - Create a Reveal.js presentation with custom styling ** Specify the HTML presentation file path in the files array (must end with .html) ** Provide a detailed description including: - Presentation topic and title @@ -67,6 +90,7 @@ GeneratePresentation - Create a Reveal.js presentation with custom styling - Custom CSS file (presentation.css) for styling - Autoplay controls and voice selection UI - Proper accessibility features + - Optional AI-generated images 
for key slides ** Related files can include reference materials or existing presentations ** Output will be presented for review before being written to disk """.trimIndent() @@ -169,15 +193,30 @@ Provide ONLY the slide sections within a code block (no DOCTYPE, html, head, or newTask.add(MarkdownUtil.renderMarkdown("### Step 1: Generating Presentation Structure", ui = ui)) - val slideContent = extractCodeFromResponse(chatAgent.answer(toInput(outlinePrompt)), "html") + val response = chatAgent.answer(toInput(outlinePrompt)) + val slideContent = extractCodeFromResponse(response, "html") if (slideContent.isEmpty()) { resultFn("ERROR: Failed to generate presentation structure") return } + // Step 1.5: Generate images for key slides if enabled + val imageMap = mutableMapOf() + if (executionConfig?.generate_images != false) { + newTask.add(MarkdownUtil.renderMarkdown("### Step 1.5: Generating Images for Key Slides", ui = ui)) + imageMap.putAll(generateSlideImages(slideContent, task, orchestrationConfig, newTask)) + } + // Extract title from first slide for the HTML template val titleMatch = "

(.*?)

".toRegex().find(slideContent) val presentationTitle = titleMatch?.groupValues?.get(1) ?: "Presentation" + // Inject images into slide content + val enhancedSlideContent = if (imageMap.isNotEmpty()) { + injectImagesIntoSlides(slideContent, imageMap) + } else { + slideContent + } + // Wrap slides in the HTML template val htmlStructure = """ @@ -202,7 +241,7 @@ Provide ONLY the slide sections within a code block (no DOCTYPE, html, head, or
-$slideContent +$enhancedSlideContent
@@ -267,6 +306,9 @@ Provide only the CSS code within a code block: newTask.add(MarkdownUtil.renderMarkdown("### Step 3: Generating Presentation JavaScript", ui = ui)) filesToWrite.add(htmlFile to htmlStructure) filesToWrite.add("presentation.css" to (standardCss + "\n\n" + cssCode)) + // Generate transcript + val transcriptStream = transcript(task, slideContent, presentationTitle) + transcriptStream?.close() // Display preview @@ -331,6 +373,193 @@ Provide only the CSS code within a code block: return "" } + private fun transcript(task: SessionTask, slideContent: String, presentationTitle: String): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + if (markdownTranscript != null) { + try { + // Write transcript header + markdownTranscript.write("# $presentationTitle - Transcript\n\n".toByteArray()) + markdownTranscript.write("Generated: ${java.time.LocalDateTime.now()}\n\n".toByteArray()) + markdownTranscript.write("---\n\n".toByteArray()) + // Extract content from slides + val sectionRegex = "]*>(.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val sections = sectionRegex.findAll(slideContent) + var slideNumber = 0 + sections.forEach { section -> + slideNumber++ + val sectionContent = section.groupValues[1] + // Extract heading + val headingRegex = "]*>(.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val heading = headingRegex.find(sectionContent)?.groupValues?.get(1)?.replace(Regex("<[^>]+>"), "")?.trim() + // Extract speaker notes + val notesRegex = "]*class=\"notes\"[^>]*>(.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val notes = notesRegex.find(sectionContent)?.groupValues?.get(1)?.replace(Regex("<[^>]+>"), "")?.trim() + markdownTranscript.write("## Slide $slideNumber${if (heading != null) ": $heading" else ""}\n\n".toByteArray()) + if (notes != null && notes.isNotEmpty()) { + markdownTranscript.write("$notes\n\n".toByteArray()) + } + } + } catch (e: Exception) { 
+ log.error("Error writing transcript", e) + } + task.complete( + "Writing transcript to $link html pdf" + ) + } + return markdownTranscript + } + + private fun generateSlideImages( + slideContent: String, + task: SessionTask, + orchestrationConfig: OrchestrationConfig, + newTask: SessionTask + ): Map { + val imageMap = mutableMapOf() + try { + // Extract slides and identify key ones for image generation + val sectionRegex = "]*>(.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val sections = sectionRegex.findAll(slideContent).toList() + val maxImages = executionConfig?.max_images?.coerceIn(1, 10) ?: 3 + val slideIndices = selectSlidesForImages(sections.size, maxImages) + newTask.add( + MarkdownUtil.renderMarkdown( + "Generating images for ${slideIndices.size} slides (indices: ${slideIndices.joinToString(", ")})", + ui = task.ui + ) + ) + slideIndices.forEachIndexed { idx, slideIndex -> + if (slideIndex >= sections.size) return@forEachIndexed + val section = sections[slideIndex] + val sectionContent = section.groupValues[1] + // Extract heading and content for image prompt + val headingRegex = "]*>(.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val heading = headingRegex.find(sectionContent)?.groupValues?.get(1) + ?.replace(Regex("<[^>]+>"), "")?.trim() ?: "Slide ${slideIndex + 1}" + // Extract text content (remove HTML tags) + val textContent = sectionContent + .replace(Regex("]*>.*?", RegexOption.DOT_MATCHES_ALL), "") + .replace(Regex("<[^>]+>"), " ") + .replace(Regex("\\s+"), " ") + .trim() + .take(200) + val imageFilename = "slide_${slideIndex + 1}_image.png" + try { + newTask.add( + MarkdownUtil.renderMarkdown( + "Generating image ${idx + 1}/${slideIndices.size}: $heading", + ui = task.ui + ) + ) + val imageAgent = ImageModificationAgent( + prompt = "Create a professional, visually appealing image for a presentation slide", + model = orchestrationConfig.imageChatChatter, + temperature = 0.7, + ) + val imagePrompt = """ +Create a professional presentation slide image 
for: +Title: $heading +Content: $textContent +Style: Clean, modern, professional presentation aesthetic + """.trimIndent() + val result = imageAgent.answer(listOf(ImageAndText(imagePrompt))) + val image = result.image + // Save image + val imageFile = task.resolve(imageFilename)!! + ImageIO.write(image, "png", imageFile) + imageMap[slideIndex] = imageFilename + newTask.add( + MarkdownUtil.renderMarkdown( + "✅ Generated image for slide ${slideIndex + 1}: [${imageFilename}](${task.linkTo(imageFilename)})", + ui = task.ui + ) + ) + log.debug("Generated image for slide ${slideIndex + 1}: $imageFilename") + } catch (e: Exception) { + log.error("Failed to generate image for slide ${slideIndex + 1}", e) + newTask.add( + MarkdownUtil.renderMarkdown( + "⚠️ Failed to generate image for slide ${slideIndex + 1}: ${e.message}", + ui = task.ui + ) + ) + } + } + } catch (e: Exception) { + log.error("Error during image generation", e) + newTask.add( + MarkdownUtil.renderMarkdown( + "⚠️ Image generation encountered errors: ${e.message}", + ui = task.ui + ) + ) + } + return imageMap + } + + private fun selectSlidesForImages(totalSlides: Int, maxImages: Int): List { + if (totalSlides <= 1) return emptyList() + // Skip title slide (index 0), select evenly distributed slides + val availableSlides = totalSlides - 1 + val numImages = minOf(maxImages, availableSlides) + if (numImages <= 0) return emptyList() + val indices = mutableListOf() + val step = availableSlides.toDouble() / numImages + for (i in 0 until numImages) { + val index = (1 + (i * step)).toInt().coerceIn(1, totalSlides - 1) + if (!indices.contains(index)) { + indices.add(index) + } + } + return indices.sorted() + } + + private fun injectImagesIntoSlides(slideContent: String, imageMap: Map): String { + val sectionRegex = "]*>(.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val sections = sectionRegex.findAll(slideContent).toList() + val result = StringBuilder() + sections.forEachIndexed { index, section -> + val sectionContent 
= section.groupValues[1] + if (imageMap.containsKey(index)) { + val imageFilename = imageMap[index]!! + // Find the position to insert the image (after the heading, before content) + val headingRegex = "(]*>.*?)".toRegex(RegexOption.DOT_MATCHES_ALL) + val headingMatch = headingRegex.find(sectionContent) + val enhancedContent = if (headingMatch != null) { + val heading = headingMatch.value + val afterHeading = sectionContent.substring(headingMatch.range.last + 1) + val imageHtml = """ +
+ Slide visual +
+""".trimIndent() + heading + "\n" + imageHtml + afterHeading + } else { + // No heading found, prepend image + val imageHtml = """ +
+ Slide visual +
+""".trimIndent() + imageHtml + "\n" + sectionContent + } + result.append("
") + result.append(enhancedContent) + result.append("
\n\n") + } else { + result.append(section.value) + result.append("\n\n") + } + } + return result.toString().trim() + } + + override fun acceptButtonFooter(ui: SocketManager, fn: () -> Unit): String { val acceptLink = ui.hrefLink("Accept and Write Files") { fn() @@ -357,6 +586,7 @@ Provide only the CSS code within a code block:
  • Includes Reveal.js framework integration
  • Adds speaker notes for each slide
  • Supports custom styling and themes
  • +
  • Optional AI-generated images for key slides
  • Interactive approval or auto-apply mode
  • Includes navigation and progress indicators
  • Optional audio narration support
  • diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/WriteHtmlTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/WriteHtmlTask.kt index 233422c75..266f31de4 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/WriteHtmlTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/file/WriteHtmlTask.kt @@ -1,6 +1,9 @@ package com.simiacryptus.cognotik.plan.tools.file -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ImageAndText +import com.simiacryptus.cognotik.agents.ImageModificationAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.OrchestrationConfig import com.simiacryptus.cognotik.plan.TaskOrchestrator @@ -9,119 +12,144 @@ import com.simiacryptus.cognotik.plan.TaskTypeConfig import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.chat.transcriptFilter import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.SocketManager +import com.simiacryptus.cognotik.webui.session.getChildClient import org.slf4j.Logger +import javax.imageio.ImageIO class WriteHtmlTask( - orchestrationConfig: OrchestrationConfig, - planTask: WriteHtmlTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: WriteHtmlTaskExecutionConfigData? ) : AbstractFileTask(orchestrationConfig, planTask) { - class WriteHtmlTaskExecutionConfigData( - @Description("The HTML file to be created (relative path, must end with .html)") - files: List? = null, - @Description("Additional files for context (e.g., existing HTML templates, related files)") - related_files: List? 
= null, - @Description("Detailed description of the HTML page to create, including layout, styling, and functionality requirements") - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = TaskState.Pending, - ) : ValidatedObject, FileTaskExecutionConfig( - task_type = WriteHtml.name, - task_description = task_description, - files = files, - related_files = related_files, - task_dependencies = task_dependencies, - state = state - ) { - override fun validate(): String? { - // Validate that files list is not empty - if (files.isNullOrEmpty()) { - return "WriteHtmlTaskExecutionConfigData: files list cannot be null or empty" - } - - // Validate that the file has .html extension - val htmlFile = files.first() - if (!htmlFile.endsWith(".html", ignoreCase = true)) { - return "WriteHtmlTaskExecutionConfigData: file must have .html extension, got: $htmlFile" - } - - // Validate task description is provided - if (task_description.isNullOrBlank()) { - return "WriteHtmlTaskExecutionConfigData: task_description cannot be null or blank" - } - - // Call parent validation - return super.validate() - } + class WriteHtmlTaskExecutionConfigData( + @Description("The HTML file to be created (relative path, must end with .html)") + files: List? = null, + @Description("Additional files for context (e.g., existing HTML templates, related files)") + related_files: List? = null, + @Description("Detailed description of the HTML page to create, including layout, styling, and functionality requirements") + task_description: String? = null, + @Description("Whether to generate images for the HTML page") + val generate_images: Boolean = false, + @Description("Number of images to generate (valid range: 0-10)") + var image_count: Int = 0, + task_dependencies: List? = null, + state: TaskState? 
= TaskState.Pending, + ) : ValidatedObject, FileTaskExecutionConfig( + task_type = WriteHtml.name, + task_description = task_description, + files = files, + related_files = related_files, + task_dependencies = task_dependencies, + state = state + ) { + override fun validate(): String? { + // Validate that files list is not empty + if (files.isNullOrEmpty()) { + return "WriteHtmlTaskExecutionConfigData: files list cannot be null or empty" + } + + // Validate that the file has .html extension + val htmlFile = files.first() + if (!htmlFile.endsWith(".html", ignoreCase = true)) { + return "WriteHtmlTaskExecutionConfigData: file must have .html extension, got: $htmlFile" + } + + // Validate task description is provided + if (task_description.isNullOrBlank()) { + return "WriteHtmlTaskExecutionConfigData: task_description cannot be null or blank" + } + // Validate image count + if (image_count < 0 || image_count > 10) { + image_count = image_count.coerceIn(0, 10) + } + + // Call parent validation + return super.validate() } + } - init { - // Validate the configuration on initialization - planTask?.validate()?.let { errorMessage -> - throw ValidatedObject.ValidationError(errorMessage, planTask) - } + init { + // Validate the configuration on initialization + planTask?.validate()?.let { errorMessage -> + throw ValidatedObject.ValidationError(errorMessage, planTask) } + } - override fun promptSegment(): String { - return """ -WriteHtml - Create a complete HTML file with embedded CSS and JavaScript + override fun promptSegment(): String { + return """ + WriteHtml - Create a complete HTML file with embedded CSS and JavaScript ** Specify the HTML file path in the files array (must end with .html) ** Provide a detailed description of the page requirements including: - Layout and structure - Styling requirements (colors, fonts, spacing, etc.) 
- Interactive functionality needed - Any specific HTML5 features to use + - Image requirements (if generate_images is enabled) ** The generated HTML will be a complete, self-contained document with: - Proper HTML5 structure (, , , ) - Embedded CSS within \n") + } + append(afterHeadBeforeBody) + if (jsCode.isNotEmpty()) { + append("\n \n") + } + append(afterBody) + } + } + + private fun parseImageSpecs(response: String): List> { + val specs = mutableListOf>() + val lines = response.lines() + var currentFilename: String? = null + var currentDescription: String? = null + for (line in lines) { + when { + line.startsWith("IMAGE:", ignoreCase = true) -> { + // Save previous spec if exists + if (currentFilename != null && currentDescription != null) { + specs.add(currentFilename to currentDescription) + } + currentFilename = line.substringAfter(":", "").trim() + currentDescription = null } - val beforeHead = htmlStructure.substring(0, headEndIndex) - val afterHeadBeforeBody = htmlStructure.substring(headEndIndex, bodyEndIndex) - val afterBody = htmlStructure.substring(bodyEndIndex) - - return buildString { - append(beforeHead) - if (cssCode.isNotEmpty()) { - append("\n \n") - } - append(afterHeadBeforeBody) - if (jsCode.isNotEmpty()) { - append("\n \n") - } - append(afterBody) + line.startsWith("DESCRIPTION:", ignoreCase = true) -> { + currentDescription = line.substringAfter(":", "").trim() } - } - override fun acceptButtonFooter(ui: SocketManager, fn: () -> Unit): String { - val acceptLink = ui.hrefLink("Accept and Write File") { - fn() + currentDescription != null && line.isNotBlank() -> { + // Continue multi-line description + currentDescription += " " + line.trim() } - return """ + } + } + // Save last spec + if (currentFilename != null && currentDescription != null) { + specs.add(currentFilename to currentDescription) + } + return specs + } + + private fun insertImageReferences( + htmlStructure: String, + generatedImages: List>, + chatAgent: ChatAgent, + toInput: 
(String) -> List, + transcriptWriter: java.io.BufferedWriter?, + newTask: SessionTask, + ui: SocketManager + ): String { + if (generatedImages.isEmpty()) { + return htmlStructure + } + newTask.add(MarkdownUtil.renderMarkdown("### Step 3.5: Inserting Image References", ui = ui)) + transcriptWriter?.write("### Step 3.5: Inserting Image References\n\n") + val imageList = generatedImages.joinToString("\n") { (filename, description) -> + "- $filename: $description" + } + val imageInsertPrompt = """ +You need to insert image references into the HTML structure. +## Current HTML Structure: +```html +$htmlStructure +``` +## Generated Images: +$imageList +## Instructions: +1. Insert tags at appropriate locations in the HTML where these images should appear +2. Use the given PNG filename (e.g., "filename.png") for the src attribute +3. Add appropriate alt text based on the image description +4. Add appropriate class names for styling +5. Consider the semantic meaning of where each image should go (hero sections, content areas, etc.) +6. Maintain the existing HTML structure and class names +7. Do NOT add any CSS or JavaScript - just insert the tags +## Output Format: +Provide the complete updated HTML structure within a code block: +```html + +... 
+``` + """.trimIndent() + transcriptWriter?.write("**Prompt:**\n```\n$imageInsertPrompt\n```\n\n") + val imageInsertResponse = chatAgent.answer(toInput(imageInsertPrompt)) + transcriptWriter?.write("**Response:**\n$imageInsertResponse\n\n") + val updatedHtml = extractCodeFromResponse(imageInsertResponse, "html") + return if (updatedHtml.isNotEmpty()) { + newTask.add(MarkdownUtil.renderMarkdown("✅ Successfully inserted ${generatedImages.size} image reference(s)", ui = ui)) + updatedHtml + } else { + log.warn("Failed to insert image references, using original HTML structure") + newTask.add(MarkdownUtil.renderMarkdown("⚠️ Failed to insert image references, using original structure", ui = ui)) + htmlStructure + } + } + + + override fun acceptButtonFooter(ui: SocketManager, fn: () -> Unit): String { + val acceptLink = ui.hrefLink("Accept and Write File") { + fn() + } + return """ | |--- | |$acceptLink """.trimMargin() - } - - companion object { - private val log: Logger = LoggerFactory.getLogger(WriteHtmlTask::class.java) - val WriteHtml = TaskType( - "WriteHtml", - WriteHtmlTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Create complete HTML files with embedded CSS and JavaScript", - """ + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(WriteHtmlTask::class.java) + val WriteHtml = TaskType( + "WriteHtml", + WriteHtmlTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Create complete HTML files with embedded CSS and JavaScript", + """ Creates standalone HTML files with embedded CSS and JavaScript.
    • Generates complete, self-contained HTML documents
    • Embeds CSS styles within <style> tags
    • Embeds JavaScript within <script> tags
    • Supports modern HTML5 features
    • Can generate images using AI image models
    • Automatically creates image directory and references
    • Interactive approval or auto-apply mode
    • Proper HTML structure and formatting
    """ - ) - } + ) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/DataTableCompilationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/DataTableCompilationTask.kt index 7292a3b11..44133ff0d 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/DataTableCompilationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/DataTableCompilationTask.kt @@ -1,24 +1,16 @@ package com.simiacryptus.cognotik.plan.tools.graph import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description -import com.simiacryptus.cognotik.plan.AbstractTask -import com.simiacryptus.cognotik.plan.OrchestrationConfig -import com.simiacryptus.cognotik.plan.TaskContextYamlDescriber -import com.simiacryptus.cognotik.plan.TaskExecutionConfig -import com.simiacryptus.cognotik.plan.TaskOrchestrator -import com.simiacryptus.cognotik.plan.TaskTypeConfig +import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient -import java.io.BufferedWriter -import java.io.File -import java.io.FileWriter -import java.io.StringWriter +import java.io.* import java.nio.file.FileSystems import java.nio.file.Files import java.nio.file.Path @@ -27,39 +19,39 @@ import kotlin.io.path.isRegularFile import kotlin.io.path.name class DataTableCompilationTask( - orchestrationConfig: OrchestrationConfig, - planTask: 
DataTableCompilationTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: DataTableCompilationTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class DataTableCompilationTaskExecutionConfigData( - @Description("List of file glob patterns to include in the data compilation") - val file_patterns: List = listOf(), - @Description("REQUIRED: Output file path where the compiled data table will be saved (CSV or JSON)") - val output_file: String = "compiled_data.json", - @Description("Instructions for identifying rows in the data") - val row_identification_instructions: String = "", - @Description("Instructions for identifying columns in the data") - val column_identification_instructions: String = "", - @Description("Instructions for extracting cell data") - val cell_extraction_instructions: String = "", - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null - ) : TaskExecutionConfig( - task_type = "DataTableCompilation", - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) - - data class Rows(val rows: List = listOf()) - data class Row(val id: String = "", val sourceFiles: List = listOf()) - data class Columns(val columns: List = listOf()) - data class Column(val id: String = "", val name: String = "", val description: String = "") - data class RowData(val rowId: String, val data: Map) - data class TableData(val rows: List>, val columns: List) - - override fun promptSegment() = """ + class DataTableCompilationTaskExecutionConfigData( + @Description("List of file glob patterns to include in the data compilation") + val file_patterns: List = listOf(), + @Description("REQUIRED: Output file path where the compiled data table will be saved (CSV or JSON)") + val output_file: String = "compiled_data.json", + @Description("Instructions for identifying rows in the data") + val row_identification_instructions: String = "", 
+ @Description("Instructions for identifying columns in the data") + val column_identification_instructions: String = "", + @Description("Instructions for extracting cell data") + val cell_extraction_instructions: String = "", + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null + ) : TaskExecutionConfig( + task_type = "DataTableCompilation", + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) + + data class Rows(val rows: List = listOf()) + data class Row(val id: String = "", val sourceFiles: List = listOf()) + data class Columns(val columns: List = listOf()) + data class Column(val id: String = "", val name: String = "", val description: String = "") + data class RowData(val rowId: String, val data: Map) + data class TableData(val rows: List>, val columns: List) + + override fun promptSegment() = """ DataTableCompilation - Compile structured data tables from multiple files ** Specify file glob patterns to include in the compilation ** Define instructions for identifying rows in the data @@ -68,66 +60,85 @@ class DataTableCompilationTask( ** Specify output file path for the compiled table """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - - task.add(MarkdownUtil.renderMarkdown("## Step 1: Collecting files from patterns")) - val result = mutableListOf() - val basePath = Paths.get(orchestrationConfig.absoluteWorkingDir ?: ".") - executionConfig?.file_patterns?.forEach { pattern -> - val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") - Files.walk(basePath).use { paths -> - paths - .filter { it.isRegularFile() } - .filter { matcher.matches(basePath.relativize(it)) } - .forEach { result.add(it) } - } - } - val matchedFiles = result.distinct() - if (matchedFiles.isEmpty()) { - val errorMsg = "No files matched 
the provided patterns: ${executionConfig?.file_patterns?.joinToString(", ")}" - task.error(Exception(errorMsg)) - resultFn(errorMsg) - return - } - task.add(MarkdownUtil.renderMarkdown("Found ${matchedFiles.size} files matching the patterns")) + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val transcript = transcript(task) + transcript?.let { out -> + out.write("# Data Table Compilation Task\n\n".toByteArray()) + out.write("## Configuration\n\n".toByteArray()) + out.write("- File Patterns: ${executionConfig?.file_patterns?.joinToString(", ")}\n".toByteArray()) + out.write("- Output File: ${executionConfig?.output_file}\n\n".toByteArray()) + } - val fileContentString = matchedFiles.joinToString("\n\n") { file -> - val content = readFileContent(file) - "### ${file.name}\n```\n${content.take(1000)}${if (content.length > 1000) "..." else ""}\n```" - } + task.add(MarkdownUtil.renderMarkdown("## Step 1: Collecting files from patterns")) + val result = mutableListOf() + val basePath = Paths.get(orchestrationConfig.absoluteWorkingDir ?: ".") + executionConfig?.file_patterns?.forEach { pattern -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + Files.walk(basePath).use { paths -> + paths + .filter { it.isRegularFile() } + .filter { matcher.matches(basePath.relativize(it)) } + .forEach { result.add(it) } + } + } + val matchedFiles = result.distinct() + if (matchedFiles.isEmpty()) { + val errorMsg = "No files matched the provided patterns: ${executionConfig?.file_patterns?.joinToString(", ")}" + transcript?.let { out -> + out.write("### Error\n\n".toByteArray()) + out.write("$errorMsg\n\n".toByteArray()) + } + task.error(Exception(errorMsg)) + resultFn(errorMsg) + return + } + task.add(MarkdownUtil.renderMarkdown("Found ${matchedFiles.size} files matching the patterns")) + transcript?.let { out -> + out.write("## Step 1: File 
Collection\n\n".toByteArray()) + out.write("Found ${matchedFiles.size} files:\n\n".toByteArray()) + matchedFiles.forEach { file -> + out.write("- ${file.name}\n".toByteArray()) + } + out.write("\n".toByteArray()) + } + + val fileContentString = matchedFiles.joinToString("\n\n") { file -> + val content = readFileContent(file) + "### ${file.name}\n```\n${content.take(1000)}${if (content.length > 1000) "..." else ""}\n```" + } - val typeConfig = typeConfig ?: throw RuntimeException() - val chatter = - (typeConfig.model?.let { orchestrationConfig.instance(it) } ?: orchestrationConfig.defaultChatter).getChildClient(task) - val columnsResponse = ParsedAgent( - name = "ColumnIdentifier", - resultClass = Columns::class.java, - exampleInstance = Columns( - listOf( - Column( - id = "Name", - name = "Name of the fruit", - description = "The name of the fruit in the row" - ), - Column( - id = "Color", - name = "Color of the fruit", - description = "The color of the fruit in the row" - ), - Column( - id = "Taste", - name = "Taste of the fruit", - description = "The taste of the fruit in the row" - ) - ) - ), - prompt = """ + val typeConfig = typeConfig ?: throw RuntimeException() + val chatter = + (typeConfig.model?.let { orchestrationConfig.instance(it) } ?: orchestrationConfig.defaultChatter).getChildClient(task) + val columnsResponse = ParsedAgent( + name = "ColumnIdentifier", + resultClass = Columns::class.java, + exampleInstance = Columns( + listOf( + Column( + id = "Name", + name = "Name of the fruit", + description = "The name of the fruit in the row" + ), + Column( + id = "Color", + name = "Color of the fruit", + description = "The color of the fruit in the row" + ), + Column( + id = "Taste", + name = "Taste of the fruit", + description = "The taste of the fruit in the row" + ) + ) + ), + prompt = """ Analyze the provided files and identify distinct columns for a data table based on the following instructions: ${executionConfig?.column_identification_instructions} @@ 
-135,39 +146,48 @@ class DataTableCompilationTask( 1. Assign a unique column ID - should be a short, descriptive string 2. Provide a detailed description of what the column represents """.trimIndent(), - model = chatter, - parsingChatter = orchestrationConfig.parsingChatter, - temperature = orchestrationConfig.temperature, - describer = TaskContextYamlDescriber(orchestrationConfig), - ).answer( - listOf( - fileContentString - ), + model = chatter, + parsingChatter = orchestrationConfig.parsingChatter, + temperature = orchestrationConfig.temperature, + describer = TaskContextYamlDescriber(orchestrationConfig), + ).answer( + listOf( + fileContentString + ), + ) + val columns = columnsResponse.obj + val columnsList = columns.columns.map { + Column( + id = it.id, + name = it.name, + description = it.description, + ) + } + transcript?.let { out -> + out.write("## Step 2: Column Identification\n\n".toByteArray()) + out.write("Identified ${columnsList.size} columns:\n\n".toByteArray()) + columnsList.forEach { col -> + out.write("- **${col.name}** (${col.id}): ${col.description}\n".toByteArray()) + } + out.write("\n".toByteArray()) + } + + val rowsList = ParsedAgent( + name = "RowIdentifier", + resultClass = Rows::class.java, + exampleInstance = Rows( + listOf( + Row( + id = "Apple", + sourceFiles = listOf("apples.md", "apple_recipes.md") + ), + Row( + id = "Banana", + sourceFiles = listOf("bananas.md", "banana_recipes.md") + ) ) - val columns = columnsResponse.obj - val columnsList = columns.columns.map { - Column( - id = it.id, - name = it.name, - description = it.description, - ) - } - val rowsList = ParsedAgent( - name = "RowIdentifier", - resultClass = Rows::class.java, - exampleInstance = Rows( - listOf( - Row( - id = "Apple", - sourceFiles = listOf("apples.md", "apple_recipes.md") - ), - Row( - id = "Banana", - sourceFiles = listOf("bananas.md", "banana_recipes.md") - ) - ) - ), - prompt = """ + ), + prompt = """ You are a data extraction agent that is building a 
data table. Analyze the provided files and identify ALL distinct rows found in the data: @@ -178,178 +198,220 @@ class DataTableCompilationTask( 1. Assign a unique row ID - should be a short, descriptive string 2. List the source files that contain data for this row """.trimIndent(), - model = chatter, - parsingChatter = orchestrationConfig.parsingChatter, - temperature = orchestrationConfig.temperature, - describer = TaskContextYamlDescriber(orchestrationConfig), - ).answer( - listOf( - fileContentString, - "Columns:\n" + columnsList.joinToString("\n") { "- ${it.id}: ${it.name} (${it.description})" } - ), - ) + model = chatter, + parsingChatter = orchestrationConfig.parsingChatter, + temperature = orchestrationConfig.temperature, + describer = TaskContextYamlDescriber(orchestrationConfig), + ).answer( + listOf( + fileContentString, + "Columns:\n" + columnsList.joinToString("\n") { "- ${it.id}: ${it.name} (${it.description})" } + ), + ) + + task.add(MarkdownUtil.renderMarkdown("Identified ${rowsList.obj.rows.size} rows")) + task.add(MarkdownUtil.renderMarkdown("Identified ${columnsList.size} columns")) + transcript?.let { out -> + out.write("## Step 3: Row Identification\n\n".toByteArray()) + out.write("Identified ${rowsList.obj.rows.size} rows:\n\n".toByteArray()) + rowsList.obj.rows.forEach { row -> + out.write("- **${row.id}** (Sources: ${row.sourceFiles.joinToString(", ")})\n".toByteArray()) + } + out.write("\n".toByteArray()) + } - task.add(MarkdownUtil.renderMarkdown("Identified ${rowsList.obj.rows.size} rows")) - task.add(MarkdownUtil.renderMarkdown("Identified ${columnsList.size} columns")) - - task.add(MarkdownUtil.renderMarkdown("## Step 4: Extracting cell data for each row")) - val tableData = mutableListOf>() - val progressTotal = rowsList.obj.rows.size - var progressCurrent = 0 - - rowsList.obj.rows.forEach { row -> - progressCurrent++ - task.add( - MarkdownUtil.renderMarkdown( - "Processing row ${progressCurrent}/${progressTotal}: ${row.id}", - ui = 
task.ui - ) - ) - val rowDataResponse = ParsedAgent( - name = "CellExtractor", - resultClass = RowData::class.java, - exampleInstance = RowData( - rowId = "Apple", - data = mapOf( - "Name" to "Apple", - "Color" to "Red", - "Taste" to "Sweet" - ) - ), - prompt = "Extract data for a data row for `${row.id}` from the provided source files.\n\n" + - "Expected Columns:\n${columnsList.joinToString("\n") { "- ${it.id}: ${it.name} (${it.description})" }}\n\n" + - "Special Instructions:\n${executionConfig?.cell_extraction_instructions}\n\n" + - "IMPORTANT: Respond with ONLY the single JSON object for the row `${row.id}`. Do NOT return a JSON array.", - model = chatter, - parsingChatter = orchestrationConfig.parsingChatter, - temperature = orchestrationConfig.temperature, - describer = TaskContextYamlDescriber(orchestrationConfig), - ).answer( - listOf( - "Source Files:\n" + row.sourceFiles.mapNotNull { fileName -> - matchedFiles.find { it.name == fileName || it.toString().endsWith(fileName) } - }.joinToString("\n\n") { file -> - "### ${file.name}\n```\n${readFileContent(file).indent(" ")}\n```" - } - ), - ) - - val rowData = rowDataResponse.obj - val rowMap = mutableMapOf() - rowMap["rowId"] = row.id - rowMap.putAll(rowData.data) - - tableData.add(rowMap) + task.add(MarkdownUtil.renderMarkdown("## Step 4: Extracting cell data for each row")) + val tableData = mutableListOf>() + val progressTotal = rowsList.obj.rows.size + var progressCurrent = 0 + + rowsList.obj.rows.forEach { row -> + progressCurrent++ + task.add( + MarkdownUtil.renderMarkdown( + "Processing row ${progressCurrent}/${progressTotal}: ${row.id}", + ui = task.ui + ) + ) + val rowDataResponse = ParsedAgent( + name = "CellExtractor", + resultClass = RowData::class.java, + exampleInstance = RowData( + rowId = "Apple", + data = mapOf( + "Name" to "Apple", + "Color" to "Red", + "Taste" to "Sweet" + ) + ), + prompt = "Extract data for a data row for `${row.id}` from the provided source files.\n\n" + + "Expected 
Columns:\n${columnsList.joinToString("\n") { "- ${it.id}: ${it.name} (${it.description})" }}\n\n" + + "Special Instructions:\n${executionConfig?.cell_extraction_instructions}\n\n" + + "IMPORTANT: Respond with ONLY the single JSON object for the row `${row.id}`. Do NOT return a JSON array.", + model = chatter, + parsingChatter = orchestrationConfig.parsingChatter, + temperature = orchestrationConfig.temperature, + describer = TaskContextYamlDescriber(orchestrationConfig), + ).answer( + listOf( + "Source Files:\n" + row.sourceFiles.mapNotNull { fileName -> + matchedFiles.find { it.name == fileName || it.toString().endsWith(fileName) } + }.joinToString("\n\n") { file -> + "### ${file.name}\n```\n${readFileContent(file).indent(" ")}\n```" + } + ), + ) + + val rowData = rowDataResponse.obj + val rowMap = mutableMapOf() + rowMap["rowId"] = row.id + rowMap.putAll(rowData.data) + + tableData.add(rowMap) + transcript?.let { out -> + out.write("### Row: ${row.id}\n\n".toByteArray()) + rowData.data.forEach { (key, value) -> + out.write("- $key: $value\n".toByteArray()) } + out.write("\n".toByteArray()) + } + } - task.add(MarkdownUtil.renderMarkdown("## Step 5: Compiling and saving data table")) + task.add(MarkdownUtil.renderMarkdown("## Step 5: Compiling and saving data table")) - val outputPath = executionConfig?.output_file ?: "compiled_data.json" - val outputFile = if (orchestrationConfig.absoluteWorkingDir != null) { - File(orchestrationConfig.absoluteWorkingDir, outputPath) - } else { - File(outputPath) - } + val outputPath = executionConfig?.output_file ?: "compiled_data.json" + val outputFile = if (orchestrationConfig.absoluteWorkingDir != null) { + File(orchestrationConfig.absoluteWorkingDir, outputPath) + } else { + File(outputPath) + } - outputFile.parentFile?.mkdirs() + outputFile.parentFile?.mkdirs() - when { - outputPath.endsWith(".json", ignoreCase = true) -> { + when { + outputPath.endsWith(".json", ignoreCase = true) -> { - val finalData = TableData(tableData, 
columnsList) - val mapper = jacksonObjectMapper() - mapper.writerWithDefaultPrettyPrinter().writeValue(outputFile, finalData) - } + val finalData = TableData(tableData, columnsList) + val mapper = jacksonObjectMapper() + mapper.writerWithDefaultPrettyPrinter().writeValue(outputFile, finalData) + } - outputPath.endsWith(".csv", ignoreCase = true) -> { + outputPath.endsWith(".csv", ignoreCase = true) -> { - BufferedWriter(FileWriter(outputFile)).use { writer -> + BufferedWriter(FileWriter(outputFile)).use { writer -> - val header = columnsList.joinToString(",") { "\"${it.name.replace("\"", "\"\"")}\"" } - writer.write(header) - writer.newLine() + val header = columnsList.joinToString(",") { "\"${it.name.replace("\"", "\"\"")}\"" } + writer.write(header) + writer.newLine() - tableData.forEach { row -> - val rowValues = columnsList.map { column -> - val value = row[column.id]?.toString() ?: "N/A" - "\"${value.replace("\"", "\"\"")}\"" - } - writer.write(rowValues.joinToString(",")) - writer.newLine() - } - } + tableData.forEach { row -> + val rowValues = columnsList.map { column -> + val value = row[column.id]?.toString() ?: "N/A" + "\"${value.replace("\"", "\"\"")}\"" } + writer.write(rowValues.joinToString(",")) + writer.newLine() + } + } + } - outputPath.endsWith(".md", ignoreCase = true) -> { + outputPath.endsWith(".md", ignoreCase = true) -> { - BufferedWriter(FileWriter(outputFile)).use { writer -> - writeMarkdown(columnsList, writer, tableData) - } - } + BufferedWriter(FileWriter(outputFile)).use { writer -> + writeMarkdown(columnsList, writer, tableData) + } + } - outputPath.isBlank() -> { + outputPath.isBlank() -> { - } + } - else -> { + else -> { - val finalData = TableData(tableData, columnsList) - val mapper = jacksonObjectMapper() - mapper.writerWithDefaultPrettyPrinter().writeValue(outputFile, finalData) - } - } + val finalData = TableData(tableData, columnsList) + val mapper = jacksonObjectMapper() + 
mapper.writerWithDefaultPrettyPrinter().writeValue(outputFile, finalData) + } + } - val resultMessage = (""" + val resultMessage = (""" Data table compilation complete! - Processed ${matchedFiles.size} source files - Identified ${rowsList.obj.rows.size} rows and ${columnsList.size} columns - Saved compiled data to: ${outputFile.absolutePath} """.trimIndent() + "\n\n" + "### Compiled Data\n\n${ - StringWriter().use { - BufferedWriter(it).use { - writeMarkdown(columnsList, it, tableData) - } - it.toString() - } - }").renderMarkdown() - - resultFn(resultMessage) - } - - private fun writeMarkdown( - columnsList: List, - writer: BufferedWriter, - tableData: MutableList> - ) { - - val header = columnsList.joinToString(" | ") { it.name } - writer.write("| $header |") - writer.newLine() - - val separator = columnsList.joinToString(" | ") { "---" } - writer.write("| $separator |") - writer.newLine() - - tableData.forEach { row -> - val rowValues = columnsList.joinToString(" | ") { column -> - val value = row[column.id]?.toString() ?: "N/A" - value - } - writer.write("| $rowValues |") - writer.newLine() + StringWriter().use { + BufferedWriter(it).use { + writeMarkdown(columnsList, it, tableData) } - } - - private fun readFileContent(path: Path): String { - return try { - Files.readString(path) - } catch (e: Exception) { - log.warn("Failed to read file: $path", e) - "ERROR: Could not read file content" + it.toString() + } + }").renderMarkdown() + transcript?.let { out -> + out.write("## Step 5: Final Results\n\n".toByteArray()) + out.write("### Summary\n\n".toByteArray()) + out.write("- Processed ${matchedFiles.size} source files\n".toByteArray()) + out.write("- Identified ${rowsList.obj.rows.size} rows and ${columnsList.size} columns\n".toByteArray()) + out.write("- Saved compiled data to: ${outputFile.absolutePath}\n\n".toByteArray()) + out.write("### Compiled Data Table\n\n".toByteArray()) + StringWriter().use { sw -> + BufferedWriter(sw).use { bw -> + 
writeMarkdown(columnsList, bw, tableData) } + out.write(sw.toString().toByteArray()) + } } - companion object { - private val log = LoggerFactory.getLogger(DataTableCompilationTask::class.java) + resultFn(resultMessage) + } + + private fun writeMarkdown( + columnsList: List, + writer: BufferedWriter, + tableData: MutableList> + ) { + + val header = columnsList.joinToString(" | ") { it.name } + writer.write("| $header |") + writer.newLine() + + val separator = columnsList.joinToString(" | ") { "---" } + writer.write("| $separator |") + writer.newLine() + + tableData.forEach { row -> + val rowValues = columnsList.joinToString(" | ") { column -> + val value = row[column.id]?.toString() ?: "N/A" + value + } + writer.write("| $rowValues |") + writer.newLine() + } + } + + private fun readFileContent(path: Path): String { + return try { + Files.readString(path) + } catch (e: Exception) { + log.warn("Failed to read file: $path", e) + "ERROR: Could not read file content" } + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + companion object { + private val log = LoggerFactory.getLogger(DataTableCompilationTask::class.java) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphGenerationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphGenerationTask.kt index d47a8d4a5..6890de6c3 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphGenerationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphGenerationTask.kt @@ -1,14 +1,14 @@ package com.simiacryptus.cognotik.plan.tools.graph -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.graph.SoftwareNodeType import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.AbbrevWhitelistYamlDescriber import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.describe.TypeDescriber import com.simiacryptus.cognotik.plan.AbstractTask -import com.simiacryptus.cognotik.plan.TaskOrchestrator import com.simiacryptus.cognotik.plan.OrchestrationConfig +import com.simiacryptus.cognotik.plan.TaskOrchestrator import com.simiacryptus.cognotik.plan.TaskTypeConfig import com.simiacryptus.cognotik.plan.tools.file.AbstractFileTask import com.simiacryptus.cognotik.platform.model.ApiChatModel @@ -17,136 +17,170 @@ import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream class SoftwareGraphGenerationTask( - orchestrationConfig: 
OrchestrationConfig, - planTask: SoftwareGraphGenerationTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: SoftwareGraphGenerationTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class SoftwareGraphGenerationTaskExecutionConfigData( - @Description("The output file path where the software graph will be saved") - val output_file: String = "software_graph.json", - @Description("The type of nodes to focus on generating (e.g., CodeFile, CodePackage, etc.)") - val node_types: List = listOf(), - task_description: String? = null, - task_dependencies: List? = null, - input_files: List? = null, - state: TaskState? = null - ) : AbstractFileTask.FileTaskExecutionConfig( - task_type = "SoftwareGraphGeneration", - task_description = task_description, - task_dependencies = task_dependencies, - related_files = input_files, - state = state - ) + class SoftwareGraphGenerationTaskExecutionConfigData( + @Description("The output file path where the software graph will be saved") + val output_file: String = "software_graph.json", + @Description("The type of nodes to focus on generating (e.g., CodeFile, CodePackage, etc.)") + val node_types: List = listOf(), + task_description: String? = null, + task_dependencies: List? = null, + input_files: List? = null, + state: TaskState? 
= null + ) : AbstractFileTask.FileTaskExecutionConfig( + task_type = "SoftwareGraphGeneration", + task_description = task_description, + task_dependencies = task_dependencies, + related_files = input_files, + state = state + ) - val describer: TypeDescriber = object : AbbrevWhitelistYamlDescriber( - "com.simiacryptus", "aicoder.actions" - ) { - override val includeMethods: Boolean get() = false - } + val describer: TypeDescriber = object : AbbrevWhitelistYamlDescriber( + "com.simiacryptus", "aicoder.actions" + ) { + override val includeMethods: Boolean get() = false + } - override fun promptSegment() = """ + override fun promptSegment() = """ SoftwareGraphGeneration - Generate a SoftwareGraph representation of the codebase ** Specify the output file path for the generated graph ** Optionally specify node types to focus on ** List input files to analyze for graph generation """.trimIndent() - fun getInputFileCode(): String { - val inputFiles = executionConfig?.related_files ?: return "" - return inputFiles.joinToString("\n\n") { filePath -> - val file = File(filePath) - if (file.exists()) { - "### ${file.name}\n" + file.readText() - } else { - "### $filePath\nFile not found." - } - } + fun getInputFileCode(): String { + val inputFiles = executionConfig?.related_files ?: return "" + return inputFiles.joinToString("\n\n") { filePath -> + val file = File(filePath) + if (file.exists()) { + "### ${file.name}\n" + file.readText() + } else { + "### $filePath\nFile not found." 
+ } } + } - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val typeConfig = typeConfig ?: throw RuntimeException() - val graphGenerationActor = ParsedAgent( - name = "SoftwareGraphGenerator", - resultClass = SoftwareNodeType.SoftwareGraph::class.java, - prompt = "Analyze the provided code files and generate a SoftwareGraph representation.\nThe graph should accurately represent the software architecture including:\n\nAvailable Node Types:\n" + - SoftwareNodeType.values().joinToString>>("\n") { - "* ${it.name}: ${it.description?.replace("\n", "\n ")}\n ${ - describer.describe(rawType = it.nodeClass).lineSequence() - .map { - when { - it.isBlank() -> { - when { - it.length < " ".length -> " " - else -> it - } - } + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val typeConfig = typeConfig ?: throw RuntimeException() + val markdownTranscript = transcript(task) + val graphGenerationActor = ParsedAgent( + name = "SoftwareGraphGenerator", + resultClass = SoftwareNodeType.SoftwareGraph::class.java, + prompt = "Analyze the provided code files and generate a SoftwareGraph representation.\nThe graph should accurately represent the software architecture including:\n\nAvailable Node Types:\n" + + SoftwareNodeType.values().joinToString>>("\n") { + "* ${it.name}: ${it.description?.replace("\n", "\n ")}\n ${ + describer.describe(rawType = it.nodeClass).lineSequence() + .map { + when { + it.isBlank() -> { + when { + it.length < " ".length -> " " + else -> it + } + } - else -> " " + it - } - } - .joinToString("\n") - }" - } + "\n\nGenerate appropriate NodeId values for each node.\nEnsure all relationships between nodes are properly established.\nFormat the response as a valid SoftwareGraph JSON structure.", - model = (typeConfig.model?.let { 
this.orchestrationConfig.instance(it) } - ?: this.orchestrationConfig.defaultChatter).getChildClient(task), - parsingChatter = this.orchestrationConfig.parsingChatter, - temperature = this.orchestrationConfig.temperature, - describer = describer, - ) - val chatMessages = graphGenerationActor.chatMessages( - messages + listOf( - getInputFileCode(), - "Generate a SoftwareGraph for the above code focusing on these node types: ${ - executionConfig?.node_types?.joinToString( - ", " - ) - }" - ).filter { it.isNotBlank() }, - ) - val response = graphGenerationActor.respond( - messages = chatMessages, - input = messages, - ) + else -> " " + it + } + } + .joinToString("\n") + }" + } + "\n\nGenerate appropriate NodeId values for each node.\nEnsure all relationships between nodes are properly established.\nFormat the response as a valid SoftwareGraph JSON structure.", + model = (typeConfig.model?.let { this.orchestrationConfig.instance(it) } + ?: this.orchestrationConfig.defaultChatter).getChildClient(task), + parsingChatter = this.orchestrationConfig.parsingChatter, + temperature = this.orchestrationConfig.temperature, + describer = describer, + ) + val chatMessages = graphGenerationActor.chatMessages( + messages + listOf( + getInputFileCode(), + "Generate a SoftwareGraph for the above code focusing on these node types: ${ + executionConfig?.node_types?.joinToString( + ", " + ) + }" + ).filter { it.isNotBlank() }, + ) + // Write to transcript + markdownTranscript?.write("# Software Graph Generation\n\n".toByteArray()) + markdownTranscript?.write("## Input Files\n\n".toByteArray()) + markdownTranscript?.write(getInputFileCode().toByteArray()) + markdownTranscript?.write("\n\n## Request\n\n".toByteArray()) + markdownTranscript?.write( + "Generate a SoftwareGraph for the above code focusing on these node types: ${ + executionConfig?.node_types?.joinToString(", ") + }\n\n".toByteArray() + ) - val outputFile = File(orchestrationConfig.absoluteWorkingDir ?: 
".").resolve(executionConfig?.output_file.let { - when { - it.isNullOrBlank() -> "software_graph.json" - else -> it - } - }) - try { - outputFile.parentFile?.mkdirs() - outputFile.writeText(JsonUtil.toJson(response.obj)) + val response = graphGenerationActor.respond( + messages = chatMessages, + input = messages, + ) + // Write response to transcript + markdownTranscript?.write("## Generated Graph\n\n".toByteArray()) + markdownTranscript?.write("```json\n".toByteArray()) + markdownTranscript?.write(JsonUtil.toJson(response.obj).toByteArray()) + markdownTranscript?.write("\n```\n\n".toByteArray()) - val summary = buildString { - appendLine("# Software Graph Generation Complete") - appendLine() - appendLine("Generated graph saved to: ${outputFile.absolutePath}") - appendLine() - appendLine("## Graph Statistics") - appendLine("- Total nodes: ${response.obj.nodes.size}") - appendLine("- Node types:") - response.obj.nodes.groupBy { it.javaClass.simpleName }.forEach { (type, nodes) -> - appendLine(" - $type: ${nodes.size} nodes") - } - } + val outputFile = File(orchestrationConfig.absoluteWorkingDir ?: ".").resolve(executionConfig?.output_file.let { + when { + it.isNullOrBlank() -> "software_graph.json" + else -> it + } + }) + try { + outputFile.parentFile?.mkdirs() + outputFile.writeText(JsonUtil.toJson(response.obj)) - task.add(MarkdownUtil.renderMarkdown(summary, ui = task.ui)) - resultFn(summary) - } catch (e: Exception) { - task.error(e) - resultFn("Failed to save graph to ${outputFile.absolutePath}: ${e.message}") + val summary = buildString { + appendLine("# Software Graph Generation Complete") + appendLine() + appendLine("Generated graph saved to: ${outputFile.absolutePath}") + appendLine() + appendLine("## Graph Statistics") + appendLine("- Total nodes: ${response.obj.nodes.size}") + appendLine("- Node types:") + response.obj.nodes.groupBy { it.javaClass.simpleName }.forEach { (type, nodes) -> + appendLine(" - $type: ${nodes.size} nodes") } - } + } - companion 
object { + task.add(MarkdownUtil.renderMarkdown(summary, ui = task.ui)) + markdownTranscript?.write("## Summary\n\n".toByteArray()) + markdownTranscript?.write(summary.toByteArray()) + markdownTranscript?.close() + resultFn(summary) + } catch (e: Exception) { + task.error(e) + markdownTranscript?.write("\n\n## Error\n\n".toByteArray()) + markdownTranscript?.write("Failed to save graph: ${e.message}\n".toByteArray()) + markdownTranscript?.close() + resultFn("Failed to save graph to ${outputFile.absolutePath}: ${e.message}") } + } + + companion object; + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphModificationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphModificationTask.kt index 4b492bbe7..47d3a454b 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphModificationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphModificationTask.kt @@ -1,57 +1,58 @@ package com.simiacryptus.cognotik.plan.tools.graph -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.apps.graph.SoftwareNodeType import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.plan.TaskContextYamlDescriber import com.simiacryptus.cognotik.util.JsonUtil import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import 
java.io.FileOutputStream class SoftwareGraphModificationTask( - orchestrationConfig: OrchestrationConfig, - planTask: SoftwareGraphModificationTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: SoftwareGraphModificationTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class SoftwareGraphModificationTaskExecutionConfigData( - @Description("The path to the input software graph JSON file") - val input_graph_file: String? = null, - @Description("The path where the modified graph will be saved") - val output_graph_file: String? = null, - @Description("The modification goal or instructions") - val modification_goal: String? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null - ) : TaskExecutionConfig( - task_type = "SoftwareGraphModification", - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) - - override fun promptSegment() = """ + class SoftwareGraphModificationTaskExecutionConfigData( + @Description("The path to the input software graph JSON file") + val input_graph_file: String? = null, + @Description("The path where the modified graph will be saved") + val output_graph_file: String? = null, + @Description("The modification goal or instructions") + val modification_goal: String? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? 
= null + ) : TaskExecutionConfig( + task_type = "SoftwareGraphModification", + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) + + override fun promptSegment() = """ SoftwareGraphModification - Load, modify and save software graph representations ** Specify the input graph file path ** Specify the output graph file path (optional, defaults to input file) ** Describe the desired modifications to the graph """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val typeConfig = typeConfig ?: throw RuntimeException() - val graphModificationActor = ParsedAgent( - name = "SoftwareGraphModification", - resultClass = SoftwareNodeType.SoftwareGraph::class.java, - prompt = """ + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val typeConfig = typeConfig ?: throw RuntimeException() + val transcript = transcript(task) + val graphModificationActor = ParsedAgent( + name = "SoftwareGraphModification", + resultClass = SoftwareNodeType.SoftwareGraph::class.java, + prompt = """ Analyze the provided software graph and generate modifications based on the given goal. Return only the delta changes that should be applied to the graph. 
@@ -65,81 +66,114 @@ class SoftwareGraphModificationTask( Node Types: """.trimIndent() + SoftwareNodeType.values().joinToString("\n") { - "* " + it.name + ": " + it.description?.prependIndent(" ") + - "\n " + TaskContextYamlDescriber(orchestrationConfig).describe(rawType = it.nodeClass).lineSequence() - .map { - when { - it.isBlank() -> { - when { - it.length < " ".length -> " " - else -> it - } - } - - else -> " " + it - } - } - .joinToString("\n") - }, - model = (typeConfig.model?.let { orchestrationConfig.instance(it) } - ?: orchestrationConfig.defaultChatter).getChildClient(task), - parsingChatter = orchestrationConfig.parsingChatter, - temperature = orchestrationConfig.temperature, - describer = TaskContextYamlDescriber(orchestrationConfig), - ) - - val inputFile = (orchestrationConfig.absoluteWorkingDir?.let { File(it) } ?: File(".")) - .resolve(executionConfig?.input_graph_file ?: throw IllegalArgumentException("Input graph file not specified")) - if (!inputFile.exists()) throw IllegalArgumentException("Input graph file does not exist: ${inputFile.absolutePath}") - val originalGraph = JsonUtil.fromJson( - inputFile.readText(), - SoftwareNodeType.SoftwareGraph::class.java - ) - - val response = graphModificationActor.answer( - messages + listOf( - "Current graph:\n```json\n${JsonUtil.toJson(originalGraph)}\n```", - "Modification goal: ${executionConfig.modification_goal}" - ), - ) - - val deltaGraph = response.obj - val newGraph = originalGraph + deltaGraph - - val outputFile = (orchestrationConfig.absoluteWorkingDir?.let { File(it) } ?: File(".")) - .resolve( + "* " + it.name + ": " + it.description?.prependIndent(" ") + + "\n " + TaskContextYamlDescriber(orchestrationConfig).describe(rawType = it.nodeClass).lineSequence() + .map { + when { + it.isBlank() -> { when { - !executionConfig.output_graph_file.isNullOrBlank() -> executionConfig.output_graph_file - executionConfig.input_graph_file.isNotBlank() -> executionConfig.input_graph_file - else -> 
"modified_graph.json" + it.length < " ".length -> " " + else -> it } - ) - outputFile.parentFile?.mkdirs() - outputFile.writeText(JsonUtil.toJson(newGraph)) - - val summary = buildString { - appendLine("# Software Graph Modification Complete") - appendLine() - appendLine("Modified graph saved to: ${outputFile.absolutePath}") - appendLine() - appendLine("## Modification Summary") - appendLine("### Changes Applied:") - deltaGraph.nodes.groupBy { it.javaClass.simpleName }.forEach { (type, nodes) -> - appendLine("- $type: ${nodes.size} node(s) modified") - } - appendLine() - appendLine("### Final Graph Statistics:") - appendLine("- Total nodes: ${newGraph.nodes.size}") - appendLine("- Node types:") - newGraph.nodes.groupBy { it.javaClass.simpleName }.forEach { (type, nodes) -> - appendLine(" - $type: ${nodes.size} nodes") + } + + else -> " " + it } - } + } + .joinToString("\n") + }, + model = (typeConfig.model?.let { orchestrationConfig.instance(it) } + ?: orchestrationConfig.defaultChatter).getChildClient(task), + parsingChatter = orchestrationConfig.parsingChatter, + temperature = orchestrationConfig.temperature, + describer = TaskContextYamlDescriber(orchestrationConfig), + ) + + val inputFile = (orchestrationConfig.absoluteWorkingDir?.let { File(it) } ?: File(".")) + .resolve(executionConfig?.input_graph_file ?: throw IllegalArgumentException("Input graph file not specified")) + if (!inputFile.exists()) throw IllegalArgumentException("Input graph file does not exist: ${inputFile.absolutePath}") + + transcript?.write("# Software Graph Modification Task\n\n".toByteArray()) + transcript?.write("## Input\n\n".toByteArray()) + transcript?.write("- Input file: ${inputFile.absolutePath}\n".toByteArray()) + transcript?.write("- Modification goal: ${executionConfig.modification_goal}\n\n".toByteArray()) + val originalGraph = JsonUtil.fromJson( + inputFile.readText(), + SoftwareNodeType.SoftwareGraph::class.java + ) - task.add((summary.renderMarkdown)) - resultFn(summary) + 
transcript?.write("## Original Graph Statistics\n\n".toByteArray()) + transcript?.write("- Total nodes: ${originalGraph.nodes.size}\n".toByteArray()) + transcript?.write( + "- Node types: ${ + originalGraph.nodes.groupBy { it.javaClass.simpleName }.map { "${it.key}: ${it.value.size}" }.joinToString(", ") + }\n\n".toByteArray() + ) + val response = graphModificationActor.answer( + messages + listOf( + "Current graph:\n```json\n${JsonUtil.toJson(originalGraph)}\n```", + "Modification goal: ${executionConfig.modification_goal}" + ), + ) + + val deltaGraph = response.obj + transcript?.write("## Delta Changes\n\n".toByteArray()) + transcript?.write("```json\n${JsonUtil.toJson(deltaGraph)}\n```\n\n".toByteArray()) + + val newGraph = originalGraph + deltaGraph + + val outputFile = (orchestrationConfig.absoluteWorkingDir?.let { File(it) } ?: File(".")) + .resolve( + when { + !executionConfig.output_graph_file.isNullOrBlank() -> executionConfig.output_graph_file + executionConfig.input_graph_file.isNotBlank() -> executionConfig.input_graph_file + else -> "modified_graph.json" + } + ) + outputFile.parentFile?.mkdirs() + outputFile.writeText(JsonUtil.toJson(newGraph)) + + val summary = buildString { + appendLine("# Software Graph Modification Complete") + appendLine() + appendLine("Modified graph saved to: ${outputFile.absolutePath}") + appendLine() + appendLine("## Modification Summary") + appendLine("### Changes Applied:") + deltaGraph.nodes.groupBy { it.javaClass.simpleName }.forEach { (type, nodes) -> + appendLine("- $type: ${nodes.size} node(s) modified") + } + appendLine() + appendLine("### Final Graph Statistics:") + appendLine("- Total nodes: ${newGraph.nodes.size}") + appendLine("- Node types:") + newGraph.nodes.groupBy { it.javaClass.simpleName }.forEach { (type, nodes) -> + appendLine(" - $type: ${nodes.size} nodes") + } } - companion object { + task.add((summary.renderMarkdown)) + transcript?.write("## Summary\n\n".toByteArray()) + 
transcript?.write(summary.toByteArray()) + transcript?.flush() + transcript?.close() + + resultFn(summary) + } + + companion object { + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript } + + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphPlanningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphPlanningTask.kt index c7a2695d3..eab4ee46f 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphPlanningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/graph/SoftwareGraphPlanningTask.kt @@ -2,85 +2,106 @@ package com.simiacryptus.cognotik.plan.tools.graph import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.plan.TaskContextYamlDescriber import com.simiacryptus.cognotik.util.JsonUtil import com.simiacryptus.cognotik.webui.session.SessionTask import java.io.File +import java.io.FileOutputStream class SoftwareGraphPlanningTask( - orchestrationConfig: OrchestrationConfig, planTask: GraphBasedPlanningTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, planTask: GraphBasedPlanningTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class GraphBasedPlanningTaskExecutionConfigData( - @Description("REQUIRED: The path to the input software graph JSON file") val input_graph_file: String? = null, - @Description("The instruction or goal to be achieved") val instruction: String = "", - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? 
= null - ) : TaskExecutionConfig( - task_type = "SoftwareGraphPlanning", - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) + class GraphBasedPlanningTaskExecutionConfigData( + @Description("REQUIRED: The path to the input software graph JSON file") val input_graph_file: String? = null, + @Description("The instruction or goal to be achieved") val instruction: String = "", + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null + ) : TaskExecutionConfig( + task_type = "SoftwareGraphPlanning", + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) - override fun promptSegment() = """ + override fun promptSegment() = """ SoftwareGraphPlanning - Use a software graph to generate an actionable sub-plan. ** Include the file path to the input graph file and the instruction. """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val inputFile = (orchestrationConfig.absoluteWorkingDir?.let { File(it) } ?: File(".")).resolve( - when { - !executionConfig?.input_graph_file.isNullOrBlank() -> executionConfig.input_graph_file - else -> throw IllegalArgumentException("Input graph file not specified") - } - ) - if (!inputFile.exists()) throw IllegalArgumentException("Input graph file does not exist: ${inputFile.absolutePath}") - val response = orchestrationConfig.planningActor(TaskContextYamlDescriber(orchestrationConfig),task).answer( - (messages + listOf( - "Software Graph `${executionConfig.input_graph_file}`:\n```json\n${inputFile.readText()}\n```", - "Instruction: ${executionConfig.instruction}" - )).filter { it.isNotBlank() }, - ) - val plan = com.simiacryptus.cognotik.plan.PlanUtil.filterPlan { response.obj.tasksByID } ?: emptyMap() - val planSummary = buildString { - appendLine("# 
Graph-Based Planning Result") - appendLine() - appendLine("## Generated Plan (DAG)") - appendLine("```json") - appendLine(JsonUtil.toJson(plan)) - appendLine("```") - } - val planProcessingState = agent.executePlan( - plan = plan, - task = task, - userMessage = executionConfig.instruction, - orchestrationConfig = orchestrationConfig, - ) - val executionSummary = buildString { - appendLine("## Plan Execution Summary") - appendLine("- Completed Tasks: ${planProcessingState.completedTasks.size}") - appendLine("- Failed Tasks: ${plan.size - planProcessingState.completedTasks.size}") - appendLine() - appendLine("### Task Results:") - planProcessingState.taskResult.forEach { (taskId, result) -> - appendLine("#### $taskId") - appendLine("```") - appendLine(result.take(500)) + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val markdownTranscript = transcript(task) + val inputFile = (orchestrationConfig.absoluteWorkingDir?.let { File(it) } ?: File(".")).resolve( + when { + !executionConfig?.input_graph_file.isNullOrBlank() -> executionConfig.input_graph_file + else -> throw IllegalArgumentException("Input graph file not specified") + } + ) + if (!inputFile.exists()) throw IllegalArgumentException("Input graph file does not exist: ${inputFile.absolutePath}") + val response = orchestrationConfig.planningActor(TaskContextYamlDescriber(orchestrationConfig), task).answer( + (messages + listOf( + "Software Graph `${executionConfig.input_graph_file}`:\n```json\n${inputFile.readText()}\n```", + "Instruction: ${executionConfig.instruction}" + )).filter { it.isNotBlank() }, + ) + markdownTranscript?.write("## Planning Response\n\n".toByteArray()) + markdownTranscript?.write("```yaml\n${response.text}\n```\n\n".toByteArray()) - appendLine("```") - } - } - resultFn(planSummary + "\n\n" + executionSummary) + val plan = PlanUtil.filterPlan { response.obj.tasksByID } ?: 
emptyMap() + val planSummary = buildString { + appendLine("# Graph-Based Planning Result") + appendLine() + appendLine("## Generated Plan (DAG)") + appendLine("```json") + appendLine(JsonUtil.toJson(plan)) + appendLine("```") } + markdownTranscript?.write(planSummary.toByteArray()) + markdownTranscript?.write("\n\n".toByteArray()) - companion object { + val planProcessingState = agent.executePlan( + plan = plan, + task = task, + userMessage = executionConfig.instruction, + orchestrationConfig = orchestrationConfig, + ) + val executionSummary = buildString { + appendLine("## Plan Execution Summary") + appendLine("- Completed Tasks: ${planProcessingState.completedTasks.size}") + appendLine("- Failed Tasks: ${plan.size - planProcessingState.completedTasks.size}") + appendLine() + appendLine("### Task Results:") + planProcessingState.taskResult.forEach { (taskId, result) -> + appendLine("#### $taskId") + appendLine("```") + appendLine(result.take(500)) + + appendLine("```") + } } + markdownTranscript?.write(executionSummary.toByteArray()) + resultFn(planSummary + "\n\n" + executionSummary) + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.kt index 8e236a326..decffc2ae 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.kt @@ -14,33 +14,33 @@ import java.util.concurrent.Executors import java.util.concurrent.TimeUnit class KnowledgeIndexingTask( - orchestrationConfig: OrchestrationConfig, - planTask: KnowledgeIndexingTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: KnowledgeIndexingTaskExecutionConfigData? ) : AbstractTask( - orchestrationConfig, - planTask + orchestrationConfig, + planTask ) { - class KnowledgeIndexingTaskExecutionConfigData( - @Description("The file paths to process and index") - val file_paths: List, - @Description("The type of parsing to use: 'document' or 'code'") - val parsing_type: String? = "document", - @Description("The chunk size for splitting documents (0.0 to 1.0)") - val chunk_size: Double? = 0.1, - @Description("The embedding model to use for indexing") - val embedding_model: String? = OllamaEmbeddingModels.NomicEmbedText.modelName, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? 
= null, - ) : TaskExecutionConfig( - task_type = KnowledgeIndexing.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) + class KnowledgeIndexingTaskExecutionConfigData( + @Description("The file paths to process and index") + val file_paths: List, + @Description("The type of parsing to use: 'document' or 'code'") + val parsing_type: String? = "document", + @Description("The chunk size for splitting documents (0.0 to 1.0)") + val chunk_size: Double? = 0.1, + @Description("The embedding model to use for indexing") + val embedding_model: String? = OllamaEmbeddingModels.NomicEmbedText.modelName, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + ) : TaskExecutionConfig( + task_type = KnowledgeIndexing.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) - override fun promptSegment() = """ + override fun promptSegment() = """ KnowledgeIndexing - Process and index files for semantic search ** Specify the file paths to process ** Specify the parsing type (document or code) @@ -48,89 +48,89 @@ class KnowledgeIndexingTask( ** Optionally specify the embedding model (default OllamaNomadic) """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val filePaths = executionConfig?.file_paths ?: return - val files = filePaths.map { path -> - File(path).also { file -> - if (!file.exists()) { - log.warn("File does not exist: $path") - } - } - }.filter { it.exists() } + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val filePaths = executionConfig?.file_paths ?: return + val files = filePaths.map { path -> + File(path).also { file -> + if (!file.exists()) 
{ + log.warn("File does not exist: $path") + } + } + }.filter { it.exists() } - if (files.isEmpty()) { - val result = buildString { - appendLine("# No Valid Files Found") - appendLine() - appendLine("The following paths were specified but could not be found:") - filePaths.forEach { path -> - appendLine("* $path") - } - } - task.add(MarkdownUtil.renderMarkdown(result, ui = task.ui)) - resultFn(result) - return + if (files.isEmpty()) { + val result = buildString { + appendLine("# No Valid Files Found") + appendLine() + appendLine("The following paths were specified but could not be found:") + filePaths.forEach { path -> + appendLine("* $path") } + } + task.add(MarkdownUtil.renderMarkdown(result, ui = task.ui)) + resultFn(result) + return + } - val threadPool = Executors.newFixedThreadPool( - Runtime.getRuntime().availableProcessors().coerceAtMost(16) - ) - try { - val progressState = ProgressState.progressBar(task) - // Determine embedding model from configuration - val embeddingModel = EmbeddingModel.values().toList().firstOrNull { - it.second.modelName.equals(executionConfig.embedding_model, ignoreCase = true) - }!!.second - indexJsonFile( - pool = threadPool, - progressState = progressState, - inputPaths = files.map { it.absolutePath }.toTypedArray(), - model = embeddingModel - ) + val threadPool = Executors.newFixedThreadPool( + Runtime.getRuntime().availableProcessors().coerceAtMost(16) + ) + try { + val progressState = ProgressState.progressBar(task) + // Determine embedding model from configuration + val embeddingModel = EmbeddingModel.values().toList().firstOrNull { + it.second.modelName.equals(executionConfig.embedding_model, ignoreCase = true) + }!!.second + indexJsonFile( + pool = threadPool, + progressState = progressState, + inputPaths = files.map { it.absolutePath }.toTypedArray(), + model = embeddingModel + ) - val result = buildString { - appendLine("# Knowledge Indexing Complete") - appendLine() - appendLine("## Configuration") - appendLine("* 
Embedding Model: ${executionConfig?.embedding_model ?: "OllamaNomadic"}") - appendLine("* Parsing Type: ${executionConfig?.parsing_type ?: "document"}") - appendLine("* Chunk Size: ${executionConfig?.chunk_size ?: 0.1}") - appendLine() - appendLine("Processed ${files.size} files:") - files.forEach { file -> - appendLine("* ${file.name}") - } - } - task.add(MarkdownUtil.renderMarkdown(result, ui = task.ui)) - resultFn(result) - } finally { - threadPool.shutdown() - try { - if (!threadPool.awaitTermination(60, TimeUnit.SECONDS)) { - threadPool.shutdownNow() - } - } catch (e: InterruptedException) { - threadPool.shutdownNow() - Thread.currentThread().interrupt() - } + val result = buildString { + appendLine("# Knowledge Indexing Complete") + appendLine() + appendLine("## Configuration") + appendLine("* Embedding Model: ${executionConfig?.embedding_model ?: "OllamaNomadic"}") + appendLine("* Parsing Type: ${executionConfig?.parsing_type ?: "document"}") + appendLine("* Chunk Size: ${executionConfig?.chunk_size ?: 0.1}") + appendLine() + appendLine("Processed ${files.size} files:") + files.forEach { file -> + appendLine("* ${file.name}") } + } + task.add(MarkdownUtil.renderMarkdown(result, ui = task.ui)) + resultFn(result) + } finally { + threadPool.shutdown() + try { + if (!threadPool.awaitTermination(60, TimeUnit.SECONDS)) { + threadPool.shutdownNow() + } + } catch (e: InterruptedException) { + threadPool.shutdownNow() + Thread.currentThread().interrupt() + } } + } - companion object { - private val log = LoggerFactory.getLogger(KnowledgeIndexingTask::class.java) + companion object { + private val log = LoggerFactory.getLogger(KnowledgeIndexingTask::class.java) - val KnowledgeIndexing = TaskType( // TODO: This should be automatically done as needed during embedding search - "KnowledgeIndexing", - KnowledgeIndexingTask.KnowledgeIndexingTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Index content for semantic search capabilities", - """ + val 
KnowledgeIndexing = TaskType( // TODO: This should be automatically done as needed during embedding search + "KnowledgeIndexing", + KnowledgeIndexingTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Index content for semantic search capabilities", + """ Indexes documents and code for semantic search capabilities.
    • Processes both documentation and source code
    • @@ -140,7 +140,7 @@ class KnowledgeIndexingTask(
    • Progress tracking and reporting
    """ - ) + ) - } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.md deleted file mode 100644 index 1a380b98d..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/KnowledgeIndexingTask.md +++ /dev/null @@ -1,121 +0,0 @@ -# KnowledgeIndexingTask - -## Overview - -The `KnowledgeIndexingTask` is a specialized task implementation that processes and indexes files for semantic search capabilities. It leverages embedding models to create searchable vector representations of document content, enabling efficient knowledge retrieval in AI-powered applications. - -## Purpose - -This task is designed to: -- Process multiple file paths for indexing -- Create embeddings using the OllamaNomadic embedding model -- Support concurrent processing for improved performance -- Provide progress tracking during indexing operations - -## Configuration - -### KnowledgeIndexingTaskConfigData - -The task configuration accepts the following parameters: - -| Parameter | Type | Required | Description | -|-----------|------|----------|-------------| -| `file_paths` | `List` | Yes | List of file paths to process and index | -| `task_description` | `String?` | No | Optional description of the task | -| `task_dependencies` | `List?` | No | Optional list of dependent task identifiers | -| `state` | `TaskState?` | No | Current state of the task | - -## Usage - -### Example Configuration - -```kotlin -val config = KnowledgeIndexingTaskConfigData( - file_paths = listOf( - "/path/to/document1.pdf", - "/path/to/document2.txt", - "/path/to/codebase/src" - ), - task_description = "Index project documentation for semantic search" -) -``` - -### Prompt Segment - -When using this task in an orchestration context, the following prompt segment is provided: - -``` -KnowledgeIndexing - Process and 
index files for semantic search - ** Specify the file paths to process - ** Specify the parsing type (document or code) - ** Optionally specify the chunk size (default 0.1) -``` - -## Implementation Details - -### Processing Flow - -1. **File Validation**: The task first validates that all specified file paths exist -2. **Thread Pool Creation**: Creates a thread pool with up to 16 threads (based on available processors) -3. **Indexing**: Uses the `indexJsonFile` method to process files with: - - Progress tracking via `ProgressState` - - OllamaNomadic embedding model for vector generation -4. **Result Reporting**: Generates a markdown report of processed files -5. **Cleanup**: Ensures proper thread pool shutdown - -### Error Handling - -- **Missing Files**: Files that don't exist are filtered out with warnings logged -- **Empty File List**: If no valid files are found, returns a detailed error report -- **Thread Interruption**: Properly handles thread interruption and ensures cleanup - -### Performance Considerations - -- **Concurrent Processing**: Utilizes multi-threading for parallel file processing -- **Thread Pool Size**: Limited to 16 threads maximum to prevent resource exhaustion -- **Graceful Shutdown**: Implements a 60-second timeout for thread pool termination - -## Output Format - -The task generates markdown-formatted output with two possible outcomes: - -### Success Output -```markdown -# Knowledge Indexing Complete - -Processed N files: -* file1.txt -* file2.pdf -* ... -``` - -### Error Output (No Valid Files) -```markdown -# No Valid Files Found - -The following paths were specified but could not be found: -* /invalid/path1 -* /invalid/path2 -``` - -## Dependencies - -- `com.simiacryptus.cognotik.apps.parse.DocumentRecord`: For file indexing functionality -- `com.simiacryptus.cognotik.embedding.EmbeddingModel`: For vector embeddings -- `com.simiacryptus.cognotik.util.MarkdownUtil`: For rendering markdown output - -## Limitations and Notes - -1. 
**Fixed Embedding Model**: Currently hardcoded to use `EmbeddingModel.OllamaNomadic` -2. **Chunk Size**: The prompt mentions chunk size configuration, but it's not exposed in the current implementation -3. **Parsing Type**: The prompt mentions document vs code parsing types, but this is not configurable in the current version - -## Future Enhancements - -Consider implementing: -- Configurable embedding models -- Adjustable chunk size parameter -- Document vs code parsing type selection -- Support for recursive directory processing -- File type filtering options -- Custom metadata extraction \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.kt index e41cba745..c07c807a2 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.kt @@ -9,18 +9,14 @@ import com.simiacryptus.cognotik.embedding.DistanceType import com.simiacryptus.cognotik.embedding.EmbedderClient import com.simiacryptus.cognotik.embedding.EmbeddingModel import com.simiacryptus.cognotik.embedding.OllamaEmbeddingModels -import com.simiacryptus.cognotik.plan.AbstractTask -import com.simiacryptus.cognotik.plan.TaskOrchestrator -import com.simiacryptus.cognotik.plan.OrchestrationConfig -import com.simiacryptus.cognotik.plan.TaskExecutionConfig -import com.simiacryptus.cognotik.plan.TaskType -import com.simiacryptus.cognotik.plan.TaskTypeConfig +import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.util.JsonUtil import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import java.io.File +import java.io.FileOutputStream import 
java.nio.file.Files import java.util.concurrent.Executors import java.util.concurrent.TimeUnit @@ -28,54 +24,54 @@ import java.util.regex.Pattern import kotlin.streams.asSequence class VectorSearchTask( - orchestrationConfig: OrchestrationConfig, - planTask: VectorSearchTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: VectorSearchTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class VectorSearchTaskExecutionConfigData( - @Description("The positive search queries to look for in the embeddings") - val positive_queries: List, - @Description("The negative search queries to avoid in the embeddings") - val negative_queries: List = emptyList(), - @Description("The distance type to use for comparing embeddings (Euclidean, Manhattan, or Cosine)") - val distance_type: DistanceType = DistanceType.Cosine, - @Description("The number of top results to return") - val count: Int = 5, - @Description("The minimum length of the content to be considered") - val min_length: Int = 0, - @Description("List of regex patterns that must be present in the content") - val required_regexes: List = emptyList(), - val model: String? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null, - ) : ValidatedObject, TaskExecutionConfig( - task_type = VectorSearch.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) { - override fun validate(): String? 
{ - if (positive_queries.isEmpty()) { - return "At least one positive query is required" - } - if (count <= 0) { - return "Count must be greater than 0" - } - if (min_length < 0) { - return "Minimum length cannot be negative" - } - required_regexes.forEach { regex -> - try { - Pattern.compile(regex) - } catch (e: Exception) { - return "Invalid regex pattern: $regex - ${e.message}" - } - } - return ValidatedObject.validateFields(this) + class VectorSearchTaskExecutionConfigData( + @Description("The positive search queries to look for in the embeddings") + val positive_queries: List, + @Description("The negative search queries to avoid in the embeddings") + val negative_queries: List = emptyList(), + @Description("The distance type to use for comparing embeddings (Euclidean, Manhattan, or Cosine)") + val distance_type: DistanceType = DistanceType.Cosine, + @Description("The number of top results to return") + val count: Int = 5, + @Description("The minimum length of the content to be considered") + val min_length: Int = 0, + @Description("List of regex patterns that must be present in the content") + val required_regexes: List = emptyList(), + val model: String? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + ) : ValidatedObject, TaskExecutionConfig( + task_type = VectorSearch.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) { + override fun validate(): String? 
{ + if (positive_queries.isEmpty()) { + return "At least one positive query is required" + } + if (count <= 0) { + return "Count must be greater than 0" + } + if (min_length < 0) { + return "Minimum length cannot be negative" + } + required_regexes.forEach { regex -> + try { + Pattern.compile(regex) + } catch (e: Exception) { + return "Invalid regex pattern: $regex - ${e.message}" } + } + return ValidatedObject.validateFields(this) } + } - override fun promptSegment() = """ + override fun promptSegment() = """ VectorSearch - Search for similar embeddings in index files and provide top results ** Specify the positive search queries ** Optionally specify negative search queries @@ -83,249 +79,276 @@ VectorSearch - Search for similar embeddings in index files and provide top resu ** Specify the number of top results to return """.trim() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val threadPool = Executors.newFixedThreadPool( - Runtime.getRuntime().availableProcessors().coerceAtMost(8) - ) - try { - val searchResults = performEmbeddingSearch( - ) - val formattedResults = formatSearchResults(searchResults) - task.add(MarkdownUtil.renderMarkdown(formattedResults, ui = task.ui)) - resultFn(formattedResults) - } finally { - threadPool.shutdown() - try { - if (!threadPool.awaitTermination(60, TimeUnit.SECONDS)) { - threadPool.shutdownNow() - } - } catch (_: InterruptedException) { - threadPool.shutdownNow() - Thread.currentThread().interrupt() - } + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val threadPool = Executors.newFixedThreadPool( + Runtime.getRuntime().availableProcessors().coerceAtMost(8) + ) + val transcript = transcript(task) + try { + transcript?.write("# Vector Search Task\n\n".toByteArray()) + transcript?.write("## 
Search Configuration\n\n".toByteArray()) + transcript?.write("```json\n${JsonUtil.toJson(executionConfig)}\n```\n\n".toByteArray()) + + val searchResults = performEmbeddingSearch( + ) + val formattedResults = formatSearchResults(searchResults) + task.add(MarkdownUtil.renderMarkdown(formattedResults, ui = task.ui)) + resultFn(formattedResults) + transcript?.write("## Search Results\n\n".toByteArray()) + transcript?.write(formattedResults.toByteArray()) + transcript?.flush() + } finally { + threadPool.shutdown() + try { + if (!threadPool.awaitTermination(60, TimeUnit.SECONDS)) { + threadPool.shutdownNow() } + } catch (_: InterruptedException) { + threadPool.shutdownNow() + Thread.currentThread().interrupt() + } + transcript?.close() } + } - private fun performEmbeddingSearch(): List { - // Validate queries first - executionConfig?.validate()?.let { errorMessage -> - throw ValidatedObject.ValidationError(errorMessage, executionConfig) - } - if (executionConfig?.positive_queries?.isEmpty() != false) { - throw ValidatedObject.ValidationError("At least one positive query is required", executionConfig!!) - } - - // Create embeddings with retry logic - fun createEmbeddingWithRetry(query: String, maxRetries: Int = 3): DoubleArray? 
{ - repeat(maxRetries) { attempt -> - try { - return embedderClient(executionConfig.model).embed(query) - } catch (e: Exception) { - if (attempt == maxRetries - 1) { - log.error("Failed to create embedding for query after $maxRetries attempts: $query", e) - return null - } - Thread.sleep(1000L * (attempt + 1)) - } - } - return null - } + private fun performEmbeddingSearch(): List { + // Validate queries first + executionConfig?.validate()?.let { errorMessage -> + throw ValidatedObject.ValidationError(errorMessage, executionConfig) + } + log.info("Starting embedding search with ${executionConfig?.positive_queries?.size} positive queries and ${executionConfig?.negative_queries?.size} negative queries") - val positiveEmbeddings = executionConfig.positive_queries.map { query -> - createEmbeddingWithRetry(query) - } + if (executionConfig?.positive_queries?.isEmpty() != false) { + throw ValidatedObject.ValidationError("At least one positive query is required", executionConfig!!) + } - val negativeEmbeddings = executionConfig.negative_queries.map { query -> - createEmbeddingWithRetry(query) + // Create embeddings with retry logic + fun createEmbeddingWithRetry(query: String, maxRetries: Int = 3): DoubleArray? 
{ + repeat(maxRetries) { attempt -> + try { + return embedderClient(executionConfig.model).embed(query) + } catch (e: Exception) { + if (attempt == maxRetries - 1) { + log.error("Failed to create embedding for query after $maxRetries attempts: $query", e) + return null + } + Thread.sleep(1000L * (attempt + 1)) } + } + return null + } - if (positiveEmbeddings.filterNotNull().isEmpty()) { - throw IllegalStateException("Failed to create any positive embeddings") - } - val filtered = Files.walk(root).asSequence() - .filter { path -> - path.toString().endsWith(".index.data") - }.toList().toTypedArray() - val minLength = executionConfig.min_length - val requiredRegexes = executionConfig.required_regexes.map { Pattern.compile(it) } - fun String.matchesAllRegexes(): Boolean { - return requiredRegexes.all { regex -> regex.matcher(this).find() } - } + val positiveEmbeddings = executionConfig.positive_queries.map { query -> + createEmbeddingWithRetry(query) + } - val searchResults = filtered - .flatMap { path -> - val results = mutableListOf() - try { - DocumentRecord.readBinaryStream(path.toString()) { record -> - record.vector?.let { vector -> - val positiveDistances = positiveEmbeddings.filterNotNull().map { embedding -> - executionConfig.distance_type.distance(vector, embedding) - } - val negativeDistances = negativeEmbeddings.filterNotNull().map { embedding -> - executionConfig.distance_type.distance(vector, embedding) - } - val overallDistance = if (negativeDistances.isEmpty()) { - positiveDistances.minOrNull() ?: Double.MAX_VALUE - } else { - (positiveDistances.minOrNull() ?: Double.MAX_VALUE) / (negativeDistances.minOrNull() - ?: Double.MIN_VALUE) - } - val content = record.text ?: "" - if (content.length >= minLength && content.matchesAllRegexes()) { - results.add( - EmbeddingSearchResult( - file = root.relativize(path).toString(), - record = record, - distance = overallDistance - ) - ) - } - } - } - } catch (e: Exception) { - log.error("Failed to search in file: 
$path", e) - } - results - } - .toList() - return searchResults - .sortedBy { it.distance } - .take(executionConfig.count) + val negativeEmbeddings = executionConfig.negative_queries.map { query -> + createEmbeddingWithRetry(query) } - private fun formatSearchResults(results: List): String { - return buildString { - appendLine("# Embedding Search Results") - appendLine() - results.forEachIndexed { index, result -> - appendLine("## Result ${index + 1}") - appendLine("* Distance: %.3f".format(result.distance)) - appendLine("* File: ${result.record.sourcePath}") - appendLine(getContextSummary(result.record)) - appendLine("Metadata:\n```json\n${result.record.metadata}\n```") - appendLine() + if (positiveEmbeddings.filterNotNull().isEmpty()) { + throw IllegalStateException("Failed to create any positive embeddings") + } + log.info("Successfully created ${positiveEmbeddings.filterNotNull().size} positive embeddings and ${negativeEmbeddings.filterNotNull().size} negative embeddings") + val filtered = Files.walk(root).asSequence() + .filter { path -> + path.toString().endsWith(".index.data") + }.toList().toTypedArray() + val minLength = executionConfig.min_length + val requiredRegexes = executionConfig.required_regexes.map { Pattern.compile(it) } + fun String.matchesAllRegexes(): Boolean { + return requiredRegexes.all { regex -> regex.matcher(this).find() } + } + + val searchResults = filtered + .flatMap { path -> + val results = mutableListOf() + try { + DocumentRecord.readBinaryStream(path.toString()) { record -> + record.vector?.let { vector -> + val positiveDistances = positiveEmbeddings.filterNotNull().map { embedding -> + executionConfig.distance_type.distance(vector, embedding) + } + val negativeDistances = negativeEmbeddings.filterNotNull().map { embedding -> + executionConfig.distance_type.distance(vector, embedding) + } + val overallDistance = if (negativeDistances.isEmpty()) { + positiveDistances.minOrNull() ?: Double.MAX_VALUE + } else { + 
(positiveDistances.minOrNull() ?: Double.MAX_VALUE) / (negativeDistances.minOrNull() + ?: Double.MIN_VALUE) + } + val content = record.text ?: "" + if (content.length >= minLength && content.matchesAllRegexes()) { + results.add( + EmbeddingSearchResult( + file = root.relativize(path).toString(), + record = record, + distance = overallDistance + ) + ) + } } + } + } catch (e: Exception) { + log.error("Failed to search in file: $path", e) } + results + } + .toList() + log.info("Found ${searchResults.size} total results, returning top ${executionConfig.count}") + return searchResults + .sortedBy { it.distance } + .take(executionConfig.count) + } + + private fun formatSearchResults(results: List): String { + return buildString { + appendLine("# Embedding Search Results") + appendLine() + results.forEachIndexed { index, result -> + appendLine("## Result ${index + 1}") + appendLine("* Distance: %.3f".format(result.distance)) + appendLine("* File: ${result.record.sourcePath}") + appendLine(getContextSummary(result.record)) + appendLine("Metadata:\n```json\n${result.record.metadata}\n```") + appendLine() + } } + } - private fun getContextSummary(record: DocumentRecord): String { - return try { - val sourceFile = File(record.sourcePath) - if (!sourceFile.exists()) { - return "Source file not found: ${record.sourcePath}" - } - try { - val objectMapper = ObjectMapper() - val jsonNode = objectMapper.readTree(sourceFile) - val contextNode = getNodeAtPath(jsonNode, record.jsonPath) - buildString { - appendLine("```json") - appendLine(summarizeContext(contextNode, record.jsonPath, jsonNode)) - appendLine("```") - } - } catch (e: JsonParseException) { - buildString { - appendLine() - appendLine("**Source Path:** ${record.sourcePath}") - appendLine() - appendLine("**JSON Path:** ${record.jsonPath}") - appendLine() - appendLine("```text") - appendLine(record.text) - appendLine("```") - appendLine() + private fun getContextSummary(record: DocumentRecord): String { + return try { + val 
sourceFile = File(record.sourcePath) + if (!sourceFile.exists()) { + return "Source file not found: ${record.sourcePath}" + } + try { + val objectMapper = ObjectMapper() + val jsonNode = objectMapper.readTree(sourceFile) + val contextNode = getNodeAtPath(jsonNode, record.jsonPath) + buildString { + appendLine("```json") + appendLine(summarizeContext(contextNode, record.jsonPath, jsonNode)) + appendLine("```") + } + } catch (e: JsonParseException) { + buildString { + appendLine() + appendLine("**Source Path:** ${record.sourcePath}") + appendLine() + appendLine("**JSON Path:** ${record.jsonPath}") + appendLine() + appendLine("```text") + appendLine(record.text) + appendLine("```") + appendLine() // appendLine("```text") // appendLine(summarizeTextContext(sourceFile, record.jsonPath)) // appendLine("```") - appendLine() - } - } - } catch (e: Exception) { - log.warn("Error getting context summary for ${record.sourcePath}:${record.jsonPath}", e) - "Context summary unavailable: ${e.message}" + appendLine() } + } + } catch (e: Exception) { + log.warn("Error getting context summary for ${record.sourcePath}:${record.jsonPath}", e) + "Context summary unavailable: ${e.message}" } + } - private fun getNodeAtPath(jsonNode: JsonNode, path: String): JsonNode { - var currentNode = jsonNode + private fun getNodeAtPath(jsonNode: JsonNode, path: String): JsonNode { + var currentNode = jsonNode - path.split(".").forEach { segment -> - currentNode = when { - segment.contains("[") -> { - val (arrayName, indexPart) = segment.split("[", limit = 2) - val index = indexPart.substringBefore("]").toIntOrNull() ?: run { - log.warn("Invalid index in path segment: $segment") - return currentNode - } - val field = currentNode.get(arrayName) - val child = field?.get(index) - if (child == null) { - log.warn("Child not found for segment: $segment in path: $path") - return currentNode - } - child - } + path.split(".").forEach { segment -> + currentNode = when { + segment.contains("[") -> { + val 
(arrayName, indexPart) = segment.split("[", limit = 2) + val index = indexPart.substringBefore("]").toIntOrNull() ?: run { + log.warn("Invalid index in path segment: $segment") + return currentNode + } + val field = currentNode.get(arrayName) + val child = field?.get(index) + if (child == null) { + log.warn("Child not found for segment: $segment in path: $path") + return currentNode + } + child + } - else -> { - val child = currentNode.get(segment) - if (child == null) { - log.warn("Child not found for segment: $segment in path: $path") - return currentNode - } - child - } - } + else -> { + val child = currentNode.get(segment) + if (child == null) { + log.warn("Child not found for segment: $segment in path: $path") + return currentNode + } + child } - return currentNode + } } + return currentNode + } - private fun summarizeContext(node: JsonNode, path: String, jsonNode: JsonNode): String { - var summary = mutableMapOf() + private fun summarizeContext(node: JsonNode, path: String, jsonNode: JsonNode): String { + var summary = mutableMapOf() - node.fields().forEach { (key, value) -> - if (value.isPrimitive()) { - summary[key] = value.asText() - } - } + node.fields().forEach { (key, value) -> + if (value.isPrimitive()) { + summary[key] = value.asText() + } + } - val pathSegments = path.split(".") - for (i in pathSegments.size - 1 downTo 1) { - val parentPath = pathSegments.subList(0, i).joinToString(".") - val parentNode = getNodeAtPath(jsonNode, parentPath) - summary = mutableMapOf( - pathSegments[i] to summary - ) - parentNode.fields().forEach { (key, value) -> - when { - value.isPrimitive() -> summary[key] = value.asText() - key == "entities" || key == "tags" || key == "metadata" -> summary[key] = value - } - } + val pathSegments = path.split(".") + for (i in pathSegments.size - 1 downTo 1) { + val parentPath = pathSegments.subList(0, i).joinToString(".") + val parentNode = getNodeAtPath(jsonNode, parentPath) + summary = mutableMapOf( + pathSegments[i] to summary + 
) + parentNode.fields().forEach { (key, value) -> + when { + value.isPrimitive() -> summary[key] = value.asText() + key == "entities" || key == "tags" || key == "metadata" -> summary[key] = value } - return JsonUtil.toJson(summary) + } } + return JsonUtil.toJson(summary) + } + + data class EmbeddingSearchResult( + val file: String, + val record: DocumentRecord, + val distance: Double + ) - data class EmbeddingSearchResult( - val file: String, - val record: DocumentRecord, - val distance: Double + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" ) + return markdownTranscript + } + - companion object { - private val log = LoggerFactory.getLogger(VectorSearchTask::class.java) + companion object { + private val log = LoggerFactory.getLogger(VectorSearchTask::class.java) - val VectorSearch = TaskType( - "VectorSearch", - VectorSearchTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Perform semantic search using AI embeddings", - """ + val VectorSearch = TaskType( + "VectorSearch", + VectorSearchTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Perform semantic search using AI embeddings", + """ Performs semantic search using AI embeddings across indexed content.
    • Uses OpenAI embeddings for semantic matching
    • @@ -335,17 +358,17 @@ VectorSearch - Search for similar embeddings in index files and provide top resu
    • Returns ranked results with context
    """ - ) + ) - var embedderClient : (String?) -> EmbedderClient = { - val modelName = it ?: OllamaEmbeddingModels.NomicEmbedText.modelName!! - val embeddingModel = EmbeddingModel.values()[modelName] - ?: throw IllegalArgumentException("Unknown embedding model: $modelName") - embeddingModel.instance() - } + var embedderClient: (String?) -> EmbedderClient = { + val modelName = it ?: OllamaEmbeddingModels.NomicEmbedText.modelName!! + val embeddingModel = EmbeddingModel.values()[modelName] + ?: throw IllegalArgumentException("Unknown embedding model: $modelName") + embeddingModel.instance() } + } } private fun JsonNode.isPrimitive(): Boolean { - return this.isNumber || this.isTextual || this.isBoolean -} \ No newline at end of file + return this.isNumber || this.isTextual || this.isBoolean +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.md deleted file mode 100644 index e9b50543a..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/knowledge/VectorSearchTask.md +++ /dev/null @@ -1,160 +0,0 @@ -# VectorSearchTask - -## Overview - -The `VectorSearchTask` is a specialized task implementation that performs semantic similarity searches using vector embeddings. It searches through indexed document embeddings to find content that is semantically similar to provided query strings, supporting both positive queries (what to find) and negative queries (what to avoid). 
- -## Purpose - -This task enables semantic search capabilities within the Cognotik framework by: -- Converting search queries into vector embeddings -- Comparing query embeddings against pre-indexed document embeddings -- Ranking results based on semantic similarity -- Supporting both inclusion and exclusion criteria -- Filtering results based on content requirements - -## Configuration - -### VectorSearchTaskConfigData - -The task is configured using the `VectorSearchTaskConfigData` class with the following parameters: - -| Parameter | Type | Default | Description | -|-----------|------|---------|-------------| -| `positive_queries` | `List` | Required | Search queries to find similar content for | -| `negative_queries` | `List` | `emptyList()` | Search queries to avoid in results | -| `distance_type` | `DistanceType` | `Cosine` | Distance metric for embedding comparison (Euclidean, Manhattan, or Cosine) | -| `count` | `Int` | `5` | Number of top results to return | -| `min_length` | `Int` | `0` | Minimum content length requirement | -| `required_regexes` | `List` | `emptyList()` | Regex patterns that must match in content | -| `model` | `EmbeddingModel` | `OllamaNomadic` | Embedding model to use for query vectorization | - -## How It Works - -### 1. Query Processing -- Validates that at least one positive query is provided -- Converts all positive and negative queries into vector embeddings using the specified model -- Implements retry logic (up to 3 attempts) for embedding creation to handle transient failures - -### 2. Search Process -- Scans the configured root directory for `.index.data` files containing pre-computed embeddings -- For each indexed document: - - Calculates distances between document embedding and all query embeddings - - Computes minimum distance to positive queries - - If negative queries exist, adjusts score by dividing by minimum negative distance - - Applies content filters (minimum length, required regex patterns) - -### 3. 
Result Ranking -- Sorts results by computed distance (lower is better for similarity) -- Returns the top N results as specified by the `count` parameter - -### 4. Output Formatting -- Generates markdown-formatted results -- Includes distance scores, file paths, and metadata -- Provides context summaries showing the JSON structure around matched content - -## Distance Metrics - -The task supports three distance metrics for comparing embeddings: - -- **Cosine Distance**: Measures angle between vectors (default, best for semantic similarity) -- **Euclidean Distance**: Measures straight-line distance between points -- **Manhattan Distance**: Measures sum of absolute differences - -## Filtering Capabilities - -### Content Length Filter -- Excludes results with content shorter than `min_length` characters -- Useful for filtering out trivial matches - -### Regex Pattern Matching -- Supports multiple required regex patterns -- All patterns must match for a result to be included -- Useful for domain-specific filtering - -## Output Format - -Results are formatted as markdown with: -- Numbered result sections -- Distance scores (lower indicates better match) -- Source file paths -- JSON context summaries showing document structure -- Metadata in JSON format - -Example output structure: -```markdown -# Embedding Search Results - -## Result 1 -* Distance: 0.234 -* File: path/to/document.json -```json -{ - "context": "...", - "metadata": {...} -} -``` -``` - -## Error Handling - -The task implements robust error handling: -- Retry logic for embedding creation failures -- Graceful handling of missing or corrupted index files -- Validation of required configuration parameters -- Thread pool management with proper shutdown procedures -- Detailed error logging for debugging - -## Performance Considerations - -- Uses parallel processing with a thread pool (up to 8 threads) -- Streams document records to minimize memory usage -- Implements efficient distance calculations -- Sorts only 
after all distances are computed - -## Use Cases - -1. **Semantic Document Search**: Find documents discussing similar concepts -2. **Content Discovery**: Locate related information across large document sets -3. **Duplicate Detection**: Identify semantically similar content -4. **Filtered Search**: Combine semantic search with pattern-based filtering -5. **Negative Filtering**: Exclude certain topics while searching for others - -## Integration - -The task integrates with: -- `DocumentRecord`: For reading indexed embeddings -- `EmbeddingModel`: For creating query embeddings -- `TaskOrchestrator`: For task execution coordination -- `SessionTask`: For UI interaction and result display - -## Limitations - -- Requires pre-indexed embeddings in `.index.data` files -- Performance depends on the number and size of index files -- Embedding model must be compatible with indexed embeddings -- Memory usage scales with the number of search results - -## Example Configuration - -```json -{ - "task_type": "EmbeddingSearch", - "positive_queries": ["machine learning algorithms", "neural networks"], - "negative_queries": ["basic statistics"], - "distance_type": "Cosine", - "count": 10, - "min_length": 100, - "required_regexes": ["\\b(AI|ML|deep learning)\\b"], - "model": "OllamaNomadic" -} -``` - -## Best Practices - -1. **Query Design**: Use specific, descriptive queries for better results -2. **Distance Metric**: Use Cosine distance for semantic similarity -3. **Negative Queries**: Use sparingly to avoid over-filtering -4. **Result Count**: Balance between coverage and relevance -5. **Regex Patterns**: Test patterns separately before using in searches -6. 
**Model Selection**: Ensure query and index embeddings use compatible models \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/mcp/MCPToolTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/mcp/MCPToolTask.kt index 7656d1848..fb2c3258e 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/mcp/MCPToolTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/mcp/MCPToolTask.kt @@ -1,258 +1,298 @@ package com.simiacryptus.cognotik.plan.tools.mcp - import com.simiacryptus.cognotik.describe.Description - import com.simiacryptus.cognotik.plan.AbstractTask - import com.simiacryptus.cognotik.plan.ExecutionState - import com.simiacryptus.cognotik.plan.OrchestrationConfig - import com.simiacryptus.cognotik.plan.TaskExecutionConfig - import com.simiacryptus.cognotik.plan.TaskOrchestrator - import com.simiacryptus.cognotik.plan.TaskTypeConfig - import com.simiacryptus.cognotik.util.JsonUtil - import com.simiacryptus.cognotik.util.LoggerFactory - import com.simiacryptus.cognotik.webui.session.SessionTask - import com.simiacryptus.cognotik.mcp.MCPServerRegistry - import org.slf4j.Logger +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.mcp.MCPServerRegistry +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.JsonUtil +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream import java.util.concurrent.TimeUnit import java.util.concurrent.TimeoutException - class MCPToolTask( - orchestrationConfig: OrchestrationConfig, - executionConfig: MCPToolTaskExecutionConfigData? - ) : AbstractTask( - orchestrationConfig, - executionConfig +class MCPToolTask( + orchestrationConfig: OrchestrationConfig, + executionConfig: MCPToolTaskExecutionConfigData? 
+) : AbstractTask( + orchestrationConfig, + executionConfig ) { - class MCPToolTaskExecutionConfigData( - @Description("The name of the MCP server to connect to") - val server_name: String? = null, - @Description("The name of the tool to execute on the MCP server") - val tool_name: String? = null, - @Description("Arguments to pass to the MCP tool as a JSON object") - val tool_arguments: Map? = null, - @Description("Optional timeout in seconds for the tool execution") - val timeout_seconds: Int? = 30, - task_description: String? = null, - task_dependencies: MutableList? = null, - state: TaskState? = null - ) : TaskExecutionConfig( - task_type = MCPTool.name, - task_description = task_description, - task_dependencies = task_dependencies, - state = state - ) + class MCPToolTaskExecutionConfigData( + @Description("The name of the MCP server to connect to") + val server_name: String? = null, + @Description("The name of the tool to execute on the MCP server") + val tool_name: String? = null, + @Description("Arguments to pass to the MCP tool as a JSON object") + val tool_arguments: Map? = null, + @Description("Optional timeout in seconds for the tool execution") + val timeout_seconds: Int? = 30, + task_description: String? = null, + task_dependencies: MutableList? = null, + state: TaskState? = null + ) : TaskExecutionConfig( + task_type = MCPTool.name, + task_description = task_description, + task_dependencies = task_dependencies, + state = state + ) - class MCPToolTaskTypeConfig( - @Description("Default MCP server to use if not specified in execution config") - val default_server: String? 
= null, - @Description("Default timeout in seconds for MCP tool execution") - val default_timeout: Int = 30, - @Description("Whether to automatically retry failed tool executions") - val auto_retry: Boolean = false, - @Description("Maximum number of retry attempts") - val max_retries: Int = 3, - @Description("Initial retry delay in milliseconds") - val retry_delay_ms: Long = 1000, - @Description("Whether to use exponential backoff for retries") - val exponential_backoff: Boolean = true, - task_type: String? = MCPTool.name, - name: String? = null - ) : TaskTypeConfig( - task_type = task_type, - name = name - ) + class MCPToolTaskTypeConfig( + @Description("Default MCP server to use if not specified in execution config") + val default_server: String? = null, + @Description("Default timeout in seconds for MCP tool execution") + val default_timeout: Int = 30, + @Description("Whether to automatically retry failed tool executions") + val auto_retry: Boolean = false, + @Description("Whether to generate a transcript of the tool execution") + val generate_transcript: Boolean = true, + @Description("Maximum number of retry attempts") + val max_retries: Int = 3, + @Description("Initial retry delay in milliseconds") + val retry_delay_ms: Long = 1000, + @Description("Whether to use exponential backoff for retries") + val exponential_backoff: Boolean = true, + task_type: String? = MCPTool.name, + name: String? 
= null + ) : TaskTypeConfig( + task_type = task_type, + name = name + ) - override fun promptSegment(): String { - return """ + override fun promptSegment(): String { + return """ MCPTool - Execute tools from Model Context Protocol (MCP) servers ** Specify the MCP server name and tool to execute ** Provide tool arguments as a JSON object ** Configure timeout and retry behavior ** Supports integration with external MCP-compatible services """.trimIndent() - } + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val typeConfig = typeConfig ?: throw RuntimeException() + val config = executionConfig ?: throw IllegalStateException("Execution config is required") + val serverName = config.server_name ?: typeConfig.default_server + ?: throw IllegalStateException("MCP server name must be specified") + val toolName = config.tool_name ?: throw IllegalStateException("Tool name must be specified") + val arguments = config.tool_arguments ?: emptyMap() + val timeout = config.timeout_seconds ?: typeConfig.default_timeout + + task.add("Executing MCP tool: $toolName on server: $serverName") + val transcriptStream = if (typeConfig.generate_transcript) { + transcript(task) + } else null + + task.add("Arguments: ${JsonUtil.toJson(arguments)}") + + try { + val result = executeMCPTool( + serverName = serverName, + toolName = toolName, + arguments = arguments, + timeout = timeout, + task = task, + transcriptStream = transcriptStream + ) + + task.add("Tool execution completed successfully") + task.add("Result:\n```json\n${JsonUtil.toJson(result)}\n```") + transcriptStream?.let { + it.write("\n\n## Execution Completed Successfully\n".toByteArray()) + it.close() + } - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val typeConfig = typeConfig ?: throw 
RuntimeException() - val config = executionConfig ?: throw IllegalStateException("Execution config is required") - val serverName = config.server_name ?: typeConfig.default_server - ?: throw IllegalStateException("MCP server name must be specified") - val toolName = config.tool_name ?: throw IllegalStateException("Tool name must be specified") - val arguments = config.tool_arguments ?: emptyMap() - val timeout = config.timeout_seconds ?: typeConfig.default_timeout - - task.add("Executing MCP tool: $toolName on server: $serverName") - task.add("Arguments: ${JsonUtil.toJson(arguments)}") - - try { - val result = executeMCPTool( - serverName = serverName, - toolName = toolName, - arguments = arguments, - timeout = timeout, - task = task - ) - - task.add("Tool execution completed successfully") - task.add("Result:\n```json\n${JsonUtil.toJson(result)}\n```") - - resultFn(JsonUtil.toJson(result)) - state = TaskState.Completed - } catch (e: Exception) { - log.error("Error executing MCP tool", e) - - if (typeConfig.auto_retry && shouldRetry(e)) { - handleRetry(agent, messages, task, resultFn, orchestrationConfig, e) - } else { - state = TaskState.Completed - task.error(e) - throw e - } + resultFn(JsonUtil.toJson(result)) + state = TaskState.Completed + } catch (e: Exception) { + log.error("Error executing MCP tool", e) + + if (typeConfig.auto_retry && shouldRetry(e)) { + transcriptStream?.let { + it.write("\n\n## Retrying after error: ${e.message}\n".toByteArray()) } + handleRetry(agent, messages, task, resultFn, orchestrationConfig, e) + } else { + transcriptStream?.close() + state = TaskState.Completed + task.error(e) + throw e + } } + } - private fun executeMCPTool( - serverName: String, - toolName: String, - arguments: Map, - timeout: Int, - task: SessionTask - ): Map { - log.info("Connecting to MCP server: $serverName") - task.add("Connecting to MCP server: $serverName") - - // Get MCP client from registry - val client = MCPServerRegistry.getClient(serverName) - ?: 
throw IllegalStateException("MCP server not found: $serverName") - - try { - // Ensure client is connected - if (!client.isConnected()) { - task.add("Establishing connection to MCP server...") - client.connect() - } - - // List available tools to verify the tool exists - val availableTools = client.listTools() - val tool = availableTools.find { it.name == toolName } - ?: throw IllegalArgumentException("Tool '$toolName' not found on server '$serverName'. Available tools: ${availableTools.map { it.name }}") - - task.add("Tool found: ${tool.name}") - task.add("Tool description: ${tool.description}") - - // Execute the tool with timeout - task.add("Executing tool with timeout of $timeout seconds...") - val startTime = System.currentTimeMillis() - - val result = try { - client.executeTool(toolName, arguments, timeout.toLong(), TimeUnit.SECONDS) - } catch (e: TimeoutException) { - throw TimeoutException("Tool execution timed out after $timeout seconds") - } - - val executionTime = System.currentTimeMillis() - startTime - task.add("Tool executed in ${executionTime}ms") - - return mapOf( - "status" to "success", - "server" to serverName, - "tool" to toolName, - "arguments" to arguments, - "result" to result!!, - "execution_time_ms" to executionTime, - "timestamp" to System.currentTimeMillis() - ) - } catch (e: Exception) { - log.error("Error executing MCP tool: ${e.message}", e) - throw e - } + private fun executeMCPTool( + serverName: String, + toolName: String, + arguments: Map, + timeout: Int, + task: SessionTask, + transcriptStream: FileOutputStream? 
+ ): Map { + log.info("Connecting to MCP server: $serverName") + task.add("Connecting to MCP server: $serverName") + transcriptStream?.write("# MCP Tool Execution Transcript\n\n".toByteArray()) + transcriptStream?.write("## Server: $serverName\n".toByteArray()) + transcriptStream?.write("## Tool: $toolName\n\n".toByteArray()) + + // Get MCP client from registry + val client = MCPServerRegistry.getClient(serverName) + ?: throw IllegalStateException("MCP server not found: $serverName") + + try { + // Ensure client is connected + if (!client.isConnected()) { + task.add("Establishing connection to MCP server...") + transcriptStream?.write("### Establishing connection to MCP server...\n".toByteArray()) + client.connect() + transcriptStream?.write("### Connection established\n\n".toByteArray()) + } + + // List available tools to verify the tool exists + val availableTools = client.listTools() + val tool = availableTools.find { it.name == toolName } + ?: throw IllegalArgumentException("Tool '$toolName' not found on server '$serverName'. 
Available tools: ${availableTools.map { it.name }}") + + task.add("Tool found: ${tool.name}") + task.add("Tool description: ${tool.description}") + transcriptStream?.write("### Tool Information\n".toByteArray()) + transcriptStream?.write("- **Name**: ${tool.name}\n".toByteArray()) + transcriptStream?.write("- **Description**: ${tool.description}\n\n".toByteArray()) + + // Execute the tool with timeout + task.add("Executing tool with timeout of $timeout seconds...") + transcriptStream?.write("### Execution\n".toByteArray()) + transcriptStream?.write("- **Arguments**: ```json\n${JsonUtil.toJson(arguments)}\n```\n".toByteArray()) + transcriptStream?.write("- **Timeout**: $timeout seconds\n\n".toByteArray()) + val startTime = System.currentTimeMillis() + + val result = try { + client.executeTool(toolName, arguments, timeout.toLong(), TimeUnit.SECONDS) + } catch (e: TimeoutException) { + throw TimeoutException("Tool execution timed out after $timeout seconds") + } + + val executionTime = System.currentTimeMillis() - startTime + task.add("Tool executed in ${executionTime}ms") + transcriptStream?.write("### Results\n".toByteArray()) + transcriptStream?.write("- **Execution Time**: ${executionTime}ms\n".toByteArray()) + transcriptStream?.write("- **Result**: ```json\n${JsonUtil.toJson(result)}\n```\n\n".toByteArray()) + + return mapOf( + "status" to "success", + "server" to serverName, + "tool" to toolName, + "arguments" to arguments, + "result" to result!!, + "execution_time_ms" to executionTime, + "timestamp" to System.currentTimeMillis() + ) + } catch (e: Exception) { + log.error("Error executing MCP tool: ${e.message}", e) + transcriptStream?.write("\n### Error\n".toByteArray()) + transcriptStream?.write("```\n${e.message}\n${e.stackTraceToString()}\n```\n".toByteArray()) + throw e } + } - private fun shouldRetry(e: Exception): Boolean { - // Determine if the error is retryable - return when { - e is java.net.SocketTimeoutException -> true - e is TimeoutException -> 
true - e is java.io.IOException -> true - e.message?.contains("connection", ignoreCase = true) == true -> true - e.message?.contains("timeout", ignoreCase = true) == true -> true - e.message?.contains("unavailable", ignoreCase = true) == true -> true - else -> false - } + private fun shouldRetry(e: Exception): Boolean { + // Determine if the error is retryable + return when { + e is java.net.SocketTimeoutException -> true + e is TimeoutException -> true + e is java.io.IOException -> true + e.message?.contains("connection", ignoreCase = true) == true -> true + e.message?.contains("timeout", ignoreCase = true) == true -> true + e.message?.contains("unavailable", ignoreCase = true) == true -> true + else -> false } + } + + private fun handleRetry( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig, + lastError: Exception + ) { + var retryCount = 0 + var lastException = lastError - private fun handleRetry( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig, - lastError: Exception - ) { - var retryCount = 0 - var lastException = lastError - - val typeConfig = typeConfig ?: throw RuntimeException() - while (retryCount < typeConfig.max_retries) { - retryCount++ - task.add("Retry attempt $retryCount of ${typeConfig.max_retries}") - - val typeConfig = typeConfig ?: throw RuntimeException() - try { - val delay = if (typeConfig.exponential_backoff) { - typeConfig.retry_delay_ms * (1 shl (retryCount - 1)) // 2^(n-1) exponential backoff - } else { - typeConfig.retry_delay_ms - } - task.add("Waiting ${delay}ms before retry...") - Thread.sleep(delay) - - run(agent, messages, task, resultFn, orchestrationConfig) - return - } catch (e: Exception) { - lastException = e - log.warn("Retry attempt $retryCount failed", e) - task.add("Retry attempt $retryCount failed: ${e.message}") - } + val typeConfig = 
typeConfig ?: throw RuntimeException() + while (retryCount < typeConfig.max_retries) { + retryCount++ + task.add("Retry attempt $retryCount of ${typeConfig.max_retries}") + + val typeConfig = typeConfig ?: throw RuntimeException() + try { + val delay = if (typeConfig.exponential_backoff) { + typeConfig.retry_delay_ms * (1 shl (retryCount - 1)) // 2^(n-1) exponential backoff + } else { + typeConfig.retry_delay_ms } - task.add("All retry attempts exhausted") - state = TaskState.Completed - - task.error(lastException) - throw lastException + task.add("Waiting ${delay}ms before retry...") + Thread.sleep(delay) + + run(agent, messages, task, resultFn, orchestrationConfig) + return + } catch (e: Exception) { + lastException = e + log.warn("Retry attempt $retryCount failed", e) + task.add("Retry attempt $retryCount failed: ${e.message}") + } } + task.add("All retry attempts exhausted") + state = TaskState.Completed - override fun getPriorCode(executionState: ExecutionState?): String { - val priorResults = executionConfig?.task_dependencies - ?.mapNotNull { dependency -> - executionState?.taskResult[dependency]?.let { result -> - "## Results from $dependency\n$result" - } - } - ?.joinToString("\n\n") - ?: "" - - return if (priorResults.isNotEmpty()) { - "# Prior Task Results\n\n$priorResults" - } else { - "" + task.error(lastException) + throw lastException + } + + override fun getPriorCode(executionState: ExecutionState?): String { + val priorResults = executionConfig?.task_dependencies + ?.mapNotNull { dependency -> + executionState?.taskResult[dependency]?.let { result -> + "## Results from $dependency\n$result" } + } + ?.joinToString("\n\n") + ?: "" + + return if (priorResults.isNotEmpty()) { + "# Prior Task Results\n\n$priorResults" + } else { + "" } + } - companion object { - private val log: Logger = LoggerFactory.getLogger(MCPToolTask::class.java) - val MCPTool = com.simiacryptus.cognotik.plan.TaskType( - "MCPTool", - MCPToolTaskExecutionConfigData::class.java, - 
MCPToolTaskTypeConfig::class.java, - "Execute tools from Model Context Protocol servers", - """ + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("mcp_tool_transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object { + private val log: Logger = LoggerFactory.getLogger(MCPToolTask::class.java) + val MCPTool = TaskType( + "MCPTool", + MCPToolTaskExecutionConfigData::class.java, + MCPToolTaskTypeConfig::class.java, + "Execute tools from Model Context Protocol servers", + """ Executes tools from MCP (Model Context Protocol) servers.
    • Connect to MCP servers via various transports
    • @@ -264,6 +304,6 @@ import java.util.concurrent.TimeoutException
    • Exponential backoff retry strategy
    """ - ) - } + ) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.kt index e218eb33a..b00b21f61 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.kt @@ -2,9 +2,9 @@ package com.simiacryptus.cognotik.plan.tools.online import com.fasterxml.jackson.core.JsonProcessingException import com.fasterxml.jackson.databind.ObjectMapper -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent -import com.simiacryptus.cognotik.actors.ParsedResponse +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedResponse import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.describe.TypeDescriber @@ -14,6 +14,7 @@ import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.lang.Thread.sleep import java.net.URI import java.nio.charset.StandardCharsets @@ -27,103 +28,104 @@ import java.util.concurrent.atomic.AtomicInteger import java.util.regex.Pattern import kotlin.math.min - class CrawlerAgentTask( - orchestrationConfig: OrchestrationConfig, - planTask: CrawlerTaskExecutionConfigData?, +class CrawlerAgentTask( + orchestrationConfig: OrchestrationConfig, + planTask: CrawlerTaskExecutionConfigData?, ) : AbstractTask( - orchestrationConfig, - planTask + orchestrationConfig, + planTask ) { - class CrawlerTaskTypeConfig( - @Description("Method to seed the crawler (optional)") val seed_method: 
SeedMethod? = SeedMethod.GoogleProxy, - @Description("Method used to fetch content from URLs (optional)") val fetch_method: FetchMethod? = FetchMethod.HttpClient, - @Description("Whitespace-separated list of allowed domains/URL prefixes to restrict crawling (optional)") val allowed_domains: String? = null, - @Description("Respect robots.txt rules when crawling (default: true)") val respect_robots_txt: Boolean? = true, - @Description("Maximum number of pages to process in a single task") val max_pages_per_task: Int? = null, - @Description("Maximum depth to crawl from seed pages") val max_depth: Int? = null, - @Description("Maximum queue size to prevent memory issues") val max_queue_size: Int? = null, - @Description("Number of pages to process concurrently") val concurrent_page_processing: Int? = null, - @Description("Maximum characters in final summary") val max_final_output_size: Int? = null, - @Description("Minimum content length to process") val min_content_length: Int? = null, - @Description("Automatically follow links found in analyzed pages") val follow_links: Boolean? = null, - @Description("Allow crawling the same page multiple times") val allow_revisit_pages: Boolean? = null, - @Description("Generate a comprehensive summary of all results") val create_final_summary: Boolean? = null, - task_type: String = "CrawlerAgent", - model: ApiChatModel? = null, - name: String? = task_type, - ) : TaskTypeConfig(task_type = task_type, name = name, model = model), ValidatedObject { - override fun validate(): String? { - if (max_pages_per_task != null && max_pages_per_task!! <= 0) { - return "max_pages_per_task must be greater than 0" - } - if (max_depth != null && max_depth!! < 0) { - return "max_depth must be non-negative" - } - if (max_queue_size != null && max_queue_size!! <= 0) { - return "max_queue_size must be greater than 0" - } - if (concurrent_page_processing != null && concurrent_page_processing!! 
<= 0) { - return "concurrent_page_processing must be greater than 0" - } - if (max_final_output_size != null && max_final_output_size!! <= 0) { - return "max_final_output_size must be greater than 0" - } - if (min_content_length != null && min_content_length!! < 0) { - return "min_content_length must be non-negative" - } - return ValidatedObject.validateFields(this) - } + class CrawlerTaskTypeConfig( + @Description("Method to seed the crawler (optional)") val seed_method: SeedMethod? = SeedMethod.GoogleProxy, + @Description("Method used to fetch content from URLs (optional)") val fetch_method: FetchMethod? = FetchMethod.HttpClient, + @Description("Whitespace-separated list of allowed domains/URL prefixes to restrict crawling (optional)") val allowed_domains: String? = null, + @Description("Respect robots.txt rules when crawling (default: true)") val respect_robots_txt: Boolean? = true, + @Description("Maximum number of pages to process in a single task") val max_pages_per_task: Int? = null, + @Description("Maximum depth to crawl from seed pages") val max_depth: Int? = null, + @Description("Maximum queue size to prevent memory issues") val max_queue_size: Int? = null, + @Description("Number of pages to process concurrently") val concurrent_page_processing: Int? = null, + @Description("Maximum characters in final summary") val max_final_output_size: Int? = null, + @Description("Minimum content length to process") val min_content_length: Int? = null, + @Description("Automatically follow links found in analyzed pages") val follow_links: Boolean? = null, + @Description("Allow crawling the same page multiple times") val allow_revisit_pages: Boolean? = null, + @Description("Generate a comprehensive summary of all results") val create_final_summary: Boolean? = null, + @Description("Generate a detailed transcript of the crawling session") val generate_transcript: Boolean? = true, + task_type: String = "CrawlerAgent", + model: ApiChatModel? = null, + name: String? 
= task_type, + ) : TaskTypeConfig(task_type = task_type, name = name, model = model), ValidatedObject { + override fun validate(): String? { + if (max_pages_per_task != null && max_pages_per_task <= 0) { + return "max_pages_per_task must be greater than 0" + } + if (max_depth != null && max_depth < 0) { + return "max_depth must be non-negative" + } + if (max_queue_size != null && max_queue_size <= 0) { + return "max_queue_size must be greater than 0" + } + if (concurrent_page_processing != null && concurrent_page_processing <= 0) { + return "concurrent_page_processing must be greater than 0" + } + if (max_final_output_size != null && max_final_output_size <= 0) { + return "max_final_output_size must be greater than 0" + } + if (min_content_length != null && min_content_length < 0) { + return "min_content_length must be non-negative" + } + return ValidatedObject.validateFields(this) } - - class CrawlerTaskExecutionConfigData( - @Description("The search query to use for Google search") val search_query: String? = null, - @Description("Direct URLs to analyze (comma-separated)") val direct_urls: List? = null, - @Description("The query considered when processing the content - this should contain a detailed listing of the desired data, evaluation criteria, and filtering priorities used to transform the page into the desired summary") val content_queries: Any? = null, - @Description("Whitespace-separated list of allowed domains/URL prefixes to restrict crawling (optional)") val allowed_domains: String? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null, - ) : TaskExecutionConfig( - task_type = CrawlerAgent.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ), ValidatedObject { - override fun validate(): String? 
{ - if (search_query.isNullOrBlank() && direct_urls.isNullOrEmpty()) { - return "Either search_query or direct_urls must be provided" - } - if (!direct_urls.isNullOrEmpty()) { - direct_urls.forEach { url -> - if (!url.matches(Regex("^(http|https)://.*"))) { - return "Invalid URL format in direct_urls: $url" - } - } - } - if (!allowed_domains.isNullOrBlank()) { - val domains = allowed_domains.split(Regex("\\s+")).filter { it.isNotBlank() } - if (domains.isEmpty()) { - return "allowed_domains must contain at least one valid domain when specified" - } - } - return ValidatedObject.validateFields(this) + } + + class CrawlerTaskExecutionConfigData( + @Description("The search query to use for Google search") val search_query: String? = null, + @Description("Direct URLs to analyze (comma-separated)") val direct_urls: List? = null, + @Description("The query considered when processing the content - this should contain a detailed listing of the desired data, evaluation criteria, and filtering priorities used to transform the page into the desired summary") val content_queries: Any? = null, + @Description("Whitespace-separated list of allowed domains/URL prefixes to restrict crawling (optional)") val allowed_domains: String? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + ) : TaskExecutionConfig( + task_type = CrawlerAgent.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? 
{ + if (search_query.isNullOrBlank() && direct_urls.isNullOrEmpty()) { + return "Either search_query or direct_urls must be provided" + } + if (!direct_urls.isNullOrEmpty()) { + direct_urls.forEach { url -> + if (!url.matches(Regex("^(http|https)://.*"))) { + return "Invalid URL format in direct_urls: $url" + } + } + } + if (!allowed_domains.isNullOrBlank()) { + val domains = allowed_domains.split(Regex("\\s+")).filter { it.isNotBlank() } + if (domains.isEmpty()) { + return "allowed_domains must contain at least one valid domain when specified" } + } + return ValidatedObject.validateFields(this) } + } - var selenium: Selenium2S3? = null + var selenium: Selenium2S3? = null - val urlContentCache = ConcurrentHashMap() - private val robotsTxtParser = RobotsTxtParser() - private val pageQueueLock = Object() + val urlContentCache = ConcurrentHashMap() + private val robotsTxtParser = RobotsTxtParser() + private val pageQueueLock = Object() - // Use a priority queue that sorts by calculated priority (higher first) - private val pageQueue = java.util.PriorityQueue( - compareByDescending { it.calculatePriority() } - ) - private val seenUrls = ConcurrentHashMap.newKeySet() + // Use a priority queue that sorts by calculated priority (higher first) + private val pageQueue = java.util.PriorityQueue( + compareByDescending { it.calculatePriority() } + ) + private val seenUrls = ConcurrentHashMap.newKeySet() - override fun promptSegment() = """ + override fun promptSegment() = """ CrawlerAgent - Search Google, fetch top results, and analyze content ** Specify the search query ** Or provide direct URLs to analyze @@ -132,1059 +134,1180 @@ import kotlin.math.min ** Links found in analysis can be automatically followed for deeper research """.trimIndent() - fun cleanup() { + fun cleanup() { + try { + selenium?.let { + log.info("Cleaning up Selenium WebDriver instance") try { - selenium?.let { - log.info("Cleaning up Selenium WebDriver instance") - try { - it.quit() - } catch (e: 
Exception) { - log.warn("Failed to quit Selenium WebDriver gracefully: ${e.message}") - } - selenium = null - log.debug("Selenium WebDriver cleanup completed") - } + it.quit() } catch (e: Exception) { - log.error("Error cleaning up Selenium resources", e) + log.warn("Failed to quit Selenium WebDriver gracefully: ${e.message}") } + selenium = null + log.debug("Selenium WebDriver cleanup completed") + } + } catch (e: Exception) { + log.error("Error cleaning up Selenium resources", e) } - - data class LinkData( - @Description("The URL of the link to crawl") - val url: String? = null, - @Description("The title of the link (optional)") - val title: String? = null, - @Description("Tags associated with the link (optional)") - val tags: List? = null, - @Description("1-100") val relevance_score: Double = 100.0 - ) : ValidatedObject { - var started: Boolean = false - var completed: Boolean = false - var depth: Int = 0 - var error: String? = null - var processingTimeMs: Long = 0 - - // Priority calculation: higher relevance and lower depth = higher priority - fun calculatePriority(): Double = relevance_score / (depth + 1.0) - override fun validate(): String? { - if (url.isNullOrBlank()) { - return "link cannot be null or blank" - } - if (!url.matches(Regex("^(http|https)://.*"))) { - return "link must be a valid HTTP/HTTPS URL: $url" - } - if (relevance_score < 1.0 || relevance_score > 100.0) { - return "relevance_score must be between 1 and 100" - } - return ValidatedObject.validateFields(this) - } + } + + data class LinkData( + @Description("The URL of the link to crawl") + val url: String? = null, + @Description("The title of the link (optional)") + val title: String? = null, + @Description("Tags associated with the link (optional)") + val tags: List? = null, + @Description("1-100") val relevance_score: Double = 100.0 + ) : ValidatedObject { + var started: Boolean = false + var completed: Boolean = false + var depth: Int = 0 + var error: String? 
= null + var processingTimeMs: Long = 0 + + // Priority calculation: higher relevance and lower depth = higher priority + fun calculatePriority(): Double = relevance_score / (depth + 1.0) + override fun validate(): String? { + if (url.isNullOrBlank()) { + return "link cannot be null or blank" + } + if (!url.matches(Regex("^(http|https)://.*"))) { + return "link must be a valid HTTP/HTTPS URL: $url" + } + if (relevance_score < 1.0 || relevance_score > 100.0) { + return "relevance_score must be between 1 and 100" + } + return ValidatedObject.validateFields(this) } - - enum class PageType { - Error, - Irrelevant, - OK + } + + enum class PageType { + Error, + Irrelevant, + OK + } + + data class ParsedPage( + val page_type: PageType = PageType.OK, + val page_information: Any? = null, + val tags: List? = null, + val link_data: List? = null, + ) : ValidatedObject { + override fun validate(): String? { + if (page_type == PageType.OK && page_information == null) { + return "page_information is required when page_type is OK" + } + link_data?.forEach { linkData -> + linkData.validate()?.let { return it } + } + return ValidatedObject.validateFields(this) } - - data class ParsedPage( - val page_type: PageType = PageType.OK, - val page_information: Any? = null, - val tags: List? = null, - val link_data: List? = null, - ) : ValidatedObject { - override fun validate(): String? 
{ - if (page_type == PageType.OK && page_information == null) { - return "page_information is required when page_type is OK" - } - link_data?.forEach { linkData -> - linkData.validate()?.let { return it } - } - return ValidatedObject.validateFields(this) - } + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + log.info("Starting CrawlerAgentTask.run() with messages count: ${messages.size}") + try { + resultFn(innerRun(agent, task, orchestrationConfig)) + } catch (e: Throwable) { + log.error("Unhandled exception in CrawlerAgentTask", e) + val errorMessage = "Error: ${e.message ?: "Unknown error occurred"}" + resultFn(errorMessage) + task.error(e) + } finally { + cleanup() } - - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - log.info("Starting CrawlerAgentTask.run() with messages count: ${messages.size}") - try { - resultFn(innerRun(agent, task, orchestrationConfig)) - } catch (e: Throwable) { - log.error("Unhandled exception in CrawlerAgentTask", e) - val errorMessage = "Error: ${e.message ?: "Unknown error occurred"}" - resultFn(errorMessage) - task.error(e) - } finally { - cleanup() + } + + private fun innerRun( + agent: TaskOrchestrator, + task: SessionTask, + orchestrationConfig: OrchestrationConfig + ): String { + var transcriptStream: FileOutputStream? 
= null + try { + val typeConfig = typeConfig ?: throw RuntimeException() + val startTime = System.currentTimeMillis() + log.info( + "Starting CrawlerAgentTask with config: search_query='${executionConfig?.search_query}', direct_urls='${ + executionConfig?.direct_urls?.joinToString( + ", " + ) ?: "" + }', max_pages=${typeConfig.max_pages_per_task ?: (typeConfig.max_pages_per_task ?: 30)}" + ) + val webSearchDir = File(agent.root.toFile(), ".websearch") + if (!webSearchDir.exists()) { + if (!webSearchDir.mkdirs()) { + log.error("Failed to create websearch directory: ${webSearchDir.absolutePath}") + return "Error: Failed to create output directory" } - } - - private fun innerRun( - agent: TaskOrchestrator, - task: SessionTask, - orchestrationConfig: OrchestrationConfig - ): String { - try { - val typeConfig = typeConfig ?: throw RuntimeException() - val startTime = System.currentTimeMillis() - log.info( - "Starting CrawlerAgentTask with config: search_query='${executionConfig?.search_query}', direct_urls='${ - executionConfig?.direct_urls?.joinToString( - ", " - ) ?: "" - }', max_pages=${typeConfig.max_pages_per_task ?: (typeConfig.max_pages_per_task ?: 30)}" - ) - val webSearchDir = File(agent.root.toFile(), ".websearch") - if (!webSearchDir.exists()) { - if (!webSearchDir.mkdirs()) { - log.error("Failed to create websearch directory: ${webSearchDir.absolutePath}") - return "Error: Failed to create output directory" - } - log.debug("Created websearch directory: ${webSearchDir.absolutePath}") - } - val tabs = TabbedDisplay(task) - - val seedMethod = when { - !executionConfig?.direct_urls.isNullOrEmpty() -> SeedMethod.DirectUrls - typeConfig.seed_method != null -> typeConfig.seed_method!! 
- !executionConfig?.search_query.isNullOrBlank() -> SeedMethod.GoogleProxy - else -> { - log.error("No seed method specified and no search query or direct URLs provided") - return "Error: No seed method specified and no search query or direct URLs provided" - } - } - log.info("Using seed method: $seedMethod") - val seedItems = try { - seedMethod.createStrategy(this, agent.user).getSeedItems(executionConfig, orchestrationConfig) - } catch (e: Exception) { - log.error("Failed to get seed items using method: $seedMethod", e) - task.error(e) - return "Error: Failed to get seed items - ${e.message}" - } - if (seedItems == null || seedItems.isEmpty()) { - log.warn("No seed items returned from seed method: $seedMethod") - return "Warning: No seed items found to start crawling" - } - // Create seed links tab - val seedLinksTask = task.ui.newTask(false) - tabs["Seed Links"] = seedLinksTask.placeholder - val seedLinksContent = buildString { - appendLine("# Seed Links") - appendLine() - appendLine("**Method:** ${seedMethod.name}") - appendLine() - appendLine("**Total Seeds:** ${seedItems.size}") - appendLine() - appendLine("---") - appendLine() - seedItems.forEachIndexed { index, item -> - appendLine("## ${index + 1}. 
[${item.title ?: "Untitled"}](${item.link})") - appendLine() - appendLine("- **URL:** ${item.link}") - appendLine("- **Relevance Score:** ${item.relevance_score}") - if (!item.tags.isNullOrEmpty()) { - appendLine("- **Tags:** ${item.tags.joinToString(", ")}") - } - appendLine() - } + log.debug("Created websearch directory: ${webSearchDir.absolutePath}") + } + val tabs = TabbedDisplay(task) + // Initialize transcript if enabled + if (typeConfig.generate_transcript != false) { + transcriptStream = initializeTranscript(task) + transcriptStream?.let { stream -> + writeTranscriptHeader(stream, startTime) + } + } + + val seedMethod = when { + !executionConfig?.direct_urls.isNullOrEmpty() -> SeedMethod.DirectUrls + typeConfig.seed_method != null -> typeConfig.seed_method!! + !executionConfig?.search_query.isNullOrBlank() -> SeedMethod.GoogleProxy + else -> { + log.error("No seed method specified and no search query or direct URLs provided") + return "Error: No seed method specified and no search query or direct URLs provided" + } + } + log.info("Using seed method: $seedMethod") + val seedItems = try { + seedMethod.createStrategy(this, agent.user).getSeedItems(executionConfig, orchestrationConfig) + } catch (e: Exception) { + log.error("Failed to get seed items using method: $seedMethod", e) + task.error(e) + return "Error: Failed to get seed items - ${e.message}" + } + if (seedItems == null || seedItems.isEmpty()) { + log.warn("No seed items returned from seed method: $seedMethod") + return "Warning: No seed items found to start crawling" + } + // Create seed links tab + val seedLinksTask = task.ui.newTask(false) + tabs["Seed Links"] = seedLinksTask.placeholder + val seedLinksContent = buildString { + appendLine("# Seed Links") + appendLine() + appendLine("**Method:** ${seedMethod.name}") + appendLine() + appendLine("**Total Seeds:** ${seedItems.size}") + appendLine() + appendLine("---") + appendLine() + seedItems.forEachIndexed { index, item -> + appendLine("## ${index + 
1}. [${item.title ?: "Untitled"}](${item.link})") + appendLine() + appendLine("- **URL:** ${item.link}") + appendLine("- **Relevance Score:** ${item.relevance_score}") + if (!item.tags.isNullOrEmpty()) { + appendLine("- **Tags:** ${item.tags.joinToString(", ")}") + } + appendLine() + } + } + seedLinksTask.add(seedLinksContent.renderMarkdown) + task.update() + // Log seed links to transcript + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Seed Links\n\n$seedLinksContent\n\n") + } + + + synchronized(pageQueueLock) { + seedItems.forEach { item -> + if (item.link != null && isBlacklistedDomain(item.link)) { + log.info("Skipping blacklisted seed URL: ${item.link}") + return@forEach + } + if (typeConfig.respect_robots_txt == true && !robotsTxtParser.isAllowed(item.link ?: "")) { + log.info("Skipping seed URL disallowed by robots.txt: ${item.link}") + return@forEach + } + LinkData( + url = item.link, + title = item.title, + tags = item.tags, + relevance_score = item.relevance_score + ).let { linkData -> + log.debug("Adding seed item to page queue: {}", linkData) + if (!addToQueue( + linkData, + typeConfig.max_depth ?: 3, + typeConfig.max_queue_size ?: 100 + ) + ) { + log.warn("No valid seed items found after processing") } - seedLinksTask.add(seedLinksContent.renderMarkdown) - task.update() - - + } + } + } + log.info("Initialized page queue with ${pageQueue.size} seed items") + if (pageQueue.isEmpty()) { + log.warn("No seed items found, cannot proceed with crawling") + return "Warning: No seed items found to start crawling" + } + + val analysisResultsMap = ConcurrentHashMap() + val maxPages = typeConfig.max_pages_per_task ?: (typeConfig.max_pages_per_task ?: 30) + val concurrentProcessing = /*taskConfig?.concurrent_page_processing ?:*/ + typeConfig.concurrent_page_processing ?: 3 + log.info("Processing configuration: maxPages=$maxPages, concurrentProcessing=$concurrentProcessing") + + val completionService: CompletionService = 
ExecutorCompletionService(agent.pool) + val activeTasks = ConcurrentHashMap.newKeySet() + val processedCount = AtomicInteger(0) + val errorCount = AtomicInteger(0) + val maxErrors = maxPages / 2 // Stop if too many errors + log.info("Starting crawling loop with maxErrors threshold: $maxErrors") + val fetchStrategy = (this@CrawlerAgentTask.typeConfig?.fetch_method + ?: FetchMethod.HttpClient).createStrategy( + this@CrawlerAgentTask + ) + + try { + val loopIterations = AtomicInteger(0) + val maxDepthConfig = typeConfig.max_depth ?: (typeConfig.max_depth ?: 3) + val maxQueueSizeConfig = typeConfig.max_queue_size ?: (typeConfig.max_queue_size ?: 100) + log.debug("Starting crawling loop: maxPages=$maxPages, maxErrors=$maxErrors, maxIterations=${1000}") + while (shouldContinue(maxPages, errorCount, maxErrors, loopIterations, activeTasks)) { + if (loopIterations.get() % 10 == 0) { synchronized(pageQueueLock) { - seedItems.forEach { item -> - if (item.link != null && isBlacklistedDomain(item.link)) { - log.info("Skipping blacklisted seed URL: ${item.link}") - return@forEach - } - if (typeConfig.respect_robots_txt == true && !robotsTxtParser.isAllowed(item.link ?: "")) { - log.info("Skipping seed URL disallowed by robots.txt: ${item.link}") - return@forEach - } - LinkData( - url = item.link, - title = item.title, - tags = item.tags, - relevance_score = item.relevance_score - ).let { linkData -> - log.debug("Adding seed item to page queue: {}", linkData) - if (!addToQueue( - linkData, - typeConfig.max_depth ?: 3, - typeConfig.max_queue_size ?: 100 - ) - ) { - log.warn("No valid seed items found after processing") - } - } - } + log.info("Loop iteration ${loopIterations.get()}: queue_size=${pageQueue.size}, active=${activeTasks.size}, errors=${errorCount.get()}") } - log.info("Initialized page queue with ${pageQueue.size} seed items") - if (pageQueue.isEmpty()) { - log.warn("No seed items found, cannot proceed with crawling") - return "Warning: No seed items found to start 
crawling" - } - - val analysisResultsMap = ConcurrentHashMap() - val maxPages = typeConfig.max_pages_per_task ?: (typeConfig.max_pages_per_task ?: 30) - val concurrentProcessing = /*taskConfig?.concurrent_page_processing ?:*/ - typeConfig.concurrent_page_processing ?: 3 - log.info("Processing configuration: maxPages=$maxPages, concurrentProcessing=$concurrentProcessing") - - val completionService: CompletionService = ExecutorCompletionService(agent.pool) - val activeTasks = ConcurrentHashMap.newKeySet() - val processedCount = AtomicInteger(0) - val errorCount = AtomicInteger(0) - val maxErrors = maxPages / 2 // Stop if too many errors - log.info("Starting crawling loop with maxErrors threshold: $maxErrors") - val fetchStrategy = (this@CrawlerAgentTask.typeConfig?.fetch_method - ?: FetchMethod.HttpClient).createStrategy( - this@CrawlerAgentTask + } + val queueStats = synchronized(pageQueueLock) { + "queue_size=${pageQueue.size}, seen=${seenUrls.size}, active=${activeTasks.size}" + } + // Queue new tasks while we have capacity and unstarted pages + while ( + activeTasks.size < concurrentProcessing && // Limit concurrent tasks + synchronized(pageQueueLock) { pageQueue.isNotEmpty() } && // There are still unstarted pages + errorCount.get() < maxErrors && // Not too many errors + processedCount.get() < maxPages // Haven't hit max pages yet + ) { + addCrawlTask( + queueStats = queueStats, + completionService = completionService, + activeTasks = activeTasks, + errorCount = errorCount, + maxErrors = maxErrors, + task = task, + tabs = tabs, + processedCount = processedCount, + maxPages = maxPages, + maxDepth = maxDepthConfig, + maxQueueSize = maxQueueSizeConfig, + webSearchDir = webSearchDir, + agent = agent, + fetchStrategy = fetchStrategy, + orchestrationConfig = orchestrationConfig, + analysisResultsMap = analysisResultsMap, + transcriptStream = transcriptStream ) + } + // Wait for at least one task to complete before checking the queue again + // This allows in-progress 
tasks to add new links to the queue + if (activeTasks.isNotEmpty()) { try { - val loopIterations = AtomicInteger(0) - val maxDepthConfig = typeConfig.max_depth ?: (typeConfig.max_depth ?: 3) - val maxQueueSizeConfig = typeConfig.max_queue_size ?: (typeConfig.max_queue_size ?: 100) - log.debug("Starting crawling loop: maxPages=$maxPages, maxErrors=$maxErrors, maxIterations=${1000}") - while (shouldContinue(maxPages, errorCount, maxErrors, loopIterations, activeTasks)) { - if (loopIterations.get() % 10 == 0) { - synchronized(pageQueueLock) { - log.info("Loop iteration ${loopIterations.get()}: queue_size=${pageQueue.size}, active=${activeTasks.size}, errors=${errorCount.get()}") - } - } - val queueStats = synchronized(pageQueueLock) { - "queue_size=${pageQueue.size}, seen=${seenUrls.size}, active=${activeTasks.size}" - } - // Queue new tasks while we have capacity and unstarted pages - while ( - activeTasks.size < concurrentProcessing && // Limit concurrent tasks - synchronized(pageQueueLock) { pageQueue.isNotEmpty() } && // There are still unstarted pages - errorCount.get() < maxErrors && // Not too many errors - processedCount.get() < maxPages // Haven't hit max pages yet - ) { - addCrawlTask( - queueStats = queueStats, - completionService = completionService, - activeTasks = activeTasks, - errorCount = errorCount, - maxErrors = maxErrors, - task = task, - tabs = tabs, - processedCount = processedCount, - maxPages = maxPages, - maxDepth = maxDepthConfig, - maxQueueSize = maxQueueSizeConfig, - webSearchDir = webSearchDir, - agent = agent, - fetchStrategy = fetchStrategy, - orchestrationConfig = orchestrationConfig, - analysisResultsMap = analysisResultsMap - ) - } - - // Wait for at least one task to complete before checking the queue again - // This allows in-progress tasks to add new links to the queue - if (activeTasks.isNotEmpty()) { - try { - val future = completionService.poll(1, java.util.concurrent.TimeUnit.SECONDS) - if (future != null) { - future.get() // 
This will throw if the task failed - } else { - while (activeTasks.isNotEmpty()) sleep(1000) - } - } catch (e: Exception) { - log.error("Task execution failed", e) - } - } else { - // No active tasks, check if there are unstarted pages we missed - val unstartedCount = synchronized(pageQueueLock) { pageQueue.size } - if (unstartedCount > 0) { - log.warn("No active tasks but $unstartedCount unstarted pages remain - continuing") - continue - } - } - - log.info("Crawling progress: processed=${processedCount.get()}/$maxPages, queue=${pageQueue.size}, active_tasks=${activeTasks.size}, errors=${errorCount.get()}/$maxErrors") - //while (activeTasks.isNotEmpty()) sleep(1000) - } - if (loopIterations.get() >= 1000) { - log.warn("Reached maximum iteration limit: ${1000}") - } + val future = completionService.poll(1, java.util.concurrent.TimeUnit.SECONDS) + if (future != null) { + future.get() // This will throw if the task failed + } else { + while (activeTasks.isNotEmpty()) sleep(1000) + } } catch (e: Exception) { - log.error("Error during processing", e) - task.error(e) - } finally { - log.info("Crawling phase completed, cleaning up resources") + log.error("Task execution failed", e) } - val totalTime = System.currentTimeMillis() - startTime - log.info("CrawlerAgentTask completed: total_time=${totalTime}ms, pages_processed=${processedCount.get()}, errors=${errorCount.get()}, success_rate=${if (processedCount.get() > 0) ((processedCount.get() - errorCount.get()) * 100 / processedCount.get()) else 0}%") - task.complete("Completed in ${totalTime / 1000} seconds, processed ${processedCount.get()} pages with ${errorCount.get()} errors.") - - val analysisResults = (1..processedCount.get()).asSequence().mapNotNull { - analysisResultsMap[it] - }.joinToString("\n") - if (analysisResults.isBlank()) { - val errorMessage = "No content was successfully processed. Check logs for errors." 
- log.error(errorMessage) - log.error("Processing stats: total_attempted=${processedCount.get()}, errors=${errorCount.get()}, queue_size=${pageQueue.size}") - return errorMessage + } else { + // No active tasks, check if there are unstarted pages we missed + val unstartedCount = synchronized(pageQueueLock) { pageQueue.size } + if (unstartedCount > 0) { + log.warn("No active tasks but $unstartedCount unstarted pages remain - continuing") + continue } + } - val summaryTask = task.ui.newTask(false) - tabs["Final Summary"] = summaryTask.placeholder - val finalOutput = - if (typeConfig.create_final_summary != false && analysisResults.length > typeConfig.max_final_output_size ?: 15000) { - log.info("Creating final summary: original_size=${analysisResults.length}, max_size=${typeConfig.max_final_output_size ?: 15000}") - try { - createFinalSummary(analysisResults, summaryTask) - } catch (e: Exception) { - log.error("Failed to create final summary, using truncated results", e) - analysisResults.substring( - 0, minOf( - analysisResults.length, - typeConfig.max_final_output_size ?: 15000 - ) - ) + - "\n\n---\n\n*Note: Summary generation failed, showing truncated results*" - } - } else { - log.info("Using analysis results directly: size=${analysisResults.length}") - analysisResults - } - try { - summaryTask.add(finalOutput.renderMarkdown) - task.update() - } catch (e: Exception) { - log.error("Failed to update task with final summary", e) - } - log.info("CrawlerAgentTask finished successfully, final output size: ${finalOutput.length}") - return finalOutput - } catch (e: Throwable) { - log.error("Unhandled exception in CrawlerAgentTask", e) - task.error(e) - return "Error: ${e.javaClass.simpleName} - ${e.message ?: "Unknown error"}" + log.info("Crawling progress: processed=${processedCount.get()}/$maxPages, queue=${pageQueue.size}, active_tasks=${activeTasks.size}, errors=${errorCount.get()}/$maxErrors") + //while (activeTasks.isNotEmpty()) sleep(1000) } - } - - fun 
addToQueue( - newLink: LinkData, - maxDepth: Int, - maxQueueSize: Int - ): Boolean = synchronized(pageQueueLock) { - val typeConfig = typeConfig ?: throw RuntimeException() - if (newLink.url.isNullOrBlank()) { - log.warn("Attempted to add invalid or empty URL to queue: $newLink") - return false + if (loopIterations.get() >= 1000) { + log.warn("Reached maximum iteration limit: ${1000}") } - if (typeConfig.respect_robots_txt == true && !robotsTxtParser.isAllowed(newLink.url)) { - log.debug("Skipping URL disallowed by robots.txt: ${newLink.url}") - return false - } - if (pageQueue.size >= maxQueueSize) { - log.warn("Page queue has reached maximum size of $maxQueueSize, cannot add more links") - return false - } - if (newLink.depth > maxDepth) { - log.debug("Skipping link due to depth limit (depth=${newLink.depth} > maxDepth=$maxDepth): ${newLink.url}") - return false + } catch (e: Exception) { + log.error("Error during processing", e) + task.error(e) + } finally { + log.info("Crawling phase completed, cleaning up resources") + } + val totalTime = System.currentTimeMillis() - startTime + log.info("CrawlerAgentTask completed: total_time=${totalTime}ms, pages_processed=${processedCount.get()}, errors=${errorCount.get()}, success_rate=${if (processedCount.get() > 0) ((processedCount.get() - errorCount.get()) * 100 / processedCount.get()) else 0}%") + task.complete("Completed in ${totalTime / 1000} seconds, processed ${processedCount.get()} pages with ${errorCount.get()} errors.") + // Write completion stats to transcript + transcriptStream?.let { stream -> + writeTranscriptFooter(stream, totalTime, processedCount.get(), errorCount.get()) + } + + val analysisResults = (1..processedCount.get()).asSequence().mapNotNull { + analysisResultsMap[it] + }.joinToString("\n") + if (analysisResults.isBlank()) { + val errorMessage = "No content was successfully processed. Check logs for errors." 
+ log.error(errorMessage) + log.error("Processing stats: total_attempted=${processedCount.get()}, errors=${errorCount.get()}, queue_size=${pageQueue.size}") + return errorMessage + } + + val summaryTask = task.ui.newTask(false) + tabs["Final Summary"] = summaryTask.placeholder + val finalOutput = + if (typeConfig.create_final_summary != false && analysisResults.length > typeConfig.max_final_output_size ?: 15000) { + log.info("Creating final summary: original_size=${analysisResults.length}, max_size=${typeConfig.max_final_output_size ?: 15000}") + try { + createFinalSummary(analysisResults, summaryTask) + } catch (e: Exception) { + log.error("Failed to create final summary, using truncated results", e) + analysisResults.substring( + 0, minOf( + analysisResults.length, + typeConfig.max_final_output_size ?: 15000 + ) + ) + + "\n\n---\n\n*Note: Summary generation failed, showing truncated results*" + } + } else { + log.info("Using analysis results directly: size=${analysisResults.length}") + analysisResults } - if (seenUrls.contains(newLink.url)) { - log.debug("Skipping duplicate link already in queue: ${newLink.url}") - return false + try { + summaryTask.add(finalOutput.renderMarkdown) + task.update() + // Write final summary to transcript + transcriptStream?.let { stream -> + writeToTranscript(stream, "\n\n## Final Summary\n\n$finalOutput\n\n") } - seenUrls.add(newLink.url) - pageQueue.add(newLink) - log.debug("Added new link to queue: ${newLink.url} (depth=${newLink.depth}, priority=${newLink.calculatePriority()})") - true + } catch (e: Exception) { + log.error("Failed to update task with final summary", e) + } + log.info("CrawlerAgentTask finished successfully, final output size: ${finalOutput.length}") + return finalOutput + } catch (e: Throwable) { + log.error("Unhandled exception in CrawlerAgentTask", e) + task.error(e) + return "Error: ${e.javaClass.simpleName} - ${e.message ?: "Unknown error"}" + } finally { + transcriptStream?.close() + log.debug("Transcript 
stream closed") } - - fun getNextPage(): LinkData? = synchronized(pageQueueLock) { - // Poll removes and returns the highest priority element - val nextPage = pageQueue.poll() - nextPage?.let { - it.started = true - log.debug("Retrieved next page from queue: ${it.url} (priority=${it.calculatePriority()}, remaining=${pageQueue.size})") - } - nextPage + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = task.createFile("crawler_transcript.md") + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null } + } + + private fun writeTranscriptHeader(stream: FileOutputStream, startTime: Long) { + try { + val header = buildString { + appendLine("# Crawler Agent Transcript") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("**Search Query:** ${executionConfig?.search_query ?: "N/A"}") + appendLine("**Direct URLs:** ${executionConfig?.direct_urls?.joinToString(", ") ?: "N/A"}") + appendLine("**Content Queries:** ${executionConfig?.content_queries ?: "N/A"}") + appendLine() + appendLine("---") + appendLine() + } + stream.write(header.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write transcript header", e) + } + } - private fun shouldContinue( - maxPages: Int, - errorCount: AtomicInteger, - maxErrors: Int, - loopIterations: AtomicInteger, - activeTasks: MutableSet - ): Boolean = synchronized(pageQueueLock) { - val completed = seenUrls.size - pageQueue.size - activeTasks.size - val unstarted = pageQueue.size - val hasActiveTasks = activeTasks.isNotEmpty() - - // Continue if: - // 1. We have active tasks (they might add more links), OR - // 2. 
We have unstarted pages in the queue - // AND we haven't hit our limits - val shouldContinue = (hasActiveTasks || unstarted > 0) && - completed < maxPages && - errorCount.get() < maxErrors && - loopIterations.getAndIncrement() < 1000 - - if (!shouldContinue) { - log.info("Stopping crawl: completed=$completed/$maxPages, unstarted=$unstarted, active=$hasActiveTasks, errors=${errorCount.get()}/$maxErrors") - } + private fun writeToTranscript(stream: FileOutputStream, content: String) { + try { + stream.write(content.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write to transcript", e) + } + } + + private fun writeTranscriptFooter(stream: FileOutputStream, totalTime: Long, processedCount: Int, errorCount: Int) { + try { + val footer = buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## Crawling Session Summary") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine("**Total Time:** ${totalTime / 1000} seconds") + appendLine("**Pages Processed:** $processedCount") + appendLine("**Errors:** $errorCount") + appendLine("**Success Rate:** ${if (processedCount > 0) ((processedCount - errorCount) * 100 / processedCount) else 0}%") + appendLine() + } + stream.write(footer.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write transcript footer", e) + } + } + + + fun addToQueue( + newLink: LinkData, + maxDepth: Int, + maxQueueSize: Int + ): Boolean = synchronized(pageQueueLock) { + val typeConfig = typeConfig ?: throw RuntimeException() + if (newLink.url.isNullOrBlank()) { + log.warn("Attempted to add invalid or empty URL to queue: $newLink") + return false + } + if (typeConfig.respect_robots_txt == true && !robotsTxtParser.isAllowed(newLink.url)) { + log.debug("Skipping URL disallowed by robots.txt: ${newLink.url}") + return false + } + if 
(pageQueue.size >= maxQueueSize) { + log.warn("Page queue has reached maximum size of $maxQueueSize, cannot add more links") + return false + } + if (newLink.depth > maxDepth) { + log.debug("Skipping link due to depth limit (depth=${newLink.depth} > maxDepth=$maxDepth): ${newLink.url}") + return false + } + if (seenUrls.contains(newLink.url)) { + log.debug("Skipping duplicate link already in queue: ${newLink.url}") + return false + } + seenUrls.add(newLink.url) + pageQueue.add(newLink) + log.debug("Added new link to queue: ${newLink.url} (depth=${newLink.depth}, priority=${newLink.calculatePriority()})") + true + } + + fun getNextPage(): LinkData? = synchronized(pageQueueLock) { + // Poll removes and returns the highest priority element + val nextPage = pageQueue.poll() + nextPage?.let { + it.started = true + log.debug("Retrieved next page from queue: ${it.url} (priority=${it.calculatePriority()}, remaining=${pageQueue.size})") + } + nextPage + } + + private fun shouldContinue( + maxPages: Int, + errorCount: AtomicInteger, + maxErrors: Int, + loopIterations: AtomicInteger, + activeTasks: MutableSet + ): Boolean = synchronized(pageQueueLock) { + val completed = seenUrls.size - pageQueue.size - activeTasks.size + val unstarted = pageQueue.size + val hasActiveTasks = activeTasks.isNotEmpty() + + // Continue if: + // 1. We have active tasks (they might add more links), OR + // 2. 
We have unstarted pages in the queue + // AND we haven't hit our limits + val shouldContinue = (hasActiveTasks || unstarted > 0) && + completed < maxPages && + errorCount.get() < maxErrors && + loopIterations.getAndIncrement() < 1000 + + if (!shouldContinue) { + log.info("Stopping crawl: completed=$completed/$maxPages, unstarted=$unstarted, active=$hasActiveTasks, errors=${errorCount.get()}/$maxErrors") + } - shouldContinue + shouldContinue + } + + private fun addCrawlTask( + queueStats: String, + completionService: CompletionService, + activeTasks: MutableSet, + errorCount: AtomicInteger, + maxErrors: Int, + task: SessionTask, + tabs: TabbedDisplay, + processedCount: AtomicInteger, + maxPages: Int, + maxDepth: Int, + maxQueueSize: Int, + webSearchDir: File, + agent: TaskOrchestrator, + fetchStrategy: FetchStrategy, + orchestrationConfig: OrchestrationConfig, + analysisResultsMap: ConcurrentHashMap, + transcriptStream: FileOutputStream? + ): Boolean { + log.info("Status before queuing next page: $queueStats, active_tasks=${activeTasks.size}, errors=${errorCount.get()}/$maxErrors") + val page = getNextPage() ?: return true + if (page.url.isNullOrBlank()) { + log.error("Invalid page link encountered: $page") + errorCount.incrementAndGet() + page.completed = true + page.completed = true + page.error = "Invalid or empty URL" + return false } + activeTasks.add(page.url) - private fun addCrawlTask( - queueStats: String, - completionService: CompletionService, - activeTasks: MutableSet, - errorCount: AtomicInteger, - maxErrors: Int, - task: SessionTask, - tabs: TabbedDisplay, - processedCount: AtomicInteger, - maxPages: Int, - maxDepth: Int, - maxQueueSize: Int, - webSearchDir: File, - agent: TaskOrchestrator, - fetchStrategy: FetchStrategy, - orchestrationConfig: OrchestrationConfig, - analysisResultsMap: ConcurrentHashMap - ): Boolean { - log.info("Status before queuing next page: $queueStats, active_tasks=${activeTasks.size}, errors=${errorCount.get()}/$maxErrors") - 
val page = getNextPage() ?: return true - if (page.url.isNullOrBlank()) { - log.error("Invalid page link encountered: $page") - errorCount.incrementAndGet() - page.completed = true - page.completed = true - page.error = "Invalid or empty URL" - return false - } - activeTasks.add(page.url) + log.info("Queuing page for processing: url='${page.url}', title='${page.title}', depth=${page.depth}, relevance=${page.relevance_score}") - log.info("Queuing page for processing: url='${page.url}', title='${page.title}', depth=${page.depth}, relevance=${page.relevance_score}") + val subTask = try { + task.ui.newTask(false).apply { + tabs[page.url] = placeholder + task.update() + } + } catch (e: Exception) { + log.error("Failed to create subtask for URL: ${page.url}", e) + errorCount.incrementAndGet() + page.completed = true + page.completed = true + page.error = "Failed to create subtask: ${e.message}" + return false + } - val subTask = try { - task.ui.newTask(false).apply { - tabs[page.url] = placeholder - task.update() - } - } catch (e: Exception) { - log.error("Failed to create subtask for URL: ${page.url}", e) - errorCount.incrementAndGet() - page.completed = true - page.completed = true - page.error = "Failed to create subtask: ${e.message}" - return false - } + completionService.submit({ + try { + crawlPage( + processedCount, + page.url, + page, + maxPages, + maxDepth, + maxQueueSize, + webSearchDir, + agent, + fetchStrategy, + orchestrationConfig, + errorCount, + subTask, + analysisResultsMap, + transcriptStream + ) + } catch (e: Exception) { + log.error("Uncaught exception in page processing task for: ${page.url}", e) + errorCount.incrementAndGet() + page.completed = true + page.completed = true + page.error = "Uncaught exception: ${e.message}" + } finally { + activeTasks.remove(page.url) + } + }) + return false + } + + private fun crawlPage( + processedCount: AtomicInteger, + link: String, + page: LinkData, + maxPages: Int, + maxDepth: Int, + maxQueueSize: Int, + 
webSearchDir: File, + agent: TaskOrchestrator, + fetchStrategy: FetchStrategy, + orchestrationConfig: OrchestrationConfig, + errorCount: AtomicInteger, + task: SessionTask, + analysisResultsMap: ConcurrentHashMap, + transcriptStream: FileOutputStream? + ) { + val typeConfig = typeConfig ?: throw RuntimeException() + val pageStartTime = System.currentTimeMillis() + log.info("Starting to process page ${processedCount.get() + 1}: url='${link}', title='${page.title}'") + val currentIndex = processedCount.incrementAndGet() + // Apply crawl delay if robots.txt specifies one + if (typeConfig.respect_robots_txt == true) { + robotsTxtParser.getCrawlDelay(link)?.let { delay -> + log.debug("Applying robots.txt crawl delay of ${delay}ms for: $link") + sleep(delay) + } + } - completionService.submit({ + if (currentIndex > maxPages) { + log.warn("Max pages limit ($maxPages) reached, stopping processing for page: ${link}") + } else { + try { + val url = link + val title = page.title + val processPageResult = + buildString { + this.appendLine("## ${currentIndex}. 
[${title}]($url)") + this.appendLine() try { - crawlPage( - processedCount, - page.url, - page, - maxPages, - maxDepth, - maxQueueSize, - webSearchDir, - agent, - fetchStrategy, - orchestrationConfig, - errorCount, - subTask, - analysisResultsMap + // Log page processing start to transcript + transcriptStream?.let { stream -> + writeToTranscript(stream, "### Processing Page ${currentIndex}: [$title]($url)\n\n") + writeToTranscript(stream, "**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("HH:mm:ss"))}\n\n") + } + + val content = fetchAndProcessUrl( + url, + webSearchDir = webSearchDir, + index = currentIndex, + pool = agent.pool, + fetchStrategy = fetchStrategy + ) + log.debug("Fetched content for '$url': ${content.length} characters") + if (content.length < typeConfig.min_content_length ?: 500) { + log.info("Content too short for '$url': ${content.length} < ${typeConfig.min_content_length ?: 500} chars, skipping") + this.appendLine("*Content too short (${content.length} chars), skipping this result*") + this.appendLine() + return@buildString + } + + val analysisGoal = when { + this@CrawlerAgentTask.executionConfig?.content_queries != null -> executionConfig.toJson() + this@CrawlerAgentTask.executionConfig?.task_description?.isNotBlank() == true -> executionConfig.toString() + else -> "Analyze the content and provide insights." 
+ } + log.debug("Analyzing content for '$url' with goal: $analysisGoal") + val analysis: ParsedResponse = + transformContent( + content, + analysisGoal, + orchestrationConfig, + task ) - } catch (e: Exception) { - log.error("Uncaught exception in page processing task for: ${page.url}", e) - errorCount.incrementAndGet() - page.completed = true - page.completed = true - page.error = "Uncaught exception: ${e.message}" - } finally { - activeTasks.remove(page.url) - } - }) - return false - } - private fun crawlPage( - processedCount: AtomicInteger, - link: String, - page: LinkData, - maxPages: Int, - maxDepth: Int, - maxQueueSize: Int, - webSearchDir: File, - agent: TaskOrchestrator, - fetchStrategy: FetchStrategy, - orchestrationConfig: OrchestrationConfig, - errorCount: AtomicInteger, - task: SessionTask, - analysisResultsMap: ConcurrentHashMap - ) { - val typeConfig = typeConfig ?: throw RuntimeException() - val pageStartTime = System.currentTimeMillis() - log.info("Starting to process page ${processedCount.get() + 1}: url='${link}', title='${page.title}'") - val currentIndex = processedCount.incrementAndGet() - // Apply crawl delay if robots.txt specifies one - if (typeConfig.respect_robots_txt == true) { - robotsTxtParser.getCrawlDelay(link)?.let { delay -> - log.debug("Applying robots.txt crawl delay of ${delay}ms for: $link") - Thread.sleep(delay) - } - } + val parsedPage = analysis.obj + if (parsedPage.page_type == PageType.Error) { + log.warn("Analysis returned error for '$url': ${parsedPage.page_information}") + this.appendLine( + "*Error processing this result: ${ + parsedPage.page_information?.let { + JsonUtil.toJson( + it + ) + } + }*" + ) + this.appendLine() + saveAnalysis(webSearchDir.resolve("error").apply { + mkdirs() + }, url, analysis, currentIndex) + return@buildString + } + + if (parsedPage.page_type == PageType.Irrelevant) { + log.info("Content marked as irrelevant for '$url', skipping") + this.appendLine("*Irrelevant content, skipping this 
result*") + this.appendLine() + saveAnalysis(webSearchDir.resolve("irrelevant").apply { + mkdirs() + }, url, analysis, currentIndex) + return@buildString + } + log.debug("Successfully analyzed content for '$url', saving results") + + saveAnalysis( + webSearchDir = webSearchDir, + url = url, + analysis = analysis, + index = currentIndex + ) + + this.appendLine(analysis.text) + this.appendLine() + + if (typeConfig.follow_links == true) { + + var linkData = parsedPage.link_data + val allowRevisit = /*taskConfig?.allow_revisit_pages ?:*/ + typeConfig.allow_revisit_pages == true + if (linkData.isNullOrEmpty()) { + linkData = extractLinksFromMarkdown(analysis.text) + log.debug("Extracted ${linkData.size} links from markdown for '$url'") + } else { + log.debug("Using ${linkData.size} structured links from analysis for '$url'") + } + // Add extracted links section to UI + if (linkData.isNotEmpty()) { + this.appendLine() + this.appendLine("### Extracted Links (${linkData.size} found)") + this.appendLine() + } - if (currentIndex > maxPages) { - log.warn("Max pages limit ($maxPages) reached, stopping processing for page: ${link}") - } else { - try { - val url = link - val title = page.title - val processPageResult = - buildString { - this.appendLine("## ${currentIndex}. 
[${title}]($url)") - this.appendLine() - try { - val content = fetchAndProcessUrl( - url, - webSearchDir = webSearchDir, - index = currentIndex, - pool = agent.pool, - fetchStrategy = fetchStrategy - ) - log.debug("Fetched content for '$url': ${content.length} characters") - if (content.length < typeConfig.min_content_length ?: 500) { - log.info("Content too short for '$url': ${content.length} < ${typeConfig.min_content_length ?: 500} chars, skipping") - this.appendLine("*Content too short (${content.length} chars), skipping this result*") - this.appendLine() - return@buildString - } - - val analysisGoal = when { - this@CrawlerAgentTask.executionConfig?.content_queries != null -> executionConfig.toJson() - this@CrawlerAgentTask.executionConfig?.task_description?.isNotBlank() == true -> executionConfig.toString() - else -> "Analyze the content and provide insights." - } - log.debug("Analyzing content for '$url' with goal: $analysisGoal") - val analysis: ParsedResponse = - transformContent( - content, - analysisGoal, - orchestrationConfig, - task - ) - - val parsedPage = analysis.obj - if (parsedPage.page_type == PageType.Error) { - log.warn("Analysis returned error for '$url': ${parsedPage.page_information}") - this.appendLine( - "*Error processing this result: ${ - parsedPage.page_information?.let { - JsonUtil.toJson( - it - ) - } - }*" - ) - this.appendLine() - saveAnalysis(webSearchDir.resolve("error").apply { - mkdirs() - }, url, analysis, currentIndex) - return@buildString - } - - if (parsedPage.page_type == PageType.Irrelevant) { - log.info("Content marked as irrelevant for '$url', skipping") - this.appendLine("*Irrelevant content, skipping this result*") - this.appendLine() - saveAnalysis(webSearchDir.resolve("irrelevant").apply { - mkdirs() - }, url, analysis, currentIndex) - return@buildString - } - log.debug("Successfully analyzed content for '$url', saving results") - - saveAnalysis( - webSearchDir = webSearchDir, - url = url, - analysis = analysis, - 
index = currentIndex - ) - - this.appendLine(analysis.text) - this.appendLine() - - if (typeConfig.follow_links == true) { - - var linkData = parsedPage.link_data - val allowRevisit = /*taskConfig?.allow_revisit_pages ?:*/ - typeConfig.allow_revisit_pages == true - if (linkData.isNullOrEmpty()) { - linkData = extractLinksFromMarkdown(analysis.text) - log.debug("Extracted ${linkData.size} links from markdown for '$url'") - } else { - log.debug("Using ${linkData.size} structured links from analysis for '$url'") - } - // Add extracted links section to UI - if (linkData.isNotEmpty()) { - this.appendLine() - this.appendLine("### Extracted Links (${linkData.size} found)") - this.appendLine() - } - - - var addedCount = 0 - val skippedLinks = mutableListOf>() - - linkData - .take(10) // Limit links per page to prevent explosion - .filter { link -> - val isValid = VALID_URL_PATTERN.matcher(link.url!!).matches() - val isNotBlacklisted = !isBlacklistedDomain(link.url) - val isNotDuplicate = allowRevisit || !seenUrls.contains(link.url) - val isAllowedByRobots = typeConfig.respect_robots_txt != true || - robotsTxtParser.isAllowed(link.url) - - if (!isValid) { - skippedLinks.add(link to "Invalid URL format") - } else if (!isNotBlacklisted) { - skippedLinks.add(link to "Blacklisted domain") - } else if (!isNotDuplicate) { - skippedLinks.add(link to "Already in queue") - } else if (!isAllowedByRobots) { - skippedLinks.add(link to "Disallowed by robots.txt") - } - - isValid && isNotBlacklisted && isNotDuplicate && isAllowedByRobots - } - .forEach { link -> - val newLink = link.apply { depth = page.depth + 1 } - if (addToQueue(newLink, maxDepth, maxQueueSize)) { - addedCount++ - this.appendLine("- ✅ **[${link.title ?: "Untitled"}](${link.url})** (depth: ${newLink.depth}, relevance: ${link.relevance_score})") - } else { - skippedLinks.add(link to "Queue limit reached or max depth exceeded") - } - } - // Show skipped links - if (skippedLinks.isNotEmpty()) { - this.appendLine() - 
this.appendLine("
    ") - this.appendLine("Skipped Links (${skippedLinks.size})") - this.appendLine() - skippedLinks.forEach { (link, reason) -> - this.appendLine("- ⏭️ **[${link.title ?: "Untitled"}](${link.url})** - *${reason}*") - } - this.appendLine() - this.appendLine("
    ") - this.appendLine() - } - - log.info("Added $addedCount new links to queue from '$url' (filtered from ${linkData.size} total)") - // Add summary - if (linkData.isNotEmpty()) { - this.appendLine() - this.appendLine("**Link Processing Summary:** ${addedCount} added to queue, ${skippedLinks.size} skipped") - this.appendLine() - } - } - } catch (e: Exception) { - log.error("Error processing URL: $url", e) - errorCount.incrementAndGet() - synchronized(pageQueueLock) { - page.error = e.message - } - this.appendLine("*Error processing this result: ${e.message}*") - this.appendLine() - } + + var addedCount = 0 + val skippedLinks = mutableListOf>() + + linkData + .take(10) // Limit links per page to prevent explosion + .filter { link -> + val isValid = VALID_URL_PATTERN.matcher(link.url!!).matches() + val isNotBlacklisted = !isBlacklistedDomain(link.url) + val isNotDuplicate = allowRevisit || !seenUrls.contains(link.url) + val isAllowedByRobots = typeConfig.respect_robots_txt != true || + robotsTxtParser.isAllowed(link.url) + + if (!isValid) { + skippedLinks.add(link to "Invalid URL format") + } else if (!isNotBlacklisted) { + skippedLinks.add(link to "Blacklisted domain") + } else if (!isNotDuplicate) { + skippedLinks.add(link to "Already in queue") + } else if (!isAllowedByRobots) { + skippedLinks.add(link to "Disallowed by robots.txt") + } + + isValid && isNotBlacklisted && isNotDuplicate && isAllowedByRobots + } + .forEach { link -> + val newLink = link.apply { depth = page.depth + 1 } + if (addToQueue(newLink, maxDepth, maxQueueSize)) { + addedCount++ + this.appendLine("- ✅ **[${link.title ?: "Untitled"}](${link.url})** (depth: ${newLink.depth}, relevance: ${link.relevance_score})") + } else { + skippedLinks.add(link to "Queue limit reached or max depth exceeded") } - task.add(processPageResult.renderMarkdown) - analysisResultsMap[currentIndex] = processPageResult - log.info("Successfully processed page ${currentIndex}: url='${link}', 
processing_time=${System.currentTimeMillis() - pageStartTime}ms") + } + // Show skipped links + if (skippedLinks.isNotEmpty()) { + this.appendLine() + this.appendLine("
    ") + this.appendLine("Skipped Links (${skippedLinks.size})") + this.appendLine() + skippedLinks.forEach { (link, reason) -> + this.appendLine("- ⏭️ **[${link.title ?: "Untitled"}](${link.url})** - *${reason}*") + } + this.appendLine() + this.appendLine("
    ") + this.appendLine() + } + + log.info("Added $addedCount new links to queue from '$url' (filtered from ${linkData.size} total)") + // Add summary + if (linkData.isNotEmpty()) { + this.appendLine() + this.appendLine("**Link Processing Summary:** ${addedCount} added to queue, ${skippedLinks.size} skipped") + this.appendLine() + } + // Log link processing to transcript + transcriptStream?.let { stream -> + writeToTranscript( + stream, + "**Links Found:** ${linkData.size}, **Added to Queue:** $addedCount, **Skipped:** ${skippedLinks.size}\n\n" + ) + } + } + } catch (e: Exception) { - task.error(e) - log.error("Error processing page: ${link}", e) - errorCount.incrementAndGet() - page.error = e.message + log.error("Error processing URL: $url", e) + errorCount.incrementAndGet() + synchronized(pageQueueLock) { page.error = e.message - analysisResultsMap[currentIndex] = - "## ${currentIndex}. [${page.title}](${link})\n\n*Error processing this result: ${e.message}*\n\n" - } finally { - page.completed = true - page.processingTimeMs = System.currentTimeMillis() - pageStartTime - page.completed = true - log.debug("Page processing completed: url='${link}', time=${page.processingTimeMs}ms, error='${page.error ?: "none"}'") + } + this.appendLine("*Error processing this result: ${e.message}*") + this.appendLine() + // Log error to transcript + transcriptStream?.let { stream -> + writeToTranscript(stream, "**Error:** ${e.message}\n\n") + } } + } + task.add(processPageResult.renderMarkdown) + analysisResultsMap[currentIndex] = processPageResult + log.info("Successfully processed page ${currentIndex}: url='${link}', processing_time=${System.currentTimeMillis() - pageStartTime}ms") + } catch (e: Exception) { + task.error(e) + log.error("Error processing page: ${link}", e) + errorCount.incrementAndGet() + page.error = e.message + page.error = e.message + analysisResultsMap[currentIndex] = + "## ${currentIndex}. 
[${page.title}](${link})\n\n*Error processing this result: ${e.message}*\n\n" + } finally { + // Log page completion to transcript + transcriptStream?.let { stream -> + writeToTranscript(stream, "**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("HH:mm:ss"))}\n") + writeToTranscript(stream, "**Processing Time:** ${System.currentTimeMillis() - pageStartTime}ms\n\n---\n\n") } + + page.completed = true + page.processingTimeMs = System.currentTimeMillis() - pageStartTime + page.completed = true + log.debug("Page processing completed: url='${link}', time=${page.processingTimeMs}ms, error='${page.error ?: "none"}'") + } } + } - private fun isBlacklistedDomain(url: String): Boolean { - val blacklistedDomains = setOf( - "facebook.com", "twitter.com", "instagram.com", "linkedin.com", - "youtube.com", "tiktok.com", "pinterest.com", "reddit.com", - "amazon.com", "ebay.com", "aliexpress.com" - ) - return try { - val uri = URI.create(url) - val typeConfig = typeConfig ?: throw RuntimeException() - - // Check if URL is restricted by allowed_domains whitelist - val allowedDomains = - ((typeConfig.allowed_domains?.split(Regex("\\s+"))?.filter { it.isNotBlank() } ?: listOf()) + - (executionConfig?.allowed_domains?.split(Regex("\\s+"))?.filter { it.isNotBlank() } - ?: listOf())).toSet() - if (!allowedDomains.isNullOrEmpty()) { - val isAllowed = allowedDomains.any { allowedDomainOrPrefix -> - val normalizedAllowed = allowedDomainOrPrefix.lowercase().trim() - when { - // Check if it's a full URL prefix match - normalizedAllowed.startsWith("http://") || normalizedAllowed.startsWith("https://") -> { - url.lowercase().startsWith(normalizedAllowed) - } - // Check if it's a domain match (exact or subdomain) - else -> { - val domain = uri.host?.lowercase() - if (domain == null) { - log.warn("Could not extract domain from URL: $url") - return true - } - domain == normalizedAllowed || domain.endsWith(".${normalizedAllowed}") - } - } - } - if (!isAllowed) { - 
log.debug("URL not in allowed domains list: $url") - return true - } + private fun isBlacklistedDomain(url: String): Boolean { + val blacklistedDomains = setOf( + "facebook.com", "twitter.com", "instagram.com", "linkedin.com", + "youtube.com", "tiktok.com", "pinterest.com", "reddit.com", + "amazon.com", "ebay.com", "aliexpress.com" + ) + return try { + val uri = URI.create(url) + val typeConfig = typeConfig ?: throw RuntimeException() + + // Check if URL is restricted by allowed_domains whitelist + val allowedDomains = + ((typeConfig.allowed_domains?.split(Regex("\\s+"))?.filter { it.isNotBlank() } ?: listOf()) + + (executionConfig?.allowed_domains?.split(Regex("\\s+"))?.filter { it.isNotBlank() } + ?: listOf())).toSet() + if (!allowedDomains.isNullOrEmpty()) { + val isAllowed = allowedDomains.any { allowedDomainOrPrefix -> + val normalizedAllowed = allowedDomainOrPrefix.lowercase().trim() + when { + // Check if it's a full URL prefix match + normalizedAllowed.startsWith("http://") || normalizedAllowed.startsWith("https://") -> { + url.lowercase().startsWith(normalizedAllowed) } - - // Check blacklist - val domain = uri.host?.lowercase() - if (domain == null) { + // Check if it's a domain match (exact or subdomain) + else -> { + val domain = uri.host?.lowercase() + if (domain == null) { log.warn("Could not extract domain from URL: $url") return true + } + domain == normalizedAllowed || domain.endsWith(".${normalizedAllowed}") } - blacklistedDomains.any { domain.contains(it) } - } catch (e: Exception) { - log.warn("Invalid URL format: $url", e) - true // Blacklist invalid URLs + } } - } - - private fun createFinalSummary(analysisResults: String, task: SessionTask): String { - log.info("Creating final summary of analysis results (original size: ${analysisResults.length})") - - val typeConfig = typeConfig ?: throw RuntimeException() - if (analysisResults.length < (typeConfig.max_final_output_size ?: 15000) * 1.2) { - log.info("Analysis results only slightly exceed max 
size, truncating instead of summarizing") - return analysisResults.substring( - 0, - min(analysisResults.length, typeConfig.max_final_output_size ?: 15000) - ) + "\n\n---\n\n*Note: Some content has been truncated due to length limitations.*" - } - - val headerEndIndex = analysisResults.indexOf("## 1. [") - val header = if (headerEndIndex > 0) { - analysisResults.substring(0, headerEndIndex) - } else { - "# Web Search: ${executionConfig?.search_query ?: executionConfig?.direct_urls?.joinToString(", ") ?: ""}\n\n" + if (!isAllowed) { + log.debug("URL not in allowed domains list: $url") + return true } - - val urlSections = extractUrlSections(analysisResults) - log.info("Extracted ${urlSections.size} URL sections for summarization") - val summary = ChatAgent( - prompt = listOf( - "Create a comprehensive summary of the following web search results and analyses.", - "Original analysis contained ${urlSections.size} web pages related to: ${executionConfig?.search_query ?: ""}", - "Analysis goal: ${executionConfig?.content_queries ?: executionConfig?.task_description ?: "Provide key insights"}", - "For each source, extract the most important insights, facts, and conclusions.", - "Organize information by themes rather than by source when possible.", - "Use markdown formatting with headers, bullet points, and emphasis where appropriate.", - "Include the most important links that should be followed up on.", - "Keep your response under ${(typeConfig.max_final_output_size ?: 15000) / 1000}K characters." 
- ).joinToString("\n\n"), - model = (typeConfig.model?.let { orchestrationConfig.instance(it) } - ?: orchestrationConfig.parsingChatter).getChildClient(task), - ).answer( - listOf( - "Here are summaries of each analyzed page:\n${analysisResults}" - ), - ) - return header + summary + } + + // Check blacklist + val domain = uri.host?.lowercase() + if (domain == null) { + log.warn("Could not extract domain from URL: $url") + return true + } + blacklistedDomains.any { domain.contains(it) } + } catch (e: Exception) { + log.warn("Invalid URL format: $url", e) + true // Blacklist invalid URLs } - - private fun extractUrlSections(analysisResults: String): List { - val sections = mutableListOf() - val sectionPattern = Pattern.compile("""## \d+\. \[([^]]+)]\(([^)]+)\)(.*?)(?=## \d+\. \[|$)""", Pattern.DOTALL) - val matcher = sectionPattern.matcher(analysisResults) - while (matcher.find()) { - val title = matcher.group(1) - val url = matcher.group(2) - val content = matcher.group(3).trim() - val condensed = "**[${title}](${url})**: ${summarizeSection(content)}" - sections.add(condensed) - } - return sections + } + + private fun createFinalSummary(analysisResults: String, task: SessionTask): String { + log.info("Creating final summary of analysis results (original size: ${analysisResults.length})") + + val typeConfig = typeConfig ?: throw RuntimeException() + if (analysisResults.length < (typeConfig.max_final_output_size ?: 15000) * 1.2) { + log.info("Analysis results only slightly exceed max size, truncating instead of summarizing") + return analysisResults.substring( + 0, + min(analysisResults.length, typeConfig.max_final_output_size ?: 15000) + ) + "\n\n---\n\n*Note: Some content has been truncated due to length limitations.*" } - private fun summarizeSection(content: String): String { - - val firstParagraph = content.split("\n\n").firstOrNull()?.trim() ?: "" - if (firstParagraph.length < 300) return firstParagraph + val headerEndIndex = analysisResults.indexOf("## 1. 
[") + val header = if (headerEndIndex > 0) { + analysisResults.substring(0, headerEndIndex) + } else { + "# Web Search: ${executionConfig?.search_query ?: executionConfig?.direct_urls?.joinToString(", ") ?: ""}\n\n" + } - val sentences = content.split(". ").take(3) - return sentences.joinToString(". ") + (if (sentences.size >= 3) "..." else "") + val urlSections = extractUrlSections(analysisResults) + log.info("Extracted ${urlSections.size} URL sections for summarization") + val summary = ChatAgent( + prompt = listOf( + "Create a comprehensive summary of the following web search results and analyses.", + "Original analysis contained ${urlSections.size} web pages related to: ${executionConfig?.search_query ?: ""}", + "Analysis goal: ${executionConfig?.content_queries ?: executionConfig?.task_description ?: "Provide key insights"}", + "For each source, extract the most important insights, facts, and conclusions.", + "Organize information by themes rather than by source when possible.", + "Use markdown formatting with headers, bullet points, and emphasis where appropriate.", + "Include the most important links that should be followed up on.", + "Keep your response under ${(typeConfig.max_final_output_size ?: 15000) / 1000}K characters." + ).joinToString("\n\n"), + model = (typeConfig.model?.let { orchestrationConfig.instance(it) } + ?: orchestrationConfig.parsingChatter).getChildClient(task), + ).answer( + listOf( + "Here are summaries of each analyzed page:\n${analysisResults}" + ), + ) + return header + summary + } + + private fun extractUrlSections(analysisResults: String): List { + val sections = mutableListOf() + val sectionPattern = Pattern.compile("""## \d+\. \[([^]]+)]\(([^)]+)\)(.*?)(?=## \d+\. 
\[|$)""", Pattern.DOTALL) + val matcher = sectionPattern.matcher(analysisResults) + while (matcher.find()) { + val title = matcher.group(1) + val url = matcher.group(2) + val content = matcher.group(3).trim() + val condensed = "**[${title}](${url})**: ${summarizeSection(content)}" + sections.add(condensed) } + return sections + } - private fun fetchAndProcessUrl( - url: String, webSearchDir: File, index: Int, pool: ExecutorService, fetchStrategy: FetchStrategy - ): String { - val typeConfig = typeConfig ?: throw RuntimeException() - if (url.isBlank()) { - throw IllegalArgumentException("URL cannot be blank") - } + private fun summarizeSection(content: String): String { + val firstParagraph = content.split("\n\n").firstOrNull()?.trim() ?: "" + if (firstParagraph.length < 300) return firstParagraph - if (!(typeConfig.allow_revisit_pages == true) && urlContentCache.containsKey(url)) { - log.debug("Using cached content for URL: $url (cache size: ${urlContentCache.size})") - return urlContentCache[url]!! - } - log.debug( - "Fetching content for URL: {} using method: {}", - url, - typeConfig.fetch_method ?: FetchMethod.HttpClient - ) + val sentences = content.split(". ").take(3) + return sentences.joinToString(". ") + (if (sentences.size >= 3) "..." 
else "") + } - return try { - val content = fetchStrategy.fetch(url, webSearchDir, index, pool, orchestrationConfig) - // Cache successful fetches - if (content.isNotBlank()) { - urlContentCache[url] = content - log.debug("Cached content for URL: $url (content length: ${content.length}, cache size: ${urlContentCache.size})") - } else { - log.warn("Fetched empty content for URL: $url") - } - content - } catch (e: Exception) { - log.error("Failed to fetch URL: $url - ${e.javaClass.simpleName}: ${e.message}", e) - throw e - } + private fun fetchAndProcessUrl( + url: String, webSearchDir: File, index: Int, pool: ExecutorService, fetchStrategy: FetchStrategy + ): String { + val typeConfig = typeConfig ?: throw RuntimeException() + if (url.isBlank()) { + throw IllegalArgumentException("URL cannot be blank") } - private fun extractLinksFromMarkdown(markdown: String): List { - val links = mutableListOf>() - val matcher = LINK_PATTERN.matcher(markdown) - var matchCount = 0 - while (matcher.find()) { - matchCount++ - if (matchCount > 100) { - log.warn("Too many links found in markdown (>100), stopping extraction") - break - } - val linkText = matcher.group(1) - val linkUrl = matcher.group(2) - try { - if (VALID_URL_PATTERN.matcher(linkUrl).matches()) { - links.add(Pair(linkText, linkUrl)) - } else { - log.debug("Skipping invalid URL in markdown: $linkUrl") - } - } catch (e: Exception) { - log.warn("Invalid URL found in markdown: $linkUrl", e) - } - } - log.debug("Extracted ${links.size} valid links from markdown") - return links.map { (linkText, linkUrl) -> - LinkData( - url = linkUrl, - title = linkText, - relevance_score = 50.0 - ) - } - } - fun saveRawContent(webSearchDir: File, url: String, content: String) { - try { - val urlSafe = url.replace(Regex("[^a-zA-Z0-9]"), "_").take(50) - if (!webSearchDir.exists() && !webSearchDir.mkdirs()) { - log.error("Failed to create directory: ${webSearchDir.absolutePath}") - return - } - val extension = when { - 
webSearchDir.name.contains("document") -> ".txt" - webSearchDir.name.contains("text") -> ".txt" - webSearchDir.name.contains("extracted_text") -> ".txt" - else -> ".html" - } - val rawFile = File(webSearchDir, urlSafe + extension) - // Ensure content is saved with proper encoding - try { - rawFile.writeText(content, StandardCharsets.UTF_8) - } catch (e: Exception) { - log.error("Failed to write content to file: ${rawFile.absolutePath}", e) - return - } - log.debug("Saved raw content to: ${rawFile.absolutePath} (size: ${content.length} chars)") - } catch (e: Exception) { - log.error("Failed to save raw content for URL: $url", e) - } + if (!(typeConfig.allow_revisit_pages == true) && urlContentCache.containsKey(url)) { + log.debug("Using cached content for URL: $url (cache size: ${urlContentCache.size})") + return urlContentCache[url]!! } + log.debug( + "Fetching content for URL: {} using method: {}", + url, + typeConfig.fetch_method ?: FetchMethod.HttpClient + ) - private fun saveAnalysis(webSearchDir: File, url: String, analysis: ParsedResponse, index: Int) { - try { - val timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss")) - val urlSafe = url.replace(Regex("https?://"), "").replace(Regex("[^a-zA-Z0-9]"), "_").take(100) - val analysisFile = File(webSearchDir, "${urlSafe}_${index}_${timestamp}.md") - - val metadata = mapOf( - "url" to url, - "timestamp" to LocalDateTime.now().toString(), - "index" to index, - "query" to (executionConfig?.search_query ?: ""), - "content_query" to (executionConfig?.content_queries ?: "") - ) - val metadataJson = try { - ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(metadata) - } catch (e: JsonProcessingException) { - log.error("Failed to serialize metadata for URL: $url", e) - "{}" - } - - val objJson = try { - analysis.obj.let { JsonUtil.toJson(it) } - } catch (e: Exception) { - log.error("Failed to serialize analysis object for URL: $url", e) - "" - } - - val contentWithHeader 
= "\n\n${analysis.text}" - analysisFile.writeText(contentWithHeader) - log.debug("Saved analysis to file: ${analysisFile.absolutePath} (size: ${contentWithHeader.length} chars)") - } catch (e: Exception) { - log.error("Failed to save analysis for URL: $url", e) - } + return try { + val content = fetchStrategy.fetch(url, webSearchDir, index, pool, orchestrationConfig) + // Cache successful fetches + if (content.isNotBlank()) { + urlContentCache[url] = content + log.debug("Cached content for URL: $url (content length: ${content.length}, cache size: ${urlContentCache.size})") + } else { + log.warn("Fetched empty content for URL: $url") + } + content + } catch (e: Exception) { + log.error("Failed to fetch URL: $url - ${e.javaClass.simpleName}: ${e.message}", e) + throw e } - - private fun transformContent( - content: String, - analysisGoal: String, - orchestrationConfig: OrchestrationConfig, - task: SessionTask - ): ParsedResponse { - val describer = TaskContextYamlDescriber(orchestrationConfig) - val maxChunkSize = 50000 - if (content.length <= maxChunkSize) { - log.debug("Content size (${content.length}) within limit, processing as single chunk") - return pageParsedResponse(orchestrationConfig, analysisGoal, content, describer, task) - } - - log.debug("Content size (${content.length}) exceeds limit, splitting into chunks") - val chunks = splitContentIntoChunks(content, maxChunkSize) - log.debug("Split content into ${chunks.size} chunks") - val chunkResults = chunks.mapIndexed { index, chunk -> - log.debug("Processing chunk ${index + 1}/${chunks.size} (size: ${chunk.length})") - val chunkGoal = "$analysisGoal (Part ${index + 1}/${chunks.size})" - pageParsedResponse(orchestrationConfig, chunkGoal, chunk, describer, task) - } - if (chunkResults.size == 1) { - log.debug("Only one chunk result, returning directly") - return chunkResults[0] + } + + private fun extractLinksFromMarkdown(markdown: String): List { + val links = mutableListOf>() + val matcher = 
LINK_PATTERN.matcher(markdown) + var matchCount = 0 + while (matcher.find()) { + matchCount++ + if (matchCount > 100) { + log.warn("Too many links found in markdown (>100), stopping extraction") + break + } + val linkText = matcher.group(1) + val linkUrl = matcher.group(2) + try { + if (VALID_URL_PATTERN.matcher(linkUrl).matches()) { + links.add(Pair(linkText, linkUrl)) + } else { + log.debug("Skipping invalid URL in markdown: $linkUrl") } - log.debug("Combining ${chunkResults.size} chunk results into final analysis") - val combinedAnalysis = chunkResults.joinToString("\n\n---\n\n") { it.text } - return pageParsedResponse(orchestrationConfig, analysisGoal, combinedAnalysis, describer, task) + } catch (e: Exception) { + log.warn("Invalid URL found in markdown: $linkUrl", e) + } } - - private fun pageParsedResponse( - orchestrationConfig: OrchestrationConfig, - analysisGoal: String, - content: String, - describer: TypeDescriber, - task: SessionTask - ) = try { - val typeConfig = typeConfig ?: throw RuntimeException() - val model = (typeConfig.model?.let { orchestrationConfig.instance(it) } - ?: orchestrationConfig.parsingChatter).getChildClient(task) - ParsedAgent( - prompt = listOf( - "Below are analyses of different parts of a web page related to this goal: $analysisGoal", - "Create a unified summary that combines the key insights from all parts.", - "Use markdown formatting for your response, with * characters for bullets.", - "Identify the most important links that should be followed up on according to the goal." 
- ).joinToString("\n\n"), - resultClass = ParsedPage::class.java, - model = model, - describer = describer, - parsingChatter = model, - ).answer(listOf(content)) + log.debug("Extracted ${links.size} valid links from markdown") + return links.map { (linkText, linkUrl) -> + LinkData( + url = linkUrl, + title = linkText, + relevance_score = 50.0 + ) + } + } + + fun saveRawContent(webSearchDir: File, url: String, content: String) { + try { + val urlSafe = url.replace(Regex("[^a-zA-Z0-9]"), "_").take(50) + if (!webSearchDir.exists() && !webSearchDir.mkdirs()) { + log.error("Failed to create directory: ${webSearchDir.absolutePath}") + return + } + val extension = when { + webSearchDir.name.contains("document") -> ".txt" + webSearchDir.name.contains("text") -> ".txt" + webSearchDir.name.contains("extracted_text") -> ".txt" + else -> ".html" + } + val rawFile = File(webSearchDir, urlSafe + extension) + // Ensure content is saved with proper encoding + try { + rawFile.writeText(content, StandardCharsets.UTF_8) + } catch (e: Exception) { + log.error("Failed to write content to file: ${rawFile.absolutePath}", e) + return + } + log.debug("Saved raw content to: ${rawFile.absolutePath} (size: ${content.length} chars)") } catch (e: Exception) { - log.error("Error during content transformation", e) - object : ParsedResponse( - clazz = ParsedPage::class.java - ) { - override val obj: ParsedPage - get() = ParsedPage( - page_type = PageType.Error, - page_information = "Error during analysis: ${e.message}" - ) - override val text: String - get() = "Error during analysis: ${e.message}" - } + log.error("Failed to save raw content for URL: $url", e) + } + } + + private fun saveAnalysis(webSearchDir: File, url: String, analysis: ParsedResponse, index: Int) { + try { + val timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss")) + val urlSafe = url.replace(Regex("https?://"), "").replace(Regex("[^a-zA-Z0-9]"), "_").take(100) + val analysisFile = 
File(webSearchDir, "${urlSafe}_${index}_${timestamp}.md") + + val metadata = mapOf( + "url" to url, + "timestamp" to LocalDateTime.now().toString(), + "index" to index, + "query" to (executionConfig?.search_query ?: ""), + "content_query" to (executionConfig?.content_queries ?: "") + ) + val metadataJson = try { + ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(metadata) + } catch (e: JsonProcessingException) { + log.error("Failed to serialize metadata for URL: $url", e) + "{}" + } + + val objJson = try { + analysis.obj.let { JsonUtil.toJson(it) } + } catch (e: Exception) { + log.error("Failed to serialize analysis object for URL: $url", e) + "" + } + + val contentWithHeader = "\n\n${analysis.text}" + analysisFile.writeText(contentWithHeader) + log.debug("Saved analysis to file: ${analysisFile.absolutePath} (size: ${contentWithHeader.length} chars)") + } catch (e: Exception) { + log.error("Failed to save analysis for URL: $url", e) } + } - private fun splitContentIntoChunks(content: String, maxChunkSize: Int): List { - val chunks = mutableListOf() - var remainingContent = content - while (remainingContent.isNotEmpty()) { - val chunkSize = if (remainingContent.length <= maxChunkSize) { - remainingContent.length - } else { - val breakPoint = findBreakPoint(remainingContent, maxChunkSize) - breakPoint - } - chunks.add(remainingContent.substring(0, chunkSize)) - remainingContent = remainingContent.substring(chunkSize) - } - return chunks + private fun transformContent( + content: String, + analysisGoal: String, + orchestrationConfig: OrchestrationConfig, + task: SessionTask + ): ParsedResponse { + val describer = TaskContextYamlDescriber(orchestrationConfig) + val maxChunkSize = 50000 + if (content.length <= maxChunkSize) { + log.debug("Content size (${content.length}) within limit, processing as single chunk") + return pageParsedResponse(orchestrationConfig, analysisGoal, content, describer, task) } - private fun findBreakPoint(text: String, 
maxSize: Int): Int { - val paragraphBreakSearch = text.substring(0, minOf(maxSize, text.length)).lastIndexOf("\n\n") - if (paragraphBreakSearch > maxSize * 0.7) { - return paragraphBreakSearch + 2 - } - val newlineSearch = text.substring(0, minOf(maxSize, text.length)).lastIndexOf("\n") - if (newlineSearch > maxSize * 0.7) { - return newlineSearch + 1 - } - val sentenceSearch = text.substring(0, minOf(maxSize, text.length)).lastIndexOf(". ") - if (sentenceSearch > maxSize * 0.7) { - return sentenceSearch + 2 + log.debug("Content size (${content.length}) exceeds limit, splitting into chunks") + val chunks = splitContentIntoChunks(content, maxChunkSize) + log.debug("Split content into ${chunks.size} chunks") + val chunkResults = chunks.mapIndexed { index, chunk -> + log.debug("Processing chunk ${index + 1}/${chunks.size} (size: ${chunk.length})") + val chunkGoal = "$analysisGoal (Part ${index + 1}/${chunks.size})" + pageParsedResponse(orchestrationConfig, chunkGoal, chunk, describer, task) + } + if (chunkResults.size == 1) { + log.debug("Only one chunk result, returning directly") + return chunkResults[0] + } + log.debug("Combining ${chunkResults.size} chunk results into final analysis") + val combinedAnalysis = chunkResults.joinToString("\n\n---\n\n") { it.text } + return pageParsedResponse(orchestrationConfig, analysisGoal, combinedAnalysis, describer, task) + } - } - return minOf(maxSize, text.length) + private fun pageParsedResponse( + orchestrationConfig: OrchestrationConfig, + analysisGoal: String, + content: String, + describer: TypeDescriber, + task: SessionTask + ) = try { + val typeConfig = typeConfig ?: throw RuntimeException() + val model = (typeConfig.model?.let { orchestrationConfig.instance(it) } + ?: orchestrationConfig.parsingChatter).getChildClient(task) + ParsedAgent( + prompt = listOf( + "Below are analyses of different parts of a web page related to this goal: $analysisGoal", + "Create a unified summary that combines the key insights from all 
parts.", + "Use markdown formatting for your response, with * characters for bullets.", + "Identify the most important links that should be followed up on according to the goal." + ).joinToString("\n\n"), + resultClass = ParsedPage::class.java, + model = model, + describer = describer, + parsingChatter = model, + ).answer(listOf(content)) + } catch (e: Exception) { + log.error("Error during content transformation", e) + object : ParsedResponse( + clazz = ParsedPage::class.java + ) { + override val obj: ParsedPage + get() = ParsedPage( + page_type = PageType.Error, + page_information = "Error during analysis: ${e.message}" + ) + override val text: String + get() = "Error during analysis: ${e.message}" + } + } + + private fun splitContentIntoChunks(content: String, maxChunkSize: Int): List { + val chunks = mutableListOf() + var remainingContent = content + while (remainingContent.isNotEmpty()) { + val chunkSize = if (remainingContent.length <= maxChunkSize) { + remainingContent.length + } else { + val breakPoint = findBreakPoint(remainingContent, maxChunkSize) + breakPoint + } + chunks.add(remainingContent.substring(0, chunkSize)) + remainingContent = remainingContent.substring(chunkSize) } + return chunks + } - companion object { - private val log = LoggerFactory.getLogger(CrawlerAgentTask::class.java) - private val LINK_PATTERN = Pattern.compile("""\[([^]]+)]\(([^)]+)\)""") - private val VALID_URL_PATTERN = Pattern.compile("^(http|https)://.*") - val CrawlerAgent = TaskType( - "CrawlerAgent", - CrawlerAgentTask.CrawlerTaskExecutionConfigData::class.java, - CrawlerAgentTask.CrawlerTaskTypeConfig::class.java, - "Search Google, fetch top results, and analyze content", - """ + private fun findBreakPoint(text: String, maxSize: Int): Int { + val paragraphBreakSearch = text.substring(0, minOf(maxSize, text.length)).lastIndexOf("\n\n") + if (paragraphBreakSearch > maxSize * 0.7) { + return paragraphBreakSearch + 2 + } + val newlineSearch = text.substring(0, minOf(maxSize, 
text.length)).lastIndexOf("\n") + if (newlineSearch > maxSize * 0.7) { + return newlineSearch + 1 + } + val sentenceSearch = text.substring(0, minOf(maxSize, text.length)).lastIndexOf(". ") + if (sentenceSearch > maxSize * 0.7) { + return sentenceSearch + 2 + + } + return minOf(maxSize, text.length) + } + + companion object { + private val log = LoggerFactory.getLogger(CrawlerAgentTask::class.java) + private val LINK_PATTERN = Pattern.compile("""\[([^]]+)]\(([^)]+)\)""") + private val VALID_URL_PATTERN = Pattern.compile("^(http|https)://.*") + val CrawlerAgent = TaskType( + "CrawlerAgent", + CrawlerTaskExecutionConfigData::class.java, + CrawlerTaskTypeConfig::class.java, + "Search Google, fetch top results, and analyze content", + """ Searches Google for specified queries and analyzes the top results.
    • Performs Google searches
    • @@ -1193,7 +1316,7 @@ import kotlin.math.min
    • Generates detailed analysis reports
    """ - ) + ) - } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.md deleted file mode 100644 index d93ab0aa9..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/CrawlerAgentTask.md +++ /dev/null @@ -1,621 +0,0 @@ -# Web Crawler Agent Documentation - -## Overview - -The `CrawlerAgentTask` is a sophisticated web crawling and content analysis system that can search the web, fetch content from URLs, analyze pages using AI, and automatically follow relevant links. It's designed to gather and synthesize information from multiple web sources based on specific queries or goals. - -## Key Features - -- **Multiple Seeding Methods**: Start crawling from Google search results or direct URLs -- **Intelligent Content Fetching**: Support for HTML, PDF, DOCX, and other document formats -- **AI-Powered Analysis**: Uses language models to extract relevant information from pages -- **Automatic Link Following**: Discovers and follows relevant links based on analysis -- **Robots.txt Compliance**: Respects website crawling rules and rate limits -- **Priority Queue**: Processes pages based on relevance scores and depth -- **Concurrent Processing**: Handles multiple pages simultaneously for efficiency -- **Content Caching**: Avoids re-fetching the same URLs -- **Comprehensive Logging**: Detailed tracking of crawling progress and errors - -## Architecture - -### Core Components - -#### 1. CrawlerAgentTask -The main orchestrator that manages the crawling workflow: -- Initializes the page queue with seed URLs -- Manages concurrent page processing -- Coordinates content fetching and analysis -- Generates final summaries - -#### 2. 
Seed Methods -Strategies for initializing the crawler: - -**GoogleSearch**: Searches Google and extracts top results -```kotlin -enum class SeedMethod { - GoogleSearch, - DirectUrls -} -``` - -**DirectUrls**: Uses explicitly provided URLs - -#### 3. Fetch Strategies -Methods for retrieving web content: - -**HttpClient**: Standard HTTP client with SSL support -- Handles HTML, text, and document formats -- Extracts text from PDFs, DOCX, etc. -- Simplifies HTML for better analysis - -**Selenium**: Browser automation for JavaScript-heavy sites -- Renders dynamic content -- Captures screenshots -- Handles complex interactions - -#### 4. Content Processing Pipeline - -``` -URL → Fetch → Simplify → Analyze → Extract Links → Queue New URLs -``` - -1. **Fetch**: Retrieve content using selected strategy -2. **Simplify**: Clean HTML, extract text from documents -3. **Analyze**: Use AI to extract relevant information -4. **Extract Links**: Find and score new URLs to follow -5. **Queue**: Add promising links to priority queue - -## Configuration - -### Task Type Configuration - -```kotlin -class CrawlerTaskTypeConfig( - val seed_method: SeedMethod? = SeedMethod.GoogleSearch, - val fetch_method: FetchMethod? = FetchMethod.HttpClient, - val allowed_domains: String? = null, - val respect_robots_txt: Boolean? = true, - val max_pages_per_task: Int? = 30, - val max_depth: Int? = 3, - val max_queue_size: Int? = 100, - val concurrent_page_processing: Int? = 3, - val max_final_output_size: Int? = 15000, - val min_content_length: Int? = 500, - val follow_links: Boolean? = true, - val allow_revisit_pages: Boolean? = false, - val create_final_summary: Boolean? = true -) -``` - -### Execution Configuration - -```kotlin -class CrawlerTaskExecutionConfigData( - val search_query: String? = null, - val direct_urls: List? = null, - val content_queries: Any? = null, - val allowed_domains: String? 
= null -) -``` - -## Usage Examples - -### Example 1: Google Search with Analysis - -```kotlin -val config = CrawlerTaskExecutionConfigData( - search_query = "artificial intelligence recent developments", - content_queries = """ - Extract: - - Key technological breakthroughs - - Companies involved - - Potential applications - - Publication dates - """, - allowed_domains = "arxiv.org nature.com sciencedaily.com" -) -``` - -### Example 2: Direct URL Analysis - -```kotlin -val config = CrawlerTaskExecutionConfigData( - direct_urls = listOf( - "https://example.com/article1", - "https://example.com/article2" - ), - content_queries = "Summarize the main arguments and supporting evidence" -) -``` - -### Example 3: Deep Crawl with Link Following - -```kotlin -val typeConfig = CrawlerTaskTypeConfig( - seed_method = SeedMethod.DirectUrls, - max_depth = 5, - max_pages_per_task = 100, - follow_links = true, - concurrent_page_processing = 5 -) -``` - -## Data Structures - -### LinkData -Represents a URL to be crawled: - -```kotlin -data class LinkData( - val link: String?, - val title: String?, - val tags: List?, - val relevance_score: Double = 100.0, - var depth: Int = 0, - var started: Boolean = false, - var completed: Boolean = false, - var error: String? = null -) -``` - -**Priority Calculation**: `relevance_score / (depth + 1.0)` -- Higher relevance = higher priority -- Lower depth = higher priority - -### ParsedPage -Result of AI analysis: - -```kotlin -data class ParsedPage( - val page_type: PageType, // OK, Error, Irrelevant - val page_information: Any?, - val tags: List?, - val link_data: List? -) -``` - -## Processing Flow - -### 1. Initialization -``` -┌─────────────────┐ -│ Seed Method │ -│ (Google/URLs) │ -└────────┬────────┘ - │ - ▼ -┌─────────────────┐ -│ Priority Queue │ -│ (LinkData) │ -└─────────────────┘ -``` - -### 2. 
Crawling Loop -``` -┌──────────────────────────────────────────┐ -│ While (pages < max && errors < limit) │ -│ ┌────────────────────────────────────┐ │ -│ │ Get Next Page from Queue │ │ -│ │ (Highest Priority) │ │ -│ └──────────┬─────────────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌────────────────────────────────────┐ │ -│ │ Fetch Content │ │ -│ │ (HTTP/Selenium) │ │ -│ └──────────┬─────────────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌────────────────────────────────────┐ │ -│ │ Simplify/Extract Text │ │ -│ └──────────┬─────────────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌────────────────────────────────────┐ │ -│ │ AI Analysis │ │ -│ │ (Extract Information) │ │ -│ └──────────┬─────────────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌────────────────────────────────────┐ │ -│ │ Extract & Score Links │ │ -│ └──────────┬─────────────────────────┘ │ -│ │ │ -│ ▼ │ -│ ┌────────────────────────────────────┐ │ -│ │ Add to Queue (if relevant) │ │ -│ └────────────────────────────────────┘ │ -└──────────────────────────────────────────┘ -``` - -### 3. 
Finalization -``` -┌─────────────────┐ -│ All Results │ -└────────┬────────┘ - │ - ▼ -┌─────────────────┐ -│ Create Summary │ -│ (if enabled) │ -└────────┬────────┘ - │ - ▼ -┌─────────────────┐ -│ Final Output │ -└─────────────────┘ -``` - -## Content Fetching Strategies - -### HttpClient Strategy - -**Advantages**: -- Fast and lightweight -- Good for static content -- Handles documents (PDF, DOCX) -- Low resource usage - -**Limitations**: -- Cannot execute JavaScript -- May miss dynamic content -- Limited interaction capabilities - -**Supported Formats**: -- HTML pages -- Plain text -- PDF documents -- Microsoft Office (DOC, DOCX, XLS, XLSX, PPT, PPTX) -- OpenDocument formats (ODT, ODS, ODP) -- RTF files - -### Selenium Strategy - -**Advantages**: -- Executes JavaScript -- Renders dynamic content -- Can interact with pages -- Captures visual state - -**Limitations**: -- Slower than HTTP -- Higher resource usage -- Requires browser driver -- More complex setup - -**Use Cases**: -- Single-page applications -- JavaScript-heavy sites -- Sites requiring interaction -- Visual verification needed - -## Robots.txt Compliance - -The crawler respects robots.txt rules when enabled: - -### Features -- **Automatic Fetching**: Downloads and caches robots.txt per domain -- **Rule Parsing**: Supports Disallow, Allow, Crawl-delay, Sitemap -- **User-Agent Matching**: Respects rules for "*" and "CognotikBot" -- **Pattern Matching**: Handles wildcards and path patterns -- **Crawl Delays**: Automatically applies specified delays - -### Example robots.txt -``` -User-agent: * -Disallow: /admin/ -Disallow: /private/ -Allow: /public/ -Crawl-delay: 1 - -Sitemap: https://example.com/sitemap.xml -``` - -## Content Analysis - -### AI-Powered Extraction - -The crawler uses language models to: -1. **Classify Pages**: Determine if content is relevant, error, or irrelevant -2. **Extract Information**: Pull out specific data based on queries -3. 
**Score Links**: Evaluate relevance of discovered URLs -4. **Summarize Content**: Create concise summaries of findings - -### Analysis Prompt Structure - -```kotlin -val prompt = """ -Below are analyses of different parts of a web page related to this goal: $analysisGoal - -Create a unified summary that combines the key insights from all parts. -Use markdown formatting for your response. -Identify the most important links that should be followed up on. -""" -``` - -### Content Chunking - -For large pages (>50KB): -1. Split into manageable chunks -2. Analyze each chunk separately -3. Combine results into unified summary -4. Preserve context across chunks - -## Output and Storage - -### Directory Structure -``` -.websearch/ -├── raw_pages/ # Original HTML -├── reduced_pages/ # Simplified HTML -├── documents/ # Downloaded files -├── extracted_text/ # Text from documents -├── text_pages/ # Plain text content -├── error/ # Failed analyses -└── irrelevant/ # Filtered content -``` - -### Analysis Files -Each analyzed page is saved as: -``` -{url_safe}_{index}_{timestamp}.md -``` - -With metadata header: -```markdown - - -## Page Title - -Analysis content... -``` - -## Error Handling - -### Retry Logic -- Tracks retry count per URL -- Implements exponential backoff -- Maximum retry attempts configurable - -### Error Types -1. **Network Errors**: Connection failures, timeouts -2. **HTTP Errors**: 4xx, 5xx status codes -3. **Parse Errors**: Invalid HTML, malformed documents -4. **Analysis Errors**: AI model failures -5. 
**Resource Errors**: Memory limits, disk space - -### Error Recovery -```kotlin -try { - // Fetch and process -} catch (e: Exception) { - log.error("Error processing URL: $url", e) - errorCount.incrementAndGet() - page.error = e.message - // Continue with next page -} -``` - -## Performance Optimization - -### Concurrent Processing -- Configurable worker threads -- Completion service for task management -- Active task tracking - -### Caching -- URL content cache (in-memory) -- Robots.txt cache (per domain) -- Prevents redundant fetches - -### Queue Management -- Priority-based processing -- Maximum queue size limits -- Duplicate URL detection - -### Resource Limits -- Maximum pages per task -- Maximum crawl depth -- Content size limits -- Queue size limits - -## Best Practices - -### 1. Define Clear Goals -```kotlin -content_queries = """ -Extract specific information: -- Data point 1 -- Data point 2 -- Evaluation criteria -- Filtering priorities -""" -``` - -### 2. Restrict Domains -```kotlin -allowed_domains = "example.com trusted-source.org" -``` - -### 3. Set Reasonable Limits -```kotlin -max_pages_per_task = 50 // Don't crawl too much -max_depth = 3 // Prevent infinite loops -concurrent_page_processing = 3 // Balance speed/resources -``` - -### 4. Respect Websites -```kotlin -respect_robots_txt = true // Always enable -// Crawler automatically applies delays -``` - -### 5. Monitor Progress -- Check logs for errors -- Review intermediate results -- Adjust configuration as needed - -## Troubleshooting - -### Common Issues - -**1. No Results Found** -- Check search query specificity -- Verify allowed_domains aren't too restrictive -- Ensure URLs are accessible - -**2. Too Many Errors** -- Reduce concurrent_page_processing -- Check network connectivity -- Verify robots.txt compliance - -**3. Irrelevant Content** -- Refine content_queries -- Adjust relevance scoring -- Restrict domains more carefully - -**4. 
Memory Issues** -- Reduce max_queue_size -- Lower max_pages_per_task -- Decrease concurrent_page_processing - -**5. Slow Performance** -- Increase concurrent_page_processing -- Use HttpClient instead of Selenium -- Reduce max_depth - -## API Reference - -### Main Methods - -#### `run()` -Executes the crawling task -```kotlin -fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig -) -``` - -#### `addToQueue()` -Adds a new URL to the processing queue -```kotlin -fun addToQueue( - newLink: LinkData, - maxDepth: Int, - maxQueueSize: Int -): Boolean -``` - -#### `getNextPage()` -Retrieves the highest priority page from queue -```kotlin -fun getNextPage(): LinkData? -``` - -#### `fetchAndProcessUrl()` -Fetches and processes content from a URL -```kotlin -private fun fetchAndProcessUrl( - url: String, - webSearchDir: File, - index: Int, - pool: ExecutorService, - fetchStrategy: FetchStrategy -): String -``` - -#### `transformContent()` -Analyzes content using AI -```kotlin -private fun transformContent( - content: String, - analysisGoal: String, - orchestrationConfig: OrchestrationConfig, - task: SessionTask -): ParsedResponse -``` - -## Advanced Features - -### Custom Fetch Strategies - -Implement `FetchStrategy` interface: -```kotlin -interface FetchStrategy { - fun fetch( - url: String, - webSearchDir: File, - index: Int, - pool: ExecutorService, - orchestrationConfig: OrchestrationConfig - ): String -} -``` - -### Custom Seed Methods - -Implement `SeedStrategy` interface: -```kotlin -interface SeedStrategy { - fun getSeedItems( - executionConfig: CrawlerTaskExecutionConfigData?, - orchestrationConfig: OrchestrationConfig - ): List? -} -``` - -### Link Extraction - -Automatic extraction from: -- Markdown links: `[text](url)` -- HTML anchor tags: `` -- Structured data from AI analysis - -### Content Filtering - -Multiple filtering stages: -1. **Domain whitelist/blacklist** -2. 
**Robots.txt compliance** -3. **Duplicate detection** -4. **Relevance scoring** -5. **Content length requirements** - -## Security Considerations - -### SSL/TLS -- Accepts all certificates (configurable) -- Supports HTTPS connections -- Handles certificate errors gracefully - -### Rate Limiting -- Respects robots.txt crawl delays -- Configurable concurrent requests -- Automatic backoff on errors - -### Content Validation -- URL format validation -- Content type checking -- Size limit enforcement -- Malicious content detection - -## Future Enhancements - -Potential improvements: -- [ ] Distributed crawling support -- [ ] Advanced JavaScript rendering -- [ ] Image and video analysis -- [ ] Multi-language support -- [ ] Custom extraction rules -- [ ] Real-time monitoring dashboard -- [ ] Export to various formats -- [ ] Integration with knowledge graphs diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/DirectUrls.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/DirectUrls.kt index a11fc24fd..579975b98 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/DirectUrls.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/DirectUrls.kt @@ -5,37 +5,37 @@ import com.simiacryptus.cognotik.platform.model.User import java.net.URI class DirectUrls : SeedMethodFactory { - override fun createStrategy(task: CrawlerAgentTask, user: User?): SeedStrategy = object : SeedStrategy { - override fun getSeedItems( - taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, - orchestrationConfig: OrchestrationConfig - ): List? 
{ - SeedMethod.Companion.log.info("Starting DirectUrls seed method") - if (taskConfig?.direct_urls.isNullOrEmpty()) { - SeedMethod.Companion.log.error("Direct URLs are missing for DirectUrls seed method") - return emptyList() - } - SeedMethod.Companion.log.debug("Processing direct URLs: ${taskConfig.direct_urls.joinToString(", ")}") - return taskConfig.direct_urls.map { it.trim() }.filter { it.isNotBlank() } - .filter { url -> - try { - URI.create(url) - url.startsWith("http://") || url.startsWith("https://") - } catch (e: Exception) { - SeedMethod.Companion.log.warn("Invalid URL format: $url") - false - } - } - .mapIndexed { index, url -> - SeedMethod.Companion.log.debug("Adding direct URL: $url") - SeedItem( - link = url, - title = "Direct URL ${index + 1}", - additionalData = mapOf("index" to index) - ) - }.also { - SeedMethod.Companion.log.info("Successfully processed ${it.size} direct URLs") - } + override fun createStrategy(task: CrawlerAgentTask, user: User?): SeedStrategy = object : SeedStrategy { + override fun getSeedItems( + taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, + orchestrationConfig: OrchestrationConfig + ): List { + SeedMethod.log.info("Starting DirectUrls seed method") + if (taskConfig?.direct_urls.isNullOrEmpty()) { + SeedMethod.log.error("Direct URLs are missing for DirectUrls seed method") + return emptyList() + } + SeedMethod.log.debug("Processing direct URLs: ${taskConfig.direct_urls.joinToString(", ")}") + return taskConfig.direct_urls.map { it.trim() }.filter { it.isNotBlank() } + .filter { url -> + try { + URI.create(url) + url.startsWith("http://") || url.startsWith("https://") + } catch (e: Exception) { + SeedMethod.log.warn("Invalid URL format: $url") + false + } + } + .mapIndexed { index, url -> + SeedMethod.log.debug("Adding direct URL: $url") + SeedItem( + link = url, + title = "Direct URL ${index + 1}", + additionalData = mapOf("index" to index) + ) + }.also { + SeedMethod.log.info("Successfully processed 
${it.size} direct URLs") } } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/FetchMethod.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/FetchMethod.kt index f91713890..7a8f25f7a 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/FetchMethod.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/FetchMethod.kt @@ -7,27 +7,27 @@ import java.io.File import java.util.concurrent.ExecutorService interface FetchStrategy : EnabledStrategy { - fun fetch(url: String, webSearchDir: File, index: Int, pool: ExecutorService, orchestrationConfig: OrchestrationConfig): String + fun fetch(url: String, webSearchDir: File, index: Int, pool: ExecutorService, orchestrationConfig: OrchestrationConfig): String } object FetchConfig { - var isSeleniumEnabled: Boolean = false + var isSeleniumEnabled: Boolean = false } + interface FetchMethodFactory { - fun createStrategy(task: CrawlerAgentTask): FetchStrategy + fun createStrategy(task: CrawlerAgentTask): FetchStrategy } -; @Suppress("unused") enum class FetchMethod : FetchMethodFactory { - Selenium { - override fun createStrategy(task: CrawlerAgentTask) = Selenium().createStrategy(task) - }, - HttpClient { - override fun createStrategy(task: CrawlerAgentTask) = HttpClientFetch().createStrategy(task) - }; + Selenium { + override fun createStrategy(task: CrawlerAgentTask) = Selenium().createStrategy(task) + }, + HttpClient { + override fun createStrategy(task: CrawlerAgentTask) = HttpClientFetch().createStrategy(task) + }; - companion object { - val log = LoggerFactory.getLogger(FetchMethod::class.java) - } + companion object { + val log = LoggerFactory.getLogger(FetchMethod::class.java) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GitHubSearchTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GitHubSearchTask.kt 
index fc04cb11c..842cb5b8c 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GitHubSearchTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GitHubSearchTask.kt @@ -9,60 +9,58 @@ import com.simiacryptus.cognotik.platform.ApplicationServices import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask +import java.io.FileOutputStream import java.net.URI import java.net.http.HttpClient import java.net.http.HttpRequest import java.net.http.HttpResponse class GitHubSearchTask( - orchestrationConfig: OrchestrationConfig, - planTask: GitHubSearchTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: GitHubSearchTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class GitHubSearchTaskExecutionConfigData( - @Description("The search query to use for GitHub search") - val search_query: String = "", - @Description("The type of GitHub search to perform (code, commits, issues, repositories, topics, users)") - val search_type: String = "repositories", - @Description("The number of results to return (max 100)") - val per_page: Int = 30, - @Description("Sort order for results") - val sort: String? = null, - @Description("Sort direction (asc or desc)") - val order: String? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null, - ) : ValidatedObject, TaskExecutionConfig( - task_type = GitHubSearch.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) { - override fun validate(): String? { - if (search_query.isBlank()) { - return "GitHub search query cannot be blank" - } - - val validSearchTypes = setOf("code", "commits", "issues", "repositories", "topics", "users") - if (search_type !in validSearchTypes) { - return "Invalid search_type: $search_type. 
Must be one of: ${validSearchTypes.joinToString(", ")}" - } - - if (per_page < 1 || per_page > 100) { - return "per_page must be between 1 and 100, got: $per_page" - } - - order?.let { if (it !in setOf("asc", "desc")) return "Invalid order: $it. Must be 'asc' or 'desc'" } - - return null - } + class GitHubSearchTaskExecutionConfigData( + @Description("The search query to use for GitHub search") + val search_query: String = "", + @Description("The type of GitHub search to perform (code, commits, issues, repositories, topics, users)") + val search_type: String = "repositories", + @Description("The number of results to return (max 100)") + val per_page: Int = 30, + @Description("Sort order for results") + val sort: String? = null, + @Description("Sort direction (asc or desc)") + val order: String? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + ) : ValidatedObject, TaskExecutionConfig( + task_type = GitHubSearch.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) { + override fun validate(): String? { + if (search_query.isBlank()) { + return "GitHub search query cannot be blank" + } + + val validSearchTypes = setOf("code", "commits", "issues", "repositories", "topics", "users") + if (search_type !in validSearchTypes) { + return "Invalid search_type: $search_type. Must be one of: ${validSearchTypes.joinToString(", ")}" + } + + if (per_page < 1 || per_page > 100) { + return "per_page must be between 1 and 100, got: $per_page" + } + + order?.let { if (it !in setOf("asc", "desc")) return "Invalid order: $it. Must be 'asc' or 'desc'" } + + return null } + } - override fun promptSegment() = """ - executionConfig?.validate()?.let { error -> - throw ValidatedObject.ValidationError(error, executionConfig!!) 
- } -GitHubSearch - Search GitHub for code, commits, issues, repositories, topics, or users + override fun promptSegment() = """ + GitHubSearch - Search GitHub for code, commits, issues, repositories, topics, or users * Specify the search query * Specify the type of search (code, commits, issues, repositories, topics, users) * Specify the number of results to return (max 100) @@ -70,134 +68,164 @@ GitHubSearch - Search GitHub for code, commits, issues, repositories, topics, or * Optionally specify sort direction (asc or desc) """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - - val searchResults = performGitHubSearch( - agent.user - ?.let { ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings(it) } - ?.apis?.firstOrNull { it.provider == APIProvider.Github }?.key?.trim() - ?: throw RuntimeException("GitHub API token is required") - ) - val actorAnswerText = formatSearchResults(searchResults) - task.add(MarkdownUtil.renderMarkdown(actorAnswerText, ui = task.ui)) - resultFn(actorAnswerText) - } - - private fun performGitHubSearch(githubToken: String): String { - val queryParams = mutableListOf() - - var searchQuery = executionConfig?.search_query - //if (searchQuery.isNullOrBlank()) throw IllegalArgumentException("GitHub search query is required and cannot be empty.") - if (searchQuery.isNullOrBlank()) { - searchQuery = "" - } - queryParams.add("q=${java.net.URLEncoder.encode(searchQuery, "UTF-8")}") - - queryParams.add("per_page=${executionConfig?.per_page}") // perPage is now guaranteed non-null - executionConfig?.sort?.let { queryParams.add("sort=${java.net.URLEncoder.encode(it, "UTF-8")}") } - executionConfig?.order?.let { queryParams.add("order=${java.net.URLEncoder.encode(it, "UTF-8")}") } - return HttpClient.newHttpClient().send( - HttpRequest.newBuilder() - .uri( - URI.create( - 
URI("https://api.github.com") - .resolve("/search/${executionConfig?.search_type}") - .toURL().toString() + "?" + queryParams.joinToString("&") - ) - ) - .header("Accept", "application/vnd.github+json") - .header("Authorization", "Bearer ${githubToken}") - .header("X-GitHub-Api-Version", "2022-11-28") - .GET() - .build(), HttpResponse.BodyHandlers.ofString() - ).apply { - if (statusCode() != 200) { - throw RuntimeException("GitHub API request failed with status ${statusCode()}: ${body()}") - } - }.body() - } - - private fun formatSearchResults(results: String): String { - val mapper = ObjectMapper() - val searchResults: Map = mapper.readValue(results) - val effectiveSearchType = this.executionConfig?.search_type ?: GitHubSearchTaskExecutionConfigData().search_type - return buildString { - appendLine("# GitHub Search Results") - appendLine() - appendLine("Total results: ${searchResults["total_count"]}") - appendLine() - appendLine("## Top Results:") - appendLine() - val items = searchResults["items"] as List> - items.take(minOf(10, items.size)).forEach { item -> // Ensure we don't go over items.size - when (effectiveSearchType) { // Use the resolved effectiveSearchType - "repositories" -> formatRepositoryResult(item) - "code" -> formatCodeResult(item) - "commits" -> formatCommitResult(item) - "issues" -> formatIssueResult(item) - "users" -> formatUserResult(item) - "topics" -> formatTopicResult(item) - else -> appendLine("- ${item["name"] ?: item["title"] ?: item["login"]}") - } - appendLine() - } - } + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + executionConfig?.validate()?.let { error -> + throw ValidatedObject.ValidationError(error, executionConfig!!) 
} - - private fun StringBuilder.formatTopicResult(topic: Map) { - appendLine("### [${topic["name"]}](${topic["url"]})") - appendLine("${topic["short_description"]}") - appendLine("Featured: ${topic["featured"]} | Curated: ${topic["curated"]}") + val transcript = transcript(task) + transcript?.let { out -> + out.write("# GitHub Search Task\n\n".toByteArray()) + out.write("## Configuration\n\n".toByteArray()) + out.write("- **Query**: ${executionConfig?.search_query}\n".toByteArray()) + out.write("- **Search Type**: ${executionConfig?.search_type}\n".toByteArray()) + out.write("- **Results Per Page**: ${executionConfig?.per_page}\n".toByteArray()) + executionConfig?.sort?.let { out.write("- **Sort**: $it\n".toByteArray()) } + executionConfig?.order?.let { out.write("- **Order**: $it\n\n".toByteArray()) } + out.write("\n## Search Results\n\n".toByteArray()) } - private fun StringBuilder.formatRepositoryResult(repo: Map) { - appendLine("### ${repo["full_name"]}") - appendLine("${repo["description"]}") - appendLine("Stars: ${repo["stargazers_count"]} | Forks: ${repo["forks_count"]}") - appendLine("[View on GitHub](${repo["html_url"]})") + val searchResults = performGitHubSearch( + agent.user + ?.let { ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings(it) } + ?.apis?.firstOrNull { it.provider == APIProvider.Github }?.key?.trim() + ?: throw RuntimeException("GitHub API token is required") + ) + val actorAnswerText = formatSearchResults(searchResults) + transcript?.let { out -> + out.write(actorAnswerText.toByteArray()) } - private fun StringBuilder.formatCodeResult(code: Map) { - val repo = code["repository"] as Map - appendLine("### [${repo["full_name"]}](${code["html_url"]})") - appendLine("File: ${code["path"]}") - appendLine("```") - appendLine(code["text_matches"]?.toString()?.take(200) ?: "") - appendLine("```") - } + task.add(MarkdownUtil.renderMarkdown(actorAnswerText, ui = task.ui)) + resultFn(actorAnswerText) + } + + private fun 
transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } - private fun StringBuilder.formatCommitResult(commit: Map) { - val repo = commit["repository"] as Map - appendLine("### [${repo["full_name"]}](${commit["html_url"]})") - appendLine("${(commit["commit"] as Map)["message"]}") - appendLine("Author: ${(commit["author"] as Map)["login"]} | Date: ${((commit["commit"] as Map)["author"] as Map)["date"]}") - } + private fun performGitHubSearch(githubToken: String): String { + val queryParams = mutableListOf() - private fun StringBuilder.formatIssueResult(issue: Map) { - appendLine("### [${issue["title"]}](${issue["html_url"]})") - appendLine("State: ${issue["state"]} | Comments: ${issue["comments"]}") - appendLine("Created by ${(issue["user"] as Map)["login"]} on ${issue["created_at"]}") + var searchQuery = executionConfig?.search_query + if (searchQuery.isNullOrBlank()) { + throw IllegalArgumentException("GitHub search query is required and cannot be empty.") } - - private fun StringBuilder.formatUserResult(user: Map) { - appendLine("### [${user["login"]}](${user["html_url"]})") - appendLine("Type: ${user["type"]} | Repos: ${user["public_repos"]}") - appendLine("![Avatar](${user["avatar_url"]})") + queryParams.add("q=${java.net.URLEncoder.encode(searchQuery, "UTF-8")}") + + queryParams.add("per_page=${executionConfig?.per_page ?: 30}") + executionConfig?.sort?.let { queryParams.add("sort=${java.net.URLEncoder.encode(it, "UTF-8")}") } + executionConfig?.order?.let { queryParams.add("order=${java.net.URLEncoder.encode(it, "UTF-8")}") } + return HttpClient.newHttpClient().send( + HttpRequest.newBuilder() + .uri( + URI.create( + URI("https://api.github.com") + .resolve("/search/${executionConfig?.search_type}") + .toURL().toString() + "?" 
+ queryParams.joinToString("&") + ) + ) + .header("Accept", "application/vnd.github+json") + .header("Authorization", "Bearer ${githubToken}") + .header("X-GitHub-Api-Version", "2022-11-28") + .GET() + .build(), HttpResponse.BodyHandlers.ofString() + ).apply { + if (statusCode() != 200) { + throw RuntimeException("GitHub API request failed with status ${statusCode()}: ${body()}") + } + }.body() + } + + private fun formatSearchResults(results: String): String { + val mapper = ObjectMapper() + val searchResults: Map = mapper.readValue(results) + val effectiveSearchType = this.executionConfig?.search_type ?: GitHubSearchTaskExecutionConfigData().search_type + return buildString { + appendLine("# GitHub Search Results") + appendLine() + appendLine("Total results: ${searchResults["total_count"]}") + appendLine() + appendLine("## Top Results:") + appendLine() + val items = searchResults["items"] as List> + items.take(minOf(10, items.size)).forEach { item -> // Ensure we don't go over items.size + when (effectiveSearchType) { // Use the resolved effectiveSearchType + "repositories" -> formatRepositoryResult(item) + "code" -> formatCodeResult(item) + "commits" -> formatCommitResult(item) + "issues" -> formatIssueResult(item) + "users" -> formatUserResult(item) + "topics" -> formatTopicResult(item) + else -> appendLine("- ${item["name"] ?: item["title"] ?: item["login"]}") + } + appendLine() + } } - - companion object { - val GitHubSearch = TaskType( - "GitHubSearch", - GitHubSearchTask.GitHubSearchTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Search GitHub repositories, code, issues and users", - """ + } + + private fun StringBuilder.formatTopicResult(topic: Map) { + appendLine("### [${topic["name"]}](${topic["url"]})") + appendLine("${topic["short_description"]}") + appendLine("Featured: ${topic["featured"]} | Curated: ${topic["curated"]}") + } + + private fun StringBuilder.formatRepositoryResult(repo: Map) { + appendLine("### 
${repo["full_name"]}") + appendLine("${repo["description"]}") + appendLine("Stars: ${repo["stargazers_count"]} | Forks: ${repo["forks_count"]}") + appendLine("[View on GitHub](${repo["html_url"]})") + } + + private fun StringBuilder.formatCodeResult(code: Map) { + val repo = code["repository"] as Map + appendLine("### [${repo["full_name"]}](${code["html_url"]})") + appendLine("File: ${code["path"]}") + appendLine("```") + appendLine(code["text_matches"]?.toString()?.take(200) ?: "") + appendLine("```") + } + + private fun StringBuilder.formatCommitResult(commit: Map) { + val repo = commit["repository"] as Map + appendLine("### [${repo["full_name"]}](${commit["html_url"]})") + appendLine("${(commit["commit"] as Map)["message"]}") + appendLine("Author: ${(commit["author"] as Map)["login"]} | Date: ${((commit["commit"] as Map)["author"] as Map)["date"]}") + } + + private fun StringBuilder.formatIssueResult(issue: Map) { + appendLine("### [${issue["title"]}](${issue["html_url"]})") + appendLine("State: ${issue["state"]} | Comments: ${issue["comments"]}") + appendLine("Created by ${(issue["user"] as Map)["login"]} on ${issue["created_at"]}") + } + + private fun StringBuilder.formatUserResult(user: Map) { + appendLine("### [${user["login"]}](${user["html_url"]})") + appendLine("Type: ${user["type"]} | Repos: ${user["public_repos"]}") + appendLine("![Avatar](${user["avatar_url"]})") + } + + companion object { + val GitHubSearch = TaskType( + "GitHubSearch", + GitHubSearchTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Search GitHub repositories, code, issues and users", + """ Performs comprehensive searches across GitHub's content.
    • Searches repositories, code, and issues
    • @@ -207,7 +235,7 @@ GitHubSearch - Search GitHub for code, commits, issues, repositories, topics, or
    • Handles API rate limiting
    """ - ) + ) - } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleProxy.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleProxy.kt index 6c6840859..46d75f9f9 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleProxy.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleProxy.kt @@ -22,7 +22,7 @@ class GoogleProxy : SeedMethodFactory { override fun getSeedItems( taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, orchestrationConfig: OrchestrationConfig - ): List? { + ): List { SeedMethod.log.info("Starting Google Search via proxy with query: ${taskConfig?.search_query}") if (taskConfig?.search_query.isNullOrBlank()) { diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleSearch.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleSearch.kt index db7fa430f..1d31a6c66 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleSearch.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/GoogleSearch.kt @@ -16,110 +16,110 @@ import java.time.Duration import kotlin.math.min class GoogleSearch : SeedMethodFactory { - override fun createStrategy(task: CrawlerAgentTask, user: User?): SeedStrategy = object : SeedStrategy { - override fun getSeedItems( - taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, - orchestrationConfig: OrchestrationConfig - ): List? 
{ - SeedMethod.Companion.log.info("Starting Google Search seed method with query: ${taskConfig?.search_query}") - if (taskConfig?.search_query.isNullOrBlank()) { - SeedMethod.Companion.log.error("Search query is missing for Google Search seed method") - throw IllegalArgumentException("Search query is required when using Google Search seed method") - } - val client = HttpClient.newBuilder().build() + override fun createStrategy(task: CrawlerAgentTask, user: User?): SeedStrategy = object : SeedStrategy { + override fun getSeedItems( + taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, + orchestrationConfig: OrchestrationConfig + ): List { + SeedMethod.log.info("Starting Google Search seed method with query: ${taskConfig?.search_query}") + if (taskConfig?.search_query.isNullOrBlank()) { + SeedMethod.log.error("Search query is missing for Google Search seed method") + throw IllegalArgumentException("Search query is required when using Google Search seed method") + } + val client = HttpClient.newBuilder().build() - val query = taskConfig?.search_query?.trim() - SeedMethod.Companion.log.debug("Using search query: $query") - val encodedQuery = URLEncoder.encode(query, "UTF-8") + val query = taskConfig?.search_query?.trim() + SeedMethod.log.debug("Using search query: $query") + val encodedQuery = URLEncoder.encode(query, "UTF-8") - val resultCount = min(10, 20) // Ensure we don't exceed API limits - val searchLimit = 15 // Reduced from 20 to be more conservative - SeedMethod.Companion.log.debug("Fetching user settings for Google Search API") - val userSettings = ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings( - user ?: UserSettingsManager.Companion.defaultUser - ) - val key = userSettings - .apis.firstOrNull { it.provider == APIProvider.Companion.Google }?.key?.trim() - ?: throw IllegalStateException("Google API token is required but not configured") - val engineId = userSettings.apiBase[APIProvider.Companion.Google]?.trim() - 
?: throw IllegalStateException("Search engine ID is required but not configured") - SeedMethod.Companion.log.debug("Preparing Google Search API request with engine ID: $engineId") - val uriBuilder = - "https://www.googleapis.com/customsearch/v1?key=${key}&cx=${engineId}&q=$encodedQuery&num=$resultCount" - val request = HttpRequest.newBuilder() - .uri(URI.create(uriBuilder)) - .timeout(Duration.ofSeconds(30)) - .header("User-Agent", "CognoTik-Crawler/1.0") - .GET() - .build() - SeedMethod.Companion.log.info("Sending request to Google Search API") - val response = try { - client.send(request, HttpResponse.BodyHandlers.ofString()) - } catch (e: Exception) { - SeedMethod.Companion.log.error("Failed to connect to Google Search API", e) - throw RuntimeException("Failed to connect to Google Search API: ${e.message}", e) - } - val statusCode = response.statusCode() + val resultCount = min(10, 20) // Ensure we don't exceed API limits + val searchLimit = 15 // Reduced from 20 to be more conservative + SeedMethod.log.debug("Fetching user settings for Google Search API") + val userSettings = ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings( + user ?: UserSettingsManager.defaultUser + ) + val key = userSettings + .apis.firstOrNull { it.provider == APIProvider.Google }?.key?.trim() + ?: throw IllegalStateException("Google API token is required but not configured") + val engineId = userSettings.apiBase[APIProvider.Google]?.trim() + ?: throw IllegalStateException("Search engine ID is required but not configured") + SeedMethod.log.debug("Preparing Google Search API request with engine ID: $engineId") + val uriBuilder = + "https://www.googleapis.com/customsearch/v1?key=${key}&cx=${engineId}&q=$encodedQuery&num=$resultCount" + val request = HttpRequest.newBuilder() + .uri(URI.create(uriBuilder)) + .timeout(Duration.ofSeconds(30)) + .header("User-Agent", "CognoTik-Crawler/1.0") + .GET() + .build() + SeedMethod.log.info("Sending request to Google Search 
API") + val response = try { + client.send(request, HttpResponse.BodyHandlers.ofString()) + } catch (e: Exception) { + SeedMethod.log.error("Failed to connect to Google Search API", e) + throw RuntimeException("Failed to connect to Google Search API: ${e.message}", e) + } + val statusCode = response.statusCode() - if (statusCode != 200) { - SeedMethod.Companion.log.error("Google API request failed with status $statusCode: ${response.body()}") - val errorMsg = when (statusCode) { - 401 -> "Invalid API key" - 403 -> "API quota exceeded or access forbidden" - 429 -> "Rate limit exceeded" - else -> "HTTP $statusCode" - } - throw RuntimeException("Google API error: $errorMsg") - } - SeedMethod.Companion.log.debug("Parsing Google Search API response") + if (statusCode != 200) { + SeedMethod.log.error("Google API request failed with status $statusCode: ${response.body()}") + val errorMsg = when (statusCode) { + 401 -> "Invalid API key" + 403 -> "API quota exceeded or access forbidden" + 429 -> "Rate limit exceeded" + else -> "HTTP $statusCode" + } + throw RuntimeException("Google API error: $errorMsg") + } + SeedMethod.log.debug("Parsing Google Search API response") - val searchData: Map = try { - ObjectMapper().readValue(response.body()) - } catch (e: Exception) { - SeedMethod.Companion.log.error("Failed to parse Google Search API response", e) - throw RuntimeException("Invalid response from Google Search API", e) - } - val items = searchData["items"] as? List> - if (items.isNullOrEmpty()) { - SeedMethod.Companion.log.warn("No search results found for query: $query") - return emptyList() - } - SeedMethod.Companion.log.info( - "Successfully retrieved ${items.size} search results, returning ${ - min( - items.size, - searchLimit - ) - } items" - ) - return items.take(searchLimit).mapNotNull { item -> - val link = item["link"] as? String - val title = item["title"] as? String - val snippet = item["snippet"] as? 
String - if (link?.isNotBlank() == true && title?.isNotBlank() == true) { - SeedItem( - link = link, - title = title, - additionalData = buildMap { - snippet?.let { put("snippet", it) } - item["pagemap"]?.let { put("pagemap", it) } - item["displayLink"]?.let { put("displayLink", it) } - } - ) - } else { - SeedMethod.Companion.log.warn("Skipping invalid search result: $item") - null - } + val searchData: Map = try { + ObjectMapper().readValue(response.body()) + } catch (e: Exception) { + SeedMethod.log.error("Failed to parse Google Search API response", e) + throw RuntimeException("Invalid response from Google Search API", e) + } + val items = searchData["items"] as? List> + if (items.isNullOrEmpty()) { + SeedMethod.log.warn("No search results found for query: $query") + return emptyList() + } + SeedMethod.log.info( + "Successfully retrieved ${items.size} search results, returning ${ + min( + items.size, + searchLimit + ) + } items" + ) + return items.take(searchLimit).mapNotNull { item -> + val link = item["link"] as? String + val title = item["title"] as? String + val snippet = item["snippet"] as? 
String + if (link?.isNotBlank() == true && title?.isNotBlank() == true) { + SeedItem( + link = link, + title = title, + additionalData = buildMap { + snippet?.let { put("snippet", it) } + item["pagemap"]?.let { put("pagemap", it) } + item["displayLink"]?.let { put("displayLink", it) } } + ) + } else { + SeedMethod.log.warn("Skipping invalid search result: $item") + null } + } + } - override fun isEnabled(): Boolean { - return user?.let { - val userSettings = - ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings(it) - userSettings.apis.any { api -> api.provider == APIProvider.Companion.Google && api.key?.isNotBlank() == true } && - userSettings.apiBase[APIProvider.Companion.Google]?.isNotBlank() == true - } ?: false - } + override fun isEnabled(): Boolean { + return user?.let { + val userSettings = + ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings(it) + userSettings.apis.any { api -> api.provider == APIProvider.Google && api.key?.isNotBlank() == true } && + userSettings.apiBase[APIProvider.Google]?.isNotBlank() == true + } ?: false } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/HttpClientFetch.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/HttpClientFetch.kt index b885ab830..fca3bf8a7 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/HttpClientFetch.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/HttpClientFetch.kt @@ -18,227 +18,227 @@ import javax.net.ssl.SSLContext import javax.net.ssl.X509TrustManager class HttpClientFetch : FetchMethodFactory { - override fun createStrategy(task: CrawlerAgentTask): FetchStrategy = object : FetchStrategy { - override fun fetch( - url: String, - webSearchDir: File, - index: Int, - pool: ExecutorService, - orchestrationConfig: OrchestrationConfig - ): String { - FetchMethod.Companion.log.info("HttpClient fetching URL: $url 
(index: $index)") - // Create SSL context that accepts all certificates - val sslContext = SSLContext.getInstance("TLS") - sslContext.init(null, arrayOf(object : X509TrustManager { - override fun checkClientTrusted( - chain: Array?, - authType: String? - ) { - } - - override fun checkServerTrusted( - chain: Array?, - authType: String? - ) { - } - - override fun getAcceptedIssuers(): Array = arrayOf() - }), SecureRandom()) - - val client = HttpClient.newBuilder() - .connectTimeout(Duration.ofSeconds(30)) - .followRedirects(HttpClient.Redirect.NORMAL) - .sslContext(sslContext) - .build() -val request = HttpRequest.newBuilder().uri(URI.create(url)) - .timeout(Duration.ofSeconds(60)) - .header( - "User-Agent", - "Mozilla/5.0 (compatible; CognotikBot/1.0; +https://github.com/SimiaCryptus/cognotik)" - ) - .header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8") - .header("Accept-Language", "en-US,en;q=0.5") - //.header("Accept-Encoding", "gzip, deflate, br") - .header("Accept-Charset", "utf-8, iso-8859-1;q=0.5") - .GET() - .build() - FetchMethod.Companion.log.debug("Sending HTTP request to: $url") - val response = try { - client.send(request, HttpResponse.BodyHandlers.ofString(StandardCharsets.UTF_8)) - } catch (e: Exception) { - FetchMethod.Companion.log.error("HTTP request failed for URL: $url", e) - throw RuntimeException("Failed to fetch URL: $url - ${e.message}", e) - } + override fun createStrategy(task: CrawlerAgentTask): FetchStrategy = object : FetchStrategy { + override fun fetch( + url: String, + webSearchDir: File, + index: Int, + pool: ExecutorService, + orchestrationConfig: OrchestrationConfig + ): String { + FetchMethod.log.info("HttpClient fetching URL: $url (index: $index)") + // Create SSL context that accepts all certificates + val sslContext = SSLContext.getInstance("TLS") + sslContext.init(null, arrayOf(object : X509TrustManager { + override fun checkClientTrusted( + chain: Array?, + authType: String? 
+ ) { + } - val contentType = response.headers().firstValue("Content-Type").orElse("") - FetchMethod.Companion.log.debug("Received response from $url with status: ${response.statusCode()}, Content-Type: $contentType") - if (response.statusCode() !in 200..299) { - throw RuntimeException("HTTP ${response.statusCode()} error for URL: $url") - } + override fun checkServerTrusted( + chain: Array?, + authType: String? + ) { + } - val content = when { - // Handle HTML content - contentType.startsWith("text/html") || contentType.isEmpty() -> { - val body = response.body() - if (body.isNullOrBlank()) { - FetchMethod.Companion.log.warn("Received empty body from URL: $url") - return "" - } - - FetchMethod.Companion.log.debug("Saving raw HTML content for URL: $url") - task.saveRawContent(webSearchDir.resolve("raw_pages"), url, body) - FetchMethod.Companion.log.debug("Simplifying HTML content for URL: $url") - var simplified = HtmlSimplifier.scrubHtml( - str = body, - baseUrl = url, - includeCssData = false, - simplifyStructure = true, - keepObjectIds = false, - preserveWhitespace = false, - keepScriptElements = false, - keepInteractiveElements = false, - keepMediaElements = false, - keepEventHandlers = false - ) - - // Check for reasonable content length - if (simplified.length > 5_000_000) { // 5MB limit - FetchMethod.Companion.log.info("Content too large (${simplified.length} chars) for URL: $url, truncating") - simplified = simplified.substring(0, 1_000_000) - } - - FetchMethod.Companion.log.debug("Saving simplified content for URL: $url") - task.saveRawContent(webSearchDir.resolve("reduced_pages"), url, simplified) - processHtmlContent(body, url, webSearchDir, task) - } - - // Handle document formats (PDF, DOCX, etc.) 
- contentType.startsWith("application/pdf") || - contentType.startsWith("application/msword") || - contentType.startsWith("application/vnd.openxmlformats-officedocument") || - contentType.startsWith("application/vnd.ms-") || - contentType.startsWith("application/vnd.oasis.opendocument") -> { - FetchMethod.Companion.log.info("Detected document content type: $contentType for URL: $url") - val binaryResponse = client.send(request, HttpResponse.BodyHandlers.ofByteArray()) - val bytes = binaryResponse.body() - // Check file size limit (10MB) - if (bytes.size > 10_000_000) { - FetchMethod.Companion.log.warn("Document too large (${bytes.size} bytes) for URL: $url, skipping") - return "Document too large to process (${bytes.size} bytes)" - } - - - val extension = getExtensionFromContentType(contentType, url) - - // Save the original document file - val urlSafe = url.replace(Regex("[^a-zA-Z0-9]"), "_").take(50) - val documentsDir = webSearchDir.resolve("documents") - documentsDir.mkdirs() - val documentFile = File(documentsDir, "${urlSafe}_${index}.$extension") - FileOutputStream(documentFile).use { it.write(bytes) } - FetchMethod.Companion.log.debug("Saved original document to: ${documentFile.absolutePath}") - - // Also create a temporary file for text extraction - val tempFile = File.createTempFile("webcrawl_", ".$extension") - tempFile.deleteOnExit() - - FileOutputStream(tempFile).use { it.write(bytes) } - FetchMethod.Companion.log.debug("Saved document to temporary file: ${tempFile.absolutePath}") - - // Use DocumentReader to extract text - val extractedText = try { - tempFile.getReader().use { reader -> - reader.getText() - } - } catch (e: Exception) { - FetchMethod.Companion.log.error("Failed to extract text from document at $url", e) - "" - } finally { - tempFile.delete() - } - - if (extractedText.isNotBlank()) { - FetchMethod.Companion.log.debug("Extracted ${extractedText.length} characters from document") - 
task.saveRawContent(webSearchDir.resolve("extracted_text"), url, extractedText) - } - extractedText - } - - // Handle plain text - contentType.startsWith("text/") -> { - val body = response.body() - FetchMethod.Companion.log.debug("Processing plain text content for URL: $url") - task.saveRawContent(webSearchDir.resolve("text_pages"), url, body) - body - } - - // Skip other content types - else -> { - FetchMethod.Companion.log.warn("Skipping unsupported content type: $contentType for URL: $url") - "" - } + override fun getAcceptedIssuers(): Array = arrayOf() + }), SecureRandom()) + + val client = HttpClient.newBuilder() + .connectTimeout(Duration.ofSeconds(30)) + .followRedirects(HttpClient.Redirect.NORMAL) + .sslContext(sslContext) + .build() + val request = HttpRequest.newBuilder().uri(URI.create(url)) + .timeout(Duration.ofSeconds(60)) + .header( + "User-Agent", + "Mozilla/5.0 (compatible; CognotikBot/1.0; +https://github.com/SimiaCryptus/cognotik)" + ) + .header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8") + .header("Accept-Language", "en-US,en;q=0.5") + //.header("Accept-Encoding", "gzip, deflate, br") + .header("Accept-Charset", "utf-8, iso-8859-1;q=0.5") + .GET() + .build() + FetchMethod.log.debug("Sending HTTP request to: $url") + val response = try { + client.send(request, HttpResponse.BodyHandlers.ofString(StandardCharsets.UTF_8)) + } catch (e: Exception) { + FetchMethod.log.error("HTTP request failed for URL: $url", e) + throw RuntimeException("Failed to fetch URL: $url - ${e.message}", e) + } + + val contentType = response.headers().firstValue("Content-Type").orElse("") + FetchMethod.log.debug("Received response from $url with status: ${response.statusCode()}, Content-Type: $contentType") + if (response.statusCode() !in 200..299) { + throw RuntimeException("HTTP ${response.statusCode()} error for URL: $url") + } + + val content = when { + // Handle HTML content + contentType.startsWith("text/html") || contentType.isEmpty() 
-> { + val body = response.body() + if (body.isNullOrBlank()) { + FetchMethod.log.warn("Received empty body from URL: $url") + return "" + } + + FetchMethod.log.debug("Saving raw HTML content for URL: $url") + task.saveRawContent(webSearchDir.resolve("raw_pages"), url, body) + FetchMethod.log.debug("Simplifying HTML content for URL: $url") + var simplified = HtmlSimplifier.scrubHtml( + str = body, + baseUrl = url, + includeCssData = false, + simplifyStructure = true, + keepObjectIds = false, + preserveWhitespace = false, + keepScriptElements = false, + keepInteractiveElements = false, + keepMediaElements = false, + keepEventHandlers = false + ) + + // Check for reasonable content length + if (simplified.length > 5_000_000) { // 5MB limit + FetchMethod.log.info("Content too large (${simplified.length} chars) for URL: $url, truncating") + simplified = simplified.substring(0, 1_000_000) + } + + FetchMethod.log.debug("Saving simplified content for URL: $url") + task.saveRawContent(webSearchDir.resolve("reduced_pages"), url, simplified) + processHtmlContent(body, url, webSearchDir, task) + } + + // Handle document formats (PDF, DOCX, etc.) 
+ contentType.startsWith("application/pdf") || + contentType.startsWith("application/msword") || + contentType.startsWith("application/vnd.openxmlformats-officedocument") || + contentType.startsWith("application/vnd.ms-") || + contentType.startsWith("application/vnd.oasis.opendocument") -> { + FetchMethod.log.info("Detected document content type: $contentType for URL: $url") + val binaryResponse = client.send(request, HttpResponse.BodyHandlers.ofByteArray()) + val bytes = binaryResponse.body() + // Check file size limit (10MB) + if (bytes.size > 10_000_000) { + FetchMethod.log.warn("Document too large (${bytes.size} bytes) for URL: $url, skipping") + return "Document too large to process (${bytes.size} bytes)" + } + + + val extension = getExtensionFromContentType(contentType, url) + + // Save the original document file + val urlSafe = url.replace(Regex("[^a-zA-Z0-9]"), "_").take(50) + val documentsDir = webSearchDir.resolve("documents") + documentsDir.mkdirs() + val documentFile = File(documentsDir, "${urlSafe}_${index}.$extension") + FileOutputStream(documentFile).use { it.write(bytes) } + FetchMethod.log.debug("Saved original document to: ${documentFile.absolutePath}") + + // Also create a temporary file for text extraction + val tempFile = File.createTempFile("webcrawl_", ".$extension") + tempFile.deleteOnExit() + + FileOutputStream(tempFile).use { it.write(bytes) } + FetchMethod.log.debug("Saved document to temporary file: ${tempFile.absolutePath}") + + // Use DocumentReader to extract text + val extractedText = try { + tempFile.getReader().use { reader -> + reader.getText() } + } catch (e: Exception) { + FetchMethod.log.error("Failed to extract text from document at $url", e) + "" + } finally { + tempFile.delete() + } + + if (extractedText.isNotBlank()) { + FetchMethod.log.debug("Extracted ${extractedText.length} characters from document") + task.saveRawContent(webSearchDir.resolve("extracted_text"), url, extractedText) + } + extractedText + } - 
task.urlContentCache[url] = content - FetchMethod.Companion.log.info("Successfully processed URL: $url, content length: ${content.length}") - return content + // Handle plain text + contentType.startsWith("text/") -> { + val body = response.body() + FetchMethod.log.debug("Processing plain text content for URL: $url") + task.saveRawContent(webSearchDir.resolve("text_pages"), url, body) + body } - private fun processHtmlContent( - body: String, - url: String, - webSearchDir: File, - task: CrawlerAgentTask - ): String { - FetchMethod.Companion.log.debug("Saving raw HTML content for URL: $url") - task.saveRawContent(webSearchDir.resolve("raw_pages"), url, body) - FetchMethod.Companion.log.debug("Simplifying HTML content for URL: $url") - val simplified = try { - HtmlSimplifier.scrubHtml( - str = body, - baseUrl = url, - includeCssData = false, - simplifyStructure = true, - keepObjectIds = false, - preserveWhitespace = false, - keepScriptElements = false, - keepInteractiveElements = false, - keepMediaElements = false, - keepEventHandlers = false - ) - } catch (e: Exception) { - FetchMethod.Companion.log.error("HTML simplification failed for URL: $url, using raw content", e) - // Fallback to basic text extraction if HTML simplification fails - body.replace(Regex("<[^>]+>"), " ") - .replace(Regex("\\s+"), " ") - .trim() - } - FetchMethod.Companion.log.debug("Saving simplified content for URL: $url") - task.saveRawContent(webSearchDir.resolve("reduced_pages"), url, simplified) - return simplified + // Skip other content types + else -> { + FetchMethod.log.warn("Skipping unsupported content type: $contentType for URL: $url") + "" } + } - private fun getExtensionFromContentType(contentType: String, url: String): String { - return when { - contentType.contains("pdf") -> "pdf" - contentType.contains("msword") -> "doc" - contentType.contains("wordprocessingml") -> "docx" - contentType.contains("spreadsheetml") -> "xlsx" - contentType.contains("ms-excel") -> "xls" - 
contentType.contains("presentationml") -> "pptx" - contentType.contains("ms-powerpoint") -> "ppt" - contentType.contains("opendocument.text") -> "odt" - contentType.contains("rtf") -> "rtf" - else -> { - // Try to extract from URL - val urlPath = url.substringBefore("?").substringAfterLast("/") - if (urlPath.contains(".")) { - urlPath.substringAfterLast(".") - } else { - "tmp" - } - } - } + task.urlContentCache[url] = content + FetchMethod.log.info("Successfully processed URL: $url, content length: ${content.length}") + return content + } + + private fun processHtmlContent( + body: String, + url: String, + webSearchDir: File, + task: CrawlerAgentTask + ): String { + FetchMethod.log.debug("Saving raw HTML content for URL: $url") + task.saveRawContent(webSearchDir.resolve("raw_pages"), url, body) + FetchMethod.log.debug("Simplifying HTML content for URL: $url") + val simplified = try { + HtmlSimplifier.scrubHtml( + str = body, + baseUrl = url, + includeCssData = false, + simplifyStructure = true, + keepObjectIds = false, + preserveWhitespace = false, + keepScriptElements = false, + keepInteractiveElements = false, + keepMediaElements = false, + keepEventHandlers = false + ) + } catch (e: Exception) { + FetchMethod.log.error("HTML simplification failed for URL: $url, using raw content", e) + // Fallback to basic text extraction if HTML simplification fails + body.replace(Regex("<[^>]+>"), " ") + .replace(Regex("\\s+"), " ") + .trim() + } + FetchMethod.log.debug("Saving simplified content for URL: $url") + task.saveRawContent(webSearchDir.resolve("reduced_pages"), url, simplified) + return simplified + } + + private fun getExtensionFromContentType(contentType: String, url: String): String { + return when { + contentType.contains("pdf") -> "pdf" + contentType.contains("msword") -> "doc" + contentType.contains("wordprocessingml") -> "docx" + contentType.contains("spreadsheetml") -> "xlsx" + contentType.contains("ms-excel") -> "xls" + 
contentType.contains("presentationml") -> "pptx" + contentType.contains("ms-powerpoint") -> "ppt" + contentType.contains("opendocument.text") -> "odt" + contentType.contains("rtf") -> "rtf" + else -> { + // Try to extract from URL + val urlPath = url.substringBefore("?").substringAfterLast("/") + if (urlPath.contains(".")) { + urlPath.substringAfterLast(".") + } else { + "tmp" + } } + } } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/RobotsTxtParser.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/RobotsTxtParser.kt index b90bcc13b..d7ef06b96 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/RobotsTxtParser.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/RobotsTxtParser.kt @@ -12,171 +12,175 @@ import java.util.concurrent.ConcurrentHashMap * Parser and cache for robots.txt files */ class RobotsTxtParser { - private val cache = ConcurrentHashMap() - private val client = HttpClient.newBuilder() - .connectTimeout(Duration.ofSeconds(10)) - .followRedirects(HttpClient.Redirect.NORMAL) - .build() - - data class RobotsTxt( - val disallowedPaths: List = emptyList(), - val allowedPaths: List = emptyList(), - val crawlDelay: Long? 
= null, - val sitemaps: List = emptyList() - ) - - /** - * Check if a URL is allowed by robots.txt - */ - fun isAllowed(url: String, userAgent: String = "*"): Boolean { - try { - val uri = URI.create(url) - val baseUrl = "${uri.scheme}://${uri.host}${if (uri.port != -1 && uri.port != 80 && uri.port != 443) ":${uri.port}" else ""}" - val robotsTxt = getRobotsTxt(baseUrl) - - val path = uri.path + (if (uri.query != null) "?${uri.query}" else "") - - // Check allowed paths first (they take precedence) - if (robotsTxt.allowedPaths.any { matchesPattern(path, it) }) { - log.debug("URL allowed by robots.txt allow rule: $url") - return true - } - - // Check disallowed paths - if (robotsTxt.disallowedPaths.any { matchesPattern(path, it) }) { - log.debug("URL disallowed by robots.txt: $url") - return false - } - - log.debug("URL allowed by robots.txt (no matching rules): $url") - return true - } catch (e: Exception) { - log.warn("Error checking robots.txt for $url, allowing by default", e) - return true - } + private val cache = ConcurrentHashMap() + private val client = HttpClient.newBuilder() + .connectTimeout(Duration.ofSeconds(10)) + .followRedirects(HttpClient.Redirect.NORMAL) + .build() + + data class RobotsTxt( + val disallowedPaths: List = emptyList(), + val allowedPaths: List = emptyList(), + val crawlDelay: Long? 
= null, + val sitemaps: List = emptyList() + ) + + /** + * Check if a URL is allowed by robots.txt + */ + fun isAllowed(url: String, userAgent: String = "*"): Boolean { + try { + val uri = URI.create(url) + val baseUrl = "${uri.scheme}://${uri.host}${if (uri.port != -1 && uri.port != 80 && uri.port != 443) ":${uri.port}" else ""}" + val robotsTxt = getRobotsTxt(baseUrl) + + val path = uri.path + (if (uri.query != null) "?${uri.query}" else "") + + // Check allowed paths first (they take precedence) + if (robotsTxt.allowedPaths.any { matchesPattern(path, it) }) { + log.debug("URL allowed by robots.txt allow rule: $url") + return true + } + + // Check disallowed paths + if (robotsTxt.disallowedPaths.any { matchesPattern(path, it) }) { + log.debug("URL disallowed by robots.txt: $url") + return false + } + + log.debug("URL allowed by robots.txt (no matching rules): $url") + return true + } catch (e: Exception) { + log.warn("Error checking robots.txt for $url, allowing by default", e) + return true } + } - /** - * Get crawl delay in milliseconds for a domain - */ - fun getCrawlDelay(url: String): Long? { - try { - val uri = URI.create(url) - val baseUrl = "${uri.scheme}://${uri.host}${if (uri.port != -1 && uri.port != 80 && uri.port != 443) ":${uri.port}" else ""}" - return getRobotsTxt(baseUrl).crawlDelay - } catch (e: Exception) { - log.warn("Error getting crawl delay for $url", e) - return null - } + /** + * Get crawl delay in milliseconds for a domain + */ + fun getCrawlDelay(url: String): Long? 
{ + try { + val uri = URI.create(url) + val baseUrl = "${uri.scheme}://${uri.host}${if (uri.port != -1 && uri.port != 80 && uri.port != 443) ":${uri.port}" else ""}" + return getRobotsTxt(baseUrl).crawlDelay + } catch (e: Exception) { + log.warn("Error getting crawl delay for $url", e) + return null } + } + + /** + * Fetch and parse robots.txt for a domain + */ + private fun getRobotsTxt(baseUrl: String): RobotsTxt { + return cache.getOrPut(baseUrl) { + try { + val robotsUrl = "$baseUrl/robots.txt" + log.debug("Fetching robots.txt from: $robotsUrl") + + val request = HttpRequest.newBuilder() + .uri(URI.create(robotsUrl)) + .timeout(Duration.ofSeconds(10)) + .header("User-Agent", "Mozilla/5.0 (compatible; CognotikBot/1.0)") + .GET() + .build() - /** - * Fetch and parse robots.txt for a domain - */ - private fun getRobotsTxt(baseUrl: String): RobotsTxt { - return cache.getOrPut(baseUrl) { - try { - val robotsUrl = "$baseUrl/robots.txt" - log.debug("Fetching robots.txt from: $robotsUrl") - - val request = HttpRequest.newBuilder() - .uri(URI.create(robotsUrl)) - .timeout(Duration.ofSeconds(10)) - .header("User-Agent", "Mozilla/5.0 (compatible; CognotikBot/1.0)") - .GET() - .build() - - val response = client.send(request, HttpResponse.BodyHandlers.ofString()) - - if (response.statusCode() == 200) { - parseRobotsTxt(response.body()) - } else { - log.debug("No robots.txt found at $robotsUrl (status: ${response.statusCode()})") - RobotsTxt() // Empty rules = allow all - } - } catch (e: Exception) { - log.warn("Failed to fetch robots.txt from $baseUrl, allowing all", e) - RobotsTxt() // On error, allow all - } + val response = client.send(request, HttpResponse.BodyHandlers.ofString()) + + if (response.statusCode() == 200) { + parseRobotsTxt(response.body()) + } else { + log.debug("No robots.txt found at $robotsUrl (status: ${response.statusCode()})") + RobotsTxt() // Empty rules = allow all } + } catch (e: Exception) { + log.warn("Failed to fetch robots.txt from $baseUrl, 
allowing all", e) + RobotsTxt() // On error, allow all + } } + } + + /** + * Parse robots.txt content + */ + private fun parseRobotsTxt(content: String): RobotsTxt { + val disallowedPaths = mutableListOf() + val allowedPaths = mutableListOf() + val sitemaps = mutableListOf() + var crawlDelay: Long? = null + var isRelevantUserAgent = false + + content.lines().forEach { line -> + val trimmed = line.trim() - /** - * Parse robots.txt content - */ - private fun parseRobotsTxt(content: String): RobotsTxt { - val disallowedPaths = mutableListOf() - val allowedPaths = mutableListOf() - val sitemaps = mutableListOf() - var crawlDelay: Long? = null - var isRelevantUserAgent = false - - content.lines().forEach { line -> - val trimmed = line.trim() - - // Skip comments and empty lines - if (trimmed.isEmpty() || trimmed.startsWith("#")) { - return@forEach - } - - val parts = trimmed.split(":", limit = 2) - if (parts.size != 2) return@forEach - - val directive = parts[0].trim().lowercase() - val value = parts[1].trim() - - when (directive) { - "user-agent" -> { - // Match * or specific bot names - isRelevantUserAgent = value == "*" || value.lowercase().contains("cognotik") - } - "disallow" -> { - if (isRelevantUserAgent && value.isNotEmpty()) { - disallowedPaths.add(value) - } - } - "allow" -> { - if (isRelevantUserAgent && value.isNotEmpty()) { - allowedPaths.add(value) - } - } - "crawl-delay" -> { - if (isRelevantUserAgent) { - crawlDelay = value.toDoubleOrNull()?.let { (it * 1000).toLong() } - } - } - "sitemap" -> { - sitemaps.add(value) - } - } + // Skip comments and empty lines + if (trimmed.isEmpty() || trimmed.startsWith("#")) { + return@forEach + } + + val parts = trimmed.split(":", limit = 2) + if (parts.size != 2) return@forEach + + val directive = parts[0].trim().lowercase() + val value = parts[1].trim() + + when (directive) { + "user-agent" -> { + // Match * or specific bot names + isRelevantUserAgent = value == "*" || value.lowercase().contains("cognotik") } - 
log.debug("Parsed robots.txt: ${disallowedPaths.size} disallow rules, ${allowedPaths.size} allow rules, crawl-delay: $crawlDelay") - return RobotsTxt(disallowedPaths, allowedPaths, crawlDelay, sitemaps) - } + "disallow" -> { + if (isRelevantUserAgent && value.isNotEmpty()) { + disallowedPaths.add(value) + } + } + + "allow" -> { + if (isRelevantUserAgent && value.isNotEmpty()) { + allowedPaths.add(value) + } + } + + "crawl-delay" -> { + if (isRelevantUserAgent) { + crawlDelay = value.toDoubleOrNull()?.let { (it * 1000).toLong() } + } + } - /** - * Check if a path matches a robots.txt pattern - */ - private fun matchesPattern(path: String, pattern: String): Boolean { - // Convert robots.txt pattern to regex - // * matches any sequence of characters - // $ at end means end of URL - val regexPattern = pattern - .replace(".", "\\.") - .replace("*", ".*") - .replace("?", "\\?") - .let { if (it.endsWith("$")) it else "$it.*" } - - return try { - Regex("^$regexPattern").matches(path) - } catch (e: Exception) { - log.info("Invalid robots.txt pattern: $pattern", e) - false + "sitemap" -> { + sitemaps.add(value) } + } } - companion object { - private val log = LoggerFactory.getLogger(RobotsTxtParser::class.java) + log.debug("Parsed robots.txt: ${disallowedPaths.size} disallow rules, ${allowedPaths.size} allow rules, crawl-delay: $crawlDelay") + return RobotsTxt(disallowedPaths, allowedPaths, crawlDelay, sitemaps) + } + + /** + * Check if a path matches a robots.txt pattern + */ + private fun matchesPattern(path: String, pattern: String): Boolean { + // Convert robots.txt pattern to regex + // * matches any sequence of characters + // $ at end means end of URL + val regexPattern = pattern + .replace(".", "\\.") + .replace("*", ".*") + .replace("?", "\\?") + .let { if (it.endsWith("$")) it else "$it.*" } + + return try { + Regex("^$regexPattern").matches(path) + } catch (e: Exception) { + log.info("Invalid robots.txt pattern: $pattern", e) + false } + } + + companion object { + 
private val log = LoggerFactory.getLogger(RobotsTxtParser::class.java) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/SearchAPISearch.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/SearchAPISearch.kt index 371cbcf5e..167f0ef12 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/SearchAPISearch.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/SearchAPISearch.kt @@ -14,124 +14,124 @@ import java.net.http.HttpRequest import java.net.http.HttpResponse open class SearchAPISearch( - val engine: String, - private val mainResultField: String + val engine: String, + private val mainResultField: String ) : SeedMethodFactory { - override fun createStrategy( - task: CrawlerAgentTask, - user: User?, - ): SeedStrategy = object : SeedStrategy { - override fun getSeedItems( - taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, - orchestrationConfig: OrchestrationConfig, - ): List? 
{ - log.info("Starting SearchAPI.io seed method with query: ${taskConfig?.search_query}") - if (taskConfig?.search_query.isNullOrBlank()) { - log.error("Search query is missing for SearchAPI.io seed method") - throw IllegalArgumentException("Search query is required when using SearchAPI.io seed method") - } - val client = HttpClient.newBuilder().build() - val query = taskConfig.search_query.trim() - log.debug("Using search query: $query") - val encodedQuery = URLEncoder.encode(query, "UTF-8") - val resultCount = 10 - val searchLimit = 20 - log.debug("Fetching user settings for SearchAPI.io") - val userSettings = - ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings( - user ?: UserSettingsManager.Companion.defaultUser - ) - val apiKey = userSettings - .apis.firstOrNull { it.provider == APIProvider.Companion.SearchAPI }?.key?.trim() - ?: throw RuntimeException("SearchAPI.io API key is required") - log.debug("Preparing SearchAPI.io request") - val uriBuilder = - "https://www.searchapi.io/api/v1/search?engine=$engine&q=$encodedQuery&num=$resultCount&api_key=$apiKey" - val request = HttpRequest.newBuilder() - .uri(URI.create(uriBuilder)) - .header("User-Agent", "CognoTik-Crawler/1.0") - .GET() - .build() - log.info("Sending request to SearchAPI.io") - val response = client.send(request, HttpResponse.BodyHandlers.ofString()) - val statusCode = response.statusCode() - val body = response.body() - if (statusCode != 200) { - log.error("SearchAPI.io request failed with status $statusCode: $body") - throw RuntimeException("SearchAPI.io request failed with status $statusCode: $body") - } - log.debug("Parsing SearchAPI.io response") - var results = handleResult(body, query) - log.info( - "Successfully retrieved ${results.size} search results, returning ${ - results.size.coerceAtMost(searchLimit) - } items" - ) - results = results.take(searchLimit) - return results.mapNotNull { result -> - val link = (result["link"] - ?: result["url"] - ?: 
result["website"] - ?: result["pdf"] - ?: result["apply_link"] - ) as? String - val title = (result["title"]) as? String - if (link?.isNotBlank() == true && title?.isNotBlank() == true) { - SeedItem( - link = link, - title = title, - additionalData = result.filterKeys { - it != "link" && - it != "url" && - it != "title" && - it != "website" && - it != "pdf" && - it != "apply_link" - } - ) - } else { - log.warn("Skipping invalid search result missing link or title: $result") - null - } + override fun createStrategy( + task: CrawlerAgentTask, + user: User?, + ): SeedStrategy = object : SeedStrategy { + override fun getSeedItems( + taskConfig: CrawlerAgentTask.CrawlerTaskExecutionConfigData?, + orchestrationConfig: OrchestrationConfig, + ): List { + log.info("Starting SearchAPI.io seed method with query: ${taskConfig?.search_query}") + if (taskConfig?.search_query.isNullOrBlank()) { + log.error("Search query is missing for SearchAPI.io seed method") + throw IllegalArgumentException("Search query is required when using SearchAPI.io seed method") + } + val client = HttpClient.newBuilder().build() + val query = taskConfig.search_query.trim() + log.debug("Using search query: $query") + val encodedQuery = URLEncoder.encode(query, "UTF-8") + val resultCount = 10 + val searchLimit = 20 + log.debug("Fetching user settings for SearchAPI.io") + val userSettings = + ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings( + user ?: UserSettingsManager.defaultUser + ) + val apiKey = userSettings + .apis.firstOrNull { it.provider == APIProvider.SearchAPI }?.key?.trim() + ?: throw RuntimeException("SearchAPI.io API key is required") + log.debug("Preparing SearchAPI.io request") + val uriBuilder = + "https://www.searchapi.io/api/v1/search?engine=$engine&q=$encodedQuery&num=$resultCount&api_key=$apiKey" + val request = HttpRequest.newBuilder() + .uri(URI.create(uriBuilder)) + .header("User-Agent", "CognoTik-Crawler/1.0") + .GET() + .build() + 
log.info("Sending request to SearchAPI.io") + val response = client.send(request, HttpResponse.BodyHandlers.ofString()) + val statusCode = response.statusCode() + val body = response.body() + if (statusCode != 200) { + log.error("SearchAPI.io request failed with status $statusCode: $body") + throw RuntimeException("SearchAPI.io request failed with status $statusCode: $body") + } + log.debug("Parsing SearchAPI.io response") + var results = handleResult(body, query) + log.info( + "Successfully retrieved ${results.size} search results, returning ${ + results.size.coerceAtMost(searchLimit) + } items" + ) + results = results.take(searchLimit) + return results.mapNotNull { result -> + val link = (result["link"] + ?: result["url"] + ?: result["website"] + ?: result["pdf"] + ?: result["apply_link"] + ) as? String + val title = (result["title"]) as? String + if (link?.isNotBlank() == true && title?.isNotBlank() == true) { + SeedItem( + link = link, + title = title, + additionalData = result.filterKeys { + it != "link" && + it != "url" && + it != "title" && + it != "website" && + it != "pdf" && + it != "apply_link" } + ) + } else { + log.warn("Skipping invalid search result missing link or title: $result") + null } - - override fun isEnabled() = user?.let { - ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings(it) - .apis.any { api -> api.provider == APIProvider.Companion.SearchAPI && api.key != null } - } ?: false + } } - open fun handleResult( - body: String, - query: String - ) = try { - JsonUtil.fromJson>( - body, - Map::class.java - ).let { rawData -> - try { - if (!rawData.containsKey(mainResultField)) { - log.warn("Expected field '$mainResultField' not found in SearchAPI.io response for query: $query") - listOf(rawData) - } else { - val list = (rawData[mainResultField] as List>) - if (list.isEmpty()) { - log.warn("No search results found for query: $query") - listOf(rawData) - } else { - log.debug("Parsed ${list.size} results from 
SearchAPI.io response") - list - } - } - } catch (e: Exception) { - log.debug("Failed to parse SearchAPI.io response", e) - listOf(rawData) - } + override fun isEnabled() = user?.let { + ApplicationServices.fileApplicationServices().userSettingsManager.getUserSettings(it) + .apis.any { api -> api.provider == APIProvider.SearchAPI && api.key != null } + } ?: false + } + + open fun handleResult( + body: String, + query: String + ) = try { + JsonUtil.fromJson>( + body, + Map::class.java + ).let { rawData -> + try { + if (!rawData.containsKey(mainResultField)) { + log.warn("Expected field '$mainResultField' not found in SearchAPI.io response for query: $query") + listOf(rawData) + } else { + val list = (rawData[mainResultField] as List>) + if (list.isEmpty()) { + log.warn("No search results found for query: $query") + listOf(rawData) + } else { + log.debug("Parsed ${list.size} results from SearchAPI.io response") + list + } } - } catch (e: Exception) { + } catch (e: Exception) { log.debug("Failed to parse SearchAPI.io response", e) - listOf(JsonUtil.fromJson(body, Map::class.java)) + listOf(rawData) + } } + } catch (e: Exception) { + log.debug("Failed to parse SearchAPI.io response", e) + listOf(JsonUtil.fromJson(body, Map::class.java)) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/Selenium.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/Selenium.kt index c47b3eb05..b932238fa 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/Selenium.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/online/Selenium.kt @@ -7,48 +7,48 @@ import java.io.File import java.util.concurrent.ExecutorService class Selenium : FetchMethodFactory { - override fun createStrategy(task: CrawlerAgentTask): FetchStrategy = object : FetchStrategy { - override fun fetch( - url: String, - webSearchDir: File, - index: Int, - pool: ExecutorService, - orchestrationConfig: 
OrchestrationConfig - ): String { - log.info("Selenium fetching URL: $url (index: $index)") - return try { - if (task.selenium == null) { - log.debug("Initializing Selenium driver") - task.selenium = Selenium2S3( - pool = pool, cookies = null, driver = Selenium2S3.Companion.chromeDriver() - ) - } - try { - log.debug("Navigating to URL with Selenium: $url") - task.selenium?.navigate(url) - val pageSource = task.selenium?.getPageSource() ?: "" - log.debug("Retrieved page source with Selenium, length: ${pageSource.length}") - pageSource - } finally { - task.selenium?.let { - log.debug("Quitting Selenium driver") - it.quit() - task.selenium = null - } - } - } catch (e: Exception) { - log.warn("Selenium fetch failed for URL: $url, falling back to HttpClient. Error: ${e.message}", e) - FetchConfig.isSeleniumEnabled = false - createStrategy(task).fetch(url, webSearchDir, index, pool, orchestrationConfig) - } + override fun createStrategy(task: CrawlerAgentTask): FetchStrategy = object : FetchStrategy { + override fun fetch( + url: String, + webSearchDir: File, + index: Int, + pool: ExecutorService, + orchestrationConfig: OrchestrationConfig + ): String { + log.info("Selenium fetching URL: $url (index: $index)") + return try { + if (task.selenium == null) { + log.debug("Initializing Selenium driver") + task.selenium = Selenium2S3( + pool = pool, cookies = null, driver = Selenium2S3.chromeDriver() + ) } - - override fun isEnabled(): Boolean { - return FetchConfig.isSeleniumEnabled; + try { + log.debug("Navigating to URL with Selenium: $url") + task.selenium?.navigate(url) + val pageSource = task.selenium?.getPageSource() ?: "" + log.debug("Retrieved page source with Selenium, length: ${pageSource.length}") + pageSource + } finally { + task.selenium?.let { + log.debug("Quitting Selenium driver") + it.quit() + task.selenium = null + } } + } catch (e: Exception) { + log.warn("Selenium fetch failed for URL: $url, falling back to HttpClient. 
Error: ${e.message}", e) + FetchConfig.isSeleniumEnabled = false + createStrategy(task).fetch(url, webSearchDir, index, pool, orchestrationConfig) + } } - - companion object { - val log = LoggerFactory.getLogger(Selenium::class.java) + + override fun isEnabled(): Boolean { + return FetchConfig.isSeleniumEnabled } + } + + companion object { + val log = LoggerFactory.getLogger(Selenium::class.java) + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbductiveReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbductiveReasoningTask.kt index d0c91b51e..2f9b9c2a9 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbductiveReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbductiveReasoningTask.kt @@ -1,15 +1,21 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.getReader import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.nio.file.Path import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -36,6 +42,14 @@ class AbductiveReasoningTask( val testable_predictions: List = emptyList() ) + protected val codeFiles = mutableMapOf() + + data 
class LinkInfo( + val link: String, + val file: File? + ) + + data class HypothesesResponse( val hypotheses: List = emptyList(), val reasoning: String = "" @@ -48,6 +62,8 @@ class AbductiveReasoningTask( val generate_hypotheses: Boolean = true, @Description("Maximum number of hypotheses to generate") val max_hypotheses: Int = 5, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Criteria for evaluating hypotheses: explanatory_power, simplicity, testability, prior_probability") val evaluate_criteria: List? = listOf( "explanatory_power", @@ -99,6 +115,10 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses val startTime = System.currentTimeMillis() var stepStartTime = System.currentTimeMillis() log.info("Starting AbductiveReasoningTask with ${executionConfig?.observations?.size ?: 0} observations") + val transcript = transcript(task) + // Combine messages with file input + val inputContext = (messages + listOf(getInputFileCode())).filter { it.isNotBlank() } + val observations = executionConfig?.observations if (observations.isNullOrEmpty()) { @@ -106,6 +126,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses log.error(errorMsg) task.safeComplete(errorMsg, log) resultFn(errorMsg) + transcript?.close() return } @@ -132,6 +153,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Abductive Reasoning Analysis") appendLine() appendLine("**Purpose:** Generate and evaluate explanatory hypotheses") @@ -146,6 +168,8 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses appendLine() appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") appendLine() + appendLine("**Input Context:** ${inputContext.size} sections provided") + appendLine() appendLine("---") appendLine() 
appendLine("## Progress") @@ -161,6 +185,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses tabs["Observations"] = observationsTask.placeholder observationsTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Observations") appendLine() appendLine("The following observations need explanation:") @@ -178,12 +203,14 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses // Gather context val priorContext = getPriorCode(agent.executionState) + val combinedContext = (priorContext + "\n\n" + inputContext.joinToString("\n\n")).trim() if (priorContext.isNotBlank()) { log.debug("Found prior context: ${priorContext.length} characters") val contextTask = ui.newTask(false) tabs["Context"] = contextTask.placeholder contextTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Context from Previous Tasks") appendLine() appendLine(priorContext.truncateForDisplay()) @@ -195,6 +222,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses // Update overview overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("✅ Observations documented") appendLine() @@ -208,9 +236,10 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses tabs["Hypotheses"] = hypothesesTask.placeholder hypothesesTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Hypothesis Generation") appendLine() - appendLine("**Status:** 🔄 Generating explanatory hypotheses...") + appendLine("**Status:** 🔄 Generating hypotheses...") }.renderMarkdown ) task.update() @@ -222,7 +251,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses maxHypotheses, evaluateCriteria, domainContext, - priorContext, + combinedContext, api ) } else { @@ -233,7 +262,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses existing, evaluateCriteria, domainContext, - priorContext, + combinedContext, api ) } @@ -245,6 +274,7 @@ AbductiveReasoning - 
Generate and evaluate explanatory hypotheses // Display hypotheses hypothesesTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("## Generated Hypotheses") appendLine() @@ -293,6 +323,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses // Update overview overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("✅ Hypotheses generated: ${hypotheses.size} (${hypothesesTime}s)") appendLine() @@ -306,6 +337,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses tabs["Analysis"] = analysisTask.placeholder analysisTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Comparative Analysis") appendLine() appendLine("**Status:** 🔄 Analyzing hypotheses...") @@ -326,6 +358,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses analysisTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("## Comparative Analysis Results") appendLine() @@ -341,6 +374,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses // Update overview overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("✅ Comparative analysis complete (${analysisTime}s)") if (suggestTests) { @@ -358,6 +392,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses tabs["Validation Tests"] = testsTask.placeholder testsTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Validation Tests") appendLine() appendLine("**Status:** 🔄 Generating test suggestions...") @@ -378,6 +413,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses testsTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("## Suggested Validation Tests") appendLine() @@ -392,6 +428,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("✅ 
Validation tests generated (${testsTime}s)") }.renderMarkdown @@ -405,6 +442,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses tabs["Best Explanation"] = summaryTask.placeholder summaryTask.add( buildString { + writeToTranscript(transcript, this) appendLine("# Best Explanation (Inference to Best Explanation)") appendLine() if (bestHypothesis != null) { @@ -418,10 +456,24 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses appendLine() appendLine("### Key Strengths") appendLine() - appendLine("- **Explanatory Power:** ${String.format("%.2f", bestHypothesis.explanatory_power)} - ${getStrengthDescription(bestHypothesis.explanatory_power)}") + appendLine( + "- **Explanatory Power:** ${ + String.format( + "%.2f", + bestHypothesis.explanatory_power + ) + } - ${getStrengthDescription(bestHypothesis.explanatory_power)}" + ) appendLine("- **Simplicity:** ${String.format("%.2f", bestHypothesis.simplicity)} - ${getSimplicityDescription(bestHypothesis.simplicity)}") appendLine("- **Testability:** ${String.format("%.2f", bestHypothesis.testability)} - ${getTestabilityDescription(bestHypothesis.testability)}") - appendLine("- **Prior Probability:** ${String.format("%.2f", bestHypothesis.prior_probability)} - ${getProbabilityDescription(bestHypothesis.prior_probability)}") + appendLine( + "- **Prior Probability:** ${ + String.format( + "%.2f", + bestHypothesis.prior_probability + ) + } - ${getProbabilityDescription(bestHypothesis.prior_probability)}" + ) appendLine() if (bestHypothesis.testable_predictions.isNotEmpty()) { appendLine("### Next Steps: Validate This Hypothesis") @@ -443,6 +495,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses // Final summary val totalTime = System.currentTimeMillis() - startTime + val (summaryLink, summaryFile) = Pair(task.linkTo("analysis_summary.md"), task.resolve("analysis_summary.md")) val finalSummary = buildString { appendLine("# Abductive Reasoning Summary") appendLine() @@ -455,11 
+508,28 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses appendLine() appendLine(analysis.truncateForDisplay(maxOutputSize)) } + // Write detailed summary to file + summaryFile?.outputStream()?.use { stream -> + stream.write(finalSummary.toByteArray()) + stream.flush() + } + val summaryMessage = buildString { + appendLine("Analysis complete. View detailed results:") + appendLine( + "Summary | HTML | PDF" + ) + } + log.info("AbductiveReasoningTask completed: total_time=${totalTime}ms, observations=${observations.size}, hypotheses=${hypotheses.size}, best_score=${bestHypothesis?.overall_score}") overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("---") appendLine() @@ -475,9 +545,10 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses }.renderMarkdown ) task.update() + transcript?.close() task.safeComplete("Completed abductive reasoning analysis: ${hypotheses.size} hypotheses evaluated in ${totalTime / 1000}s", log) - resultFn(finalSummary.toString()) + resultFn(summaryMessage.toString()) } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime @@ -486,6 +557,7 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses overviewTask.add( buildString { + writeToTranscript(transcript, this) appendLine() appendLine("---") appendLine() @@ -506,9 +578,60 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses appendLine("**Error:** ${e.message}") } resultFn(errorOutput) + transcript?.close() + } + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() 
+ } + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", "h", "hpp", + "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: File) = try { + file.getReader().use { reader -> + reader.getText() } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}", e) + file.readText() } + private fun generateHypotheses( observations: List, maxHypotheses: Int, @@ -757,6 +880,25 @@ AbductiveReasoning - Generate and evaluate explanatory hypotheses else -> "Unlikely - requires unusual circumstances" } + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun writeToTranscript(transcript: FileOutputStream?, content: StringBuilder) { + transcript?.write(content.toString().toByteArray()) + transcript?.write("\n\n".toByteArray()) + } + + companion object { private val log: Logger = LoggerFactory.getLogger(AbductiveReasoningTask::class.java) val AbductiveReasoning = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbstractionLadderTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbstractionLadderTask.kt index bb1db4db3..bf355d9f1 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbstractionLadderTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AbstractionLadderTask.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description @@ -11,6 +11,7 @@ import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream class AbstractionLadderTask( orchestrationConfig: OrchestrationConfig, planTask: AbstractionLadderTaskExecutionConfigData? 
@@ -23,6 +24,7 @@ class AbstractionLadderTask( @Description("Direction to traverse: 'up' for abstraction (generalizations), 'down' for concretization (specific implementations), 'both' for bidirectional analysis") val direction: String = "both", @Description("Number of abstraction levels to traverse in each direction (1-5 recommended)") val levels: Int = 3, @Description("Whether to identify design patterns, anti-patterns, and refactoring opportunities at each level") val identify_patterns: Boolean = true, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") val input_files: List? = null, @Description("Additional files for context (e.g., existing code, related implementations)") val related_files: List? = null, task_description: String? = null, task_dependencies: List? = null, @@ -67,6 +69,7 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi override fun run( agent: TaskOrchestrator, messages: List, task: SessionTask, resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { + var detailedOutputFile: FileOutputStream? 
= null val startTime = System.currentTimeMillis() log.info("Starting Abstraction Ladder Analysis - Concept: ${executionConfig?.concrete_concept?.truncateForDisplay(100)}, Direction: ${executionConfig?.direction}, Levels: ${executionConfig?.levels}") // Validate configuration @@ -99,10 +102,27 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return - // Initialize UI with tabbed display for better organization + // Initialize detailed output file + detailedOutputFile = initializeDetailedOutput(task) + detailedOutputFile?.write( + """ + # Abstraction Ladder Analysis Transcript + **Concept:** $concept + **Direction:** $direction + **Levels:** $levels + **Pattern Analysis:** ${if (identifyPatterns) "Enabled" else "Disabled"} + --- + + **Concept:** $concept + **Direction:** $direction + **Levels:** $levels + **Pattern Analysis:** ${if (identifyPatterns) "Enabled" else "Disabled"} + """.trimIndent().toByteArray() + ) + val tabbedDisplay = TabbedDisplay(task) - // Overview tab + // Overview tab with input context val overviewTask = task.ui.newTask(false).apply { tabbedDisplay["Overview"] = placeholder add( @@ -117,6 +137,7 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi ) ) } + val inputFileContent = getInputFileContent() val contextFiles = getContextFiles() @@ -134,12 +155,15 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi levels = levels, identifyPatterns = identifyPatterns, contextFiles = contextFiles, + inputFileContent = inputFileContent, priorCode = priorCode, api = api, task = upwardTab ) result.append("## Upward Abstraction (Generalizations)\n\n") result.append(upwardAnalysis) + detailedOutputFile?.write("\n## Upward Abstraction (Generalizations)\n\n".toByteArray()) + detailedOutputFile?.write(upwardAnalysis.toByteArray()) result.append("\n\n") 
upwardTab.add(MarkdownUtil.renderMarkdown("✅ Upward analysis complete", ui = task.ui)) upwardTab.complete() @@ -154,12 +178,15 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi levels = levels, identifyPatterns = identifyPatterns, contextFiles = contextFiles, + inputFileContent = inputFileContent, priorCode = priorCode, api = api, task = downwardTab ) result.append("## Downward Concretization (Specific Implementations)\n\n") result.append(downwardAnalysis) + detailedOutputFile?.write("\n\n## Downward Concretization (Specific Implementations)\n\n".toByteArray()) + detailedOutputFile?.write(downwardAnalysis.toByteArray()) result.append("\n\n") downwardTab.add(MarkdownUtil.renderMarkdown("✅ Downward analysis complete", ui = task.ui)) downwardTab.complete() @@ -178,6 +205,8 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi ) result.append("## Pattern Analysis & Recommendations\n\n") result.append(patternSummary) + detailedOutputFile?.write("\n\n## Pattern Analysis & Recommendations\n\n".toByteArray()) + detailedOutputFile?.write(patternSummary.toByteArray()) patternTab.add(MarkdownUtil.renderMarkdown("✅ Pattern analysis complete", ui = task.ui)) patternTab.complete() } @@ -202,12 +231,18 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi val duration = System.currentTimeMillis() - startTime log.info("Abstraction Ladder Analysis completed successfully - Concept: ${concept.truncateForDisplay(100)}, Levels: $levels") - task.safeComplete("Abstraction ladder analysis complete for '${concept.truncateForDisplay(100)}' with $levels levels in $direction direction(s) (${duration}ms)", log) - resultFn(result.toString()) + detailedOutputFile?.close() + task.safeComplete( + "Abstraction ladder analysis complete for '${concept.truncateForDisplay(100)}' with $levels levels in $direction direction(s) (${duration}ms)", + log + ) + val summaryMessage = generateSummaryMessage(task, 
duration, concept, levels, direction) + resultFn(summaryMessage) } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime log.error("Error in abstraction ladder analysis after ${duration}ms", e) + detailedOutputFile?.close() task.error(e) task.add( MarkdownUtil.renderMarkdown( @@ -230,6 +265,7 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi levels: Int, identifyPatterns: Boolean, contextFiles: String, + inputFileContent: String, priorCode: String, api: ChatInterface, task: SessionTask @@ -238,6 +274,9 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi Analyze the following concept by moving UP the abstraction ladder. Start with the concrete concept and identify increasingly general abstractions. + ## Input Files: + $inputFileContent + ## Concrete Concept: $concept @@ -289,6 +328,7 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi levels: Int, identifyPatterns: Boolean, contextFiles: String, + inputFileContent: String, priorCode: String, api: ChatInterface, task: SessionTask @@ -297,6 +337,9 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi Analyze the following concept by moving DOWN the abstraction ladder. Start with the concept and identify increasingly specific/concrete implementations. + ## Input Files: + $inputFileContent + ## Starting Concept: $concept @@ -406,6 +449,77 @@ AbstractionLadder - Traverse abstraction levels to find patterns and design insi } } + private fun getInputFileContent(): String { + val inputFiles = executionConfig?.input_files ?: emptyList() + if (inputFiles.isEmpty()) return "No input files provided." 
+ return inputFiles.flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(root.toFile()) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Exception) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + } + + private fun initializeDetailedOutput(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = Pair(task.linkTo("abstraction_ladder_analysis.md"), task.resolve("abstraction_ladder_analysis.md")) + val outputStream = file?.outputStream() + task.complete( + "Writing detailed analysis to $link " + + "html " + + "pdf" + ) + log.info("Initialized detailed output file: $link") + outputStream + } catch (e: Exception) { + log.error("Failed to initialize detailed output file", e) + null + } + } + + private fun generateSummaryMessage(task: SessionTask, duration: Long, concept: String, levels: Int, direction: String): String { + val (link, _) = Pair(task.linkTo("abstraction_ladder_analysis.md"), task.resolve("abstraction_ladder_analysis.md")) + return """ + Abstraction Ladder analysis complete for '$concept' with $levels levels in $direction direction(s). + **Duration:** ${duration / 1000}s + Detailed analysis: View Full Report + """.trimIndent() + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + companion object { private val log: Logger = LoggerFactory.getLogger(AbstractionLadderTask::class.java) val AbstractionLadder = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AdversarialReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AdversarialReasoningTask.kt index 25222e50d..1998d52c3 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AdversarialReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AdversarialReasoningTask.kt @@ -1,14 +1,16 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.getReader import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -54,6 +56,8 @@ class AdversarialReasoningTask( val suggest_mitigations: Boolean = true, @Description("Related files or code to analyze (glob patterns)") val related_files: List? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Specific assumptions to challenge") val challenge_assumptions: List? 
= null, @Description("Maximum number of vulnerabilities to identify per vector") @@ -67,28 +71,26 @@ class AdversarialReasoningTask( ?: "Red team analysis of '$target_system' with ${attack_vectors?.size ?: 0} attack vectors", task_dependencies = task_dependencies?.toMutableList(), state = state - ) , ValidatedObject { + ), ValidatedObject { override fun validate(): String? { if (target_system.isNullOrBlank()) { return "AdversarialReasoningTaskExecutionConfigData: target_system is required" } - - val validVectors = setOf("security", "performance", "logic", "business", "privacy", "compliance") + attack_vectors?.forEach { vector -> - if (vector.lowercase() !in validVectors) { - return "AdversarialReasoningTaskExecutionConfigData: invalid attack_vector '$vector'. Must be one of: ${validVectors.joinToString(", ")}" + if (vector.isBlank()) { + return "AdversarialReasoningTaskExecutionConfigData: invalid attack_vector '$vector'.}" } } - - val validCapabilities = setOf("basic", "intermediate", "advanced", "nation-state") - if (adversary_capability.lowercase() !in validCapabilities) { - return "AdversarialReasoningTaskExecutionConfigData: invalid adversary_capability '$adversary_capability'. Must be one of: ${validCapabilities.joinToString(", ")}" + + if (adversary_capability.isBlank()) { + return "AdversarialReasoningTaskExecutionConfigData: adversary_capability cannot be blank" } - + if (max_vulnerabilities_per_vector !in 1..20) { return "AdversarialReasoningTaskExecutionConfigData: max_vulnerabilities_per_vector must be between 1 and 20" } - + return ValidatedObject.validateFields(this) } } @@ -118,6 +120,7 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes ) { val startTime = System.currentTimeMillis() log.info("Starting AdversarialReasoningTask for target: '${executionConfig?.target_system}'") + var transcriptStream: FileOutputStream? 
= null val targetSystem = executionConfig?.target_system if (targetSystem.isNullOrBlank()) { @@ -137,9 +140,14 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes val challengeAssumptions = executionConfig.challenge_assumptions val maxVulnerabilitiesPerVector = executionConfig.max_vulnerabilities_per_vector.coerceIn(1, 20) + // Initialize transcript + transcriptStream = initializeTranscript(task) + transcriptStream?.let { stream -> + writeTranscriptHeader(stream, targetSystem, attackVectors, adversaryCapability, generateExploits, suggestMitigations) + } log.info( "Configuration: vectors=${attackVectors.size}, capability=$adversaryCapability, " + - "exploits=$generateExploits, mitigations=$suggestMitigations" + "exploits=$generateExploits, mitigations=$suggestMitigations" ) val ui = task.ui @@ -148,6 +156,18 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes // Overview tab val overviewTask = ui.newTask(false) tabs["Overview"] = overviewTask.placeholder + transcriptStream?.let { + it.write("# 🔴 Adversarial Reasoning / Red Team Analysis\n\n".toByteArray()) + it.write("**Target System:** $targetSystem\n\n".toByteArray()) + it.write("**Attack Vectors:** ${attackVectors.joinToString(", ")}\n\n".toByteArray()) + it.write("**Adversary Capability:** $adversaryCapability\n\n".toByteArray()) + it.write("**Generate Exploits:** ${if (generateExploits) "⚠️ Yes" else "No"}\n\n".toByteArray()) + it.write("**Suggest Mitigations:** ${if (suggestMitigations) "Yes" else "No"}\n\n".toByteArray()) + it.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + it.write("---\n\n".toByteArray()) + it.flush() + } + val overviewContent = buildString { appendLine("# 🔴 Adversarial Reasoning / Red Team Analysis") @@ -191,9 +211,17 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes } } else "" - if (priorContext.isNotBlank() || 
fileContext.isNotBlank()) { + val inputFileContent = getInputFileCode() + if (priorContext.isNotBlank() || fileContext.isNotBlank() || inputFileContent.isNotBlank()) { val contextTask = ui.newTask(false) tabs["Context"] = contextTask.placeholder + transcriptStream?.let { + it.write("## Context for Analysis\n\n".toByteArray()) + if (priorContext.isNotBlank()) { + it.write("### Prior Task Results\n\n".toByteArray()) + it.write("${priorContext.truncateForDisplay()}\n\n".toByteArray()) + } + } contextTask.add( buildString { appendLine("# Context for Analysis") @@ -204,6 +232,12 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes appendLine(priorContext.truncateForDisplay()) appendLine() } + if (inputFileContent.isNotBlank()) { + appendLine("## Input Files") + appendLine() + appendLine(inputFileContent) + appendLine() + } if (fileContext.isNotBlank()) { appendLine(fileContext) } @@ -236,6 +270,13 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes val vectorTask = ui.newTask(false) tabs["Vector: ${vector.capitalize()}"] = vectorTask.placeholder + transcriptStream?.let { + it.write("## Attack Vector: ${vector.capitalize()}\n\n".toByteArray()) + it.write("**Adversary Capability:** $adversaryCapability\n\n".toByteArray()) + it.write("---\n\n".toByteArray()) + it.flush() + } + vectorTask.add( buildString { @@ -282,6 +323,12 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes // Perform analysis val analysisResult = adversarialAgent.answer(listOf(analysisPrompt)) + transcriptStream?.let { + it.write("### Analysis Results\n\n".toByteArray()) + it.write("$analysisResult\n\n".toByteArray()) + it.flush() + } + vectorTask.add( buildString { @@ -306,6 +353,13 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes val vectorTime = System.currentTimeMillis() - vectorStartTime vectorAnalysisTimes[vector] = vectorTime + transcriptStream?.let { + 
it.write("**Vulnerabilities Found:** ${parsedVulnerabilities.size}\n\n".toByteArray()) + it.write("**Analysis Time:** ${vectorTime / 1000.0}s\n\n".toByteArray()) + it.write("---\n\n".toByteArray()) + it.flush() + } + vectorTask.add( buildString { @@ -351,6 +405,10 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes }.renderMarkdown ) task.update() + transcriptStream?.let { + it.write("## 🛡️ Mitigation Strategies\n\n".toByteArray()) + } + val mitigationAgent = createMitigationAgent(api) val mitigationPrompt = buildMitigationPrompt( @@ -360,6 +418,11 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes ) val mitigations = mitigationAgent.answer(listOf(mitigationPrompt)) + transcriptStream?.let { + it.write("$mitigations\n\n".toByteArray()) + it.flush() + } + mitigationTask.add( buildString { @@ -408,6 +471,12 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes failureModes = allFailureModes, totalTime = System.currentTimeMillis() - startTime ) + transcriptStream?.let { + it.write("## 📊 Executive Summary\n\n".toByteArray()) + it.write("$summary\n\n".toByteArray()) + it.flush() + } + summaryTask.add( buildString { @@ -425,6 +494,17 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes // Final overview update val totalTime = System.currentTimeMillis() - startTime + transcriptStream?.let { + it.write("---\n\n".toByteArray()) + it.write("## ✅ Analysis Complete\n\n".toByteArray()) + it.write("**Total Time:** ${totalTime / 1000.0}s\n\n".toByteArray()) + it.write("**Total Vulnerabilities:** ${allVulnerabilities.size}\n\n".toByteArray()) + it.write("**Edge Cases Identified:** ${allEdgeCases.size}\n\n".toByteArray()) + it.write("**Failure Modes:** ${allFailureModes.size}\n\n".toByteArray()) + it.flush() + it.close() + } + overviewTask.add( buildString { appendLine() @@ -460,7 +540,7 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities 
and weaknes appendLine("- **Critical/High Severity:** ${allVulnerabilities.count { it.severity in listOf("critical", "high") }}") appendLine("- **Attack Vectors:** ${attackVectors.joinToString(", ")}") appendLine() - + if (allVulnerabilities.isNotEmpty()) { appendLine("## Top Vulnerabilities") appendLine() @@ -473,7 +553,7 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes appendLine() } } - + appendLine("## Statistics") appendLine("- Analysis Time: ${totalTime / 1000.0}s") appendLine("- Vectors Analyzed: ${attackVectors.size}") @@ -488,13 +568,21 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes log.info( "AdversarialReasoningTask completed: total_time=${totalTime}ms, " + - "vectors=${attackVectors.size}, vulnerabilities=${allVulnerabilities.size}, " + - "edge_cases=${allEdgeCases.size}, failure_modes=${allFailureModes.size}" + "vectors=${attackVectors.size}, vulnerabilities=${allVulnerabilities.size}, " + + "edge_cases=${allEdgeCases.size}, failure_modes=${allFailureModes.size}" ) resultFn(conciseResult) } catch (e: Exception) { + transcriptStream?.let { + it.write("\n\n---\n\n".toByteArray()) + it.write("## ❌ Error Occurred\n\n".toByteArray()) + it.write("**Error:** ${e.message}\n\n".toByteArray()) + it.write("**Type:** ${e.javaClass.simpleName}\n\n".toByteArray()) + it.flush() + it.close() + } log.error("Error during adversarial reasoning", e) task.error(e) @@ -529,9 +617,116 @@ AdversarialReasoning - Red team analysis to identify vulnerabilities and weaknes } } resultFn(errorOutput) + } finally { + transcriptStream?.close() + log.debug("Transcript stream closed") + } + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? 
{ + return try { + val (link, file) = Pair(task.linkTo("adversarial_transcript.md"), task.resolve("adversarial_transcript.md")) + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + private fun writeTranscriptHeader( + stream: FileOutputStream, + targetSystem: String, + attackVectors: List, + adversaryCapability: String, + generateExploits: Boolean, + suggestMitigations: Boolean + ) { + try { + val header = buildString { + appendLine("# 🔴 Adversarial Reasoning / Red Team Analysis Transcript") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("**Target System:** $targetSystem") + appendLine("**Attack Vectors:** ${attackVectors.joinToString(", ")}") + appendLine("**Adversary Capability:** $adversaryCapability") + appendLine("**Generate Exploits:** ${if (generateExploits) "⚠️ Yes" else "No"}") + appendLine("**Suggest Mitigations:** ${if (suggestMitigations) "Yes" else "No"}") + appendLine() + appendLine("---") + appendLine() + } + stream.write(header.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write transcript header", e) } } + private fun getInputFileCode(): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(root.toFile()) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + 
.distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: java.io.File): Boolean { + val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", "h", "hpp", + "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: java.io.File): String = try { + file.getReader().use { reader -> + when (reader) { + is com.simiacryptus.cognotik.input.PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" + } + } + + private fun createAdversarialAgent( vector: String, adversaryCapability: String, @@ -612,24 +807,24 @@ Consider both immediate fixes and long-term architectural improvements. 
appendLine("## Target System") appendLine(targetSystem) appendLine() - + if (priorContext.isNotBlank()) { appendLine("## System Context") appendLine(priorContext.truncateForDisplay(5000)) appendLine() } - + if (fileContext.isNotBlank()) { appendLine(fileContext) appendLine() } - + appendLine("## Attack Vector Focus") appendLine("**Vector:** $vector") appendLine() appendLine("**Your Capability Level:** $adversaryCapability") appendLine() - + if (!challengeAssumptions.isNullOrEmpty()) { appendLine("## Assumptions to Challenge") challengeAssumptions.forEach { assumption -> @@ -637,7 +832,7 @@ Consider both immediate fixes and long-term architectural improvements. } appendLine() } - + appendLine("## Analysis Requirements") appendLine() appendLine("Identify up to $maxVulnerabilities vulnerabilities in the '$vector' category.") @@ -648,11 +843,11 @@ Consider both immediate fixes and long-term architectural improvements. appendLine("3. **Description**: Clear explanation of the weakness") appendLine("4. **Attack Scenario**: How an attacker would exploit this") appendLine("5. **Potential Impact**: What damage could be done") - + if (generateExploits) { appendLine("6. **Exploit Steps**: Detailed technical steps to exploit") } - + appendLine() appendLine("Also identify:") appendLine("- **Edge Cases**: Unusual inputs or conditions that could cause problems") @@ -684,7 +879,7 @@ Consider both immediate fixes and long-term architectural improvements. appendLine() appendLine("## Identified Vulnerabilities") appendLine() - + vulnerabilities .sortedByDescending { severityToInt(it.severity) } .forEach { vuln -> @@ -692,7 +887,7 @@ Consider both immediate fixes and long-term architectural improvements. appendLine(vuln.description) appendLine() } - + appendLine("## Required Mitigations") appendLine() appendLine("For each vulnerability category, provide:") @@ -715,15 +910,15 @@ Consider both immediate fixes and long-term architectural improvements. 
private fun parseVulnerabilities(analysisResult: String, vector: String): List { val vulnerabilities = mutableListOf() - + // Simple parsing - look for severity indicators and structure val lines = analysisResult.lines() var currentVuln: MutableMap? = null var currentSection = "" - + lines.forEach { line -> val trimmed = line.trim() - + // Detect severity markers when { trimmed.matches(Regex(".*\\b(critical|high|medium|low)\\b.*", RegexOption.IGNORE_CASE)) -> { @@ -739,7 +934,7 @@ Consider both immediate fixes and long-term architectural improvements. ) ) } - + // Start new vulnerability currentVuln = mutableMapOf() val severityMatch = Regex("\\b(critical|high|medium|low)\\b", RegexOption.IGNORE_CASE) @@ -747,12 +942,12 @@ Consider both immediate fixes and long-term architectural improvements. currentVuln["severity"] = severityMatch?.value?.lowercase() ?: "medium" currentVuln["category"] = trimmed.replace(Regex("\\*+|#+|severity:?|\\b(critical|high|medium|low)\\b", RegexOption.IGNORE_CASE), "").trim() } - + trimmed.matches(Regex("\\*\\*?(description|attack|scenario|impact|exploit).*", RegexOption.IGNORE_CASE)) -> { currentSection = Regex("(description|attack|scenario|impact|exploit)", RegexOption.IGNORE_CASE) .find(trimmed)?.value?.lowercase() ?: "" } - + currentVuln != null && trimmed.isNotEmpty() && !trimmed.startsWith("#") && !trimmed.startsWith("*") -> { when (currentSection) { "description" -> currentVuln["description"] = (currentVuln["description"] ?: "") + " " + trimmed @@ -762,7 +957,7 @@ Consider both immediate fixes and long-term architectural improvements. } } } - + // Save last vulnerability currentVuln?.let { vuln -> vulnerabilities.add( @@ -775,7 +970,7 @@ Consider both immediate fixes and long-term architectural improvements. ) ) } - + return vulnerabilities } @@ -783,16 +978,18 @@ Consider both immediate fixes and long-term architectural improvements. 
val edgeCases = mutableListOf() val lines = analysisResult.lines() var inEdgeCaseSection = false - + lines.forEach { line -> val trimmed = line.trim() when { trimmed.matches(Regex(".*edge\\s*case.*", RegexOption.IGNORE_CASE)) -> { inEdgeCaseSection = true } + trimmed.matches(Regex(".*failure\\s*mode.*", RegexOption.IGNORE_CASE)) -> { inEdgeCaseSection = false } + inEdgeCaseSection && (trimmed.startsWith("-") || trimmed.startsWith("*") || trimmed.matches(Regex("^\\d+\\."))) -> { val cleaned = trimmed.removePrefix("-").removePrefix("*").replace(Regex("^\\d+\\."), "").trim() if (cleaned.length > 10) { @@ -801,7 +998,7 @@ Consider both immediate fixes and long-term architectural improvements. } } } - + return edgeCases } @@ -809,16 +1006,18 @@ Consider both immediate fixes and long-term architectural improvements. val failureModes = mutableListOf() val lines = analysisResult.lines() var inFailureSection = false - + lines.forEach { line -> val trimmed = line.trim() when { trimmed.matches(Regex(".*failure\\s*mode.*", RegexOption.IGNORE_CASE)) -> { inFailureSection = true } + trimmed.startsWith("#") && inFailureSection -> { inFailureSection = false } + inFailureSection && (trimmed.startsWith("-") || trimmed.startsWith("*") || trimmed.matches(Regex("^\\d+\\."))) -> { val cleaned = trimmed.removePrefix("-").removePrefix("*").replace(Regex("^\\d+\\."), "").trim() if (cleaned.length > 10) { @@ -827,7 +1026,7 @@ Consider both immediate fixes and long-term architectural improvements. } } } - + return failureModes } @@ -844,7 +1043,7 @@ Consider both immediate fixes and long-term architectural improvements. val highCount = vulnerabilities.count { it.severity == "high" } val mediumCount = vulnerabilities.count { it.severity == "medium" } val lowCount = vulnerabilities.count { it.severity == "low" } - + return buildString { appendLine("## Overview") appendLine() @@ -859,17 +1058,17 @@ Consider both immediate fixes and long-term architectural improvements. 
appendLine("| 🟡 Medium | $mediumCount |") appendLine("| 🟢 Low | $lowCount |") appendLine() - + val overallRisk = when { criticalCount > 0 -> "🔴 **CRITICAL** - Immediate action required" highCount > 2 -> "🟠 **HIGH** - Urgent attention needed" highCount > 0 || mediumCount > 3 -> "🟡 **MEDIUM** - Should be addressed soon" else -> "🟢 **LOW** - Monitor and improve over time" } - + appendLine("**Overall Risk Level:** $overallRisk") appendLine() - + appendLine("## Attack Surface Analysis") appendLine() appendLine("**Vectors Analyzed:** ${attackVectors.joinToString(", ")}") @@ -878,7 +1077,7 @@ Consider both immediate fixes and long-term architectural improvements. appendLine() appendLine("**Failure Modes:** ${failureModes.size}") appendLine() - + if (vulnerabilities.isNotEmpty()) { appendLine("## Top Concerns") appendLine() @@ -891,7 +1090,7 @@ Consider both immediate fixes and long-term architectural improvements. appendLine() } } - + appendLine("## Recommendations") appendLine() when { @@ -900,18 +1099,20 @@ Consider both immediate fixes and long-term architectural improvements. appendLine("2. **Urgent:** Implement temporary mitigations for high-severity issues") appendLine("3. **Short-term:** Develop comprehensive remediation plan") } + highCount > 0 -> { appendLine("1. **Priority:** Address high-severity vulnerabilities within 1-2 weeks") appendLine("2. **Planning:** Schedule remediation for medium-severity issues") appendLine("3. **Monitoring:** Implement detection for identified attack patterns") } + else -> { appendLine("1. **Continuous Improvement:** Address identified issues in regular sprint cycles") appendLine("2. **Monitoring:** Implement logging and alerting for edge cases") appendLine("3. 
**Testing:** Add test coverage for identified failure modes") } } - + appendLine() appendLine("---") appendLine() diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AnalogicalReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AnalogicalReasoningTask.kt index 800edf9e1..619d92b4e 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AnalogicalReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/AnalogicalReasoningTask.kt @@ -1,7 +1,7 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* @@ -10,6 +10,8 @@ import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -32,6 +34,8 @@ class AnalogicalReasoningTask( val validate_mappings: Boolean = true, @Description("Additional context files to inform the reasoning process") val related_files: List? = null, + @Description("Input files to provide context for analogical reasoning (supports glob patterns)") + val input_files: List? = null, task_description: String? = null, task_dependencies: List? = null, state: TaskState? = TaskState.Pending, @@ -138,6 +142,7 @@ AnalogicalReasoning - Solve problems by finding and applying analogies from diff resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { + var transcriptStream: FileOutputStream? 
= null try { val startTime = System.currentTimeMillis() log.info("Starting AnalogicalReasoningTask with source_domain='${executionConfig?.source_domain}', target_problem='${executionConfig?.target_problem}', num_analogies=${executionConfig?.num_analogies ?: 3}") @@ -168,6 +173,12 @@ AnalogicalReasoning - Solve problems by finding and applying analogies from diff val tabs = TabbedDisplay(task) val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + // Initialize transcript + transcriptStream = initializeTranscript(task) + transcriptStream?.let { stream -> + writeTranscriptHeader(stream, sourceDomain, targetProblem, numAnalogies, validateMappings) + } + // Create overview tab val overviewTask = task.ui.newTask(false) @@ -196,6 +207,10 @@ AnalogicalReasoning - Solve problems by finding and applying analogies from diff log.debug("Gathering prior context and related files") val priorContext = getPriorCode(agent.executionState) val contextFiles = getContextFiles() + val inputFileContent = getInputFileContent() + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Input Files Context\n\n$inputFileContent\n\n") + } log.debug("Context gathered: priorContext length=${priorContext.length}, contextFiles length=${contextFiles.length}") // Update overview with context info overviewTask.add(buildString { @@ -221,6 +236,9 @@ AnalogicalReasoning - Solve problems by finding and applying analogies from diff val analogiesPrompt = """ You are an expert in analogical reasoning and creative problem-solving. + ## Input Files + $inputFileContent + ## Task Generate $numAnalogies high-quality analogies from the source domain to help solve the target problem. 
@@ -278,6 +296,9 @@ AnalogicalReasoning - Solve problems by finding and applying analogies from diff appendLine() appendLine("- ✗ Analogy generation failed") }.renderMarkdown) + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Error\n\nFailed to generate analogies\n\n") + } task.safeComplete("Failed to generate analogies", log) task.update() resultFn("ERROR: Failed to generate analogies") @@ -325,6 +346,10 @@ AnalogicalReasoning - Solve problems by finding and applying analogies from diff } }.renderMarkdown) task.update() + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Generated Analogies\n\n${result.analogies.size} analogies generated\n\n") + } + // Update overview overviewTask.add(buildString { appendLine() @@ -403,6 +428,10 @@ Provide a brief validation assessment. appendLine(validationResult.truncateForDisplay()) }.renderMarkdown) task.update() + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Validation Results\n\n$validationResult\n\n") + } + // Update overview overviewTask.add(buildString { appendLine() @@ -499,6 +528,10 @@ Provide a brief validation assessment. appendLine("**Status:** ✓ Complete") }.renderMarkdown) task.update() + transcriptStream?.let { stream -> + writeTranscriptFooter(stream, totalTime, result.analogies.size) + } + log.info( "AnalogicalReasoningTask completed successfully: total_time=${totalTime}ms, analogies=${result.analogies.size}, avg_confidence=${ @@ -510,6 +543,9 @@ Provide a brief validation assessment. } catch (e: Exception) { log.error("Error during AnalogicalReasoningTask execution", e) + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Error\n\n${e.message}\n\n") + } task.error(e) val errorTask = task.ui.newTask(false) errorTask.add(buildString { @@ -522,6 +558,8 @@ Provide a brief validation assessment. 
}.renderMarkdown) task.safeComplete("Failed with error: ${e.message}", log) resultFn("ERROR: ${e.message}") + } finally { + transcriptStream?.close() } } @@ -605,6 +643,79 @@ Provide a brief validation assessment. } } + private fun initializeTranscript(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = task.createFile("reasoning_transcript.md") + val transcriptStream = file?.outputStream() + task.complete( + "Writing detailed transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + private fun writeTranscriptHeader( + stream: FileOutputStream, + sourceDomain: String, + targetProblem: String, + numAnalogies: Int, + validateMappings: Boolean + ) { + try { + val header = buildString { + appendLine("# Analogical Reasoning Transcript") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("## Configuration") + appendLine() + appendLine("- **Source Domain:** $sourceDomain") + appendLine("- **Target Problem:** $targetProblem") + appendLine("- **Number of Analogies:** $numAnalogies") + appendLine("- **Validation Enabled:** $validateMappings") + appendLine() + appendLine("---") + appendLine() + } + stream.write(header.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write transcript header", e) + } + } + + private fun writeToTranscript(stream: FileOutputStream, content: String) { + try { + stream.write(content.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write to transcript", e) + } + } + + private fun writeTranscriptFooter(stream: FileOutputStream, totalTime: Long, analogyCount: Int) { + try { + val footer = buildString { + appendLine("---") + appendLine() + appendLine("**Completed:** 
${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine("**Total Time:** ${totalTime / 1000} seconds") + appendLine("**Analogies Generated:** $analogyCount") + } + stream.write(footer.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write transcript footer", e) + } + } + + private fun getContextFiles(): String { val relatedFiles = executionConfig?.related_files ?: return "" if (relatedFiles.isEmpty()) return "" @@ -633,6 +744,50 @@ Provide a brief validation assessment. } } + private fun getInputFileContent(): String { + val inputFiles = executionConfig?.input_files ?: return "" + if (inputFiles.isEmpty()) return "" + log.debug("Loading ${inputFiles.size} input files") + return buildString { + appendLine("## Input Files") + appendLine() + inputFiles.forEach { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + try { + val files = com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(root.toFile()) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }.filter { it.isFile && it.exists() }.distinct().filterNotNull().sortedBy { it } + files.forEach { file -> + try { + val relativePath = root.toFile().toPath().relativize(file.toPath()) + val content = file.readText().truncateForDisplay(500) + appendLine("### $relativePath") + appendLine("```") + appendLine(content) + appendLine("```") + appendLine() + log.debug("Successfully loaded input file: $relativePath") + } catch (e: Exception) { + log.warn("Error reading input file: ${file.name}", e) + } + } + } catch (e: Exception) { + log.warn("Error processing input file pattern: $pattern", e) + } + } + } + } + + private fun String.truncateForDisplay(maxLength: Int = 1000): String { + return if (this.length > maxLength) 
this.substring(0, maxLength) + "\n...(truncated)" else this + } + + companion object { private val log: Logger = LoggerFactory.getLogger(AnalogicalReasoningTask::class.java) val AnalogicalReasoning = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/BrainstormingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/BrainstormingTask.kt index e32798994..dcea589c0 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/BrainstormingTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/BrainstormingTask.kt @@ -1,19 +1,24 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.PaginatedDocumentReader +import com.simiacryptus.cognotik.input.getReader import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets +import java.nio.file.FileSystems +import java.nio.file.Path import java.time.LocalDateTime import java.time.format.DateTimeFormatter + class BrainstormingTask( orchestrationConfig: OrchestrationConfig, planTask: BrainstormingTaskExecutionConfigData? @@ -23,6 +28,8 @@ class BrainstormingTask( ) { val maxSummaryLength: Int = 10000 + private var transcriptStream: FileOutputStream? 
= null + protected val codeFiles = mutableMapOf() data class BrainstormedOption( val title: String = "", @@ -67,6 +74,8 @@ class BrainstormingTask( class BrainstormingTaskExecutionConfigData( @Description("The problem or question to brainstorm solutions for") val problem_statement: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Number of options to generate (default: 5-10)") val target_option_count: Int = 7, @Description("Categories or domains to consider (optional)") @@ -86,7 +95,7 @@ class BrainstormingTask( task_description = task_description, task_dependencies = task_dependencies?.toMutableList(), state = state - ) , ValidatedObject { + ), ValidatedObject { override fun validate(): String? { if (problem_statement.isNullOrBlank()) { return "BrainstormingTaskExecutionConfigData problem_statement cannot be null or blank" @@ -144,10 +153,19 @@ Brainstorming - Generate and analyze multiple solution options val analysisDepth = executionConfig.analysis_depth log.info("Configuration: targetCount=$targetCount, categories=$categories, includeCreative=$includeCreative, analysisDepth=$analysisDepth") + log.info("Input files: ${executionConfig?.input_files?.joinToString(", ") ?: "none"}") val ui = task.ui try { + // Initialize transcript + transcriptStream = transcript(task) + transcriptStream?.write("# Brainstorming Session Transcript\n\n".toByteArray()) + transcriptStream?.write("**Input Files:** ${executionConfig?.input_files?.joinToString(", ") ?: "none"}\n\n".toByteArray()) + transcriptStream?.write("**Problem Statement:** $problemStatement\n\n".toByteArray()) + transcriptStream?.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + transcriptStream?.write("---\n\n".toByteArray()) + // Create tabbed display for organized output val tabs = TabbedDisplay(task) @@ -174,6 +192,11 @@ 
Brainstorming - Generate and analyze multiple solution options appendLine("**Analysis Depth:** $analysisDepth") appendLine() appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + if (!executionConfig?.input_files.isNullOrEmpty()) { + appendLine() + appendLine("**Input Files:**") + executionConfig?.input_files?.forEach { appendLine("- $it") } + } appendLine() appendLine("---") appendLine() @@ -183,6 +206,17 @@ Brainstorming - Generate and analyze multiple solution options } overviewTask.add(MarkdownUtil.renderMarkdown(overviewContent, ui = ui)) task.update() + // Get input file content + val inputFileContent = getInputFileCode() + if (inputFileContent.isNotBlank()) { + log.debug("Found input file content: ${inputFileContent.length} characters") + val inputFilesTask = task.ui.newTask(false) + tabs["Input Files"] = inputFilesTask.placeholder + inputFilesTask.add(MarkdownUtil.renderMarkdown(inputFileContent, ui = ui)) + task.update() + transcriptStream?.write("\n## Input Files\n\n$inputFileContent\n\n".toByteArray()) + } + // Gather context from previous tasks val priorContext = getPriorCode(agent.executionState) @@ -215,7 +249,8 @@ Brainstorming - Generate and analyze multiple solution options categories, constraints, includeCreative, - priorContext + priorContext, + inputFileContent ) val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return @@ -233,6 +268,15 @@ Brainstorming - Generate and analyze multiple solution options val options = brainstormResult.obj.options log.info("Generated ${options.size} options") + // Write to transcript + transcriptStream?.write("\n## Generated Options\n\n".toByteArray()) + options.forEachIndexed { index, option -> + transcriptStream?.write("### ${index + 1}. 
${option.title}\n".toByteArray()) + if (option.category != null) { + transcriptStream?.write("**Category:** ${option.category}\n\n".toByteArray()) + } + transcriptStream?.write("${option.description}\n\n".toByteArray()) + } // Display generated options optionsTask.add( @@ -306,6 +350,20 @@ Brainstorming - Generate and analyze multiple solution options val analysis = analysisAgent.answer(listOf(analysisPrompt)) analyses[optionNumber] = analysis.obj + // Write analysis to transcript + transcriptStream?.write("\n## Option $optionNumber Analysis: ${option.title}\n\n".toByteArray()) + transcriptStream?.write("### ✅ Pros\n".toByteArray()) + analysis.obj.pros.forEach { transcriptStream?.write("- $it\n".toByteArray()) } + transcriptStream?.write("\n### ❌ Cons\n".toByteArray()) + analysis.obj.cons.forEach { transcriptStream?.write("- $it\n".toByteArray()) } + transcriptStream?.write("\n### 📊 Feasibility\n${analysis.obj.feasibility}\n\n".toByteArray()) + transcriptStream?.write("### 💥 Impact\n${analysis.obj.impact}\n\n".toByteArray()) + transcriptStream?.write("### ⚠️ Risks\n".toByteArray()) + analysis.obj.risks.forEach { transcriptStream?.write("- $it\n".toByteArray()) } + transcriptStream?.write("\n### 📋 Requirements\n".toByteArray()) + analysis.obj.requirements.forEach { transcriptStream?.write("- $it\n".toByteArray()) } + transcriptStream?.write("\n---\n\n".toByteArray()) + // Display analysis analysisTask.add( @@ -399,26 +457,63 @@ Brainstorming - Generate and analyze multiple solution options ) task.update() - // Build final concise output + val totalTime = System.currentTimeMillis() - startTime + // Write detailed results to file + val detailedResults = buildDetailedResults( + problemStatement, + options, + analyses, + summary, + totalTime + ) + val (resultsLink, resultsFile) = task.createFile("brainstorming_results.md") + resultsFile?.outputStream()?.use { stream -> + stream.write(detailedResults.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } + 
log.info("Saved detailed results to: $resultsLink") + + // Finalize transcript + transcriptStream?.write("\n## Session Complete\n\n".toByteArray()) + transcriptStream?.write("**Total Time:** ${totalTime / 1000.0}s\n".toByteArray()) + transcriptStream?.write("**Options Generated:** ${options.size}\n".toByteArray()) + transcriptStream?.write("**Options Analyzed:** ${analyses.size}\n".toByteArray()) + transcriptStream?.write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) + transcriptStream?.flush() + transcriptStream?.close() + val transcriptLink = task.createFile("brainstorming_transcript.md").first + // Build final concise output with file links val finalOutput = buildString { appendLine("# Brainstorming Results: $problemStatement") appendLine() - appendLine("## Options Generated: ${options.size}") + appendLine("✅ Generated and analyzed ${options.size} options in ${totalTime / 1000}s") + appendLine() + appendLine("## Summary") + appendLine() + appendLine(summary.truncateForDisplay()) + appendLine() + appendLine("---") + appendLine() + appendLine("## Detailed Results") + appendLine() + appendLine("📄 [Full Results]($resultsLink) | [HTML](${resultsLink.removeSuffix(".md")}.html) | [PDF](${resultsLink.removeSuffix(".md")}.pdf)") + appendLine() + appendLine("📋 [Transcript]($transcriptLink) | [HTML](${transcriptLink.removeSuffix(".md")}.html) | [PDF](${transcriptLink.removeSuffix(".md")}.pdf)") + appendLine() + appendLine("**Options:** ${options.size} | **Analysis Depth:** $analysisDepth | **Time:** ${totalTime / 1000}s") + + appendLine() appendLine() options.forEachIndexed { index, option -> appendLine("### ${index + 1}. 
${option.title}") - appendLine(option.description.truncateForDisplay()) appendLine() } - appendLine("## Key Findings") appendLine() - appendLine(summary.truncateForDisplay()) appendLine() appendLine("---") - appendLine("**Options:** ${options.size} | **Analysis Depth:** $analysisDepth | **Time:** ${(System.currentTimeMillis() - startTime) / 1000}s") + } - val totalTime = System.currentTimeMillis() - startTime log.info("BrainstormingTask completed: total_time=${totalTime}ms, options=${options.size}, output_size=${finalOutput.length} chars") // Update overview with completion @@ -448,6 +543,13 @@ Brainstorming - Generate and analyze multiple solution options } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime log.error("BrainstormingTask failed after ${duration}ms for problem: $problemStatement", e) + // Write error to transcript + transcriptStream?.write("\n## ❌ Error Occurred\n\n".toByteArray()) + transcriptStream?.write("**Error:** ${e.message}\n".toByteArray()) + transcriptStream?.write("**Type:** ${e.javaClass.simpleName}\n".toByteArray()) + transcriptStream?.flush() + transcriptStream?.close() + task.error(e) val errorOutput = buildString { @@ -465,13 +567,28 @@ Brainstorming - Generate and analyze multiple solution options } } + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun buildBrainstormPrompt( problemStatement: String, targetCount: Int, categories: String, constraints: List, includeCreative: Boolean, - priorContext: String + priorContext: String, + inputFileContent: String = "" ): String { val constraintsSection = if (constraints.isNotEmpty()) { """ @@ -636,6 +753,125 @@ Provide a well-structured, actionable summary now. 
""".trimIndent() } + private fun buildDetailedResults( + problemStatement: String, + options: List, + analyses: Map, + summary: String, + totalTime: Long + ): String { + return buildString { + appendLine("# Brainstorming Session - Detailed Results") + appendLine() + appendLine("**Problem Statement:** $problemStatement") + appendLine() + appendLine("**Session Duration:** ${totalTime / 1000}s") + appendLine() + appendLine("**Options Generated:** ${options.size}") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## All Options") + appendLine() + options.forEachIndexed { index, option -> + val optionNumber = index + 1 + appendLine("### ${optionNumber}. ${option.title}") + if (option.category != null) { + appendLine("**Category:** ${option.category}") + } + appendLine() + appendLine(option.description) + appendLine() + val analysis = analyses[optionNumber] + if (analysis != null) { + appendLine("#### Analysis") + appendLine() + appendLine("**Pros:**") + analysis.pros.forEach { appendLine("- $it") } + appendLine() + appendLine("**Cons:**") + analysis.cons.forEach { appendLine("- $it") } + appendLine() + appendLine("**Feasibility:** ${analysis.feasibility}") + appendLine() + appendLine("**Impact:** ${analysis.impact}") + appendLine() + appendLine("**Risks:**") + analysis.risks.forEach { appendLine("- $it") } + appendLine() + appendLine("**Requirements:**") + analysis.requirements.forEach { appendLine("- $it") } + appendLine() + } + appendLine("---") + appendLine() + } + appendLine("## Summary & Recommendations") + appendLine() + appendLine(summary) + appendLine() + } + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + 
FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", + "h", "hpp", "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: File) = try { + file.getReader().use { reader -> + when (reader) { + is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" + } + } + + companion object { private val log: Logger = LoggerFactory.getLogger(BrainstormingTask::class.java) val Brainstorming = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CausalInferenceTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CausalInferenceTask.kt index befe10740..953faa629 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CausalInferenceTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CausalInferenceTask.kt @@ -1,15 +1,14 @@ package 
com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.nio.file.FileSystems +import java.nio.file.Path class CausalInferenceTask( orchestrationConfig: OrchestrationConfig, @@ -18,6 +17,7 @@ class CausalInferenceTask( orchestrationConfig, planTask ) { + protected val codeFiles = mutableMapOf() val maxOutputLength: Int = 20000 @@ -32,6 +32,8 @@ class CausalInferenceTask( val identify_confounders: Boolean = true, @Description("Data sources for evidence (file patterns or paths)") val evidence_sources: List? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Additional files for context") val related_files: List? = null, task_description: String? 
= null, @@ -67,6 +69,7 @@ CausalInference - Identify causal relationships and root causes ** Optionally build a causal graph showing relationships ** Optionally identify confounding factors ** Provide evidence sources (logs, metrics, code files) + ** Optionally, list input files (supports glob patterns) to be examined ** Useful for: - Root cause analysis - Debugging complex issues @@ -82,23 +85,30 @@ CausalInference - Identify causal relationships and root causes resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { + val transcript = transcript(task) val startTime = System.currentTimeMillis() log.info("Starting CausalInference task for effect: ${executionConfig?.observed_effect}") + // Create transcript file for logging the analysis + var markdownTranscript: FileOutputStream? = null val observedEffect = executionConfig?.observed_effect if (observedEffect.isNullOrBlank()) { val errorMsg = "CONFIGURATION ERROR: No observed effect specified" log.error(errorMsg) task.complete(errorMsg) - resultFn("CONFIGURATION ERROR: No observed effect specified") + resultFn(formatResultMessage(task, transcript, errorMsg)) return } + markdownTranscript = transcript(task) + // Validate configuration executionConfig?.validate()?.let { validationError -> val errorMsg = "CONFIGURATION ERROR: $validationError" log.error(errorMsg) + markdownTranscript?.write("# Configuration Error\n\n$errorMsg\n".toByteArray()) + markdownTranscript?.close() task.complete(errorMsg) - resultFn("CONFIGURATION ERROR: No observed effect specified") + resultFn(formatResultMessage(task, transcript, errorMsg)) return } @@ -106,13 +116,27 @@ CausalInference - Identify causal relationships and root causes val ui = task.ui val api = orchestrationConfig.defaultChatter ?: run { log.error("No default chatter available") + markdownTranscript?.write("# Error\n\nNo API available\n".toByteArray()) + markdownTranscript?.close() task.complete("ERROR: No API available") - resultFn("ERROR: No API available") 
+ resultFn(formatResultMessage(task, transcript, "ERROR: No API available")) return } try { // Create tabbed display for organized output val tabs = TabbedDisplay(task) + // Write header to transcript + markdownTranscript?.write( + """ + |# Causal Inference Analysis + | + |**Observed Effect:** $observedEffect + |**Start Time:** ${java.time.Instant.ofEpochMilli(startTime)} + | + |--- + | + """.trimMargin().toByteArray() + ) // Overview tab val overviewTask = task.ui.newTask(false) @@ -120,6 +144,12 @@ CausalInference - Identify causal relationships and root causes var overviewTaskStatus = overviewTask.add( MarkdownUtil.renderMarkdown( """ + |## Input Files + | + |${getInputFileCode()} + | + |--- + | |## Causal Inference Analysis | |**Observed Effect:** $observedEffect @@ -140,6 +170,18 @@ CausalInference - Identify causal relationships and root causes val evidenceContext = gatherEvidence() log.debug("Evidence gathered: ${evidenceContext.length} characters") + markdownTranscript?.write( + """ + |## Evidence Sources + | + |**Sources processed:** ${executionConfig?.evidence_sources?.size ?: 0} + | + |${evidenceContext.take(maxOutputLength)}${if (evidenceContext.length > maxOutputLength) "\n... 
(truncated)" else ""} + | + |--- + | + """.trimMargin().toByteArray() + ) evidenceLoading?.clear() evidenceTask.add( MarkdownUtil.renderMarkdown( @@ -165,6 +207,7 @@ CausalInference - Identify causal relationships and root causes log.debug("Retrieving prior context from execution state") val priorContext = getPriorCode(agent.executionState) + val messageContext = messages.joinToString("\n\n") val potentialCauses = executionConfig.potential_causes ?: emptyList() val causesText = if (potentialCauses.isNotEmpty()) { @@ -196,7 +239,8 @@ CausalInference - Identify causal relationships and root causes observedEffect, potentialCauses, evidenceContext, - priorContext + priorContext, + messageContext ) log.debug("Initializing ChatAgent with model: ${api.javaClass.simpleName}") @@ -218,6 +262,17 @@ CausalInference - Identify causal relationships and root causes var answer: String? = chatAgent.answer(toInput(prompt)) + // Write analysis to transcript + markdownTranscript?.write( + """ + |## Causal Analysis Results + | + |$answer + | + |--- + | + """.trimMargin().toByteArray() + ) analysisTaskLoading?.clear() analysisTask.add( @@ -279,6 +334,20 @@ Generate the Mermaid diagram now: var graphResult: String? 
= chatAgent.answer(toInput(graphPrompt)) val mermaidCode = extractMermaidCode(graphResult ?: "") + // Write graph to transcript + markdownTranscript?.write( + """ + |## Causal Graph + | + |```mermaid + |$mermaidCode + |``` + | + |--- + | + """.trimMargin().toByteArray() + ) + graphTaskStatus?.clear() if (mermaidCode.isNotEmpty()) { graphTaskStatus = graphTask.add( @@ -315,20 +384,52 @@ Generate the Mermaid diagram now: val duration = System.currentTimeMillis() - startTime val summary = "Causal inference analysis completed for effect: $observedEffect" log.info("$summary (duration: ${duration}ms, causes analyzed: ${potentialCauses.size}, evidence sources: ${executionConfig?.evidence_sources?.size ?: 0})") + // Write summary to transcript + markdownTranscript?.write( + """ + |## Summary + | + |$summary + | + |**Duration:** ${duration}ms + |**Causes Analyzed:** ${potentialCauses.size} + |**Evidence Sources:** ${executionConfig?.evidence_sources?.size ?: 0} + | + """.trimMargin().toByteArray() + ) + markdownTranscript?.close() task.complete(summary) - resultFn(answer ?: "Analysis completed") + resultFn(formatResultMessage(task, transcript, summary)) } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime log.error("CausalInference task failed after ${duration}ms for effect: $observedEffect", e) + // Write error to transcript + markdownTranscript?.write( + """ + |## Error + | + |An error occurred during causal inference analysis: + | + |``` + |${e.message} + |${e.stackTraceToString()} + |``` + | + |**Duration:** ${duration}ms + | + """.trimMargin().toByteArray() + ) + markdownTranscript?.close() + task.error(e) val errorTask = task.ui.newTask(false) // tabs["Error"] = errorTask.placeholder errorTask.add(MarkdownUtil.renderMarkdown("## ❌ Error\n\nAn error occurred during causal inference analysis:\n\n```\n${e.message}\n```", ui = ui)) task.complete("Analysis failed: ${e.message}") - resultFn("ERROR: Causal inference analysis failed - ${e.message}") 
+ resultFn(formatResultMessage(task, transcript, "ERROR: Causal inference analysis failed - ${e.message}")) } } @@ -336,10 +437,14 @@ Generate the Mermaid diagram now: observedEffect: String, potentialCauses: List, evidenceContext: String, - priorContext: String + priorContext: String, + messageContext: String ): String { val causesSection = if (potentialCauses.isNotEmpty()) { """ +## User Input and Context: +$messageContext +--- |## Potential Causes to Investigate: |${potentialCauses.joinToString("\n") { "- $it" }} """.trimMargin() @@ -409,6 +514,47 @@ Generate the causal analysis now: """.trimIndent() } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = codeFiles[file.toPath()] ?: file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + }.let { if (it.isBlank()) "No input files specified" else it } + + private fun formatResultMessage(task: SessionTask, transcript: FileOutputStream?, summary: String): String { + return try { + val (link, _) = task.createFile("analysis_results.md") + transcript?.close() + "✅ $summary\n\n" + + "📄 Detailed results: $link " + + "html " + + "pdf" + } catch (e: Exception) { + log.error("Failed to create result file", e) + summary + } + } + private fun gatherEvidence(): String { val evidenceSources = executionConfig?.evidence_sources ?: emptyList() val relatedFiles = executionConfig?.related_files ?: 
emptyList() @@ -451,6 +597,19 @@ Generate the causal analysis now: return match?.groupValues?.get(1)?.trim() ?: "" } + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + companion object { private val log: Logger = LoggerFactory.getLogger(CausalInferenceTask::class.java) val CausalInference = TaskType( @@ -471,4 +630,4 @@ Generate the causal analysis now: """ ) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ChainOfThoughtTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ChainOfThoughtTask.kt index 9b7c43036..38a495cd0 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ChainOfThoughtTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ChainOfThoughtTask.kt @@ -1,15 +1,14 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.file.FileSystems class ChainOfThoughtTask( orchestrationConfig: OrchestrationConfig, @@ -26,7 +25,7 @@ class ChainOfThoughtTask( val reasoning_depth: Int = 10, @Description("Whether to validate each 
step before proceeding") val validate_steps: Boolean = true, - @Description("Additional files for context") + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") val related_files: List = emptyList(), task_dependencies: List? = null, state: TaskState? = TaskState.Pending, @@ -83,7 +82,8 @@ class ChainOfThoughtTask( override fun promptSegment(): String { return """ -ChainOfThought - Break down complex problems into explicit reasoning steps + ChainOfThought - Break down complex problems into explicit reasoning steps + ** Optionally, list input files (supports glob patterns) to be examined for context ** Specify the problem statement that requires step-by-step reasoning ** Optionally set reasoning_depth to control the number of steps (default: auto) ** Enable validate_steps to validate each step before proceeding (default: true) @@ -106,6 +106,8 @@ ChainOfThought - Break down complex problems into explicit reasoning steps ) { val startTime = System.currentTimeMillis() log.info("Starting ChainOfThoughtTask with problem: '${executionConfig?.problem_statement}'") + val transcript = transcript(task) + val inputFileContent = getInputFileCode() val problemStatement = executionConfig?.problem_statement if (problemStatement?.isBlank() != false) { @@ -120,19 +122,14 @@ ChainOfThought - Break down complex problems into explicit reasoning steps log.info("Configuration: maxSteps=$maxSteps, validateSteps=$validateSteps") val ui = task.ui - val api = orchestrationConfig.defaultChatter ?: run { - log.error("No default chatter available") - task.complete("ERROR: No API available") - resultFn("ERROR: No API available") - return - } + val api = orchestrationConfig.defaultChatter // Create tabbed display for organized output val tabs = TabbedDisplay(task) // Overview tab val overviewTask = task.ui.newTask(false) tabs["Overview"] = overviewTask.placeholder - val overviewContent = buildString { + var overviewContent = buildString { 
appendLine("# Chain of Thought Reasoning") appendLine() appendLine("**Problem Statement:** $problemStatement") @@ -141,24 +138,31 @@ ChainOfThought - Break down complex problems into explicit reasoning steps appendLine() appendLine("**Validate Steps:** ${if (validateSteps) "Yes" else "No"}") appendLine() - appendLine( - "**Started:** ${ - java.time.LocalDateTime.now() - .format(java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) - }" - ) - appendLine() - appendLine("---") - appendLine() - appendLine("## Progress") - appendLine() - appendLine("*Initializing reasoning process...*") } + if (inputFileContent.isNotBlank()) { + overviewContent += "\n## Input Files\n\n$inputFileContent\n\n" + } + + // Write to transcript + transcript?.write(overviewContent.toByteArray()) + transcript?.flush() overviewTask.add( MarkdownUtil.renderMarkdown( - overviewContent, - ui = ui - ) + overviewContent + buildString { + appendLine() + appendLine( + "**Started:** ${ + java.time.LocalDateTime.now() + .format(java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) + }" + ) + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("*Initializing reasoning process...*") + }) ) task.update() @@ -192,6 +196,11 @@ ChainOfThought - Break down complex problems into explicit reasoning steps } }.let { MarkdownUtil.renderMarkdown(it, ui = ui) } ) + // Write context to transcript + transcript?.write("\n\n# Context\n\n".toByteArray()) + if (priorContext.isNotBlank()) transcript?.write("## Previous Tasks\n\n$priorContext\n\n".toByteArray()) + if (contextFiles.isNotBlank()) transcript?.write("## Related Files\n\n$contextFiles\n\n".toByteArray()) + transcript?.flush() task.update() } @@ -237,6 +246,11 @@ ChainOfThought - Break down complex problems into explicit reasoning steps ) ) task.update() + // Write step header to transcript + transcript?.write("\n\n# Step $stepNumber of $maxSteps\n\n".toByteArray()) + 
transcript?.write("**Question:** $currentQuestion\n\n".toByteArray()) + transcript?.flush() + val step = generateReasoningStep( stepTask, @@ -281,6 +295,11 @@ ChainOfThought - Break down complex problems into explicit reasoning steps ui = ui ) ) + // Write validation failure to transcript + transcript?.write("### ⚠️ Validation Failed\n\n".toByteArray()) + transcript?.write("**Issues**: ${validation.issues?.joinToString(", ")}\n\n".toByteArray()) + transcript?.write("**Suggestions**: ${validation.suggestions}\n\n".toByteArray()) + transcript?.flush() task.update() // Attempt to regenerate with validation feedback @@ -306,6 +325,14 @@ ChainOfThought - Break down complex problems into explicit reasoning steps val lastStep = reasoningChain.last() val stepTime = System.currentTimeMillis() - stepStartTime stepTimes.add(stepTime) + // Write completed step to transcript + transcript?.write("**Reasoning**: ${lastStep.reasoning}\n\n".toByteArray()) + transcript?.write("**Conclusion**: ${lastStep.conclusion}\n\n".toByteArray()) + transcript?.write("**Confidence**: ${String.format("%.1f%%", lastStep.confidence * 100)}\n\n".toByteArray()) + if (lastStep.next_question != null) { + transcript?.write("**Next Question**: ${lastStep.next_question}\n\n".toByteArray()) + } + transcript?.flush() // Mark step as complete stepTask.add( MarkdownUtil.renderMarkdown( @@ -399,6 +426,9 @@ ChainOfThought - Break down complex problems into explicit reasoning steps ui = ui ) ) + // Write summary to transcript + transcript?.write("\n\n# Final Summary\n\n$summary\n\n".toByteArray()) + transcript?.flush() summaryTask.complete() task.update() @@ -442,6 +472,7 @@ ChainOfThought - Break down complex problems into explicit reasoning steps ) ) task.update() + transcript?.close() task.complete("Completed ${reasoningChain.size} reasoning steps in ${totalTime / 1000}s.") @@ -487,6 +518,10 @@ ChainOfThought - Break down complex problems into explicit reasoning steps } } resultFn(errorOutput) + // Write 
error to transcript and close + transcript?.write("\n\n# Error\n\n${e.message}\n\n".toByteArray()) + transcript?.write("**Steps Completed:** ${reasoningChain.size} of $maxSteps\n".toByteArray()) + transcript?.close() } } @@ -692,6 +727,46 @@ ChainOfThought - Break down complex problems into explicit reasoning steps } } + private fun getInputFileCode() = (executionConfig?.related_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + companion object { private val log: Logger = LoggerFactory.getLogger(ChainOfThoughtTask::class.java) val ChainOfThought = TaskType( @@ -711,4 +786,4 @@ ChainOfThought - Break down complex problems into explicit reasoning steps """ ) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintRelaxationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintRelaxationTask.kt index 97d48adeb..11261d048 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintRelaxationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintRelaxationTask.kt @@ -1,14 +1,17 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.nio.file.FileSystems import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -34,6 +37,8 @@ class ConstraintRelaxationTask( val find_creative_satisfactions: Boolean = true, @Description("Maximum number of relaxation/reintroduction iterations") val max_iterations: Int = 5, + @Description("The specific files (or file patterns, e.g. 
**/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Additional files for context") val related_files: List? = null, task_dependencies: List? = null, @@ -82,6 +87,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const ** Enable creative satisfaction finding to discover novel solutions ** Produces a solution that progressively satisfies constraints ** Shows evolution of solution as constraints are reintroduced + ** Optionally, list input files (supports glob patterns) to be examined for context """.trimIndent() } @@ -130,6 +136,8 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return val tabs = TabbedDisplay(task) + val (transcriptLink, transcriptFile) = Pair(task.linkTo("constraint_relaxation_transcript.md"), task.resolve("constraint_relaxation_transcript.md")) + val transcript = transcriptFile?.outputStream() val overviewTask = task.ui.newTask(false) tabs["Overview"] = overviewTask.placeholder @@ -168,7 +176,13 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine() appendLine("*Initializing constraint relaxation process...*") } + transcript?.write(overviewContent.toByteArray()) overviewTask.add(overviewContent.renderMarkdown) + task.complete( + "Writing transcript to $transcriptLink " + + "html " + + "pdf" + ) task.update() val priorContext = getPriorCode(agent.executionState) @@ -183,8 +197,10 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine(priorContext.truncateForDisplay()) }.renderMarkdown ) + transcript?.write("\n\n# Context from Previous Tasks\n\n${priorContext.truncateForDisplay()}\n".toByteArray()) task.update() } + val inputFileContent = getInputFileCode() overviewTask.add( buildString { @@ -194,11 +210,17 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const 
appendLine("*Analyzing constraint structure...*") }.renderMarkdown ) + transcript?.write("\n\n✅ Initialization complete\n\n*Analyzing constraint structure...*\n".toByteArray()) task.update() val solutionBuilder = StringBuilder() solutionBuilder.append("# Constraint Relaxation Solution\n\n") solutionBuilder.append("**Problem:** $problem\n\n") + if (inputFileContent.isNotBlank()) { + solutionBuilder.append("## Input Files Context\n\n") + solutionBuilder.append(inputFileContent) + solutionBuilder.append("\n\n") + } try { // Step 1: Analyze and order constraints @@ -217,24 +239,22 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const val orderedConstraints = orderConstraints(constraints, reintroductionOrder) val relaxedConstraints = selectConstraintsToRelax(orderedConstraints, relaxationStrategy) + buildString { + appendLine() + appendLine("## Constraint Ordering") + appendLine() + appendLine("Constraints will be reintroduced in the following order:") + appendLine() + orderedConstraints.forEachIndexed { index, (constraint, priority) -> + val status = if (relaxedConstraints.contains(constraint)) "🔓 Initially Relaxed" else "🔒 Active" + appendLine("${index + 1}. **$constraint** ($status, priority: ${String.format("%.2f", priority)})") + } + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** ✅ Analysis complete") + } - analysisTask.add( - buildString { - appendLine() - appendLine("## Constraint Ordering") - appendLine() - appendLine("Constraints will be reintroduced in the following order:") - appendLine() - orderedConstraints.forEachIndexed { index, (constraint, priority) -> - val status = if (relaxedConstraints.contains(constraint)) "🔓 Initially Relaxed" else "🔒 Active" - appendLine("${index + 1}. 
**$constraint** ($status, priority: ${String.format("%.2f", priority)})") - } - appendLine() - appendLine("---") - appendLine() - appendLine("**Status:** ✅ Analysis complete") - }.renderMarkdown - ) task.update() overviewTask.add( @@ -265,6 +285,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine("**Status:** Generating solution without relaxed constraints...") }.renderMarkdown ) + transcript?.write("\n\n# Initial Relaxed Solution\n\n**Relaxed Constraints:** ${relaxedConstraints.size}\n\n".toByteArray()) task.update() val activeConstraints = constraints.filterKeys { !relaxedConstraints.contains(it) } @@ -276,7 +297,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const findCreativeSatisfactions ) - relaxedSolutionTask.add( + val relaxedSolutionContent = buildString { appendLine() appendLine("## Solution") @@ -286,8 +307,10 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine("---") appendLine() appendLine("**Status:** ✅ Relaxed solution generated") - }.renderMarkdown - ) + } + + transcript?.write(relaxedSolutionContent.toByteArray()) + relaxedSolutionTask.add(relaxedSolutionContent.renderMarkdown) task.update() solutionBuilder.append("## Initial Relaxed Solution\n\n") @@ -336,6 +359,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine("**Status:** Adapting solution to satisfy this constraint...") }.renderMarkdown ) + transcript?.write("\n\n# Iteration ${index + 1}: Reintroducing Constraint\n\n**Constraint:** $constraint\n\n".toByteArray()) task.update() val newActiveConstraints = activeConstraints.toMutableMap() @@ -354,7 +378,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const val iterationTime = System.currentTimeMillis() - iterationStartTime - iterationTask.add( + val iterationContent = buildString { appendLine() appendLine("## Adapted Solution") @@ -364,8 +388,10 @@ 
ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine("---") appendLine() appendLine("**Status:** ✅ Complete (${iterationTime / 1000.0}s)") - }.renderMarkdown - ) + } + + transcript?.write(iterationContent.toByteArray()) + iterationTask.add(iterationContent.renderMarkdown) task.update() reintroductionSteps.add( @@ -411,7 +437,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const api ) - synthesisTask.add( + val synthesisContent = buildString { appendLine() appendLine(synthesis) @@ -419,8 +445,10 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const appendLine("---") appendLine() appendLine("**Status:** ✅ Complete") - }.renderMarkdown - ) + } + + transcript?.write("\n\n# Final Synthesis\n\n${synthesis}\n".toByteArray()) + synthesisTask.add(synthesisContent.renderMarkdown) task.update() solutionBuilder.append("## Progressive Reintroduction\n\n") @@ -446,36 +474,60 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const """.trimMargin() ) + val completionContent = buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Constraint Relaxation Complete") + appendLine() + appendLine("**Total Time:** ${totalTime / 1000.0}s") + appendLine() + appendLine("**Iterations:** ${reintroductionSteps.size}") + appendLine() + appendLine("**Average Iteration Time:** ${avgIterationTime / 1000.0}s") + appendLine() + appendLine("**Constraints Satisfied:** ${constraints.size - relaxedConstraints.size + reintroductionSteps.size}/${constraints.size}") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + } overviewTask.add( - buildString { - appendLine() - appendLine("---") - appendLine() - appendLine("## ✅ Constraint Relaxation Complete") - appendLine() - appendLine("**Total Time:** ${totalTime / 1000.0}s") - appendLine() - appendLine("**Iterations:** 
${reintroductionSteps.size}") - appendLine() - appendLine("**Average Iteration Time:** ${avgIterationTime / 1000.0}s") - appendLine() - appendLine("**Constraints Satisfied:** ${constraints.size - relaxedConstraints.size + reintroductionSteps.size}/${constraints.size}") - appendLine() - appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") - }.renderMarkdown + completionContent.renderMarkdown ) + + transcript?.write(completionContent.toByteArray()) + transcript?.close() + overviewTask.add(completionContent.renderMarkdown) task.update() val finalResult = solutionBuilder.toString() + // Write detailed output to file + val (detailedLink, detailedFile) = Pair(task.linkTo("constraint_relaxation_detailed.md"), task.resolve("constraint_relaxation_detailed.md")) + detailedFile?.outputStream()?.use { stream -> + stream.write(finalResult.toByteArray()) + } + // Generate summary message + val summaryMessage = buildString { + appendLine("✅ Constraint Relaxation Complete") + appendLine() + appendLine("**Total Time:** ${totalTime / 1000.0}s") + appendLine("**Iterations:** ${reintroductionSteps.size}") + appendLine("**Constraints Satisfied:** ${constraints.size - relaxedConstraints.size + reintroductionSteps.size}/${constraints.size}") + appendLine() + appendLine("📄 [View Detailed Results]($detailedLink)") + } + task.safeComplete( - "Completed ${reintroductionSteps.size} constraint reintroduction iterations in ${totalTime / 1000}s", + summaryMessage, log ) - resultFn(finalResult) + resultFn(summaryMessage) } catch (e: Exception) { log.error("Error during constraint relaxation", e) task.error(e) + transcript?.let { + it.close() + } overviewTask.add( buildString { @@ -508,6 +560,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const } } + private fun orderConstraints( constraints: Map, order: String @@ -518,10 +571,12 @@ ConstraintRelaxation - Solve over-constrained problems through progressive 
const // For now, treat lower priority as potentially easier to satisfy constraints.entries.sortedBy { it.value }.map { it.key to it.value } } + "by_dependency" -> { // Simple heuristic: reintroduce in priority order (could be enhanced with dependency analysis) constraints.entries.sortedByDescending { it.value }.map { it.key to it.value } } + else -> constraints.entries.sortedByDescending { it.value }.map { it.key to it.value } } } @@ -536,14 +591,17 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const val relaxCount = (orderedConstraints.size * 0.5).toInt().coerceAtLeast(1) orderedConstraints.takeLast(relaxCount).map { it.first }.toSet() } + "selective" -> { // Relax constraints with priority < 0.7 orderedConstraints.filter { it.second < 0.7 }.map { it.first }.toSet() } + "hierarchical" -> { // Relax all but the top priority tier (>= 0.9) orderedConstraints.filter { it.second < 0.9 }.map { it.first }.toSet() } + else -> { // Default to progressive val relaxCount = (orderedConstraints.size * 0.5).toInt().coerceAtLeast(1) @@ -556,7 +614,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const problem: String, constraints: Map, priorContext: String, - api: com.simiacryptus.cognotik.chat.model.ChatInterface, + api: ChatInterface, findCreative: Boolean ): String { val prompt = buildString { @@ -605,7 +663,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const priority: Double, allActiveConstraints: Map, priorContext: String, - api: com.simiacryptus.cognotik.chat.model.ChatInterface, + api: ChatInterface, findCreative: Boolean ): String { val prompt = buildString { @@ -666,7 +724,7 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const initiallyRelaxed: Set, reintroductionSteps: List, finalSolution: String, - api: com.simiacryptus.cognotik.chat.model.ChatInterface + api: ChatInterface ): String { val prompt = buildString { appendLine("You are an expert 
problem solver providing a final synthesis of a constraint relaxation process.") @@ -713,6 +771,34 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const return agent.answer(listOf("")) } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private data class ReintroductionStep( val constraint: String = "", val priority: Double = 0.0, @@ -742,4 +828,4 @@ ConstraintRelaxation - Solve over-constrained problems through progressive const """ ) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintSatisfactionTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintSatisfactionTask.kt index 8715169e3..d0eafcd17 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintSatisfactionTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ConstraintSatisfactionTask.kt @@ -1,14 +1,16 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory 
-import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter class ConstraintSatisfactionTask( orchestrationConfig: OrchestrationConfig, @@ -21,6 +23,8 @@ class ConstraintSatisfactionTask( class ConstraintSatisfactionTaskExecutionConfigData( @Description("The problem requiring constraint satisfaction") val problem_description: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Hard constraints that must be satisfied (cannot be violated)") val hard_constraints: List? = null, @Description("Soft constraints to optimize with their relative weights (0.0-1.0)") @@ -39,31 +43,37 @@ class ConstraintSatisfactionTask( task_dependencies = task_dependencies?.toMutableList(), state = state ), ValidatedObject { - + override fun validate(): String? 
{ // Validate problem description if (problem_description.isNullOrBlank()) { return "problem_description cannot be null or blank" } - + // Validate search strategy val validStrategies = setOf("backtracking", "forward", "local") if (search_strategy !in validStrategies) { return "search_strategy must be one of: ${validStrategies.joinToString(", ")}" } - + // Validate max iterations if (max_iterations <= 0) { return "max_iterations must be greater than 0" } - + // Validate soft constraint weights soft_constraints?.forEach { (constraint, weight) -> if (weight < 0.0 || weight > 1.0) { return "soft constraint '$constraint' has invalid weight $weight (must be between 0.0 and 1.0)" } } - + // Validate input files if provided + input_files?.forEach { pattern -> + if (pattern.isBlank()) { + return "input_files patterns cannot be blank" + } + } + // Call parent validation return ValidatedObject.validateFields(this) } @@ -71,7 +81,8 @@ class ConstraintSatisfactionTask( override fun promptSegment(): String { return """ -ConstraintSatisfaction - Solve problems with multiple competing constraints + ConstraintSatisfaction - Solve problems with multiple competing constraints + ** Optionally, list input files (supports glob patterns) to be examined when solving the problem ** Specify the problem description clearly ** Define hard constraints that MUST be satisfied (non-negotiable requirements) ** Define soft constraints with weights (0.0-1.0) representing their relative importance @@ -102,8 +113,9 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints resultFn("CONFIGURATION ERROR: $error") return } - + val startTime = System.currentTimeMillis() + var transcriptStream: FileOutputStream? 
= null try { val problemDescription = executionConfig?.problem_description if (problemDescription.isNullOrBlank()) { @@ -117,6 +129,12 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints val softConstraints = executionConfig.soft_constraints ?: emptyMap() val searchStrategy = executionConfig.search_strategy val maxIterations = executionConfig.max_iterations + // Initialize transcript + transcriptStream = initializeTranscript(task) + transcriptStream?.let { stream -> + writeTranscriptHeader(stream, problemDescription, hardConstraints, softConstraints, searchStrategy, maxIterations) + } + val toInput = { it: String -> listOf(it) } val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return @@ -133,6 +151,17 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints val tabbedDisplay = TabbedDisplay(task) task.ui.newTask(false).apply { tabbedDisplay["Problem Overview"] = placeholder + transcriptStream?.write( + """ + |## Constraint Satisfaction Problem + | + |**Problem**: $problemDescription + | + |**Hard Constraints** (${hardConstraints.size}): + |${hardConstraints.joinToString("\n") { "- $it" }} + | + """.trimMargin().toByteArray() + ) add( MarkdownUtil.renderMarkdown( """ @@ -155,6 +184,12 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints task.update() // Step 2: Gather Context task.ui.newTask(false).apply { + transcriptStream?.write( + """ + | + |### Gathering Context + """.trimMargin().toByteArray() + ) tabbedDisplay["Context"] = placeholder add( MarkdownUtil.renderMarkdown( @@ -167,6 +202,7 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints val priorCode = getPriorCode(agent.executionState) + val inputFileContent = getInputFileContent() val prompt = buildPrompt( problemDescription, @@ -174,10 +210,17 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints softConstraints, searchStrategy, maxIterations, - priorCode + 
priorCode, + inputFileContent ) task.ui.newTask(false).apply { tabbedDisplay["Context"] = placeholder + transcriptStream?.write( + """ + | + |### Context Gathered + """.trimMargin().toByteArray() + ) add( MarkdownUtil.renderMarkdown( """ @@ -192,6 +235,12 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints task.update() // Step 3: Generate Solution task.ui.newTask(false).apply { + transcriptStream?.write( + """ + | + |### Generating Solution + """.trimMargin().toByteArray() + ) tabbedDisplay["Solution Generation"] = placeholder task.add( MarkdownUtil.renderMarkdown( @@ -210,6 +259,12 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints var answer: String? = chatAgent.answer(toInput("")) task.ui.newTask(false).apply { + transcriptStream?.write( + """ + | + |### Solution Generated + """.trimMargin().toByteArray() + ) tabbedDisplay["Solution Generation"] = placeholder add( MarkdownUtil.renderMarkdown( @@ -226,6 +281,14 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints task.ui.newTask(false).apply { tabbedDisplay["Final Solution"] = placeholder val solution = answer + transcriptStream?.write( + """ + | + |## Final Solution + | + |${solution ?: "No solution generated."} + """.trimMargin().toByteArray() + ) add( MarkdownUtil.renderMarkdown( @@ -241,17 +304,28 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints task.update() val duration = System.currentTimeMillis() - startTime log.info("Constraint Satisfaction Task completed in ${duration}ms") + transcriptStream?.write("\n\n---\n**Completed in ${duration}ms**\n".toByteArray()) if (orchestrationConfig.autoFix) { - task.safeComplete("Constraint satisfaction solution generated and auto-applied", log) + val (link, _) = task.createFile("constraint_solution_transcript.md") + val summaryMessage = "Constraint satisfaction solution generated. 
" + + "View detailed transcript: markdown " + + "html " + + "pdf" + task.safeComplete(summaryMessage, log) resultFn(answer ?: "No solution generated") } else { task.add( MarkdownUtil.renderMarkdown( acceptButtonFooter(task.ui) { try { - task.complete("Constraint satisfaction solution accepted") + val (link, _) = task.createFile("constraint_solution_transcript.md") + val summaryMessage = "Constraint satisfaction solution accepted. " + + "View detailed transcript: markdown " + + "html " + + "pdf" + task.complete(summaryMessage) resultFn(answer ?: "No solution generated") } catch (e: Exception) { log.error("Error accepting constraint satisfaction solution", e) @@ -264,6 +338,7 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints ) } } catch (e: Exception) { + transcriptStream?.write("\n\n## ❌ Error\n\n${e.message}\n".toByteArray()) log.error("Error in Constraint Satisfaction Task", e) task.error(e) task.add( @@ -280,25 +355,119 @@ ConstraintSatisfaction - Solve problems with multiple competing constraints ) ) resultFn("ERROR: Failed to generate constraint satisfaction solution - ${e.message}") + } finally { + transcriptStream?.flush() + transcriptStream?.close() + } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? 
{ + return try { + val (link, file) = task.createFile("constraint_solution_transcript.md") + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + private fun writeTranscriptHeader( + stream: FileOutputStream, + problemDescription: String, + hardConstraints: List, + softConstraints: Map, + searchStrategy: String, + maxIterations: Int + ) { + try { + val header = buildString { + appendLine("# Constraint Satisfaction Task Transcript") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("**Problem:** $problemDescription") + appendLine("**Hard Constraints:** ${hardConstraints.size}") + appendLine("**Soft Constraints:** ${softConstraints.size}") + appendLine("**Search Strategy:** $searchStrategy") + appendLine("**Max Iterations:** $maxIterations") + appendLine() + appendLine("---") + appendLine() + } + stream.write(header.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write transcript header", e) } } + private fun getInputFileContent(): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: 
Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + private fun buildPrompt( problemDescription: String, hardConstraints: List, softConstraints: Map, searchStrategy: String, maxIterations: Int, - priorCode: String + priorCode: String, + inputFileContent: String ): String { return """ -You are an expert problem solver specializing in constraint satisfaction problems (CSP). + You are an expert problem solver specializing in constraint satisfaction problems (CSP). + + ## Problem Description: + $problemDescription +## Input Files Context: +${if (inputFileContent.isNotBlank()) inputFileContent else "No input files provided"} -## Problem Description: -$problemDescription -## Hard Constraints (MUST be satisfied): -${hardConstraints.mapIndexed { i, c -> "${i + 1}. $c" }.joinToString("\n")} + ## Hard Constraints (MUST be satisfied): + ${hardConstraints.mapIndexed { i, c -> "${i + 1}. $c" }.joinToString("\n")} ## Soft Constraints (optimize with given weights): ${ @@ -378,4 +547,4 @@ Generate the constraint satisfaction solution now: """ ) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CounterfactualAnalysisTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CounterfactualAnalysisTask.kt index ce65d0b43..46c9e7df5 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CounterfactualAnalysisTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/CounterfactualAnalysisTask.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* @@ -10,6 +10,8 @@ import com.simiacryptus.cognotik.util.TabbedDisplay import 
com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream class CounterfactualAnalysisTask( orchestrationConfig: OrchestrationConfig, @@ -19,6 +21,7 @@ class CounterfactualAnalysisTask( planTask ) { val maxDescriptionLength = 200 + protected val codeFiles = mutableMapOf() class CounterfactualAnalysisTaskExecutionConfigData( @Description("The actual scenario or decision to analyze") @@ -31,6 +34,8 @@ class CounterfactualAnalysisTask( val control_factors: List? = null, @Description("Additional files for context (e.g., historical data, related analyses)") val related_files: List? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Detailed description of the analysis objectives") task_description: String? = null, task_dependencies: List? = null, @@ -77,7 +82,7 @@ CounterfactualAnalysis - Explore "what-if" scenarios to understand causal relati resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { - System.currentTimeMillis() + val startTime = System.currentTimeMillis() log.info("Starting CounterfactualAnalysis task for scenario: ${executionConfig?.actual_scenario}") val actualScenario = executionConfig?.actual_scenario @@ -97,8 +102,9 @@ CounterfactualAnalysis - Explore "what-if" scenarios to understand causal relati return } - val toInput = { it: String -> listOf(it) } - return + val toInput = { it: String -> messages + listOf(getInputFileCode(), it).filter { it.isNotBlank() } } + val transcript = transcript(task) + transcript?.write("# Counterfactual Analysis Transcript\n\n".toByteArray()) val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return try { @@ -120,6 +126,18 @@ CounterfactualAnalysis - Explore "what-if" scenarios to understand causal relati ui = task.ui ) ) + transcript?.write( + """ + 
|## Counterfactual Analysis + | + |**Actual Scenario:** ${actualScenario.truncateForDisplay(maxDescriptionLength)} + | + |**Counterfactuals:** ${counterfactuals.size} + | + |**Status:** 🔄 Starting analysis... + | + """.trimMargin().toByteArray() + ) } catch (e: Exception) { log.warn("Failed to create overview tab", e) } @@ -135,41 +153,52 @@ CounterfactualAnalysis - Explore "what-if" scenarios to understand causal relati priorCode, api, task, - toInput + toInput, + transcript ) - + transcript?.write("\n## Actual Scenario Analysis\n\n".toByteArray()) + transcript?.write("**Scenario:** $actualScenario\n\n".toByteArray()) + transcript?.write("**Analysis:**\n\n$actualAnalysis\n\n".toByteArray()) // Analyze counterfactual scenarios val counterfactualAnalyses = counterfactuals.mapIndexed { index, counterfactual -> - analyzeScenario( + transcript?.write("\n## Counterfactual Scenario ${index + 1}\n\n".toByteArray()) + transcript?.write("**Scenario:** $counterfactual\n\n".toByteArray()) + val analysis = analyzeScenario( "Counterfactual ${index + 1}", counterfactual, contextFiles, priorCode, api, task, - toInput + toInput, + transcript ) + transcript?.write("**Analysis:**\n\n$analysis\n\n".toByteArray()) + analysis } - // Compare outcomes if requested val comparisonAnalysis = if (executionConfig?.compare_outcomes == true) { - compareScenarios( - actualScenario, - actualAnalysis, - counterfactuals, - counterfactualAnalyses, - executionConfig?.control_factors, - contextFiles, - priorCode, - api, - task, - toInput + transcript?.write("\n## Comparative Analysis\n\n".toByteArray()) + val comparison = compareScenarios( + actualScenario = actualScenario, + actualAnalysisTokens = actualAnalysis.split("\\s+"), + counterfactuals = counterfactuals, + counterfactualAnalyses = counterfactualAnalyses, + controlFactors = executionConfig.control_factors, + contextFiles = contextFiles, + priorCode = priorCode, + api = api, + task = task, + toInput = toInput, + transcript = transcript ) + 
transcript?.write(comparison.toByteArray()) + comparison } else { "" } - val fullAnalysis = buildString { + buildString { appendLine("# Counterfactual Analysis Results") appendLine() appendLine("## Actual Scenario") @@ -193,10 +222,16 @@ CounterfactualAnalysis - Explore "what-if" scenarios to understand causal relati appendLine(comparisonAnalysis) } } + transcript?.write("\n---\n\n**Analysis Complete**\n".toByteArray()) + transcript?.close() + + val (link, _) = task.createFile("analysis_results.md") + task.complete("Analysis complete. Full results written to $link") - task.add(MarkdownUtil.renderMarkdown(fullAnalysis, ui = task.ui)) + val summaryMessage = + "Counterfactual analysis completed in ${(System.currentTimeMillis() - startTime) / 1000}s. Results: $actualScenario with ${counterfactuals.size} counterfactual scenarios analyzed." task.safeComplete("Analysis complete", log) - resultFn(fullAnalysis) + resultFn(summaryMessage) } private fun analyzeScenario( @@ -206,10 +241,11 @@ CounterfactualAnalysis - Explore "what-if" scenarios to understand causal relati priorCode: String, api: ChatInterface, task: SessionTask, - toInput: (String) -> List + toInput: (String) -> List, + transcript: FileOutputStream? ): String { val prompt = """ -Analyze the following scenario in detail: + Analyze the following scenario in detail: ## Scenario: $scenarioName $scenario @@ -232,8 +268,11 @@ ${executionConfig?.control_factors?.joinToString("\n") { "- $it" } ?: "None spec 6. Highlight any assumptions or uncertainties 7. Provide insights on causal relationships -Provide a comprehensive analysis: + Provide a comprehensive analysis: """.trimIndent() + transcript?.write("\n### Prompt for $scenarioName\n\n".toByteArray()) + transcript?.write("```\n$prompt\n```\n\n".toByteArray()) + val chatAgent = ChatAgent( prompt = promptSegment(), @@ -241,12 +280,14 @@ Provide a comprehensive analysis: ) var result: String? 
= chatAgent.answer(toInput(prompt)) + transcript?.write("### Response for $scenarioName\n\n".toByteArray()) + transcript?.write("${result ?: "(No response)"}\n\n".toByteArray()) return result ?: "" } private fun compareScenarios( actualScenario: String, - actualAnalysis: String, + actualAnalysisTokens: List, counterfactuals: List, counterfactualAnalyses: List, controlFactors: List?, @@ -254,7 +295,8 @@ Provide a comprehensive analysis: priorCode: String, api: ChatInterface, task: SessionTask, - toInput: (String) -> List + toInput: (String) -> List, + transcript: FileOutputStream? ): String { val scenarioComparisons = counterfactuals.zip(counterfactualAnalyses) .mapIndexed { index, (counterfactual, analysis) -> @@ -270,7 +312,7 @@ Compare the following scenarios and provide insights on their differences: ## Actual Scenario **Description:** $actualScenario -**Analysis:** $actualAnalysis +**Analysis:** ${actualAnalysisTokens.joinToString(" ")} $scenarioComparisons @@ -293,8 +335,11 @@ $priorCode 7. Highlight any surprising or counterintuitive findings 8. Provide recommendations based on the analysis -Provide a comprehensive comparative analysis: + Provide a comprehensive comparative analysis: """.trimIndent() + transcript?.write("\n### Comparison Prompt\n\n".toByteArray()) + transcript?.write("```\n$prompt\n```\n\n".toByteArray()) + val chatAgent = ChatAgent( prompt = promptSegment(), @@ -302,6 +347,8 @@ Provide a comprehensive comparative analysis: ) var result: String? 
= chatAgent.answer(toInput(prompt)) + transcript?.write("### Comparison Response\n\n".toByteArray()) + transcript?.write("${result ?: "(No response)"}\n\n".toByteArray()) return result ?: "" } @@ -324,6 +371,64 @@ Provide a comprehensive comparative analysis: } } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(root.toFile()) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", "h", "hpp", + "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: File) = try { + file.readText() + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}", e) + "Error reading file: ${e.message}" + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + companion object { private val log: Logger = LoggerFactory.getLogger(CounterfactualAnalysisTask::class.java) val CounterfactualAnalysis = TaskType( @@ -344,4 +449,6 @@ Provide a comprehensive comparative analysis: """ ) } -} \ No newline at end of file +} + + diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DecompositionSynthesisTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DecompositionSynthesisTask.kt index 1adbb76e4..2a880f1fb 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DecompositionSynthesisTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DecompositionSynthesisTask.kt @@ -1,15 +1,17 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.util.concurrent.atomic.AtomicInteger class DecompositionSynthesisTask( @@ -21,7 +23,32 @@ class DecompositionSynthesisTask( ) { val maxDescriptionLength = 1000 + companion object { + private val log: Logger = LoggerFactory.getLogger(DecompositionSynthesisTask::class.java) + val DecompositionSynthesis: TaskType = TaskType( + 
"DecompositionSynthesis", + DecompositionSynthesisTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Decompose complex problems and synthesize solutions", + """ + Decomposes complex problems into manageable subproblems, solves them, and synthesizes solutions. +
      +
    • Multiple decomposition strategies (functional, temporal, spatial, hierarchical)
    • +
    • Configurable decomposition depth
    • +
    • Dependency-aware subproblem solving
    • +
    • Solution synthesis with coherence validation
    • +
    • Confidence tracking at each level
    • +
    • Implements divide-and-conquer reasoning
    • +
    + """ + ) + } + class DecompositionSynthesisTaskExecutionConfigData( + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, + @Description("Whether to include file context in the analysis") + val include_file_context: Boolean = true, @Description("The complex problem to decompose") val complex_problem: String? = null, @Description("Decomposition strategy: 'functional', 'temporal', 'spatial', 'hierarchical'") @@ -124,7 +151,8 @@ class DecompositionSynthesisTask( override fun promptSegment(): String { return """ -DecompositionSynthesis - Break down complex problems into subproblems and synthesize integrated solutions + DecompositionSynthesis - Break down complex problems into subproblems and synthesize integrated solutions + ** Optionally, list input files (supports glob patterns) to be examined for context ** Problem: ${executionConfig?.complex_problem?.take(100) ?: "Not specified"} ** Specify the complex problem to decompose ** Choose decomposition strategy: @@ -163,6 +191,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe // Create tabbed display for organized output val tabs = TabbedDisplay(task) val ui = task.ui + val transcriptStream = initializeTranscript(task) val api = orchestrationConfig.defaultChatter ?: run { log.error("No default chatter available") task.complete("ERROR: No API available") @@ -192,13 +221,14 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine("⏳ Starting decomposition analysis...") } overviewTask.add(overviewContent.renderMarkdown) + transcriptStream?.let { writeToTranscript(it, overviewContent) } task.update() try { // Step 3: Build context from related files and dependencies log.debug("Building context from related files and dependencies") // Get context from related files and dependencies - val context = buildContext(agent) + val context = buildContext(agent, root) // 
Context tab val contextTask = ui.newTask(false) tabs["Context"] = contextTask.placeholder @@ -211,6 +241,13 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() appendLine(context) }.renderMarkdown) + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine("# Task Context") + appendLine() + appendLine(context) + }) + } task.update() // Update overview with context info @@ -219,6 +256,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine("✅ Context built successfully") appendLine() }.renderMarkdown) + transcriptStream?.let { writeToTranscript(it, "\n✅ Context built successfully\n\n") } task.update() // Step 4: Decompose the problem // Decomposition tab @@ -233,6 +271,14 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine("**Max Depth:** ${executionConfig.max_depth}") }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine("# Problem Decomposition") + appendLine() + appendLine("**Strategy:** ${executionConfig.decomposition_strategy}") + appendLine("**Max Depth:** ${executionConfig.max_depth}") + }) + } log.info("Starting problem decomposition with strategy: ${executionConfig.decomposition_strategy}") val decomposition = decomposeProblem( @@ -275,6 +321,27 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine() + appendLine("## Decomposition Results") + appendLine() + appendLine("**Rationale:** ${decomposition.decomposition_rationale}") + appendLine() + appendLine("### Subproblems (${decomposition.subproblems.size})") + decomposition.subproblems.forEachIndexed { index, subproblem -> + appendLine("${index + 1}. 
**${subproblem.id}**: ${subproblem.description}") + appendLine(" - Complexity: ${subproblem.complexity}/10") + } + appendLine() + appendLine("### Dependencies") + if (decomposition.dependencies.isEmpty()) { + appendLine("*No dependencies*") + } else { + decomposition.dependencies.entries.forEach { (id, deps) -> appendLine("- **$id** → ${deps.joinToString(", ")}") } + } + }) + } // Step 5: Solve all subproblems // Update overview @@ -282,6 +349,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine("✅ Decomposition complete: ${decomposition.subproblems.size} subproblems identified") appendLine() }.renderMarkdown) + transcriptStream?.let { writeToTranscript(it, "\n✅ Decomposition complete: ${decomposition.subproblems.size} subproblems\n\n") } task.update() // Subproblem Solutions tab @@ -294,6 +362,13 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine("# Subproblem Solutions") + appendLine() + appendLine("Solving ${decomposition.subproblems.size} subproblems...") + }) + } val solvedCount = AtomicInteger(0) log.info("Starting to solve ${decomposition.subproblems.size} subproblems") @@ -319,6 +394,16 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine() + appendLine("## ${count}. 
${subproblemId}") + appendLine() + appendLine("**Confidence:** ${(solution.confidence * 100).toInt()}%") + appendLine() + appendLine(solution.solution) + }) + } // Update overview overviewTask.add(buildString { @@ -337,6 +422,12 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine() + appendLine("✅ All subproblems solved! Average confidence: ${(solutions.map { it.confidence }.average() * 100).toInt()}%") + }) + } // Step 6: Synthesize solution (if requested) // Update overview @@ -344,6 +435,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine("✅ All ${solutions.size} subproblems solved") appendLine() }.renderMarkdown) + transcriptStream?.let { writeToTranscript(it, "\n✅ All ${solutions.size} subproblems solved\n\n") } task.update() val finalResult = if (executionConfig.synthesize_solution) { @@ -357,6 +449,13 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine("# Solution Synthesis") + appendLine() + appendLine("Integrating ${solutions.size} subproblem solutions...") + }) + } log.info("Starting solution synthesis from ${solutions.size} subproblem solutions") val synthesized = synthesizeSolution( problem = problem, @@ -385,6 +484,17 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine() + appendLine("## Synthesized Solution") + appendLine() + appendLine("**Synthesis Approach:** ${synthesized.synthesis_approach}") + appendLine("**Confidence:** ${(synthesized.confidence * 100).toInt()}%") + appendLine() + appendLine(synthesized.solution) + }) + } // Update overview 
overviewTask.add(buildString { @@ -392,6 +502,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { writeToTranscript(it, "\n✅ Solution synthesized (confidence: ${(synthesized.confidence * 100).toInt()}%)\n\n") } // Step 7: Validate coherence (if requested) // Validate coherence if requested @@ -406,6 +517,12 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine("# Coherence Validation") + appendLine() + }) + } log.info("Starting coherence validation") val validation = validateCoherence( problem = problem, @@ -447,6 +564,24 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe } }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + appendLine() + appendLine("## Validation Results") + appendLine() + appendLine("**Is Coherent:** ${if (validation.is_coherent) "Yes" else "No"}") + if (validation.issues.isNotEmpty()) { + appendLine() + appendLine("### Issues (${validation.issues.size})") + validation.issues.forEach { appendLine("- $it") } + } + if (validation.suggestions.isNotEmpty()) { + appendLine() + appendLine("### Suggestions (${validation.suggestions.size})") + validation.suggestions.forEach { appendLine("- $it") } + } + }) + } // Update overview overviewTask.add(buildString { @@ -454,6 +589,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { writeToTranscript(it, "\n✅ Validation complete\n\n") } } synthesized.solution @@ -487,6 +623,19 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe appendLine() }.renderMarkdown) task.update() + transcriptStream?.let { + writeToTranscript(it, buildString { + 
appendLine() + appendLine("---") + appendLine() + appendLine("## Analysis Complete") + appendLine() + appendLine("**Total Time:** ${totalTime / 1000}s") + appendLine("**Subproblems:** ${decomposition.subproblems.size}") + appendLine("**Solutions:** ${solutions.size}") + appendLine("**Avg Confidence:** ${(solutions.map { it.confidence }.average() * 100).toInt()}%") + }) + } val summary = "Decomposition & Synthesis completed: ${decomposition.subproblems.size} subproblems, ${solutions.size} solutions in ${totalTime / 1000}s" @@ -494,6 +643,7 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe resultFn(finalResult) } catch (e: Exception) { + transcriptStream?.let { writeToTranscript(it, "\n\n## ERROR\n\n${e.message}\n") } log.error("Error in decomposition synthesis", e) task.error(e) @@ -511,13 +661,23 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe task.update() resultFn("ERROR: ${e.message}") + } finally { + transcriptStream?.flush() + transcriptStream?.close() + log.debug("Transcript closed") } } - private fun buildContext(agent: TaskOrchestrator): String { + private fun buildContext(agent: TaskOrchestrator, root: java.nio.file.Path): String { log.debug("Building context from related files and prior code") val priorCode = getPriorCode(agent.executionState) val relatedFiles = executionConfig?.related_files?.joinToString("\n") { "- $it" } ?: "" + val fileContext = if (executionConfig?.include_file_context == true) { + getInputFileCode(root) + } else { + "" + } + return """ |## Context @@ -525,11 +685,67 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe |### Related Files |$relatedFiles | + |### Input Files + |$fileContext + | |### Previous Task Results |$priorCode """.trimMargin() } + private fun getInputFileCode(root: java.nio.file.Path): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = 
java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = task.createFile("decomposition_transcript.md") + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + private fun writeToTranscript(stream: FileOutputStream, content: String) { + try { + stream.write(content.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write to transcript", e) + } + } + private fun decomposeProblem( problem: String, strategy: String, @@ -575,9 +791,10 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe val decomposition = decompositionAgent.answer(listOf(problem)).obj // Validate the decomposition decomposition.validate()?.let { error -> - throw ValidatedObject.ValidationError(error, decomposition) + log.error("Decomposition validation failed: $error") + throw IllegalArgumentException("Invalid decomposition: $error") } - + return decomposition } @@ -656,7 +873,8 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe val solution = 
solutionAgent.answer(listOf(subproblem.description)).obj // Validate the solution solution.validate()?.let { error -> - throw ValidatedObject.ValidationError(error, solution) + log.error("Solution validation failed for ${subproblem.id}: $error") + throw IllegalArgumentException("Invalid solution for ${subproblem.id}: $error") } val finalSolution = solution.copy(subproblem_id = subproblem.id) @@ -765,9 +983,10 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe val synthesized: SynthesizedSolution = synthesisAgent.answer(listOf(problem)).obj // Validate the synthesized solution synthesized.validate()?.let { error -> - throw ValidatedObject.ValidationError(error, synthesized) + log.error("Synthesis validation failed: $error") + throw IllegalArgumentException("Invalid synthesis: $error") } - + return synthesized!! } @@ -813,30 +1032,25 @@ DecompositionSynthesis - Break down complex problems into subproblems and synthe val validation: CoherenceValidation = validationAgent.answer(listOf(synthesized.solution)).obj // Validate the validation result validation.validate()?.let { error -> - throw ValidatedObject.ValidationError(error, validation) + log.error("Validation result validation failed: $error") + throw IllegalArgumentException("Invalid validation result: $error") } - + return validation!! } - companion object { - private val log: Logger = LoggerFactory.getLogger(DecompositionSynthesisTask::class.java) - val DecompositionSynthesis: TaskType = TaskType( - "DecompositionSynthesis", - DecompositionSynthesisTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Decompose complex problems and synthesize solutions", - """ - Decomposes complex problems into manageable subproblems, solves them, and synthesizes solutions. -
      -
    • Multiple decomposition strategies (functional, temporal, spatial, hierarchical)
    • -
    • Configurable decomposition depth
    • -
    • Dependency-aware subproblem solving
    • -
    • Solution synthesis with coherence validation
    • -
    • Confidence tracking at each level
    • -
    • Implements divide-and-conquer reasoning
    • -
    - """ + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" ) + return markdownTranscript } + + } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DialecticalReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DialecticalReasoningTask.kt index b80ca60b1..1a1091482 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DialecticalReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/DialecticalReasoningTask.kt @@ -1,15 +1,18 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.nio.file.FileSystems +import java.nio.file.Path import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -20,6 +23,8 @@ class DialecticalReasoningTask( orchestrationConfig, planTask ) { + + protected val codeFiles = mutableMapOf() val maxDescriptionLength = 5000 class DialecticalReasoningTaskExecutionConfigData( @@ -33,6 +38,8 @@ class DialecticalReasoningTask( val synthesis_levels: Int = 3, @Description("Whether to preserve strengths from both sides in synthesis") val preserve_strengths: Boolean = true, + @Description("The specific files (or file 
patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Additional files for context") val related_files: List? = null, task_dependencies: List? = null, @@ -79,11 +86,12 @@ DialecticalReasoning - Resolve contradictions through thesis-antithesis-synthesi ) { val startTime = System.currentTimeMillis() var stepStartTime = startTime + var transcriptStream: FileOutputStream? = null log.info("Starting DialecticalReasoningTask") val thesis = executionConfig?.thesis val antithesis = executionConfig?.antithesis - + if (thesis.isNullOrBlank() || antithesis.isNullOrBlank()) { log.error("Both thesis and antithesis must be specified") task.safeComplete("CONFIGURATION ERROR: Both thesis and antithesis must be specified", log) @@ -94,13 +102,14 @@ DialecticalReasoning - Resolve contradictions through thesis-antithesis-synthesi val context = executionConfig.context ?: "general domain" val synthesisLevels = executionConfig.synthesis_levels.coerceIn(1, 5) val preserveStrengths = executionConfig.preserve_strengths - + log.info("Configuration: thesis='$thesis', antithesis='$antithesis', context='$context', levels=$synthesisLevels, preserveStrengths=$preserveStrengths") val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return val ui = task.ui val tabs = TabbedDisplay(task) - + transcriptStream = initializeTranscript(task) + // Overview tab val overviewTask = ui.newTask(false) tabs["Overview"] = overviewTask.placeholder @@ -124,10 +133,24 @@ DialecticalReasoning - Resolve contradictions through thesis-antithesis-synthesi } overviewTask.add(overviewContent.renderMarkdown) task.update() + transcriptStream?.write( + """ + |# Dialectical Reasoning Analysis + | + |**Context:** $context + |**Synthesis Levels:** $synthesisLevels + |**Preserve Strengths:** ${if (preserveStrengths) "Yes" else "No"} + |**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))} + | + |--- + | + 
""".trimMargin().toByteArray() + ) val priorContext = getPriorCode(agent.executionState) val relatedFilesContent = getRelatedFilesContent() - + val inputFilesContent = getInputFileCode() + if (priorContext.isNotBlank() || relatedFilesContent.isNotBlank()) { val contextTask = ui.newTask(false) tabs["Context"] = contextTask.placeholder @@ -149,20 +172,37 @@ DialecticalReasoning - Resolve contradictions through thesis-antithesis-synthesi }.renderMarkdown ) task.update() + transcriptStream?.write( + """ + |## Context Information + | + """.trimMargin().toByteArray() + ) + if (priorContext.isNotBlank()) { + transcriptStream?.write("### Prior Task Results\n\n${priorContext.truncateForDisplay()}\n\n".toByteArray()) + } + if (relatedFilesContent.isNotBlank()) { + transcriptStream?.write("### Related Files\n\n${relatedFilesContent.truncateForDisplay()}\n\n".toByteArray()) + } + transcriptStream?.write("---\n\n".toByteArray()) + if (inputFilesContent.isNotBlank()) { + transcriptStream?.write("### Input Files\n\n${inputFilesContent.truncateForDisplay()}\n\n".toByteArray()) + } + transcriptStream?.write("---\n\n".toByteArray()) } // Concise output for final result val resultBuilder = StringBuilder() resultBuilder.append("# Dialectical Analysis\n\n") resultBuilder.append("**Context:** $context\n\n") - + try { // Step 1: Analyze Thesis log.info("Analyzing thesis") val thesisTask = ui.newTask(false) tabs["Thesis"] = thesisTask.placeholder - + thesisTask.add( buildString { appendLine("# Thesis Analysis") @@ -200,10 +240,24 @@ Be thorough and objective in your analysis. 
) val thesisAnalysis = thesisAgent.answer(listOf("Analyze the thesis statement.")) - + val thesisTime = System.currentTimeMillis() - stepStartTime log.info("Thesis analysis completed in ${thesisTime}ms: ${thesisAnalysis.length} characters") stepStartTime = System.currentTimeMillis() + transcriptStream?.write( + """ + |## Thesis Analysis + | + |**Statement:** $thesis + | + |$thesisAnalysis + | + |**Status:** ✅ Complete (${thesisTime / 1000.0}s) + | + |--- + | + """.trimMargin().toByteArray() + ) thesisTask.add( buildString { @@ -233,7 +287,7 @@ Be thorough and objective in your analysis. log.info("Analyzing antithesis") val antithesisTask = ui.newTask(false) tabs["Antithesis"] = antithesisTask.placeholder - + antithesisTask.add( buildString { appendLine("# Antithesis Analysis") @@ -275,10 +329,24 @@ Be thorough and objective in your analysis. ) val antithesisAnalysis = antithesisAgent.answer(listOf("Analyze the antithesis statement.")) - + val antithesisTime = System.currentTimeMillis() - stepStartTime log.info("Antithesis analysis completed in ${antithesisTime}ms: ${antithesisAnalysis.length} characters") stepStartTime = System.currentTimeMillis() + transcriptStream?.write( + """ + |## Antithesis Analysis + | + |**Statement:** $antithesis + | + |$antithesisAnalysis + | + |**Status:** ✅ Complete (${antithesisTime / 1000.0}s) + | + |--- + | + """.trimMargin().toByteArray() + ) antithesisTask.add( buildString { @@ -308,7 +376,7 @@ Be thorough and objective in your analysis. log.info("Exploring contradictions and tensions") val contradictionsTask = ui.newTask(false) tabs["Contradictions"] = contradictionsTask.placeholder - + contradictionsTask.add( buildString { appendLine("# Contradictions & Tensions") @@ -347,10 +415,22 @@ Be thorough in exploring the dialectical tension. 
) val contradictionsAnalysis = contradictionsAgent.answer(listOf("Explore the contradictions and tensions.")) - + val contradictionsTime = System.currentTimeMillis() - stepStartTime log.info("Contradictions analysis completed in ${contradictionsTime}ms: ${contradictionsAnalysis.length} characters") stepStartTime = System.currentTimeMillis() + transcriptStream?.write( + """ + |## Contradictions & Tensions + | + |$contradictionsAnalysis + | + |**Status:** ✅ Complete (${contradictionsTime / 1000.0}s) + | + |--- + | + """.trimMargin().toByteArray() + ) contradictionsTask.add( buildString { @@ -388,7 +468,7 @@ Be thorough in exploring the dialectical tension. log.info("Generating synthesis level $level of $synthesisLevels") val synthesisTask = ui.newTask(false) tabs["Synthesis L$level"] = synthesisTask.placeholder - + synthesisTask.add( buildString { appendLine("# Synthesis - Level $level") @@ -471,10 +551,22 @@ Aim for progressively deeper insight and integration. val synthesisTime = System.currentTimeMillis() - stepStartTime log.info("Synthesis level $level completed in ${synthesisTime}ms: ${synthesis.length} characters") stepStartTime = System.currentTimeMillis() - + transcriptStream?.write( + """ + |## Synthesis - Level $level + | + |$synthesis + | + |**Status:** ✅ Complete (${synthesisTime / 1000.0}s) + | + |--- + | + """.trimMargin().toByteArray() + ) + synthesisResults.add(synthesis) previousSynthesis = synthesis - + // Add to concise result only for first and last levels if (level == 1 || level == synthesisLevels) { @@ -513,7 +605,7 @@ Aim for progressively deeper insight and integration. log.info("Generating final integration") val integrationTask = ui.newTask(false) tabs["Final Integration"] = integrationTask.placeholder - + integrationTask.add( buildString { appendLine("# Final Integration") @@ -550,10 +642,22 @@ Be comprehensive yet concise in your final integration. 
) val finalIntegration = integrationAgent.answer(listOf("Provide the final integration.")) - + val integrationTime = System.currentTimeMillis() - stepStartTime log.info("Final integration completed in ${integrationTime}ms: ${finalIntegration.length} characters") // stepStartTime = System.currentTimeMillis() // Not needed for the last step + transcriptStream?.write( + """ + |## Final Integration + | + |$finalIntegration + | + |**Status:** ✅ Complete (${integrationTime / 1000.0}s) + | + |--- + | + """.trimMargin().toByteArray() + ) resultBuilder.append("## Final Integration\n\n") resultBuilder.append(finalIntegration) @@ -593,6 +697,20 @@ Be comprehensive yet concise in your final integration. }.renderMarkdown ) task.update() + transcriptStream?.write( + """ + | + |## Summary + | + |**Total Time:** ${totalTime / 1000.0}s + |**Synthesis Levels:** $synthesisLevels + |**Total Output:** ${resultBuilder.length} characters + |**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))} + | + """.trimMargin().toByteArray() + ) + transcriptStream?.close() + task.safeComplete("Completed dialectical analysis with $synthesisLevels synthesis levels in ${totalTime / 1000}s", log) resultFn(resultBuilder.toString()) @@ -614,6 +732,18 @@ Be comprehensive yet concise in your final integration. }.renderMarkdown ) task.update() + transcriptStream?.write( + """ + | + |## ❌ Error Occurred + | + |**Error:** ${e.message} + |**Type:** ${e.javaClass.simpleName} + | + """.trimMargin().toByteArray() + ) + transcriptStream?.close() + val errorOutput = buildString { appendLine("# Error in Dialectical Reasoning") @@ -634,6 +764,50 @@ Be comprehensive yet concise in your final integration. 
} } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = codeFiles[file.toPath()] ?: file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = Pair(task.linkTo("dialectical_transcript.md"), task.resolve("dialectical_transcript.md")) + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + private fun getRelatedFilesContent(): String { val relatedFiles = executionConfig?.related_files ?: return "" diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/EthicalReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/EthicalReasoningTask.kt index 4fbb1e856..5c73a6ef5 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/EthicalReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/EthicalReasoningTask.kt @@ -1,14 +1,13 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent 
import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.getReader import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -20,9 +19,31 @@ class EthicalReasoningTask( planTask ) { + companion object { + private val log: Logger = LoggerFactory.getLogger(EthicalReasoningTask::class.java) + val EthicalReasoning = TaskType( + "EthicalReasoning", + EthicalReasoningTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Analyze a dilemma through multiple ethical frameworks to guide decision-making.", + """ + Provides a structured analysis of a complex ethical problem or decision. +
      +
    • Evaluates a dilemma from the perspectives of several established ethical frameworks (e.g., Utilitarianism, Deontology, Virtue Ethics).
    • +
    • For each framework, it assesses the situation, applies the framework's core principles, and determines a recommended course of action.
    • +
    • Synthesizes these findings to provide a comprehensive recommendation, highlighting points of convergence, divergence, and the ethical trade-offs involved.
    • +
    • Useful for AI safety, product development, policy making, and corporate governance.
    • +
    • Generates a downloadable transcript in markdown, HTML, and PDF formats.
    • +
    + """ + ) + } + class EthicalReasoningTaskExecutionConfigData( @Description("A clear description of the ethical problem or decision to be made.") val ethical_dilemma: String? = null, + @Description("Optional input files (supports glob patterns) to provide context for the ethical analysis") + val input_files: List? = null, @Description("A list of individuals, groups, or entities affected by the decision.") val stakeholders: List? = null, @Description("The ethical frameworks to apply. Options: utilitarianism, deontology, virtue_ethics, care_ethics, rights_based.") @@ -44,10 +65,9 @@ class EthicalReasoningTask( if (stakeholders.isNullOrEmpty()) { return "stakeholders must not be null or empty" } - val validFrameworks = setOf("utilitarianism", "deontology", "virtue_ethics", "care_ethics", "rights_based") ethical_frameworks?.forEach { framework -> - if (framework !in validFrameworks) { - return "Invalid ethical framework: $framework. Valid options are: ${validFrameworks.joinToString(", ")}" + if (framework.isBlank()) { + return "Invalid ethical_frameworks entry: must not be blank" } } return ValidatedObject.validateFields(this) @@ -56,7 +76,10 @@ class EthicalReasoningTask( override fun promptSegment(): String { return """ -EthicalReasoning - Analyze a dilemma through multiple ethical frameworks + EthicalReasoning - Analyze a dilemma through multiple ethical frameworks + ** Optionally specify input files (supports glob patterns) to provide context + ** Files will be read and included in the analysis + ** Specify the ethical dilemma and stakeholders ** Specify the ethical dilemma and stakeholders ** Choose from frameworks: utilitarianism, deontology, virtue_ethics, care_ethics, rights_based ** Provides analysis from each framework's perspective @@ -77,49 +100,55 @@ EthicalReasoning - Analyze a dilemma through multiple ethical frameworks orchestrationConfig: OrchestrationConfig ) { val startTime = System.currentTimeMillis() + messages + getInputFileContent() 
log.info("Starting EthicalReasoning task for dilemma: ${executionConfig?.ethical_dilemma?.truncateForDisplay(200)}") + // Validate configuration first + executionConfig?.validate()?.let { validationError -> + val errorMsg = "VALIDATION ERROR: $validationError" + log.error(errorMsg) + task.safeComplete(errorMsg, log) + resultFn(errorMsg) + return + } + val dilemma = executionConfig?.ethical_dilemma if (dilemma.isNullOrBlank()) { - // Validate configuration - executionConfig?.validate()?.let { validationError -> - val errorMsg = "VALIDATION ERROR: $validationError" - log.error(errorMsg) - task.safeComplete(errorMsg, log) - resultFn(errorMsg) - return - } - val dilemma = executionConfig?.ethical_dilemma - if (dilemma.isNullOrBlank()) { - val errorMsg = "CONFIGURATION ERROR: No ethical dilemma specified" - log.error(errorMsg) - task.safeComplete(errorMsg, log) - resultFn(errorMsg) - return - } - - val stakeholders = executionConfig.stakeholders - if (stakeholders.isNullOrEmpty()) { - val errorMsg = "CONFIGURATION ERROR: No stakeholders specified" - log.error(errorMsg) - task.safeComplete(errorMsg, log) - resultFn(errorMsg) - return - } - - val frameworks = executionConfig.ethical_frameworks ?: listOf("utilitarianism", "deontology", "virtue_ethics") - val context = executionConfig.context ?: "" - - val ui = task.ui - val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return - val tabs = TabbedDisplay(task) - val overviewTask = task.ui.newTask(false) - tabs["Overview"] = overviewTask.placeholder - - try { - var overviewTaskStatus = overviewTask.add( - MarkdownUtil.renderMarkdown( - """ + val errorMsg = "CONFIGURATION ERROR: No ethical dilemma specified" + log.error(errorMsg) + task.safeComplete(errorMsg, log) + resultFn(errorMsg) + return + } + val stakeholders = executionConfig?.stakeholders + if (stakeholders.isNullOrEmpty()) { + val errorMsg = "CONFIGURATION ERROR: No stakeholders specified" + log.error(errorMsg) + task.safeComplete(errorMsg, 
log) + resultFn(errorMsg) + return + } + val frameworks = executionConfig?.ethical_frameworks ?: listOf("utilitarianism", "deontology", "virtue_ethics") + val context = executionConfig?.context ?: "" + + val ui = task.ui + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + val tabs = TabbedDisplay(task) + val transcript = transcript(task) + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + try { + transcript?.write("# Ethical Reasoning Analysis\n\n".toByteArray()) + transcript?.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + transcript?.write("**Dilemma:** $dilemma\n\n".toByteArray()) + transcript?.write("**Stakeholders:** ${stakeholders.joinToString(", ")}\n\n".toByteArray()) + transcript?.write("**Frameworks:** ${frameworks.joinToString(", ")}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + + var overviewTaskStatus = overviewTask.add( + MarkdownUtil.renderMarkdown( + """ |## Ethical Reasoning Analysis | |**Dilemma:** ${dilemma.truncateForDisplay()} @@ -130,41 +159,41 @@ EthicalReasoning - Analyze a dilemma through multiple ethical frameworks | |**Status:** 🔄 Initializing analysis... 
""".trimMargin(), ui = ui - ) ) - task.update() - - val priorContext = getPriorCode(agent.executionState) - val fullContext = buildString { - if (priorContext.isNotBlank()) { - append("## Context from Previous Tasks\n\n") - append(priorContext) - append("\n\n") - } - if (context.isNotBlank()) { - append("## Additional Context\n\n") - append(context) - append("\n\n") - } + ) + task.update() + + val priorContext = getPriorCode(agent.executionState) + val fullContext = buildString { + if (priorContext.isNotBlank()) { + append("## Context from Previous Tasks\n\n") + append(priorContext) + append("\n\n") } - - if (fullContext.isNotBlank()) { - val contextTask = task.ui.newTask(false) - tabs["Context"] = contextTask.placeholder - contextTask.add(MarkdownUtil.renderMarkdown(fullContext, ui = ui)) - task.update() + if (context.isNotBlank()) { + append("## Additional Context\n\n") + append(context) + append("\n\n") } + } - // Step 1: Dilemma & Stakeholder Analysis - log.debug("Analyzing dilemma and stakeholders") - val analysisTask = task.ui.newTask(false) - tabs["Dilemma Analysis"] = analysisTask.placeholder - val analysisLoading = analysisTask.add( - MarkdownUtil.renderMarkdown("## Dilemma & Stakeholder Analysis\n\n🔄 Analyzing...", ui = ui) - ) + if (fullContext.isNotBlank()) { + val contextTask = task.ui.newTask(false) + tabs["Context"] = contextTask.placeholder + contextTask.add(MarkdownUtil.renderMarkdown("## Analysis Context\n\n$fullContext", ui = ui)) task.update() + } + + // Step 1: Dilemma & Stakeholder Analysis + log.debug("Analyzing dilemma and stakeholders") + val analysisTask = task.ui.newTask(false) + tabs["Dilemma Analysis"] = analysisTask.placeholder + val analysisLoading = analysisTask.add( + MarkdownUtil.renderMarkdown("## Dilemma & Stakeholder Analysis\n\n🔄 Analyzing...", ui = ui) + ) + task.update() - val analysisPrompt = """ + val analysisPrompt = """ You are an expert in ethical analysis. 
Your first task is to deconstruct the provided ethical dilemma and analyze the stakeholders. **Ethical Dilemma:** @@ -182,44 +211,48 @@ $fullContext Provide a detailed analysis. """.trimIndent() - val chatAgent = ChatAgent( - prompt = "", - model = api - ) - val dilemmaAnalysis = chatAgent.answer(listOf(analysisPrompt)) - log.info("Dilemma analysis completed. Length: ${dilemmaAnalysis.length} characters") - - analysisLoading?.clear() - analysisTask.add(MarkdownUtil.renderMarkdown("## Dilemma & Stakeholder Analysis\n\n$dilemmaAnalysis", ui = ui)) - task.update() - - overviewTaskStatus?.clear() - overviewTaskStatus = overviewTask.add( - MarkdownUtil.renderMarkdown( - """ + val chatAgent = ChatAgent( + prompt = "", + model = api + ) + val dilemmaAnalysis = chatAgent.answer(listOf(analysisPrompt)) + log.info("Dilemma analysis completed. Length: ${dilemmaAnalysis.length} characters") + transcript?.write("## Dilemma & Stakeholder Analysis\n\n".toByteArray()) + transcript?.write("${dilemmaAnalysis}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + + + analysisLoading?.clear() + analysisTask.add(MarkdownUtil.renderMarkdown("## Dilemma & Stakeholder Analysis\n\n$dilemmaAnalysis", ui = ui)) + task.update() + + overviewTaskStatus?.clear() + overviewTaskStatus = overviewTask.add( + MarkdownUtil.renderMarkdown( + """ |## Ethical Reasoning Analysis | |**Dilemma:** ${dilemma.truncateForDisplay()} | |**Status:** 🔄 Applying ethical frameworks... 
""".trimMargin(), ui = ui - ) + ) + ) + task.update() + + // Step 2: Framework Application + val frameworkAnalyses = mutableMapOf() + for (framework in frameworks) { + val capitalizedFramework = framework.replaceFirstChar { it.titlecase() } + log.debug("Applying framework: $framework") + val frameworkTask = task.ui.newTask(false) + tabs["Framework: $capitalizedFramework"] = frameworkTask.placeholder + val frameworkLoading = frameworkTask.add( + MarkdownUtil.renderMarkdown("## $capitalizedFramework Analysis\n\n🔄 Applying framework...", ui = ui) ) task.update() - // Step 2: Framework Application - val frameworkAnalyses = mutableMapOf() - for (framework in frameworks) { - val capitalizedFramework = framework.replaceFirstChar { it.titlecase() } - log.debug("Applying framework: $framework") - val frameworkTask = task.ui.newTask(false) - tabs["Framework: $capitalizedFramework"] = frameworkTask.placeholder - val frameworkLoading = frameworkTask.add( - MarkdownUtil.renderMarkdown("## $capitalizedFramework Analysis\n\n🔄 Applying framework...", ui = ui) - ) - task.update() - - val frameworkPrompt = """ + val frameworkPrompt = """ You are an expert specializing in the **$capitalizedFramework** ethical framework. Analyze the following dilemma from this specific perspective. @@ -242,25 +275,29 @@ $fullContext Provide a clear and structured analysis. """.trimIndent() - val frameworkAnalysis = chatAgent.answer(listOf(frameworkPrompt)) - frameworkAnalyses[framework] = frameworkAnalysis - log.info("$framework analysis completed. Length: ${frameworkAnalysis.length} characters") + val frameworkAnalysis = chatAgent.answer(listOf(frameworkPrompt)) + frameworkAnalyses[framework] = frameworkAnalysis + log.info("$framework analysis completed. 
Length: ${frameworkAnalysis.length} characters") + transcript?.write("## $capitalizedFramework Analysis\n\n".toByteArray()) + transcript?.write("${frameworkAnalysis}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) - frameworkLoading?.clear() - frameworkTask.add(MarkdownUtil.renderMarkdown("## $capitalizedFramework Analysis\n\n$frameworkAnalysis", ui = ui)) - task.update() - } - // Step 3: Synthesis and Recommendation - log.debug("Synthesizing framework analyses") - val synthesisTask = task.ui.newTask(false) - tabs["Synthesis"] = synthesisTask.placeholder - val synthesisLoading = synthesisTask.add( - MarkdownUtil.renderMarkdown("## Synthesis & Recommendation\n\n🔄 Synthesizing results...", ui = ui) - ) + frameworkLoading?.clear() + frameworkTask.add(MarkdownUtil.renderMarkdown("## $capitalizedFramework Analysis\n\n$frameworkAnalysis", ui = ui)) task.update() + } + + // Step 3: Synthesis and Recommendation + log.debug("Synthesizing framework analyses") + val synthesisTask = task.ui.newTask(false) + tabs["Synthesis"] = synthesisTask.placeholder + val synthesisLoading = synthesisTask.add( + MarkdownUtil.renderMarkdown("## Synthesis & Recommendation\n\n🔄 Synthesizing results...", ui = ui) + ) + task.update() - val synthesisPrompt = """ + val synthesisPrompt = """ You are a master ethicist. Your task is to synthesize the analyses from multiple ethical frameworks to provide a final, balanced recommendation. **Ethical Dilemma:** @@ -277,29 +314,40 @@ ${frameworkAnalyses.entries.joinToString("\n\n") { "### ${it.key.replaceFirstCha Provide a detailed synthesis and a clear final recommendation. """.trimIndent() - val synthesis = chatAgent.answer(listOf(synthesisPrompt)) - log.info("Synthesis completed. Length: ${synthesis.length} characters") + val synthesis = chatAgent.answer(listOf(synthesisPrompt)) + log.info("Synthesis completed. 
Length: ${synthesis.length} characters") + transcript?.write("## Synthesis & Recommendation\n\n".toByteArray()) + transcript?.write("${synthesis}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) - synthesisLoading?.clear() - synthesisTask.add(MarkdownUtil.renderMarkdown("## Synthesis & Recommendation\n\n$synthesis", ui = ui)) - task.update() - // Final result and overview update - val finalRecommendationSummary = chatAgent.answer( - listOf( - """ -Based on the following synthesis, provide a very concise summary (2-3 sentences) of the final recommendation and the key trade-off. + synthesisLoading?.clear() + synthesisTask.add(MarkdownUtil.renderMarkdown("## Synthesis & Recommendation\n\n$synthesis", ui = ui)) + task.update() + + // Final result and overview update + val finalRecommendationSummary = chatAgent.answer( + listOf( + """ + Based on the following synthesis, provide a very concise summary (2-3 sentences) of the final recommendation and the key trade-off. -**Synthesis:** -$synthesis + **Synthesis:** + $synthesis """.trimIndent() - ) ) - - overviewTaskStatus?.clear() - overviewTask.add( - MarkdownUtil.renderMarkdown( - """ + ) + transcript?.write("## Final Recommendation Summary\n\n".toByteArray()) + transcript?.write("${finalRecommendationSummary}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + transcript?.write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) + transcript?.flush() + transcript?.close() + + + overviewTaskStatus?.clear() + overviewTask.add( + MarkdownUtil.renderMarkdown( + """ |## Ethical Reasoning Analysis | |**Dilemma:** ${dilemma.truncateForDisplay()} @@ -313,65 +361,136 @@ $synthesis |### Final Recommendation Summary |$finalRecommendationSummary """.trimMargin(), ui = ui - ) ) - task.update() - - val finalResult = buildString { - appendLine("# Ethical Reasoning Summary") - appendLine() - appendLine("**Dilemma:** 
${dilemma.truncateForDisplay()}") - appendLine() - appendLine("**Recommendation:** $finalRecommendationSummary") - appendLine() - appendLine("---") - appendLine("Detailed analysis is available in the UI tabs.") - } - - val duration = System.currentTimeMillis() - startTime - val summary = "Ethical reasoning analysis completed for dilemma: ${dilemma.truncateForDisplay(200)}" - log.info("$summary (duration: ${duration}ms)") - - task.safeComplete(summary, log) - resultFn(finalResult) + ) + task.update() + + val finalResult = buildString { + appendLine("# Ethical Reasoning Summary") + appendLine() + appendLine("**Dilemma:** ${dilemma.truncateForDisplay()}") + appendLine() + appendLine("**Recommendation:** $finalRecommendationSummary") + appendLine() + appendLine("---") + appendLine("Detailed analysis is available in the UI tabs.") + } - } catch (e: Exception) { - val duration = System.currentTimeMillis() - startTime - log.error("EthicalReasoning task failed after ${duration}ms for dilemma: ${dilemma.truncateForDisplay(200)}", e) - overviewTask.add( - MarkdownUtil.renderMarkdown( - """ + val duration = System.currentTimeMillis() - startTime + val summary = "Ethical reasoning analysis completed for dilemma: ${dilemma.truncateForDisplay(200)}" + log.info("$summary (duration: ${duration}ms)") + val (transcriptLink, _) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + + task.safeComplete(summary, log) + resultFn("$finalResult\n\n---\n\nDetailed analysis: [View Transcript]($transcriptLink)") + + } catch (e: Exception) { + val duration = System.currentTimeMillis() - startTime + log.error("EthicalReasoning task failed after ${duration}ms for dilemma: ${dilemma.truncateForDisplay(200)}", e) + transcript?.write("\n\n## ERROR\n\n".toByteArray()) + transcript?.write("**Error:** ${e.message}\n".toByteArray()) + transcript?.write("**Stack Trace:**\n```\n${e.stackTraceToString()}\n```\n".toByteArray()) + transcript?.flush() + transcript?.close() + + 
overviewTask.add( + MarkdownUtil.renderMarkdown( + """ |## Ethical Reasoning Analysis | |**Status:** ❌ Analysis Failed | |**Error:** ${e.message} """.trimMargin(), ui = ui - ) ) - task.update() - task.error(e) - task.safeComplete("Analysis failed: ${e.message}", log) - resultFn("ERROR: Ethical reasoning analysis failed - ${e.message}") - } + ) + task.update() + task.error(e) + task.safeComplete("Analysis failed: ${e.message}", log) + resultFn("ERROR: Ethical reasoning analysis failed - ${e.message}") } } - companion object { - private val log: Logger = LoggerFactory.getLogger(EthicalReasoningTask::class.java) - val EthicalReasoning = TaskType( - "EthicalReasoning", - EthicalReasoningTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Analyze a dilemma through multiple ethical frameworks to guide decision-making.", - """ - Provides a structured analysis of a complex ethical problem or decision. -
      -
    • Evaluates a dilemma from the perspectives of several established ethical frameworks (e.g., Utilitarianism, Deontology, Virtue Ethics).
    • -
    • For each framework, it assesses the situation, applies the framework's core principles, and determines a recommended course of action.
    • -
    • Synthesizes these findings to provide a comprehensive recommendation, highlighting points of convergence, divergence, and the ethical trade-offs involved.
    • -
    • Useful for AI safety, product development, policy making, and corporate governance.
    • -
    - """ + + private fun getInputFileContent(): List { + return (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .mapNotNull { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + file.readText() + } + "# ${relativePath}\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + null + } + } + } + + private fun isTextFile(file: java.io.File): Boolean { + val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: java.io.File) = try { + file.getReader().use { it.getText() } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}", e) + file.readText() + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" ) + return markdownTranscript } + } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/FiniteStateMachineTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/FiniteStateMachineTask.kt index 3ecccdbce..cdec59254 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/FiniteStateMachineTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/FiniteStateMachineTask.kt @@ -1,13 +1,17 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.PaginatedDocumentReader +import com.simiacryptus.cognotik.input.getReader import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.MarkdownUtil import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.File +import java.nio.file.FileSystems import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -18,7 +22,9 @@ class FiniteStateMachineTask( orchestrationConfig, planTask ) { + protected val codeFiles = mutableMapOf() val maxDescriptionLength = 500 + private var transcriptStream: java.io.FileOutputStream? 
= null class FiniteStateMachineTaskExecutionConfigData( @Description("The concept, system, or process to model as a finite state machine") @@ -35,6 +41,8 @@ class FiniteStateMachineTask( val generate_test_scenarios: Boolean = true, @Description("Domain or context for the FSM (e.g., 'authentication system', 'order processing')") val domain_context: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, task_description: String? = null, task_dependencies: List? = null, state: TaskState? = TaskState.Pending, @@ -71,13 +79,23 @@ FiniteStateMachine - Model concepts using finite state machine analysis resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { + log.info("FiniteStateMachineTask.run() called with messages count: ${messages.size}") val startTime = System.currentTimeMillis() log.info("Starting FiniteStateMachineTask for concept: '${executionConfig?.concept_to_model}'") + // Initialize transcript + transcriptStream = transcript(task) + if (transcriptStream == null) { + log.error("Failed to initialize transcript stream") + } + writeToTranscript("# Finite State Machine Analysis\n\n${messages.joinToString("\n\n")}\n\n") + writeToTranscript("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n") val conceptToModel = executionConfig?.concept_to_model if (conceptToModel.isNullOrBlank()) { val errorMsg = "CONFIGURATION ERROR: No concept to model specified" log.error(errorMsg) + writeToTranscript("## Error\n\n$errorMsg\n\n") + closeTranscript() task.complete(errorMsg) resultFn(errorMsg) return @@ -86,6 +104,8 @@ FiniteStateMachine - Model concepts using finite state machine analysis val ui = task.ui val api = orchestrationConfig.defaultChatter ?: run { log.error("No default chatter available") + writeToTranscript("## Error\n\nNo API available\n\n") + closeTranscript() task.complete("ERROR: No API available") 
resultFn("ERROR: No API available") return @@ -102,6 +122,13 @@ FiniteStateMachine - Model concepts using finite state machine analysis val domainContext = executionConfig.domain_context ?: "general domain" val initialStates = executionConfig.initial_states ?: emptyList() val knownEvents = executionConfig.known_events ?: emptyList() + writeToTranscript("## Configuration\n\n") + writeToTranscript("**Concept:** $conceptToModel\n\n") + writeToTranscript("**Input Files:** ${if (executionConfig.input_files?.isNotEmpty() == true) executionConfig.input_files.joinToString(", ") else "None"}\n\n") + writeToTranscript("**Domain:** $domainContext\n\n") + writeToTranscript("**Initial States:** ${if (initialStates.isNotEmpty()) initialStates.joinToString(", ") else "To be identified"}\n\n") + writeToTranscript("**Known Events:** ${if (knownEvents.isNotEmpty()) knownEvents.joinToString(", ") else "To be identified"}\n\n") + writeToTranscript("---\n\n") var overviewContent = overviewTask.add( MarkdownUtil.renderMarkdown( @@ -128,6 +155,7 @@ FiniteStateMachine - Model concepts using finite state machine analysis log.debug("Gathering prior context from execution state") val priorContext = getPriorCode(agent.executionState) + val inputFileContent = getInputFileCode() // Step 1: Identify States log.info("Step 1: Identifying all possible states") @@ -143,7 +171,8 @@ FiniteStateMachine - Model concepts using finite state machine analysis conceptToModel, domainContext, initialStates, - priorContext + priorContext, + inputFileContent ) val stateAgent = ChatAgent( @@ -154,6 +183,12 @@ FiniteStateMachine - Model concepts using finite state machine analysis log.debug("Requesting state identification from LLM") val statesAnalysis = stateAgent.answer(listOf("Identify all possible states for this concept.")) + writeToTranscript("## Step 1: State Identification\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$stateIdentificationPrompt\n```\n\n") + writeToTranscript("### 
Response\n\n") + writeToTranscript("$statesAnalysis\n\n") + writeToTranscript("---\n\n") statesLoading?.clear() statesTask.add( @@ -203,6 +238,12 @@ FiniteStateMachine - Model concepts using finite state machine analysis log.debug("Requesting transition analysis from LLM") val transitionsAnalysis = stateAgent.answer(listOf(transitionPrompt)) + writeToTranscript("## Step 2: Transition Analysis\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$transitionPrompt\n```\n\n") + writeToTranscript("### Response\n\n") + writeToTranscript("$transitionsAnalysis\n\n") + writeToTranscript("---\n\n") transitionsLoading?.clear() transitionsTask.add( @@ -246,6 +287,16 @@ Generate the Mermaid diagram now: log.debug("Requesting state diagram from LLM") val diagramResult = stateAgent.answer(listOf(diagramPrompt)) val mermaidCode = extractMermaidCode(diagramResult) + writeToTranscript("## Step 3: State Diagram Generation\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$diagramPrompt\n```\n\n") + writeToTranscript("### Response\n\n") + if (mermaidCode.isNotEmpty()) { + writeToTranscript("```mermaid\n$mermaidCode\n```\n\n") + } else { + writeToTranscript("⚠️ Failed to generate diagram\n\n```\n$diagramResult\n```\n\n") + } + writeToTranscript("---\n\n") diagramLoading?.clear() if (mermaidCode.isNotEmpty()) { @@ -307,6 +358,12 @@ Provide a structured analysis of edge cases and recommendations. log.debug("Requesting edge case analysis from LLM") edgeCasesAnalysis = stateAgent.answer(listOf(edgeCasesPrompt)) + writeToTranscript("## Step 4: Edge Cases Analysis\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$edgeCasesPrompt\n```\n\n") + writeToTranscript("### Response\n\n") + writeToTranscript("$edgeCasesAnalysis\n\n") + writeToTranscript("---\n\n") edgeCasesLoading?.clear() edgeCasesTask.add( @@ -356,6 +413,12 @@ Provide a structured validation report. 
log.debug("Requesting FSM validation from LLM") validationAnalysis = stateAgent.answer(listOf(validationPrompt)) + writeToTranscript("## Step 5: FSM Property Validation\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$validationPrompt\n```\n\n") + writeToTranscript("### Response\n\n") + writeToTranscript("$validationAnalysis\n\n") + writeToTranscript("---\n\n") validationLoading?.clear() validationTask.add( @@ -406,6 +469,12 @@ Generate at least 5-10 diverse test scenarios. log.debug("Requesting test scenario generation from LLM") testScenariosAnalysis = stateAgent.answer(listOf(testScenariosPrompt)) + writeToTranscript("## Step 6: Test Scenario Generation\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$testScenariosPrompt\n```\n\n") + writeToTranscript("### Response\n\n") + writeToTranscript("$testScenariosAnalysis\n\n") + writeToTranscript("---\n\n") testScenariosLoading?.clear() testScenariosTask.add( @@ -447,6 +516,12 @@ Keep the summary concise but informative. log.debug("Requesting summary from LLM") val summaryAnalysis = stateAgent.answer(listOf(summaryPrompt)) + writeToTranscript("## Step 7: Summary\n\n") + writeToTranscript("### Prompt\n\n") + writeToTranscript("```\n$summaryPrompt\n```\n\n") + writeToTranscript("### Response\n\n") + writeToTranscript("$summaryAnalysis\n\n") + writeToTranscript("---\n\n") summaryLoading?.clear() summaryTask.add( @@ -513,13 +588,29 @@ Keep the summary concise but informative. 
} log.info("FiniteStateMachineTask completed: concept='$conceptToModel', duration=${totalTime}ms, output_size=${conciseResult.length} chars") - - task.complete("FSM analysis completed for: $conceptToModel") + writeToTranscript("## Completion\n\n") + writeToTranscript("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n") + writeToTranscript("**Duration:** ${totalTime / 1000.0}s\n\n") + writeToTranscript("**Status:** ✅ Analysis complete\n\n") + closeTranscript() + + val (link, _) = task.createFile("fsm_analysis.md") + task.complete( + "FSM analysis completed for: $conceptToModel. " + + "Full analysis written to $link " + + "html " + + "pdf" + ) resultFn(conciseResult) } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime log.error("FiniteStateMachineTask failed after ${duration}ms for concept: $conceptToModel", e) + writeToTranscript("## Error\n\n") + writeToTranscript("**Failed after:** ${duration}ms\n\n") + writeToTranscript("**Error:** ${e.message}\n\n") + writeToTranscript("```\n${e.stackTraceToString()}\n```\n\n") + closeTranscript() task.error(e) task.complete("Analysis failed: ${e.message}") resultFn("ERROR: FSM analysis failed - ${e.message}") @@ -530,10 +621,21 @@ Keep the summary concise but informative. concept: String, domain: String, initialStates: List, - priorContext: String + priorContext: String, + inputFileContent: String ): String { + val fileContentSection = if (inputFileContent.isNotBlank()) { + """ + | + |## Reference Files: + |$inputFileContent + """.trimMargin() + } else { + "" + } val initialStatesSection = if (initialStates.isNotEmpty()) { """ +$fileContentSection | |## Known Initial States: |${initialStates.joinToString("\n") { "- $it" }} @@ -624,6 +726,62 @@ Format as a clear table or structured list. 
""".trimIndent() } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(root.toFile()) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", + "h", "hpp", "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: File) = try { + file.getReader().use { reader -> + when (reader) { + is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" + } + } + + private fun extractMermaidCode(response: String): String { // Try to extract mermaid code block val mermaidBlockRegex = "```mermaid\\s*([\\s\\S]*?)```".toRegex() @@ -642,6 +800,37 @@ Format as a clear table or structured list. 
return "" } + private fun transcript(task: SessionTask): java.io.FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun writeToTranscript(content: String) { + try { + transcriptStream?.write(content.toByteArray(Charsets.UTF_8)) + transcriptStream?.flush() + } catch (e: Exception) { + log.warn("Failed to write to transcript", e) + } + } + + private fun closeTranscript() { + try { + transcriptStream?.close() + transcriptStream = null + } catch (e: Exception) { + log.warn("Failed to close transcript", e) + } + } + companion object { private val log: Logger = LoggerFactory.getLogger(FiniteStateMachineTask::class.java) @@ -664,4 +853,5 @@ Format as a clear table or structured list. """ ) } -} \ No newline at end of file +} + diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GameTheoryTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GameTheoryTask.kt index d88f4d542..8e70d639f 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GameTheoryTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GameTheoryTask.kt @@ -1,15 +1,14 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.getReader import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* 
import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -22,6 +21,59 @@ class GameTheoryTask( ) { val maxOutputLengthPerField = 10000 + companion object { + private val log: Logger = LoggerFactory.getLogger(GameTheoryTask::class.java) + val GameTheory = TaskType( + "GameTheory", + GameTheoryTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Analyze strategic interactions using game theory", + """ + Performs comprehensive game theory analysis of strategic situations. +
      +
    • Analyzes game structure and player strategies
    • +
    • Constructs payoff matrices for strategy combinations
    • +
    • Identifies Nash equilibria (pure and mixed strategies)
    • +
    • Analyzes dominant and dominated strategies
    • +
    • Finds Pareto optimal outcomes
    • +
    • Supports repeated game analysis with trigger strategies
    • +
    • Provides strategic recommendations for each player
    • +
    • Handles cooperative, non-cooperative, zero-sum, and sequential games
    • +
    • Useful for competitive analysis, negotiation, and strategic planning
    • +
    + """ + ) + private val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", + "c", "cpp", "h", "hpp", "css", "html", "xml", "json", "yaml", + "yml", "properties", "gradle", "maven" + ) + + fun isTextFile(file: java.io.File): Boolean { + return textExtensions.contains(file.extension.lowercase()) + } + + fun extractDocumentContent(file: java.io.File) = try { + file.getReader().use { reader -> + when (reader) { + is com.simiacryptus.cognotik.input.PaginatedDocumentReader -> + reader.getText(0, reader.getPageCount()) + + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" + } + } + + + } + data class GameAnalysis( val game_type: String? = null, val players: List? = null, @@ -33,6 +85,8 @@ class GameTheoryTask( val recommendations: Map? = null ) + protected val codeFiles = mutableMapOf() + class GameTheoryTaskExecutionConfigData( @Description("The strategic situation or game to analyze") val game_scenario: String? = null, @@ -58,6 +112,8 @@ class GameTheoryTask( val iterations: Int = 10, @Description("Additional context or constraints") val additional_context: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, task_description: String? = null, task_dependencies: List? = null, state: TaskState? 
= TaskState.Pending, @@ -80,9 +136,8 @@ class GameTheoryTask( if (game_type.isNullOrBlank()) { return "game_type must not be null or blank" } - val validGameTypes = setOf("cooperative", "non-cooperative", "zero-sum", "repeated", "sequential") - if (game_type !in validGameTypes) { - return "game_type must be one of: ${validGameTypes.joinToString(", ")}" + if (game_type.isBlank()) { + return "game_type must not be blank" } if (iterations < 1) { return "iterations must be at least 1" @@ -123,6 +178,7 @@ GameTheory - Analyze strategic interactions using game theory ) { val startTime = System.currentTimeMillis() log.info("Starting GameTheory task for scenario: ${executionConfig?.game_scenario}") + val toInput = { it: String -> messages + listOf(getInputFileCode(), it).filter { it.isNotBlank() } } val gameScenario = executionConfig?.game_scenario if (gameScenario.isNullOrBlank()) { @@ -142,9 +198,9 @@ GameTheory - Analyze strategic interactions using game theory return } - val toInput = { it: String -> listOf(it) } val ui = task.ui val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + val transcript = transcript(task) // Create tabbed display for organized output val tabs = TabbedDisplay(task) // Overview tab @@ -153,6 +209,9 @@ GameTheory - Analyze strategic interactions using game theory try { + transcript?.write("# Game Theory Analysis\n\n".toByteArray()) + transcript?.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) + var overviewTaskStatus = overviewTask.add( MarkdownUtil.renderMarkdown( @@ -168,6 +227,18 @@ GameTheory - Analyze strategic interactions using game theory """.trimMargin(), ui = ui ) ) + transcript?.write( + """ + |## Game Theory Analysis + | + |**Scenario:** $gameScenario + | + |**Players:** ${players.joinToString(", ")} + | + |**Game Type:** ${executionConfig.game_type} + | + |""".trimMargin().toByteArray() + ) task.update() log.debug("Retrieving prior 
context from execution state") @@ -191,6 +262,14 @@ GameTheory - Analyze strategic interactions using game theory """.trimMargin(), ui = ui ) ) + transcript?.write( + """ + |## Context from Previous Tasks + | + |$priorContext + | + |""".trimMargin().toByteArray() + ) task.update() } @@ -239,6 +318,15 @@ GameTheory - Analyze strategic interactions using game theory val structureAnalysis = chatAgent.answer(toInput(structurePrompt)) log.info("Structure analysis completed in ${System.currentTimeMillis() - stepStartTime}ms. Length: ${structureAnalysis.length} characters") + transcript?.write( + """ + |## Game Structure Analysis + | + |$structureAnalysis + | + |""".trimMargin().toByteArray() + ) + structureLoading?.clear() structureTask.add( @@ -282,6 +370,15 @@ Generate the payoff matrix now: payoffMatrix = chatAgent.answer(toInput(payoffPrompt)) log.info("Payoff matrix generated in ${System.currentTimeMillis() - stepStartTime}ms. Length: ${payoffMatrix.length} characters") + transcript?.write( + """ + |## Payoff Matrix + | + |$payoffMatrix + | + |""".trimMargin().toByteArray() + ) + payoffLoading?.clear() payoffTask.add( @@ -329,6 +426,15 @@ Generate the Nash equilibrium analysis now: nashEquilibria = chatAgent.answer(toInput(nashPrompt)) log.info("Nash equilibria analysis completed in ${System.currentTimeMillis() - stepStartTime}ms. Length: ${nashEquilibria.length} characters") + transcript?.write( + """ + |## Nash Equilibria Analysis + | + |$nashEquilibria + | + |""".trimMargin().toByteArray() + ) + nashLoading?.clear() nashTask.add( @@ -373,6 +479,15 @@ Generate the dominant strategy analysis now: dominantStrategies = chatAgent.answer(toInput(dominantPrompt)) log.info("Dominant strategies analysis completed in ${System.currentTimeMillis() - stepStartTime}ms. 
Length: ${dominantStrategies.length} characters") + transcript?.write( + """ + |## Dominant Strategies Analysis + | + |$dominantStrategies + | + |""".trimMargin().toByteArray() + ) + dominantLoading?.clear() dominantTask.add( @@ -417,6 +532,15 @@ Generate the Pareto optimality analysis now: paretoOptimal = chatAgent.answer(toInput(paretoPrompt)) log.info("Pareto optimality analysis completed in ${System.currentTimeMillis() - stepStartTime}ms. Length: ${paretoOptimal.length} characters") + transcript?.write( + """ + |## Pareto Optimality Analysis + | + |$paretoOptimal + | + |""".trimMargin().toByteArray() + ) + paretoLoading?.clear() paretoTask.add( @@ -462,6 +586,15 @@ Generate the repeated game analysis now: repeatedGameAnalysis = chatAgent.answer(toInput(repeatedPrompt)) log.info("Repeated game analysis completed in ${System.currentTimeMillis() - stepStartTime}ms. Length: ${repeatedGameAnalysis.length} characters") + transcript?.write( + """ + |## Repeated Game Analysis + | + |$repeatedGameAnalysis + | + |""".trimMargin().toByteArray() + ) + repeatedLoading?.clear() repeatedTask.add( @@ -510,6 +643,15 @@ Generate the strategic recommendations now: recommendations = chatAgent.answer(toInput(recommendPrompt)) log.info("Recommendations generated in ${System.currentTimeMillis() - stepStartTime}ms. Length: ${recommendations.length} characters") + transcript?.write( + """ + |## Strategic Recommendations + | + |$recommendations + | + |""".trimMargin().toByteArray() + ) + recommendLoading?.clear() recommendTask.add( @@ -561,6 +703,30 @@ Provide this in a clear, structured format. 
val gameAnalysis = parsedAgent.answer(toInput(summaryPrompt)).obj log.info("Structured summary generated in ${System.currentTimeMillis() - stepStartTime}ms") + transcript?.write( + """ + |## Game Theory Analysis Summary + | + |### Game Type + |${gameAnalysis.game_type ?: "Not specified"} + | + |### Players + |${gameAnalysis.players?.joinToString(", ") ?: "Not specified"} + | + |### Nash Equilibria + |${gameAnalysis.nash_equilibria?.joinToString("\n") { "- $it" } ?: "None identified"} + | + |### Dominant Strategies + |${gameAnalysis.dominant_strategies?.entries?.joinToString("\n") { "- **${it.key}**: ${it.value}" } ?: "None identified"} + | + |### Pareto Optimal Outcomes + |${gameAnalysis.pareto_optimal_outcomes?.joinToString("\n") { "- $it" } ?: "None identified"} + | + |### Strategic Recommendations + |${gameAnalysis.recommendations?.entries?.joinToString("\n") { "- **${it.key}**: ${it.value}" } ?: "None provided"} + | + |""".trimMargin().toByteArray()) + summaryLoading?.clear() summaryTask.add( @@ -655,6 +821,10 @@ Provide this in a clear, structured format. val duration = System.currentTimeMillis() - startTime val summary = "Game theory analysis completed for scenario: $gameScenario" log.info("$summary (duration: ${duration}ms, players: ${players.size}, game_type: ${executionConfig.game_type})") + transcript?.write("\n---\n".toByteArray()) + transcript?.write("**Analysis completed in ${duration / 1000}s**\n".toByteArray()) + transcript?.write("**Finished:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) + transcript?.close() task.safeComplete(summary, log) resultFn(finalResult) @@ -674,12 +844,45 @@ Provide this in a clear, structured format. 
) ) task.update() + transcript?.write("\n---\n**ERROR:** ${e.message}\n".toByteArray()) + transcript?.close() task.error(e) task.safeComplete("Analysis failed: ${e.message}", log) resultFn("ERROR: Game theory analysis failed - ${e.message}") } } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + private fun buildStructurePrompt( gameScenario: String, players: List, @@ -743,27 +946,14 @@ Generate the game structure analysis now: """.trimIndent() } - companion object { - private val log: Logger = LoggerFactory.getLogger(GameTheoryTask::class.java) - val GameTheory = TaskType( - "GameTheory", - GameTheoryTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Analyze strategic interactions using game theory", - """ - Performs comprehensive game theory analysis of strategic situations. -
      -
    • Analyzes game structure and player strategies
    • -
    • Constructs payoff matrices for strategy combinations
    • -
    • Identifies Nash equilibria (pure and mixed strategies)
    • -
    • Analyzes dominant and dominated strategies
    • -
    • Finds Pareto optimal outcomes
    • -
    • Supports repeated game analysis with trigger strategies
    • -
    • Provides strategic recommendations for each player
    • -
    • Handles cooperative, non-cooperative, zero-sum, and sequential games
    • -
    • Useful for competitive analysis, negotiation, and strategic planning
    • -
    - """ + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" ) + return markdownTranscript } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GeneticOptimizationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GeneticOptimizationTask.kt index 6dd2113a9..79aa40717 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GeneticOptimizationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/GeneticOptimizationTask.kt @@ -1,7 +1,7 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.describe.Description @@ -11,111 +11,139 @@ import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets import java.time.LocalDateTime import java.time.format.DateTimeFormatter import kotlin.math.max import kotlin.math.min class GeneticOptimizationTask( - orchestrationConfig: OrchestrationConfig, - planTask: GeneticOptimizationTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: GeneticOptimizationTaskExecutionConfigData? 
) : AbstractTask( - orchestrationConfig, - planTask + orchestrationConfig, + planTask ) { - - class GeneticOptimizationTaskExecutionConfigData( - @Description("The initial text to optimize (seed for genetic algorithm)") - val initial_text: String? = null, - @Description("The optimization goal or criteria (e.g., 'clarity and conciseness', 'persuasiveness', 'technical accuracy')") - val optimization_goal: String? = null, - @Description("Number of generations to evolve (default: 5)") - val num_generations: Int = 5, - @Description("Population size per generation (default: 6)") - val population_size: Int = 6, - @Description("Number of top candidates to keep each generation (default: 2)") - val selection_size: Int = 2, - @Description("Mutation strategies to use (e.g., 'rephrase', 'simplify', 'elaborate', 'restructure')") - val mutation_strategies: List? = listOf("rephrase", "simplify", "elaborate"), - @Description("Whether to enable crossover (combining traits from multiple candidates)") - val enable_crossover: Boolean = true, - @Description("Evaluation criteria weights (e.g., {'clarity': 0.4, 'conciseness': 0.3, 'impact': 0.3})") - val evaluation_weights: Map? = null, - @Description("Additional context or constraints for optimization") - val constraints: List? = null, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = TaskState.Pending, - ) : TaskExecutionConfig( - task_type = GeneticOptimization.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ), ValidatedObject { - override fun validate(): String? 
{ - if (initial_text.isNullOrBlank()) { - return "initial_text must not be blank" - } - if (optimization_goal.isNullOrBlank()) { - return "optimization_goal must not be blank" - } - if (num_generations < 1) { - return "num_generations must be at least 1" - } - if (population_size < 2) { - return "population_size must be at least 2" - } - if (selection_size < 1 || selection_size >= population_size) { - return "selection_size must be between 1 and population_size-1" - } - return ValidatedObject.validateFields(this) - } - } - - data class TextVariant( - @Description("The text variant") - val text: String = "", - @Description("Brief explanation of what changed from parent") - val mutation_description: String = "", - @Description("The mutation strategy used") - val strategy: String = "" + companion object { + private val log: Logger = LoggerFactory.getLogger(GeneticOptimizationTask::class.java) + val GeneticOptimization = TaskType( + "GeneticOptimization", + GeneticOptimizationTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Iteratively evolve and perfect text through genetic algorithms", + """ + Uses genetic algorithms to optimize text through iterative evolution. +
      +
    • Generates variations using configurable mutation strategies
    • +
    • Evaluates variants against optimization criteria
    • +
    • Selects top performers for next generation
    • +
    • Applies crossover to combine successful traits
    • +
    • Tracks fitness progression across generations
    • +
    • Provides detailed analysis of evolution
    • +
    • Supports custom evaluation criteria and weights
    • +
    • Useful for perfecting prompts, copy, documentation, and messaging
    • +
    + """ ) - - data class EvaluationScore( - @Description("Overall fitness score (0-100)") - val overall_score: Double = 0.0, - @Description("Breakdown of scores by criteria") - val criteria_scores: Map = emptyMap(), - @Description("Strengths of this variant") - val strengths: List = emptyList(), - @Description("Weaknesses or areas for improvement") - val weaknesses: List = emptyList(), - @Description("Brief justification for the score") - val justification: String = "" - ): ValidatedObject { - override fun validate(): String? { - if (overall_score < 0.0 || overall_score > 100.0) { - return "overall_score must be between 0 and 100" - } - criteria_scores.forEach { (criterion, score) -> - if (score < 0.0 || score > 100.0) { - return "criteria_scores[$criterion] must be between 0 and 100" - } - } - return ValidatedObject.validateFields(this) + } + + class GeneticOptimizationTaskExecutionConfigData( + @Description("The initial text to optimize (seed for genetic algorithm)") + val initial_text: String? = null, + @Description("Optional input files (or file patterns, e.g. **/*.kt) to be used as context for optimization") + val input_files: List? = null, + @Description("The optimization goal or criteria (e.g., 'clarity and conciseness', 'persuasiveness', 'technical accuracy')") + val optimization_goal: String? = null, + @Description("Number of generations to evolve (default: 5)") + val num_generations: Int = 5, + @Description("Population size per generation (default: 6)") + val population_size: Int = 6, + @Description("Number of top candidates to keep each generation (default: 2)") + val selection_size: Int = 2, + @Description("Mutation strategies to use (e.g., 'rephrase', 'simplify', 'elaborate', 'restructure')") + val mutation_strategies: List? 
= listOf("rephrase", "simplify", "elaborate"), + @Description("Whether to enable crossover (combining traits from multiple candidates)") + val enable_crossover: Boolean = true, + @Description("Evaluation criteria weights (e.g., {'clarity': 0.4, 'conciseness': 0.3, 'impact': 0.3})") + val evaluation_weights: Map? = null, + @Description("Additional context or constraints for optimization") + val constraints: List? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = GeneticOptimization.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (initial_text.isNullOrBlank()) { + return "initial_text must not be blank" + } + // input_files is optional, so no validation needed + + if (optimization_goal.isNullOrBlank()) { + return "optimization_goal must not be blank" + } + if (num_generations < 1) { + return "num_generations must be at least 1" + } + if (population_size < 2) { + return "population_size must be at least 2" + } + if (selection_size < 1 || selection_size >= population_size) { + return "selection_size must be between 1 and population_size-1" + } + return ValidatedObject.validateFields(this) + } + } + + data class TextVariant( + @Description("The text variant") + val text: String = "", + @Description("Brief explanation of what changed from parent") + val mutation_description: String = "", + @Description("The mutation strategy used") + val strategy: String = "" + ) + + data class EvaluationScore( + @Description("Overall fitness score (0-100)") + val overall_score: Double = 0.0, + @Description("Breakdown of scores by criteria") + val criteria_scores: Map = emptyMap(), + @Description("Strengths of this variant") + val strengths: List = emptyList(), + @Description("Weaknesses or areas for improvement") + val weaknesses: List = 
emptyList(), + @Description("Brief justification for the score") + val justification: String = "" + ) : ValidatedObject { + override fun validate(): String? { + if (overall_score < 0.0 || overall_score > 100.0) { + return "overall_score must be between 0 and 100" + } + criteria_scores.forEach { (criterion, score) -> + if (score < 0.0 || score > 100.0) { + return "criteria_scores[$criterion] must be between 0 and 100" } + } + return ValidatedObject.validateFields(this) } - - data class EvaluatedVariant( - val text: String = "", - val score: EvaluationScore = EvaluationScore(), - val generation: Int = 0, - val parentIndex: Int? = null, - val strategy: String = "" - ) - - override fun promptSegment(): String { - return """ + } + + data class EvaluatedVariant( + val text: String = "", + val score: EvaluationScore = EvaluationScore(), + val generation: Int = 0, + val parentIndex: Int? = null, + val strategy: String = "" + ) + + override fun promptSegment(): String { + return """ GeneticOptimization - Iteratively evolve and perfect text through genetic algorithms ** Specify the initial text to optimize ** Define the optimization goal (e.g., clarity, persuasiveness, technical accuracy) @@ -137,536 +165,622 @@ GeneticOptimization - Iteratively evolve and perfect text through genetic algori - Optimizing technical documentation - Improving clarity and impact of messaging """.trimIndent() - } - - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - try { - val startTime = System.currentTimeMillis() - log.info("Starting GeneticOptimizationTask with initial_text length=${executionConfig?.initial_text?.length}, goal='${executionConfig?.optimization_goal}'") - // Validate configuration - executionConfig?.validate()?.let { errorMessage -> - log.error("Configuration validation failed: $errorMessage") - task.complete("VALIDATION ERROR: $errorMessage") - 
task.error(ValidatedObject.ValidationError(errorMessage, executionConfig)) - resultFn("VALIDATION ERROR: $errorMessage") - return - } - - - val initialText = executionConfig?.initial_text - val optimizationGoal = executionConfig?.optimization_goal - val numGenerations = executionConfig?.num_generations ?: 5 - val populationSize = executionConfig?.population_size ?: 6 - val selectionSize = min(executionConfig?.selection_size ?: 2, populationSize / 2) - val mutationStrategies = executionConfig?.mutation_strategies ?: listOf("rephrase", "simplify", "elaborate") - val enableCrossover = executionConfig?.enable_crossover ?: true - val evaluationWeights = executionConfig?.evaluation_weights ?: mapOf( - "clarity" to 0.35, - "conciseness" to 0.25, - "impact" to 0.25, - "goal_alignment" to 0.15 - ) - val constraints = executionConfig?.constraints ?: emptyList() - - if (initialText.isNullOrBlank() || optimizationGoal.isNullOrBlank()) { - log.error("Configuration error: initial_text or optimization_goal is blank") - task.complete("CONFIGURATION ERROR: Both initial_text and optimization_goal must be specified") - task.error(RuntimeException("Configuration error: initial_text or optimization_goal is blank")) - resultFn("CONFIGURATION ERROR: Both initial_text and optimization_goal must be specified") - return - } - - log.info("Configuration validated: generations=$numGenerations, population=$populationSize, selection=$selectionSize, crossover=$enableCrossover") - - val tabs = TabbedDisplay(task) - val api = orchestrationConfig.defaultChatter - - // Create overview tab - val overviewTask = task.ui.newTask(false) - tabs["Overview"] = overviewTask.placeholder - val overviewContent = buildString { - appendLine("# Genetic Optimization Task") - appendLine() - appendLine( - "**Started:** ${ - LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) - }" + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) 
-> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val transcript = transcript(task) + try { + val startTime = System.currentTimeMillis() + messages.joinToString("\n\n") + log.info("Starting GeneticOptimizationTask with initial_text length=${executionConfig?.initial_text?.length}, goal='${executionConfig?.optimization_goal}'") + // Validate configuration + executionConfig?.validate()?.let { errorMessage -> + log.error("Configuration validation failed: $errorMessage") + task.complete("VALIDATION ERROR: $errorMessage") + task.error(ValidatedObject.ValidationError(errorMessage, executionConfig)) + transcript?.close() + resultFn("VALIDATION ERROR: $errorMessage") + return + } + + + val initialText = executionConfig?.initial_text + val optimizationGoal = executionConfig?.optimization_goal + val numGenerations = executionConfig?.num_generations ?: 5 + val populationSize = executionConfig?.population_size ?: 6 + val selectionSize = min(executionConfig?.selection_size ?: 2, populationSize / 2) + val mutationStrategies = executionConfig?.mutation_strategies ?: listOf("rephrase", "simplify", "elaborate") + val enableCrossover = executionConfig?.enable_crossover ?: true + val evaluationWeights = executionConfig?.evaluation_weights ?: mapOf( + "clarity" to 0.35, + "conciseness" to 0.25, + "impact" to 0.25, + "goal_alignment" to 0.15 + ) + val constraints = executionConfig?.constraints ?: emptyList() + val inputFileContent = getInputFileContent() + + if (initialText.isNullOrBlank() || optimizationGoal.isNullOrBlank()) { + log.error("Configuration error: initial_text or optimization_goal is blank") + task.complete("CONFIGURATION ERROR: Both initial_text and optimization_goal must be specified") + transcript?.close() + task.error(RuntimeException("Configuration error: initial_text or optimization_goal is blank")) + resultFn("CONFIGURATION ERROR: Both initial_text and optimization_goal must be specified") + return + } + + log.info("Configuration validated: 
generations=$numGenerations, population=$populationSize, selection=$selectionSize, crossover=$enableCrossover") + + val tabs = TabbedDisplay(task) + val api = orchestrationConfig.defaultChatter + transcript?.write("# Genetic Optimization Task Transcript\n\n".toByteArray()) + + // Create overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + val overviewContent = buildString { + appendLine("# Genetic Optimization Task") + appendLine() + appendLine( + "**Started:** ${ + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) + }" + ) + if (inputFileContent.isNotBlank()) { + appendLine() + appendLine("## Input Context") + appendLine() + appendLine(inputFileContent) + } + appendLine() + appendLine("## Configuration") + appendLine() + appendLine("| Parameter | Value |") + appendLine("|-----------|-------|") + appendLine("| Optimization Goal | $optimizationGoal |") + appendLine("| Generations | $numGenerations |") + appendLine("| Population Size | $populationSize |") + appendLine("| Selection Size | $selectionSize |") + appendLine("| Mutation Strategies | ${mutationStrategies.joinToString(", ")} |") + appendLine("| Crossover | ${if (enableCrossover) "✓ Enabled" else "✗ Disabled"} |") + appendLine() + appendLine("## Evaluation Criteria") + appendLine() + evaluationWeights.forEach { (criterion, weight) -> + appendLine("- **$criterion**: ${String.format("%.0f%%", weight * 100)}") + } + if (constraints.isNotEmpty()) { + appendLine() + appendLine("## Constraints") + appendLine() + constraints.forEach { appendLine("- $it") } + } + appendLine() + appendLine("## Initial Text") + appendLine() + appendLine("```") + appendLine(initialText) + appendLine("```") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("- ⏳ Initializing population...") + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + 
transcript?.write(overviewContent.toByteArray(StandardCharsets.UTF_8)) + + // Gather context + log.debug("Gathering prior context") + val priorContext = getPriorCode(agent.executionState) + log.debug("Context gathered: length=${priorContext.length}") + + // Initialize population with the seed text + var currentPopulation = listOf( + EvaluatedVariant( + text = initialText, + score = EvaluationScore(overall_score = 0.0), + generation = 0, + strategy = "seed" + ) + ) + + // Evaluate initial text + log.info("Evaluating initial text") + val initialEvaluation = + evaluateVariant(initialText, optimizationGoal, evaluationWeights, constraints, api, inputFileContent) + currentPopulation = listOf( + currentPopulation[0].copy(score = initialEvaluation) + ) + + log.info("Initial evaluation: score=${initialEvaluation.overall_score}") + transcript?.write("\n\n## Initial Evaluation\n\n".toByteArray(StandardCharsets.UTF_8)) + transcript?.write("**Score:** ${String.format("%.1f", initialEvaluation.overall_score)}/100\n\n".toByteArray(StandardCharsets.UTF_8)) + transcript?.write("**Strengths:**\n".toByteArray(StandardCharsets.UTF_8)) + initialEvaluation.strengths.forEach { transcript?.write("- $it\n".toByteArray(StandardCharsets.UTF_8)) } + transcript?.write("\n**Weaknesses:**\n".toByteArray(StandardCharsets.UTF_8)) + initialEvaluation.weaknesses.forEach { transcript?.write("- $it\n".toByteArray(StandardCharsets.UTF_8)) } + transcript?.write("\n".toByteArray(StandardCharsets.UTF_8)) + + + // Update overview with initial score + overviewTask.add(buildString { + appendLine() + appendLine("- ✓ Initial evaluation: **${String.format("%.1f", initialEvaluation.overall_score)}/100**") + appendLine("- ⏳ Starting evolution...") + }.renderMarkdown) + task.update() + + // Track best variant across all generations + var bestVariant = currentPopulation[0] + val evolutionHistory = mutableListOf>() + evolutionHistory.add(currentPopulation) + + // Evolution loop + for (generation in 
1..numGenerations) { + log.info("Starting generation $generation/$numGenerations") + + val generationTask = task.ui.newTask(false) + tabs["Generation $generation"] = generationTask.placeholder + transcript?.write("\n\n---\n\n".toByteArray(StandardCharsets.UTF_8)) + transcript?.write("# Generation $generation\n\n".toByteArray(StandardCharsets.UTF_8)) + generationTask.add(buildString { + appendLine("# Generation $generation") + appendLine() + appendLine("**Status:** In Progress") + appendLine() + appendLine("Generating $populationSize variants...") + }.renderMarkdown) + task.update() + + // Step 1: Generate new variants + val newVariants = mutableListOf() + + // Keep top performers from previous generation + val survivors = currentPopulation.sortedByDescending { it.score.overall_score }.take(selectionSize) + log.debug("Selected $selectionSize survivors for generation $generation") + + // Generate mutations from survivors + val mutationsNeeded = populationSize - survivors.size + val mutationsPerSurvivor = max(1, mutationsNeeded / survivors.size) + + survivors.forEachIndexed { survivorIndex, survivor -> + val mutationsToGenerate = if (survivorIndex == survivors.size - 1) { + // Last survivor gets any remaining slots + mutationsNeeded - (mutationsPerSurvivor * (survivors.size - 1)) + } else { + mutationsPerSurvivor + } + + repeat(mutationsToGenerate) { + val strategy = mutationStrategies.random() + log.debug("Generating mutation using strategy: $strategy") + val mutated = + generateMutation(survivor.text, strategy, optimizationGoal, constraints, priorContext + "\n\n" + inputFileContent, api) + if (mutated != null) { + newVariants.add( + EvaluatedVariant( + text = mutated.text, + score = EvaluationScore(overall_score = 0.0), + generation = generation, + parentIndex = survivorIndex, + strategy = strategy ) - appendLine() - appendLine("## Configuration") - appendLine() - appendLine("| Parameter | Value |") - appendLine("|-----------|-------|") - appendLine("| Optimization 
Goal | $optimizationGoal |") - appendLine("| Generations | $numGenerations |") - appendLine("| Population Size | $populationSize |") - appendLine("| Selection Size | $selectionSize |") - appendLine("| Mutation Strategies | ${mutationStrategies.joinToString(", ")} |") - appendLine("| Crossover | ${if (enableCrossover) "✓ Enabled" else "✗ Disabled"} |") - appendLine() - appendLine("## Evaluation Criteria") - appendLine() - evaluationWeights.forEach { (criterion, weight) -> - appendLine("- **$criterion**: ${String.format("%.0f%%", weight * 100)}") - } - if (constraints.isNotEmpty()) { - appendLine() - appendLine("## Constraints") - appendLine() - constraints.forEach { appendLine("- $it") } - } - appendLine() - appendLine("## Initial Text") - appendLine() - appendLine("```") - appendLine(initialText) - appendLine("```") - appendLine() - appendLine("---") - appendLine() - appendLine("## Progress") - appendLine() - appendLine("- ⏳ Initializing population...") + ) } - overviewTask.add(overviewContent.renderMarkdown) - task.update() - - // Gather context - log.debug("Gathering prior context") - val priorContext = getPriorCode(agent.executionState) - log.debug("Context gathered: length=${priorContext.length}") + } + } - // Initialize population with the seed text - var currentPopulation = listOf( - EvaluatedVariant( - text = initialText, - score = EvaluationScore(overall_score = 0.0), - generation = 0, - strategy = "seed" - ) + // Apply crossover if enabled + if (enableCrossover && survivors.size >= 2 && newVariants.size < populationSize) { + log.debug("Applying crossover") + val crossoverVariant = applyCrossover( + survivors[0].text, + survivors[1].text, + optimizationGoal, + constraints, + api + ) + if (crossoverVariant != null) { + newVariants.add( + EvaluatedVariant( + text = crossoverVariant, + score = EvaluationScore(overall_score = 0.0), + generation = generation, + strategy = "crossover" + ) ) + } + } - // Evaluate initial text - log.info("Evaluating initial text") 
- val initialEvaluation = - evaluateVariant(initialText, optimizationGoal, evaluationWeights, constraints, api) - currentPopulation = listOf( - currentPopulation[0].copy(score = initialEvaluation) + // Combine survivors and new variants + currentPopulation = survivors + newVariants + + // Step 2: Evaluate all variants + log.info("Evaluating ${currentPopulation.size} variants in generation $generation") + currentPopulation = currentPopulation.map { variant -> + if (variant.score.overall_score == 0.0) { + val evaluation = evaluateVariant( + variant.text, + optimizationGoal, + evaluationWeights, + constraints, + api, + inputFileContent ) + variant.copy(score = evaluation) + } else { + variant + } + } - log.info("Initial evaluation: score=${initialEvaluation.overall_score}") + evolutionHistory.add(currentPopulation) - // Update overview with initial score - overviewTask.add(buildString { - appendLine() - appendLine("- ✓ Initial evaluation: **${String.format("%.1f", initialEvaluation.overall_score)}/100**") - appendLine("- ⏳ Starting evolution...") - }.renderMarkdown) - task.update() - - // Track best variant across all generations - var bestVariant = currentPopulation[0] - val evolutionHistory = mutableListOf>() - evolutionHistory.add(currentPopulation) - - // Evolution loop - for (generation in 1..numGenerations) { - log.info("Starting generation $generation/$numGenerations") - - val generationTask = task.ui.newTask(false) - tabs["Generation $generation"] = generationTask.placeholder - generationTask.add(buildString { - appendLine("# Generation $generation") - appendLine() - appendLine("**Status:** In Progress") - appendLine() - appendLine("Generating $populationSize variants...") - }.renderMarkdown) - task.update() - - // Step 1: Generate new variants - val newVariants = mutableListOf() - - // Keep top performers from previous generation - val survivors = currentPopulation.sortedByDescending { it.score.overall_score }.take(selectionSize) - log.debug("Selected 
$selectionSize survivors for generation $generation") - - // Generate mutations from survivors - val mutationsNeeded = populationSize - survivors.size - val mutationsPerSurvivor = max(1, mutationsNeeded / survivors.size) - - survivors.forEachIndexed { survivorIndex, survivor -> - val mutationsToGenerate = if (survivorIndex == survivors.size - 1) { - // Last survivor gets any remaining slots - mutationsNeeded - (mutationsPerSurvivor * (survivors.size - 1)) - } else { - mutationsPerSurvivor - } - - repeat(mutationsToGenerate) { - val strategy = mutationStrategies.random() - log.debug("Generating mutation using strategy: $strategy") - val mutated = - generateMutation(survivor.text, strategy, optimizationGoal, constraints, priorContext, api) - if (mutated != null) { - newVariants.add( - EvaluatedVariant( - text = mutated.text, - score = EvaluationScore(overall_score = 0.0), - generation = generation, - parentIndex = survivorIndex, - strategy = strategy - ) - ) - } - } - } - - // Apply crossover if enabled - if (enableCrossover && survivors.size >= 2 && newVariants.size < populationSize) { - log.debug("Applying crossover") - val crossoverVariant = applyCrossover( - survivors[0].text, - survivors[1].text, - optimizationGoal, - constraints, - api - ) - if (crossoverVariant != null) { - newVariants.add( - EvaluatedVariant( - text = crossoverVariant, - score = EvaluationScore(overall_score = 0.0), - generation = generation, - strategy = "crossover" - ) - ) - } - } - - // Combine survivors and new variants - currentPopulation = survivors + newVariants - - // Step 2: Evaluate all variants - log.info("Evaluating ${currentPopulation.size} variants in generation $generation") - currentPopulation = currentPopulation.map { variant -> - if (variant.score.overall_score == 0.0) { - val evaluation = evaluateVariant( - variant.text, - optimizationGoal, - evaluationWeights, - constraints, - api - ) - variant.copy(score = evaluation) - } else { - variant - } - } - - 
evolutionHistory.add(currentPopulation) - - // Update best variant - val generationBest = currentPopulation.maxByOrNull { it.score.overall_score }!! - if (generationBest.score.overall_score > bestVariant.score.overall_score) { - log.info("New best variant found in generation $generation: score=${generationBest.score.overall_score}") - bestVariant = generationBest - } - - // Display generation results - generationTask.add(buildString { - appendLine() - appendLine("---") - appendLine() - appendLine("## Generation $generation Results") - appendLine() - appendLine("**Status:** ✓ Complete") - appendLine() - appendLine("### Population Statistics") - appendLine() - val scores = currentPopulation.map { it.score.overall_score } - appendLine("- **Best Score:** ${String.format("%.1f", scores.maxOrNull() ?: 0.0)}/100") - appendLine("- **Average Score:** ${String.format("%.1f", scores.average())}/100") - appendLine("- **Worst Score:** ${String.format("%.1f", scores.minOrNull() ?: 0.0)}/100") - appendLine( - "- **Improvement:** ${ - String.format( - "%.1f", - (generationBest.score.overall_score - survivors[0].score.overall_score) - ) - }" - ) - appendLine() - appendLine("### Top Variants") - appendLine() - currentPopulation.sortedByDescending { it.score.overall_score }.take(3) - .forEachIndexed { index, variant -> - appendLine( - "#### ${index + 1}. 
Score: ${ - String.format( - "%.1f", - variant.score.overall_score - ) - }/100 (${variant.strategy})" - ) - appendLine() - appendLine("```") - appendLine(variant.text) - appendLine("```") - appendLine() - appendLine("**Strengths:**") - variant.score.strengths.forEach { appendLine("- $it") } - appendLine() - if (variant.score.weaknesses.isNotEmpty()) { - appendLine("**Weaknesses:**") - variant.score.weaknesses.forEach { appendLine("- $it") } - appendLine() - } - appendLine("**Criteria Breakdown:**") - variant.score.criteria_scores.forEach { (criterion, score) -> - appendLine("- $criterion: ${String.format("%.1f", score)}/100") - } - appendLine() - appendLine("---") - appendLine() - } - }.renderMarkdown) - task.update() - - // Update overview - overviewTask.add(buildString { - appendLine() - appendLine( - "- ✓ Generation $generation: Best=${ - String.format( - "%.1f", - generationBest.score.overall_score - ) - }, Avg=${String.format("%.1f", currentPopulation.map { it.score.overall_score }.average())}" - ) - }.renderMarkdown) - task.update() - } + // Update best variant + val generationBest = currentPopulation.maxByOrNull { it.score.overall_score }!! 
+ if (generationBest.score.overall_score > bestVariant.score.overall_score) { + log.info("New best variant found in generation $generation: score=${generationBest.score.overall_score}") + bestVariant = generationBest + } - // Create evolution visualization tab - log.info("Creating evolution visualization") - val evolutionTask = task.ui.newTask(false) - tabs["Evolution Analysis"] = evolutionTask.placeholder - evolutionTask.add(buildString { - appendLine("# Evolution Analysis") - appendLine() - appendLine("## Fitness Progression") - appendLine() - appendLine("| Generation | Best Score | Average Score | Improvement |") - appendLine("|------------|------------|---------------|-------------|") - evolutionHistory.forEachIndexed { index, population -> - val scores = population.map { it.score.overall_score } - val improvement = if (index > 0) { - scores.maxOrNull()!! - evolutionHistory[index - 1].maxOf { it.score.overall_score } - } else { - 0.0 - } - appendLine( - "| $index | ${String.format("%.1f", scores.maxOrNull() ?: 0.0)} | ${ - String.format( - "%.1f", - scores.average() - ) - } | ${String.format("%+.1f", improvement)} |" - ) - } - appendLine() - appendLine("## Strategy Effectiveness") - appendLine() - val strategyStats = mutableMapOf>() - evolutionHistory.flatten().forEach { variant -> - if (variant.strategy.isNotEmpty()) { - strategyStats.getOrPut(variant.strategy) { mutableListOf() }.add(variant.score.overall_score) - } - } - appendLine("| Strategy | Avg Score | Count | Success Rate |") - appendLine("|----------|-----------|-------|--------------|") - strategyStats.forEach { (strategy, scores) -> - val avgScore = scores.average() - val successRate = - scores.count { it > initialEvaluation.overall_score }.toDouble() / scores.size * 100 - appendLine( - "| $strategy | ${ - String.format( - "%.1f", - avgScore - ) - } | ${scores.size} | ${String.format("%.0f%%", successRate)} |" - ) - } - appendLine() - appendLine("## Best Variant Evolution") - appendLine() - 
appendLine("### Initial Text (Score: ${String.format("%.1f", initialEvaluation.overall_score)})") - appendLine("```") - appendLine(initialText) - appendLine("```") - appendLine() - appendLine( - "### Final Optimized Text (Score: ${ - String.format( - "%.1f", - bestVariant.score.overall_score - ) - })" - ) - appendLine("```") - appendLine(bestVariant.text) - appendLine("```") - appendLine() - appendLine("### Improvement Summary") - appendLine() - appendLine( - "- **Score Improvement:** ${ - String.format( - "%+.1f", - bestVariant.score.overall_score - initialEvaluation.overall_score - ) - } points" - ) - appendLine("- **Generation Found:** ${bestVariant.generation}") - appendLine("- **Strategy Used:** ${bestVariant.strategy}") - appendLine() - appendLine("### Detailed Analysis") - appendLine() - appendLine("**Strengths:**") - bestVariant.score.strengths.forEach { appendLine("- $it") } - appendLine() - if (bestVariant.score.weaknesses.isNotEmpty()) { - appendLine("**Remaining Areas for Improvement:**") - bestVariant.score.weaknesses.forEach { appendLine("- $it") } - appendLine() - } - appendLine("**Criteria Scores:**") - bestVariant.score.criteria_scores.forEach { (criterion, score) -> - val initialScore = initialEvaluation.criteria_scores[criterion] ?: 0.0 - val improvement = score - initialScore - appendLine( - "- $criterion: ${String.format("%.1f", score)}/100 (${ - String.format( - "%+.1f", - improvement - ) - })" - ) - } - appendLine() - appendLine("**Justification:**") - appendLine(bestVariant.score.justification) - }.renderMarkdown) - task.update() - - // Build final result - val totalTime = System.currentTimeMillis() - startTime - val finalResult = buildString { - appendLine("# Genetic Optimization Results") - appendLine() - appendLine("**Optimization Goal:** $optimizationGoal") - appendLine() - appendLine("## Final Optimized Text") - appendLine() - appendLine("```") - appendLine(bestVariant.text) - appendLine("```") - appendLine() - appendLine("## 
Performance Metrics") - appendLine() - appendLine("- **Initial Score:** ${String.format("%.1f", initialEvaluation.overall_score)}/100") - appendLine("- **Final Score:** ${String.format("%.1f", bestVariant.score.overall_score)}/100") - appendLine( - "- **Improvement:** ${ - String.format( - "%+.1f", - bestVariant.score.overall_score - initialEvaluation.overall_score - ) - } points" - ) - appendLine("- **Generations:** $numGenerations") - appendLine("- **Total Variants Evaluated:** ${evolutionHistory.flatten().size}") - appendLine("- **Best Found in Generation:** ${bestVariant.generation}") - appendLine() - appendLine("## Key Improvements") - appendLine() - bestVariant.score.strengths.forEach { appendLine("- $it") } - appendLine() - appendLine("*See the Evolution Analysis tab for detailed progression and strategy effectiveness*") - } - // Final overview update - overviewTask.add(buildString { - appendLine() - appendLine("---") - appendLine() - appendLine("## ✅ Optimization Complete") - appendLine() - appendLine("| Metric | Value |") - appendLine("|--------|-------|") - appendLine("| Initial Score | ${String.format("%.1f", initialEvaluation.overall_score)}/100 |") - appendLine("| Final Score | ${String.format("%.1f", bestVariant.score.overall_score)}/100 |") - appendLine( - "| Improvement | ${ - String.format( - "%+.1f", - bestVariant.score.overall_score - initialEvaluation.overall_score - ) - } |" - ) - appendLine("| Generations | $numGenerations |") - appendLine("| Total Variants | ${evolutionHistory.flatten().size} |") - appendLine("| Total Time | ${totalTime / 1000}s |") - appendLine() - appendLine("**Status:** ✓ Complete") - }.renderMarkdown) - task.update() - - log.info("GeneticOptimizationTask completed successfully: total_time=${totalTime}ms, improvement=${bestVariant.score.overall_score - initialEvaluation.overall_score}, generations=$numGenerations") - task.complete( - "Optimization complete: improved by ${ - String.format( - "%.1f", - 
bestVariant.score.overall_score - initialEvaluation.overall_score - ) - } points in ${totalTime / 1000}s" + // Display generation results + val generationResults = buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## Generation $generation Results") + appendLine() + appendLine("**Status:** ✓ Complete") + appendLine() + appendLine("### Population Statistics") + appendLine() + val scores = currentPopulation.map { it.score.overall_score } + appendLine("- **Best Score:** ${String.format("%.1f", scores.maxOrNull() ?: 0.0)}/100") + appendLine("- **Average Score:** ${String.format("%.1f", scores.average())}/100") + appendLine("- **Worst Score:** ${String.format("%.1f", scores.minOrNull() ?: 0.0)}/100") + appendLine( + "- **Improvement:** ${ + String.format( + "%.1f", + (generationBest.score.overall_score - survivors[0].score.overall_score) + ) + }" + ) + appendLine() + appendLine("### Top Variants") + appendLine() + currentPopulation.sortedByDescending { it.score.overall_score }.take(3) + .forEachIndexed { index, variant -> + appendLine( + "#### ${index + 1}. 
Score: ${ + String.format( + "%.1f", + variant.score.overall_score + ) + }/100 (${variant.strategy})" + ) + appendLine() + appendLine("```") + appendLine(variant.text) + appendLine("```") + appendLine() + appendLine("**Strengths:**") + variant.score.strengths.forEach { appendLine("- $it") } + appendLine() + if (variant.score.weaknesses.isNotEmpty()) { + appendLine("**Weaknesses:**") + variant.score.weaknesses.forEach { appendLine("- $it") } + appendLine() + } + appendLine("**Criteria Breakdown:**") + variant.score.criteria_scores.forEach { (criterion, score) -> + appendLine("- $criterion: ${String.format("%.1f", score)}/100") + } + appendLine() + appendLine("---") + appendLine() + } + } + generationTask.add(generationResults.renderMarkdown) + task.update() + transcript?.write(generationResults.toByteArray(StandardCharsets.UTF_8)) + + // Update overview + overviewTask.add(buildString { + appendLine() + appendLine( + "- ✓ Generation $generation: Best=${ + String.format( + "%.1f", + generationBest.score.overall_score + ) + }, Avg=${String.format("%.1f", currentPopulation.map { it.score.overall_score }.average())}" + ) + }.renderMarkdown) + task.update() + } + + // Create evolution visualization tab + log.info("Creating evolution visualization") + val evolutionTask = task.ui.newTask(false) + tabs["Evolution Analysis"] = evolutionTask.placeholder + val evolutionAnalysis = buildString { + appendLine("# Evolution Analysis") + appendLine() + appendLine("## Fitness Progression") + appendLine() + appendLine("| Generation | Best Score | Average Score | Improvement |") + appendLine("|------------|------------|---------------|-------------|") + evolutionHistory.forEachIndexed { index, population -> + val scores = population.map { it.score.overall_score } + val improvement = if (index > 0) { + scores.maxOrNull()!! 
- evolutionHistory[index - 1].maxOf { it.score.overall_score } + } else { + 0.0 + } + appendLine( + "| $index | ${String.format("%.1f", scores.maxOrNull() ?: 0.0)} | ${ + String.format( + "%.1f", + scores.average() + ) + } | ${String.format("%+.1f", improvement)} |" + ) + } + appendLine() + appendLine("## Strategy Effectiveness") + appendLine() + val strategyStats = mutableMapOf>() + evolutionHistory.flatten().forEach { variant -> + if (variant.strategy.isNotEmpty()) { + strategyStats.getOrPut(variant.strategy) { mutableListOf() }.add(variant.score.overall_score) + } + } + appendLine("| Strategy | Avg Score | Count | Success Rate |") + appendLine("|----------|-----------|-------|--------------|") + strategyStats.forEach { (strategy, scores) -> + val avgScore = scores.average() + val successRate = + scores.count { it > initialEvaluation.overall_score }.toDouble() / scores.size * 100 + appendLine( + "| $strategy | ${ + String.format( + "%.1f", + avgScore + ) + } | ${scores.size} | ${String.format("%.0f%%", successRate)} |" + ) + } + appendLine() + appendLine("## Best Variant Evolution") + appendLine() + appendLine("### Initial Text (Score: ${String.format("%.1f", initialEvaluation.overall_score)})") + appendLine("```") + appendLine(initialText) + appendLine("```") + appendLine() + appendLine( + "### Final Optimized Text (Score: ${ + String.format( + "%.1f", + bestVariant.score.overall_score ) - resultFn(finalResult) - - } catch (e: Exception) { - log.error("Error during GeneticOptimizationTask execution", e) - task.error(e) - task.complete("Failed with error: ${e.message}") - resultFn("ERROR: ${e.message}") + })" + ) + appendLine("```") + appendLine(bestVariant.text) + appendLine("```") + appendLine() + appendLine("### Improvement Summary") + appendLine() + appendLine( + "- **Score Improvement:** ${ + String.format( + "%+.1f", + bestVariant.score.overall_score - initialEvaluation.overall_score + ) + } points" + ) + appendLine("- **Generation Found:** 
${bestVariant.generation}") + appendLine("- **Strategy Used:** ${bestVariant.strategy}") + appendLine() + appendLine("### Detailed Analysis") + appendLine() + appendLine("**Strengths:**") + bestVariant.score.strengths.forEach { appendLine("- $it") } + appendLine() + if (bestVariant.score.weaknesses.isNotEmpty()) { + appendLine("**Remaining Areas for Improvement:**") + bestVariant.score.weaknesses.forEach { appendLine("- $it") } + appendLine() + } + appendLine("**Criteria Scores:**") + bestVariant.score.criteria_scores.forEach { (criterion, score) -> + val initialScore = initialEvaluation.criteria_scores[criterion] ?: 0.0 + val improvement = score - initialScore + appendLine( + "- $criterion: ${String.format("%.1f", score)}/100 (${ + String.format( + "%+.1f", + improvement + ) + })" + ) } + appendLine() + appendLine("**Justification:**") + appendLine(bestVariant.score.justification) + } + evolutionTask.add(evolutionAnalysis.renderMarkdown) + task.update() + transcript?.write("\n\n---\n\n".toByteArray(StandardCharsets.UTF_8)) + transcript?.write(evolutionAnalysis.toByteArray(StandardCharsets.UTF_8)) + + // Build final result + val totalTime = System.currentTimeMillis() - startTime + buildString { + appendLine("# Genetic Optimization Results") + appendLine() + appendLine("**Optimization Goal:** $optimizationGoal") + appendLine() + appendLine("## Final Optimized Text") + appendLine() + appendLine("```") + appendLine(bestVariant.text) + appendLine("```") + appendLine() + appendLine("## Performance Metrics") + appendLine() + appendLine("- **Initial Score:** ${String.format("%.1f", initialEvaluation.overall_score)}/100") + appendLine("- **Final Score:** ${String.format("%.1f", bestVariant.score.overall_score)}/100") + appendLine( + "- **Improvement:** ${ + String.format( + "%+.1f", + bestVariant.score.overall_score - initialEvaluation.overall_score + ) + } points" + ) + appendLine("- **Generations:** $numGenerations") + appendLine("- **Total Variants Evaluated:** 
${evolutionHistory.flatten().size}") + appendLine("- **Best Found in Generation:** ${bestVariant.generation}") + appendLine() + appendLine("## Key Improvements") + appendLine() + bestVariant.score.strengths.forEach { appendLine("- $it") } + appendLine() + appendLine("*See the Evolution Analysis tab for detailed progression and strategy effectiveness*") + } + + // Final overview update + val finalOverview = buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Optimization Complete") + appendLine() + appendLine("| Metric | Value |") + appendLine("|--------|-------|") + appendLine("| Initial Score | ${String.format("%.1f", initialEvaluation.overall_score)}/100 |") + appendLine("| Final Score | ${String.format("%.1f", bestVariant.score.overall_score)}/100 |") + appendLine( + "| Improvement | ${ + String.format( + "%+.1f", + bestVariant.score.overall_score - initialEvaluation.overall_score + ) + } |" + ) + appendLine("| Generations | $numGenerations |") + appendLine("| Total Variants | ${evolutionHistory.flatten().size} |") + appendLine("| Total Time | ${totalTime / 1000}s |") + appendLine() + appendLine("**Status:** ✓ Complete") + } + overviewTask.add(finalOverview.renderMarkdown) + task.update() + transcript?.write("\n\n---\n\n".toByteArray(StandardCharsets.UTF_8)) + transcript?.write(finalOverview.toByteArray(StandardCharsets.UTF_8)) + transcript?.close() + + log.info("GeneticOptimizationTask completed successfully: total_time=${totalTime}ms, improvement=${bestVariant.score.overall_score - initialEvaluation.overall_score}, generations=$numGenerations") + task.complete( + "Optimization complete: improved by ${ + String.format( + "%.1f", + bestVariant.score.overall_score - initialEvaluation.overall_score + ) + } points in ${totalTime / 1000}s" + ) + val (link, _) = Pair(task.linkTo("optimization_results.md"), task.resolve("optimization_results.md")) + val summaryMessage = buildString { + appendLine("Optimization complete: improved by 
${String.format("%.1f", bestVariant.score.overall_score - initialEvaluation.overall_score)} points") + appendLine() + appendLine("**Final Score:** ${String.format("%.1f", bestVariant.score.overall_score)}/100") + appendLine("**Generations:** $numGenerations") + appendLine("**Total Time:** ${totalTime / 1000}s") + appendLine() + appendLine( + "Detailed results: $link " + + "html " + + "pdf" + ) + } + resultFn(summaryMessage) + + } catch (e: Exception) { + log.error("Error during GeneticOptimizationTask execution", e) + transcript?.close() + task.error(e) + task.complete("Failed with error: ${e.message}") + resultFn("ERROR: ${e.message}") } - - private fun generateMutation( - text: String, - strategy: String, - goal: String, - constraints: List, - context: String, - api: ChatInterface - ): TextVariant? { - try { - val constraintsText = if (constraints.isNotEmpty()) { - "\n\nConstraints to maintain:\n${constraints.joinToString("\n") { "- $it" }}" - } else "" - - val contextText = if (context.isNotBlank()) { - "\n\nAdditional context:\n${context.take(5000)}" - } else "" - - val prompt = """ + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun getInputFileContent(): String { + return (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(root.toFile()) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + "# $relativePath\n\n${file.readText()}" + } + } + + + private fun generateMutation( + text: String, + strategy: String, + goal: String, + constraints: List, + context: String, + api: ChatInterface + ): TextVariant? { + try { + val constraintsText = if (constraints.isNotEmpty()) { + "\n\nConstraints to maintain:\n${constraints.joinToString("\n") { "- $it" }}" + } else "" + + val contextText = if (context.isNotBlank()) { + "\n\nAdditional context:\n${context.take(5000)}" + } else "" + + val prompt = """ You are a text optimization expert applying genetic algorithm mutations. ## Current Text @@ -696,38 +810,38 @@ Mutation strategies: Generate ONE variant that applies this strategy effectively. 
""".trimIndent() - val mutationParser = ParsedAgent( - resultClass = TextVariant::class.java, - prompt = prompt, - model = api, - temperature = 0.8, - name = "MutationGenerator", - parsingChatter = orchestrationConfig.parsingChatter, - ) - - val result = mutationParser.answer(listOf(prompt)).obj - log.debug("Generated mutation using $strategy: ${result.text.take(50)}...") - return result - - } catch (e: Exception) { - log.warn("Failed to generate mutation with strategy $strategy", e) - return null - } + val mutationParser = ParsedAgent( + resultClass = TextVariant::class.java, + prompt = prompt, + model = api, + temperature = 0.8, + name = "MutationGenerator", + parsingChatter = orchestrationConfig.parsingChatter, + ) + + val result = mutationParser.answer(listOf(prompt)).obj + log.debug("Generated mutation using $strategy: ${result.text.take(50)}...") + return result + + } catch (e: Exception) { + log.warn("Failed to generate mutation with strategy $strategy", e) + return null } - - private fun applyCrossover( - text1: String, - text2: String, - goal: String, - constraints: List, - api: ChatInterface - ): String? { - try { - val constraintsText = if (constraints.isNotEmpty()) { - "\n\nConstraints to maintain:\n${constraints.joinToString("\n") { "- $it" }}" - } else "" - - val prompt = """ + } + + private fun applyCrossover( + text1: String, + text2: String, + goal: String, + constraints: List, + api: ChatInterface + ): String? { + try { + val constraintsText = if (constraints.isNotEmpty()) { + "\n\nConstraints to maintain:\n${constraints.joinToString("\n") { "- $it" }}" + } else "" + + val prompt = """ You are a text optimization expert applying genetic algorithm crossover. ## Parent Text 1 @@ -754,40 +868,48 @@ Create a new variant by combining the best elements from both parent texts. Generate the crossover variant now. 
""".trimIndent() - val crossoverAgent = ChatAgent( - prompt = "You are a text optimization expert.", - model = api, - temperature = 0.7 - ) + val crossoverAgent = ChatAgent( + prompt = "You are a text optimization expert.", + model = api, + temperature = 0.7 + ) - val result = crossoverAgent.answer(listOf(prompt)) - log.debug("Generated crossover variant: ${result.take(50)}...") - return result + val result = crossoverAgent.answer(listOf(prompt)) + log.debug("Generated crossover variant: ${result.take(50)}...") + return result - } catch (e: Exception) { - log.warn("Failed to apply crossover", e) - return null - } + } catch (e: Exception) { + log.warn("Failed to apply crossover", e) + return null } - - private fun evaluateVariant( - text: String, - goal: String, - weights: Map, - constraints: List, - api: ChatInterface - ): EvaluationScore { - try { - val constraintsText = if (constraints.isNotEmpty()) { - "\n\nConstraints:\n${constraints.joinToString("\n") { "- $it" }}" - } else "" - - val weightsText = weights.entries.joinToString("\n") { (criterion, weight) -> - "- $criterion (${String.format("%.0f%%", weight * 100)} weight)" - } - - val prompt = """ -You are an expert evaluator for text optimization using genetic algorithms. + } + + private fun evaluateVariant( + text: String, + goal: String, + weights: Map, + constraints: List, + api: ChatInterface, + inputFileContent: String = "" + ): EvaluationScore { + try { + val constraintsText = if (constraints.isNotEmpty()) { + "\n\nConstraints:\n${constraints.joinToString("\n") { "- $it" }}" + } else "" + + val weightsText = weights.entries.joinToString("\n") { (criterion, weight) -> + "- $criterion (${String.format("%.0f%%", weight * 100)} weight)" + } + val contextText = if (inputFileContent.isNotBlank()) { + "\n\nAdditional context from input files:\n${inputFileContent.take(5000)}" + } else { + "" + } + + + val prompt = """ + You are an expert evaluator for text optimization using genetic algorithms. 
+$contextText ## Text to Evaluate ``` @@ -819,51 +941,28 @@ Also provide: Be objective and consistent in your evaluation. """.trimIndent() - val evaluationParser = ParsedAgent( - resultClass = EvaluationScore::class.java, - prompt = prompt, - model = api, - temperature = 0.3, - name = "VariantEvaluator", - parsingChatter = orchestrationConfig.parsingChatter, - ) - - val result = evaluationParser.answer(listOf(prompt)).obj - log.debug("Evaluated variant: overall_score=${result.overall_score}") - return result - - } catch (e: Exception) { - log.warn("Failed to evaluate variant", e) - return EvaluationScore( - overall_score = 0.0, - criteria_scores = weights.keys.associateWith { 0.0 }, - strengths = emptyList(), - weaknesses = listOf("Evaluation failed: ${e.message}"), - justification = "Error during evaluation" - ) - } - } - - companion object { - private val log: Logger = LoggerFactory.getLogger(GeneticOptimizationTask::class.java) - val GeneticOptimization = TaskType( - "GeneticOptimization", - GeneticOptimizationTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Iteratively evolve and perfect text through genetic algorithms", - """ - Uses genetic algorithms to optimize text through iterative evolution. -
      -
    • Generates variations using configurable mutation strategies
    • -
    • Evaluates variants against optimization criteria
    • -
    • Selects top performers for next generation
    • -
    • Applies crossover to combine successful traits
    • -
    • Tracks fitness progression across generations
    • -
    • Provides detailed analysis of evolution
    • -
    • Supports custom evaluation criteria and weights
    • -
    • Useful for perfecting prompts, copy, documentation, and messaging
    • -
    - """ - ) + val evaluationParser = ParsedAgent( + resultClass = EvaluationScore::class.java, + prompt = prompt, + model = api, + temperature = 0.3, + name = "VariantEvaluator", + parsingChatter = orchestrationConfig.parsingChatter, + ) + + val result = evaluationParser.answer(listOf(prompt)).obj + log.debug("Evaluated variant: overall_score=${result.overall_score}") + return result + + } catch (e: Exception) { + log.warn("Failed to evaluate variant", e) + return EvaluationScore( + overall_score = 0.0, + criteria_scores = weights.keys.associateWith { 0.0 }, + strengths = emptyList(), + weaknesses = listOf("Evaluation failed: ${e.message}"), + justification = "Error during evaluation" + ) } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/LateralThinkingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/LateralThinkingTask.kt index b850092d3..9895754bd 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/LateralThinkingTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/LateralThinkingTask.kt @@ -1,25 +1,58 @@ package com.simiacryptus.cognotik.plan.tools.reasoning - import com.simiacryptus.cognotik.actors.ChatAgent - import com.simiacryptus.cognotik.actors.ParsedAgent - import com.simiacryptus.cognotik.apps.general.renderMarkdown - import com.simiacryptus.cognotik.describe.Description - import com.simiacryptus.cognotik.plan.* - import com.simiacryptus.cognotik.util.LoggerFactory - import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.PaginatedDocumentReader +import com.simiacryptus.cognotik.input.getReader +import com.simiacryptus.cognotik.plan.* +import 
com.simiacryptus.cognotik.platform.model.ApiChatModel +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject - import com.simiacryptus.cognotik.webui.session.SessionTask - import org.slf4j.Logger - import java.time.LocalDateTime - import java.time.format.DateTimeFormatter - - class LateralThinkingTask( +import com.simiacryptus.cognotik.webui.session.SessionTask +import com.simiacryptus.cognotik.webui.session.getChildClient +import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.nio.file.Path +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class LateralThinkingTask( orchestrationConfig: OrchestrationConfig, planTask: LateralThinkingTaskExecutionConfigData? - ) : AbstractTask( +) : AbstractTask( orchestrationConfig, planTask ) { + companion object { + private val log: Logger = LoggerFactory.getLogger(LateralThinkingTask::class.java) + + val LateralThinking = TaskType( + "LateralThinking", + LateralThinkingTaskExecutionConfigData::class.java, + LateralThinkingTaskTypeConfig::class.java, + "Break conventional thinking patterns to find innovative solutions", + """ + Applies lateral thinking techniques to generate unconventional solutions. +
      +
    • Supports multiple techniques: reversal, random stimulus, challenge assumptions, exaggeration, escape, metaphor, provocation
    • +
    • Generates multiple alternatives per technique
    • +
    • Identifies breakthrough aspects and novel perspectives
    • +
    • Evaluates novelty and feasibility of ideas
    • +
    • Synthesizes insights across techniques
    • +
    • Optionally performs detailed feasibility evaluation
    • +
    • Suggests hybrid approaches combining multiple ideas
    • +
    • Ideal for innovation, breaking design impasses, and creative problem-solving
    • +
    + """ + ) + } + val maxDescriptionLength = 1500 class LateralThinkingTaskExecutionConfigData( @@ -41,6 +74,8 @@ import com.simiacryptus.cognotik.util.ValidatedObject val domain_context: String? = null, @Description("Additional constraints or requirements to consider") val constraints: List? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input context for the task") + val input_files: List? = null, task_description: String? = null, task_dependencies: List? = null, state: TaskState? = TaskState.Pending, @@ -51,6 +86,7 @@ import com.simiacryptus.cognotik.util.ValidatedObject task_dependencies = task_dependencies?.toMutableList(), state = state ), ValidatedObject { + override fun validate(): String? { if (problem.isNullOrBlank()) { return "Problem must be specified and cannot be blank" @@ -60,7 +96,7 @@ import com.simiacryptus.cognotik.util.ValidatedObject } techniques?.forEach { technique -> val validTechniques = listOf( - "reversal", "random_stimulus", "challenge_assumptions", + "reversal", "random_stimulus", "challenge_assumptions", "exaggeration", "escape", "metaphor", "provocation" ) if (technique !in validTechniques) { @@ -71,6 +107,16 @@ import com.simiacryptus.cognotik.util.ValidatedObject } } + class LateralThinkingTaskTypeConfig( + task_type: String? = LateralThinking.name, + name: String? = null, + model: ApiChatModel? 
= null + ) : TaskTypeConfig( + task_type = task_type, + name = name, + model = model + ), ValidatedObject + data class LateralIdea( @Description("Title of the idea") val title: String = "", @@ -210,6 +256,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti ) { try { val startTime = System.currentTimeMillis() + val transcript = transcript(task) log.info("Starting LateralThinkingTask for problem='${executionConfig?.problem?.take(50)}...', techniques=${executionConfig?.techniques}") val problem = executionConfig?.problem @@ -268,6 +315,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti appendLine("| Alternatives per Technique | $numAlternatives |") appendLine("| Feasibility Evaluation | ${if (evaluateFeasibility) "✓ Enabled" else "✗ Disabled"} |") appendLine() + transcript?.write(this.toString().toByteArray()) appendLine("## Progress") appendLine() appendLine("- ⏳ Gathering context...") @@ -277,10 +325,13 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti log.debug("Gathering prior context") val priorContext = getPriorCode(agent.executionState) - log.debug("Context gathered: priorContext length=${priorContext.length}") + val fileContext = getInputFileCode(agent.root) + val combinedContext = priorContext + "\n\n" + fileContext + log.debug("Context gathered: priorContext length=${priorContext.length}, fileContext length=${fileContext.length}") overviewTask.add(buildString { appendLine() + transcript?.write("\n- ✓ Context gathered\n- ⏳ Applying lateral thinking techniques...\n".toByteArray()) appendLine("- ✓ Context gathered") appendLine("- ⏳ Applying lateral thinking techniques...") }.renderMarkdown) @@ -300,6 +351,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti techniqueTask.add(buildString { appendLine("# ${technique.capitalize()} Technique") appendLine() + transcript?.write("# ${technique.capitalize()} Technique\n\n**Status:** ⏳ 
Generating ideas...\n\n".toByteArray()) appendLine("**Status:** ⏳ Generating ideas...") appendLine() appendLine(getTechniqueDescription(technique)) @@ -312,13 +364,13 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti numAlternatives, domainContext, constraints, - priorContext + combinedContext ) val techniqueParser = ParsedAgent( resultClass = TechniqueApplication::class.java, prompt = techniquePrompt, - model = api, + model = api.getChildClient(task), temperature = 0.8, name = "LateralThinking_${technique}", parsingChatter = orchestrationConfig.parsingChatter, @@ -333,6 +385,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti // Display technique results techniqueTask.add(buildString { + transcript?.write("\n---\n\n## Results\n\n**Status:** ✓ Complete\n\n".toByteArray()) appendLine() appendLine("---") appendLine() @@ -381,6 +434,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti appendLine("---") appendLine() } + transcript?.write(this.toString().toByteArray()) if (application.insights.isNotEmpty()) { appendLine("### Key Insights") appendLine() @@ -390,6 +444,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti task.update() } else { log.warn("Failed to generate ideas for technique: $technique") + transcript?.write("\n**Status:** ⚠️ Failed to generate ideas\n".toByteArray()) techniqueTask.add(buildString { appendLine() appendLine("**Status:** ⚠️ Failed to generate ideas") @@ -399,6 +454,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti overviewTask.add(buildString { appendLine() + transcript?.write("\n- ✓ ${technique.capitalize()} complete (${application?.ideas?.size ?: 0} ideas)\n".toByteArray()) appendLine("- ✓ ${technique.capitalize()} complete (${application?.ideas?.size ?: 0} ideas)") }.renderMarkdown) task.update() @@ -408,6 +464,7 @@ LateralThinking - Break conventional thinking patterns 
to find innovative soluti overviewTask.add(buildString { appendLine() + transcript?.write("\n- ✓ All techniques applied (${allIdeas.size} total ideas)\n- ⏳ Synthesizing insights...\n".toByteArray()) appendLine("- ✓ All techniques applied (${allIdeas.size} total ideas)") appendLine("- ⏳ Synthesizing insights...") }.renderMarkdown) @@ -420,6 +477,7 @@ LateralThinking - Break conventional thinking patterns to find innovative soluti synthesisTask.add(buildString { appendLine("# Cross-Technique Synthesis") + transcript?.write("\n# Cross-Technique Synthesis\n\n**Status:** ⏳ Analyzing patterns and insights...\n".toByteArray()) appendLine() appendLine("**Status:** ⏳ Analyzing patterns and insights...") }.renderMarkdown) @@ -462,7 +520,7 @@ Provide a comprehensive synthesis. val synthesisAgent = ChatAgent( prompt = "You are an expert in creative synthesis and innovation strategy.", - model = api, + model = api.getChildClient(task), temperature = 0.6 ) @@ -470,6 +528,7 @@ Provide a comprehensive synthesis. synthesisTask.add(buildString { appendLine() + transcript?.write("\n---\n\n## Synthesis Results\n\n**Status:** ✓ Complete\n\n${synthesisText}\n".toByteArray()) appendLine("---") appendLine() appendLine("## Synthesis Results") @@ -485,6 +544,7 @@ Provide a comprehensive synthesis. overviewTask.add(buildString { appendLine() + transcript?.write("\n- ✓ Synthesis complete\n".toByteArray()) appendLine("- ✓ Synthesis complete") if (evaluateFeasibility) { appendLine("- ⏳ Evaluating feasibility...") @@ -501,6 +561,7 @@ Provide a comprehensive synthesis. feasibilityTask.add(buildString { appendLine("# Feasibility Evaluation") + transcript?.write("\n# Feasibility Evaluation\n\n**Status:** ⏳ Evaluating ${allIdeas.size} ideas...\n".toByteArray()) appendLine() appendLine("**Status:** ⏳ Evaluating ${allIdeas.size} ideas...") }.renderMarkdown) @@ -543,7 +604,7 @@ Provide a structured evaluation. 
val feasibilityParser = ParsedAgent( resultClass = FeasibilityEvaluation::class.java, prompt = feasibilityPrompt, - model = api, + model = api.getChildClient(task), temperature = 0.4, name = "FeasibilityEvaluation", parsingChatter = orchestrationConfig.parsingChatter, @@ -553,6 +614,7 @@ Provide a structured evaluation. if (feasibilityEvaluation != null) { feasibilityTask.add(buildString { + transcript?.write("\n---\n\n## Evaluation Results\n\n**Status:** ✓ Complete\n\n".toByteArray()) appendLine() appendLine("---") appendLine() @@ -580,10 +642,12 @@ Provide a structured evaluation. feasibilityEvaluation.hybrid_approaches.forEach { appendLine("- $it") } } }.renderMarkdown) + transcript?.write(this.toString().toByteArray()) task.update() } overviewTask.add(buildString { + transcript?.write("\n- ✓ Feasibility evaluation complete\n".toByteArray()) appendLine() appendLine("- ✓ Feasibility evaluation complete") }.renderMarkdown) @@ -606,6 +670,7 @@ Provide a structured evaluation. val summaryContent = formatSummary(result, problem, techniques) summaryTask.add(summaryContent.renderMarkdown) + transcript?.write("\n${summaryContent}\n".toByteArray()) task.update() // Create concise result text @@ -686,6 +751,7 @@ Provide a structured evaluation. appendLine("| Total Time | ${totalTime / 1000}s |") appendLine() appendLine("**Status:** ✓ Complete") + transcript?.write(this.toString().toByteArray()) }.renderMarkdown) task.update() @@ -699,6 +765,23 @@ Provide a structured evaluation. 
log ) resultFn(resultText) + transcript?.close() + // Create summary message with transcript link + val (transcriptLink, _) = task.createFile("lateral_thinking_summary.md") + val summaryMessage = buildString { + appendLine(resultText) + appendLine() + appendLine("---") + appendLine() + appendLine( + "📄 **Full Analysis:** [View Transcript]($transcriptLink) | [HTML](${transcriptLink.removeSuffix(".md")}.html) | [PDF](${ + transcriptLink.removeSuffix( + ".md" + ) + }.pdf)" + ) + } + resultFn(summaryMessage) } catch (e: Exception) { log.error("Error during LateralThinkingTask execution", e) @@ -717,6 +800,20 @@ Provide a structured evaluation. } } + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun getTechniqueDescription(technique: String): String { return when (technique.lowercase()) { "reversal" -> """ @@ -1136,29 +1233,78 @@ Generate $numAlternatives ideas using $technique. 
return this.replaceFirstChar { if (it.isLowerCase()) it.titlecase() else it.toString() } } - companion object { - private val log: Logger = LoggerFactory.getLogger(LateralThinkingTask::class.java) + private fun getInputFileCode(root: Path): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } - val LateralThinking = TaskType( - "LateralThinking", - LateralThinkingTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Break conventional thinking patterns to find innovative solutions", - """ - Applies lateral thinking techniques to generate unconventional solutions. -
      -
    • Supports multiple techniques: reversal, random stimulus, challenge assumptions, exaggeration, escape, metaphor, provocation
    • -
    • Generates multiple alternatives per technique
    • -
    • Identifies breakthrough aspects and novel perspectives
    • -
    • Evaluates novelty and feasibility of ideas
    • -
    • Synthesizes insights across techniques
    • -
    • Optionally performs detailed feasibility evaluation
    • -
    • Suggests hybrid approaches combining multiple ideas
    • -
    • Ideal for innovation, breaking design impasses, and creative problem-solving
    • -
    - """ + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: File) = try { + file.getReader().use { reader -> + when (reader) { + is PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + file.readText() } + } private fun String.removePrefix(prefix: Regex): String { diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MetaCognitiveReflectionTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MetaCognitiveReflectionTask.kt index 384d9b128..a34d9dc7c 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MetaCognitiveReflectionTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MetaCognitiveReflectionTask.kt @@ -1,14 +1,13 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.file.FileSystems class MetaCognitiveReflectionTask( orchestrationConfig: OrchestrationConfig, @@ -21,6 
+20,12 @@ class MetaCognitiveReflectionTask( class MetaCognitiveReflectionTaskExecutionConfigData( @Description("The ID of the task whose reasoning process should be reflected upon") val subject_task_id: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as context for reflection") + val input_files: List? = null, + @Description("Additional context or questions to guide the reflection") + val reflection_questions: List? = null, + @Description("Whether to include file context in the reflection analysis") + val include_file_context: Boolean = true, @Description("Aspects to evaluate: 'assumptions', 'biases', 'alternatives', 'confidence', 'completeness', 'logic'") val reflection_aspects: List? = listOf("assumptions", "biases", "alternatives", "confidence"), @Description("Whether to suggest improvements to the reasoning process") @@ -45,6 +50,9 @@ class MetaCognitiveReflectionTask( if (reflection_aspects.isNullOrEmpty()) { return "reflection_aspects must not be null or empty" } + if (reflection_aspects.isNullOrEmpty()) { + return "reflection_aspects must not be null or empty" + } val validAspects = setOf("assumptions", "biases", "alternatives", "confidence", "completeness", "logic") val invalidAspects = reflection_aspects.filterNot { it in validAspects } if (invalidAspects.isNotEmpty()) { @@ -65,6 +73,9 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes - 'confidence': Evaluate certainty levels - 'completeness': Check for missing considerations - 'logic': Verify logical consistency + ** Optionally, list input files (supports glob patterns) to provide context + ** Optionally, specify reflection_questions to guide the analysis + ** Enable include_file_context to incorporate file content in reflection ** Enable suggest_improvements to get actionable recommendations ** Enable identify_gaps to surface knowledge uncertainties ** Enable evaluate_confidence to assess conclusion reliability @@ -115,6 +126,14 @@ 
MetaCognitiveReflection - Reflect on and critique reasoning processes } val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + // Create transcript file + val (transcriptLink, transcript) = initializeTranscript(task) + transcript?.let { stream -> + stream.write("# Meta-Cognitive Reflection Transcript\n\n".toByteArray()) + stream.write("## Subject Task: `$subjectTaskId`\n\n".toByteArray()) + stream.write("**Timestamp**: ${java.time.Instant.now()}\n\n".toByteArray()) + } + val tabbedDisplay = TabbedDisplay(task) val overviewTask = task.ui.newTask() @@ -142,6 +161,25 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes ), log ) } + // Gather file context if enabled + val fileContext = if (executionConfig?.include_file_context == true) { + getInputFileContext(executionConfig?.input_files ?: listOf()) + } else { + "" + } + // Gather messages context + val messagesContext = messages.filter { it.isNotBlank() }.joinToString("\n\n") + // Gather reflection questions + val questionsContext = if (!executionConfig?.reflection_questions.isNullOrEmpty()) { + "## Reflection Questions:\n\n" + executionConfig?.reflection_questions?.mapIndexed { idx, q -> + "${idx + 1}. 
$q" + }?.joinToString("\n") + } else { + "" + } + transcript?.let { stream -> + writeToTranscript(stream, "## Input Context\n\n$fileContext\n\n$messagesContext\n\n$questionsContext\n\n") + } val reflectionAspects = @@ -153,6 +191,9 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes subjectTaskId = subjectTaskId, subjectTaskResult = subjectTaskResult, priorContext = priorContext, + fileContext = fileContext, + messagesContext = messagesContext, + questionsContext = questionsContext, reflectionAspects = reflectionAspects, suggestImprovements = executionConfig?.suggest_improvements ?: true, identifyGaps = executionConfig?.identify_gaps ?: true, @@ -167,6 +208,12 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes |**Subject Task**: `$subjectTaskId` | |**Reflection Aspects**: $aspectsText + | + |**Include File Context**: ${executionConfig?.include_file_context ?: true} + | + |**Input Files**: ${executionConfig?.input_files?.joinToString(", ") ?: "None"} + | + |**Reflection Questions**: ${executionConfig?.reflection_questions?.size ?: 0} questions | |**Suggest Improvements**: ${executionConfig?.suggest_improvements ?: true} | @@ -177,6 +224,18 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes ui = overviewTask.ui ) ) + transcript?.let { stream -> + stream.write("\n## Reflection Parameters\n\n".toByteArray()) + stream.write("- **Subject Task**: `$subjectTaskId`\n".toByteArray()) + stream.write("- **Reflection Aspects**: $aspectsText\n".toByteArray()) + stream.write("- **Include File Context**: ${executionConfig?.include_file_context ?: true}\n".toByteArray()) + stream.write("- **Input Files**: ${executionConfig?.input_files?.joinToString(", ") ?: "None"}\n".toByteArray()) + stream.write("- **Reflection Questions**: ${executionConfig?.reflection_questions?.size ?: 0}\n".toByteArray()) + stream.write("- **Suggest Improvements**: ${executionConfig?.suggest_improvements ?: true}\n".toByteArray()) + 
stream.write("- **Identify Gaps**: ${executionConfig?.identify_gaps ?: true}\n".toByteArray()) + stream.write("- **Evaluate Confidence**: ${executionConfig?.evaluate_confidence ?: true}\n\n".toByteArray()) + } + overviewTask.safeComplete("", log) // Step 4: Create agent and perform reflection val reflectionTask = task.ui.newTask() @@ -193,6 +252,12 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes try { val reflectionResult: String = chatAgent.answer(listOf(prompt)) + transcript?.let { stream -> + writeToTranscript(stream, "\n## Reflection Analysis\n\n") + stream.write(reflectionResult.toByteArray()) + stream.write("\n\n".toByteArray()) + } + reflectionTask.add( MarkdownUtil.renderMarkdown( @@ -211,6 +276,14 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes val summary = generateReflectionSummary(reflectionResult) + transcript?.let { stream -> + writeToTranscript(stream, "\n## Summary\n\n") + stream.write(summary.toByteArray()) + stream.write("\n\n---\n\n".toByteArray()) + stream.write("**Duration**: ${System.currentTimeMillis() - startTime}ms\n".toByteArray()) + stream.write("**Status**: Completed successfully\n".toByteArray()) + } + summaryTask.safeComplete( MarkdownUtil.renderMarkdown( @@ -232,10 +305,21 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes val duration = System.currentTimeMillis() - startTime log.info("MetaCognitiveReflection task completed successfully for subject_task_id: $subjectTaskId in ${duration}ms. Summary length: ${summary.length}") - resultFn(summary) + val finalOutput = "Meta-cognitive reflection completed. 
View detailed analysis: transcript.md html pdf\n\n$summary" + resultFn(finalOutput) + transcript?.close() + } catch (e: Exception) { log.error("Error during meta-cognitive reflection", e) + transcript?.let { stream -> + stream.write("\n## ❌ Error\n\n".toByteArray()) + stream.write("```\n${e.message}\n```\n".toByteArray()) + } + transcript?.close() + task.error(e) reflectionTask.error(e) task.add( @@ -258,9 +342,60 @@ MetaCognitiveReflection - Reflect on and critique reasoning processes } } + private fun initializeTranscript(task: SessionTask): Pair { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.add( + MarkdownUtil.renderMarkdown( + "Writing transcript to transcript.md " + + "html " + + "pdf", + ui = task.ui + ) + ) + return Pair(link, markdownTranscript) + } + + private fun writeToTranscript(stream: FileOutputStream, content: String) { + try { + stream.write(content.toByteArray(Charsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write to transcript", e) + } + } + + private fun getInputFileContext(inputFiles: List): String { + if (inputFiles.isEmpty()) return "" + return inputFiles.flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + } + }.filter { it.isFile && it.exists() } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + } + + private fun buildSystemPrompt(): String { return """ -You are a meta-cognitive analyst specializing in critical thinking and reasoning 
evaluation. + You are a meta-cognitive analyst specializing in critical thinking and reasoning evaluation. Your role is to provide thoughtful, constructive reflection on reasoning processes. You identify strengths, weaknesses, assumptions, biases, and opportunities for improvement. You are thorough, objective, and focused on enhancing the quality of thinking. @@ -271,11 +406,29 @@ You are thorough, objective, and focused on enhancing the quality of thinking. subjectTaskId: String, subjectTaskResult: String, priorContext: String, + fileContext: String, + messagesContext: String, + questionsContext: String, reflectionAspects: List, suggestImprovements: Boolean, identifyGaps: Boolean, evaluateConfidence: Boolean ): String { + + if (fileContext.isNotBlank()) """ +## File Context: +The following files provide additional context for the reflection: +$fileContext +""" else "" + if (messagesContext.isNotBlank()) """ +## Messages Context: +The following messages were provided as input: +$messagesContext +""" else "" + if (questionsContext.isNotBlank()) """ +$questionsContext +""" else "" + val contextBlock = if (priorContext.isNotBlank()) """ ## Overall Context from Prior Steps: The following context was available to the task being analyzed. Consider this when evaluating its reasoning. 
diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MultiPerspectiveAnalysisTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MultiPerspectiveAnalysisTask.kt index e9261df43..259691bd9 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MultiPerspectiveAnalysisTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/MultiPerspectiveAnalysisTask.kt @@ -1,14 +1,13 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets import java.nio.file.FileSystems class MultiPerspectiveAnalysisTask( @@ -25,6 +24,8 @@ class MultiPerspectiveAnalysisTask( val analysis_subject: String? = null, @Description("List of perspectives to consider (e.g., technical, business, ethical, user)") val perspectives: List? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the analysis") + val input_files: List? 
= null, @Description("Whether to synthesize perspectives into unified conclusion") val synthesize: Boolean = true, @Description("Minimum confidence threshold for perspective agreement (0.0-1.0)") @@ -49,6 +50,9 @@ class MultiPerspectiveAnalysisTask( if (consensus_threshold < 0.0 || consensus_threshold > 1.0) { return "consensus_threshold must be between 0.0 and 1.0, got: $consensus_threshold" } + if (!input_files.isNullOrEmpty() && input_files.any { it.isBlank() }) { + return "input_files cannot contain blank entries" + } // Call parent validation for nested ValidatedObject fields return ValidatedObject.validateFields(this) } @@ -56,12 +60,13 @@ class MultiPerspectiveAnalysisTask( override fun promptSegment(): String { return """ -MultiPerspectiveAnalysis - Analyze problems from multiple viewpoints with synthesis + MultiPerspectiveAnalysis - Analyze problems from multiple viewpoints with synthesis ** Specify the subject to analyze in analysis_subject ** Provide a list of perspectives to consider (e.g., technical, business, ethical, user experience) + ** Optionally, list input files (supports glob patterns) to provide context for the analysis ** Set synthesize=true to generate a unified conclusion from all perspectives ** Configure consensus_threshold (0.0-1.0) to determine minimum agreement level - ** Related files can provide additional context for the analysis + ** Additional context files can be specified via input_files ** Each perspective will be analyzed independently, then synthesized ** Useful for: - Architectural decision making @@ -90,7 +95,7 @@ MultiPerspectiveAnalysis - Analyze problems from multiple viewpoints with synthe resultFn("CONFIGURATION ERROR: $error") return } - + if (subject.isNullOrBlank()) { log.error("No analysis subject specified") task.safeComplete("CONFIGURATION ERROR: No analysis subject specified", log) @@ -135,6 +140,8 @@ MultiPerspectiveAnalysis - Analyze problems from multiple viewpoints with synthe } catch (e: Exception) { 
log.warn("Failed to create tabbed display", e) } + var transcriptStream: FileOutputStream? = null + val contextFiles = getContextFiles() val priorCode = getPriorCode(agent.executionState) @@ -143,6 +150,20 @@ MultiPerspectiveAnalysis - Analyze problems from multiple viewpoints with synthe val tabs = TabbedDisplay(task) val perspectiveResults = mutableMapOf() + try { + transcriptStream = initializeTranscript(task) + transcriptStream?.let { stream -> + writeToTranscript(stream, "# Multi-Perspective Analysis Transcript\n\n") + writeToTranscript(stream, "**Subject:** ${subject.truncateForDisplay(maxDescriptionLength)}\n\n") + writeToTranscript(stream, "**Perspectives:** ${perspectives.joinToString(", ")}\n\n") + writeToTranscript(stream, "**Consensus Threshold:** ${executionConfig.consensus_threshold}\n\n") + writeToTranscript(stream, "---\n\n") + } + } catch (e: Exception) { + log.warn("Failed to initialize transcript", e) + } + + // Analyze from each perspective perspectives.forEach { perspective -> val perspectiveTask = task.ui.newTask(false).apply { @@ -180,6 +201,10 @@ Provide a thorough analysis from the $perspective viewpoint. var analysis: String? = chatAgent.answer(listOf(prompt)) perspectiveResults[perspective] = analysis ?: "" + transcriptStream?.let { stream -> + writeToTranscript(stream, "## $perspective Perspective\n\n$analysis\n\n---\n\n") + } + perspectiveTask.complete( MarkdownUtil.renderMarkdown( "### $perspective Perspective\n\n$analysis", @@ -238,6 +263,10 @@ Provide a comprehensive synthesis that integrates all perspectives. try { val synthesis = synthesisAgent.answer(listOf(synthesisPrompt)) + transcriptStream?.let { stream -> + writeToTranscript(stream, "## Synthesis\n\n$synthesis\n\n") + } + synthesisTask.complete( MarkdownUtil.renderMarkdown( "## Synthesis\n\n$synthesis", @@ -282,11 +311,62 @@ Provide a comprehensive synthesis that integrates all perspectives. 
} } } + try { + transcriptStream?.flush() + } catch (e: Exception) { + log.warn("Failed to close transcript stream", e) + } + task.safeComplete("Multi-perspective analysis complete.", log) resultFn(finalResult) } + private fun initializeTranscript(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = Pair(task.linkTo("analysis_transcript.md"), task.resolve("analysis_transcript.md")) + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + private fun writeToTranscript(stream: FileOutputStream, content: String) { + try { + stream.write(content.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write to transcript", e) + } + } + + private fun getInputFileCode(): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + } + }.filter { it.isFile && it.exists() } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { file -> + "# ${root.relativize(file.toPath())}\n\n```\n${file.readText()}\n```" + } + + private fun getContextFiles(): String { val relatedFiles = executionConfig?.related_files ?: return "" @@ -308,6 +388,7 @@ Provide a comprehensive synthesis that integrates all perspectives. 
}.joinToString("\n\n") } + companion object { private val log: Logger = LoggerFactory.getLogger(MultiPerspectiveAnalysisTask::class.java) val MultiPerspectiveAnalysis = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ProbabilisticReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ProbabilisticReasoningTask.kt index d8dc3d448..e9806d7e7 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ProbabilisticReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/ProbabilisticReasoningTask.kt @@ -1,14 +1,18 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets +import java.nio.file.Path import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -20,6 +24,8 @@ class ProbabilisticReasoningTask( planTask ) { val maxDescriptionLength = 10000 + protected val codeFiles = mutableMapOf() + class ProbabilisticReasoningTaskExecutionConfigData( @Description("Map of hypotheses to their prior probabilities (must sum to 1.0)") @@ -34,6 +40,8 @@ class ProbabilisticReasoningTask( val suggest_experiments: Boolean = true, @Description("Risk tolerance level (low/medium/high)") val risk_tolerance: String = "medium", + @Description("The specific files (or file patterns, e.g. 
**/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Decision context or problem statement") val decision_context: String? = null, task_description: String? = null, @@ -51,37 +59,37 @@ class ProbabilisticReasoningTask( if (hypotheses.isNullOrEmpty()) { return "Hypotheses map cannot be null or empty" } - + // Validate that all probabilities are between 0 and 1 hypotheses.forEach { (hypothesis, probability) -> if (probability < 0.0 || probability > 1.0) { return "Probability for hypothesis '$hypothesis' must be between 0.0 and 1.0, got: $probability" } } - + // Validate that probabilities sum to approximately 1.0 val probabilitySum = hypotheses.values.sum() if (probabilitySum < 0.99 || probabilitySum > 1.01) { return "Prior probabilities must sum to 1.0 (current sum: $probabilitySum)" } - + // Validate risk tolerance if (risk_tolerance !in listOf("low", "medium", "high")) { return "Risk tolerance must be one of: low, medium, high. Got: $risk_tolerance" } - + // Validate evidence list if present evidence?.forEach { evidenceItem -> if (evidenceItem.isBlank()) { return "Evidence items cannot be blank" } } - + // Validate decision context if present if (decision_context?.isBlank() == true) { return "Decision context cannot be blank if provided" } - + // Call parent validation return ValidatedObject.validateFields(this) } @@ -141,6 +149,18 @@ ProbabilisticReasoning - Reason under uncertainty using Bayesian analysis val ui = task.ui val tabs = TabbedDisplay(task) + // Create transcript file + val transcript = initializeTranscript(task) + transcript?.let { stream -> + stream.write("# Probabilistic Reasoning Analysis Transcript\n\n".toByteArray()) + stream.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + stream.write("**Decision Context:** $decisionContext\n\n".toByteArray()) + stream.write("**Hypotheses:** ${hypotheses.size}\n\n".toByteArray()) + 
stream.write("**Evidence Items:** ${evidence.size}\n\n".toByteArray()) + stream.write("**Risk Tolerance:** ${executionConfig.risk_tolerance}\n\n".toByteArray()) + stream.write("---\n\n".toByteArray()) + writeInputFilesSection(stream, agent) + } // Overview tab val overviewTask = ui.newTask(false) @@ -167,6 +187,16 @@ ProbabilisticReasoning - Reason under uncertainty using Bayesian analysis } overviewTask.add(overviewContent.renderMarkdown) task.update() + val inputFileContent = getInputFileCode(agent) + if (inputFileContent.isNotBlank()) { + log.debug("Found input files: ${inputFileContent.length} characters") + val filesTask = ui.newTask(false) + tabs["Input Files"] = filesTask.placeholder + filesTask.add( + "# Input Files\n\n$inputFileContent".renderMarkdown + ) + task.update() + } val priorContext = getPriorCode(agent.executionState) if (priorContext.isNotBlank()) { @@ -185,6 +215,7 @@ ProbabilisticReasoning - Reason under uncertainty using Bayesian analysis task.update() } val resultBuilder = StringBuilder() + transcript try { // Prior Probabilities tab @@ -262,8 +293,14 @@ Consider both the strength of evidence and its reliability. val updateResult = bayesianAgent.answer(listOf(updatePrompt)) var stepTime = System.currentTimeMillis() - stepStartTime log.debug("Bayesian update completed in ${stepTime}ms: ${updateResult.length} characters") + // Write to transcript + transcript?.write("\n## Bayesian Update\n\n".toByteArray()) + transcript?.write("**Time:** ${stepTime / 1000.0}s\n\n".toByteArray()) + transcript?.write(updateResult.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + - updateTask.add( buildString { appendLine("## Analysis Results") @@ -320,8 +357,14 @@ Consider both the strength of evidence and its reliability. 
val evResult = bayesianAgent.answer(listOf(evPrompt)) stepTime = System.currentTimeMillis() - stepStartTime log.debug("Expected value analysis completed in ${stepTime}ms: ${evResult.length} characters") + // Write to transcript + transcript?.write("\n## Expected Value Analysis\n\n".toByteArray()) + transcript?.write("**Time:** ${stepTime / 1000.0}s\n\n".toByteArray()) + transcript?.write(evResult.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + - evTask.add( buildString { appendLine("## Expected Value & Risk Analysis") @@ -374,9 +417,15 @@ Consider both the strength of evidence and its reliability. val uncertaintyResult = bayesianAgent.answer(listOf(uncertaintyPrompt)) stepTime = System.currentTimeMillis() - stepStartTime - log.debug("Uncertainty analysis completed in ${stepTime}ms: ${uncertaintyResult .length} characters") + log.debug("Uncertainty analysis completed in ${stepTime}ms: ${uncertaintyResult.length} characters") + // Write to transcript + transcript?.write("\n## Key Uncertainties\n\n".toByteArray()) + transcript?.write("**Time:** ${stepTime / 1000.0}s\n\n".toByteArray()) + transcript?.write(uncertaintyResult.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + - uncertaintyTask.add( buildString { appendLine("## Critical Uncertainties") @@ -430,8 +479,14 @@ Consider both the strength of evidence and its reliability. 
val experimentResult = bayesianAgent.answer(listOf(experimentPrompt)) stepTime = System.currentTimeMillis() - stepStartTime log.debug("Experiment suggestions completed in ${stepTime}ms: ${experimentResult.length} characters") + // Write to transcript + transcript?.write("\n## Suggested Experiments\n\n".toByteArray()) + transcript?.write("**Time:** ${stepTime / 1000.0}s\n\n".toByteArray()) + transcript?.write(experimentResult.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + - experimentTask.add( buildString { appendLine("## Recommended Experiments") @@ -461,6 +516,13 @@ Consider both the strength of evidence and its reliability. val totalTime = System.currentTimeMillis() - startTime log.info("ProbabilisticReasoningTask completed: total_time=${totalTime}ms, hypotheses=${hypotheses.size}, evidence=${evidence.size}") + // Write final summary to transcript + transcript?.write("\n---\n\n".toByteArray()) + transcript?.write("## Analysis Complete\n\n".toByteArray()) + transcript?.write("**Total Time:** ${totalTime / 1000.0}s\n\n".toByteArray()) + transcript?.write("**Hypotheses Analyzed:** ${hypotheses.size}\n\n".toByteArray()) + transcript?.write("**Evidence Processed:** ${evidence.size}\n\n".toByteArray()) + transcript?.write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) // Final overview update overviewTask.add( @@ -484,10 +546,17 @@ Consider both the strength of evidence and its reliability. 
val finalResult = resultBuilder.toString() task.safeComplete("Completed Bayesian analysis of ${hypotheses.size} hypotheses in ${totalTime / 1000.0}s", log) resultFn(finalResult) + transcript?.close() } catch (e: Exception) { log.error("Error during probabilistic reasoning", e) task.error(e) + // Write error to transcript + transcript?.write("\n---\n\n".toByteArray()) + transcript?.write("## Error Occurred\n\n".toByteArray()) + transcript?.write("**Error:** ${e.message}\n\n".toByteArray()) + transcript?.write("**Type:** ${e.javaClass.simpleName}\n\n".toByteArray()) + transcript?.close() overviewTask.add( buildString { @@ -520,6 +589,75 @@ Consider both the strength of evidence and its reliability. } } + private fun getInputFileCode(agent: TaskOrchestrator): String { + return (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(agent.root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(agent.root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = agent.root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + } + + private fun writeInputFilesSection(stream: FileOutputStream, agent: TaskOrchestrator) { + try { + val inputFileContent = getInputFileCode(agent) + if (inputFileContent.isNotBlank()) { + stream.write("\n## Input Files\n\n".toByteArray(StandardCharsets.UTF_8)) + stream.write(inputFileContent.toByteArray(StandardCharsets.UTF_8)) + stream.write("\n\n".toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } + } catch (e: Exception) { + 
log.error("Failed to write input files section to transcript", e) + } + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? { + return try { + val (link, file) = task.createFile("reasoning_transcript.md") + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + private fun writeToTranscript(stream: FileOutputStream, content: String) { + try { + stream.write(content.toByteArray(StandardCharsets.UTF_8)) + stream.flush() + } catch (e: Exception) { + log.error("Failed to write to transcript", e) + } + } + private fun buildBayesianUpdatePrompt( hypotheses: Map, evidence: List, @@ -700,7 +838,7 @@ Provide: 5. **Decision Criteria**: When to stop testing and make a decision Generate the experiment recommendations now: - """.trimIndent() +""".trimIndent() } companion object { diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SocraticDialogueTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SocraticDialogueTask.kt index 344a8caf3..884f00f59 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SocraticDialogueTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SocraticDialogueTask.kt @@ -1,15 +1,15 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* -import com.simiacryptus.cognotik.util.LoggerFactory -import com.simiacryptus.cognotik.util.MarkdownUtil -import com.simiacryptus.cognotik.util.TabbedDisplay -import 
com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.util.* import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.nio.file.Path import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -26,6 +26,8 @@ class SocraticDialogueTask( class SocraticDialogueTaskExecutionConfigData( @Description("The initial question or hypothesis to explore") val initial_question: String? = null, + @Description("Optional input files (supports glob patterns) to provide context for the dialogue") + val input_files: List? = null, @Description("Maximum dialogue depth (number of question-answer exchanges)") val max_depth: Int = 5, @Description("Whether to challenge assumptions at each level") @@ -53,7 +55,17 @@ class SocraticDialogueTask( override fun promptSegment(): String { return """ -SocraticDialogue - Explore ideas through Socratic questioning + SocraticDialogue - Explore ideas through Socratic questioning + ** Specify the initial question or hypothesis to explore + ** Optionally provide input files (supports glob patterns) for context + ** Configure maximum dialogue depth (default: 5 exchanges) + ** Enable/disable assumption challenging + ** Optionally constrain to specific topics or domains + ** Creates a dialogue between questioner and responder agents + ** Explores definitions, assumptions, implications, and contradictions + ** Produces a structured dialogue transcript with insights + Available files: + ${getAvailableFiles(root).joinToString("\n") { " - $it" }} ** Specify the initial question or hypothesis to explore ** Configure maximum dialogue depth (default: 5 exchanges) ** Enable/disable assumption challenging @@ -64,6 +76,32 @@ SocraticDialogue - Explore ideas through Socratic questioning """.trimIndent() } + private fun getInputFileContext(): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> 
+ val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + "# $relativePath\n\n```\n${file.readText()}\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + override fun run( agent: TaskOrchestrator, messages: List, @@ -71,6 +109,7 @@ SocraticDialogue - Explore ideas through Socratic questioning resultFn: (String) -> Unit, orchestrationConfig: OrchestrationConfig ) { + val inputFileContext = getInputFileContext() val startTime = System.currentTimeMillis() log.info("Starting SocraticDialogueTask with initial question: '${executionConfig?.initial_question}'") // Validate configuration @@ -138,6 +177,9 @@ SocraticDialogue - Explore ideas through Socratic questioning if (priorContext.isNotBlank()) { log.debug("Found prior context from previous tasks: ${priorContext.length} characters") } + val combinedContext = listOfNotNull(priorContext, inputFileContext) + .filter { it.isNotBlank() } + .joinToString("\n\n---\n\n") // Create the Socratic questioner agent log.info("Creating Socratic questioner agent") @@ -181,6 +223,18 @@ Provide substantive, well-reasoned responses that advance the dialogue. 
val dialogueBuilder = StringBuilder() val fullDialogueBuilder = StringBuilder() + // Create transcript file + val (transcriptLink, transcriptStream) = createTranscriptFile(task) + val transcriptWriter = transcriptStream?.bufferedWriter() + transcriptWriter?.apply { + write("# Socratic Dialogue Transcript\n\n") + write("**Initial Question:** $initialQuestion\n\n") + write("**Domain Constraints:** $domainConstraints\n\n") + write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n") + write("---\n\n") + flush() + } + // Concise output for final result dialogueBuilder.append("# Socratic Dialogue Analysis\n\n") @@ -191,9 +245,14 @@ Provide substantive, well-reasoned responses that advance the dialogue. fullDialogueBuilder.append("## Initial Question\n\n") fullDialogueBuilder.append("$initialQuestion\n\n") - if (priorContext.isNotBlank()) { + if (combinedContext.isNotBlank()) { fullDialogueBuilder.append("## Context from Previous Tasks\n\n") - fullDialogueBuilder.append("$priorContext\n\n") + fullDialogueBuilder.append("$combinedContext\n\n") + transcriptWriter?.apply { + write("## Context from Previous Tasks\n\n") + write("$combinedContext\n\n") + flush() + } // Add context tab val contextTask = task.ui.newTask(false) tabs["Context"] = contextTask.placeholder @@ -201,7 +260,7 @@ Provide substantive, well-reasoned responses that advance the dialogue. buildString { appendLine("# Context from Previous Tasks") appendLine() - appendLine(priorContext) + appendLine(combinedContext) }.renderMarkdown ) task.update() @@ -269,6 +328,13 @@ Provide substantive, well-reasoned responses that advance the dialogue. 
fullDialogueBuilder.append("## Exchange $depth\n\n") fullDialogueBuilder.append("**Question:** $currentQuestion\n\n") fullDialogueBuilder.append("**Response:** $currentResponse\n\n") + transcriptWriter?.apply { + write("## Exchange $depth\n\n") + write("**Question:** $currentQuestion\n\n") + write("**Response:** $currentResponse\n\n") + flush() + } + // Store only key points in concise output if (depth == 1 || depth == maxDepth) { @@ -330,6 +396,11 @@ Provide only the question, without preamble. appendLine() }.renderMarkdown ) + transcriptWriter?.apply { + write("**Next Question:** $currentQuestion\n\n") + flush() + } + task.update() } val exchangeTime = System.currentTimeMillis() - exchangeStartTime @@ -395,6 +466,13 @@ Provide a structured synthesis. dialogueBuilder.append("## Key Insights\n\n") dialogueBuilder.append(synthesis) + transcriptWriter?.apply { + write("## Synthesis\n\n") + write(synthesis) + write("\n\n") + flush() + } + // Add summary statistics dialogueBuilder.append("\n\n---\n\n") @@ -426,6 +504,14 @@ Provide a structured synthesis. val totalTime = System.currentTimeMillis() - startTime val avgExchangeTime = if (exchangeTimes.isNotEmpty()) exchangeTimes.average() else 0.0 log.info("SocraticDialogueTask completed: total_time=${totalTime}ms, exchanges=$maxDepth, avg_exchange_time=${avgExchangeTime}ms, output_size=${finalResult.length} chars (full: ${fullDialogue.length} chars)") + transcriptWriter?.apply { + write("---\n\n") + write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n") + write("**Total Time:** ${totalTime / 1000.0}s | **Exchanges:** $maxDepth | **Avg Exchange Time:** ${avgExchangeTime / 1000.0}s\n") + flush() + close() + } + MarkdownUtil.renderMarkdown( """ @@ -457,11 +543,29 @@ Provide a structured synthesis. task.complete("Completed $maxDepth exchanges in ${totalTime / 1000}s. 
Concise analysis: ${finalResult.length} chars.") - resultFn(finalResult) + val summaryMessage = buildString { + appendLine(finalResult) + appendLine("\n---\n") + appendLine( + "Full dialogue transcript: $transcriptLink html pdf" + ) + } + resultFn(summaryMessage) } catch (e: Exception) { log.error("Error during Socratic dialogue", e) task.error(e) + transcriptWriter?.apply { + write("\n\n---\n\n## ❌ Error Occurred\n\n") + write("**Error:** ${e.message}\n\n") + flush() + close() + } + // Update overview with error overviewTask.add( @@ -497,6 +601,30 @@ Provide a structured synthesis. } } + private fun createTranscriptFile(task: SessionTask): Pair { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return Pair(link, markdownTranscript) + } + + private fun getAvailableFiles( + path: Path, + treatDocumentsAsText: Boolean = false, + ): List { + return try { + listOf(FileSelectionUtils.filteredWalkAsciiTree(path.toFile(), 20, treatDocumentsAsText = treatDocumentsAsText)) + } catch (e: Exception) { + log.error("Error listing available files", e) + listOf("Error listing files: ${e.message}") + } + } + + companion object { private val log: Logger = LoggerFactory.getLogger(SocraticDialogueTask::class.java) val SocraticDialogue = TaskType( @@ -517,4 +645,4 @@ Provide a structured synthesis. 
""" ) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SystemsThinkingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SystemsThinkingTask.kt index 637ced7af..c996b7b5a 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SystemsThinkingTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/SystemsThinkingTask.kt @@ -1,13 +1,15 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.time.LocalDateTime import java.time.format.DateTimeFormatter @@ -26,6 +28,8 @@ class SystemsThinkingTask( val system_description: String? = null, @Description("Whether to identify feedback loops (reinforcing and balancing)") val identify_feedback_loops: Boolean = true, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Whether to map delays and accumulations in the system") val map_delays: Boolean = true, @Description("Whether to find leverage points for intervention") @@ -79,6 +83,7 @@ SystemsThinking - Analyze complex systems through feedback loops and dynamics ) { val startTime = System.currentTimeMillis() log.info("Starting SystemsThinkingTask for system: '${executionConfig?.system_description}'") + var transcriptStream: FileOutputStream? 
= null val systemDescription = executionConfig?.system_description if (systemDescription.isNullOrBlank()) { @@ -93,6 +98,7 @@ SystemsThinking - Analyze complex systems through feedback loops and dynamics val ui = task.ui val tabs = TabbedDisplay(task) + transcriptStream = initializeTranscript(task) val overviewTask = ui.newTask(false) try { @@ -125,20 +131,33 @@ SystemsThinking - Analyze complex systems through feedback loops and dynamics appendLine("**Status:** 🔄 Gathering context...") }.renderMarkdown ) + transcriptStream?.write( + "# Systems Thinking Analysis\n\n**System:** $systemDescription\n\n**Time Horizon:** $timeHorizon\n\n**Started:** ${ + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) + }\n\n---\n\n".toByteArray() + ) task.update() // Gather context log.debug("Gathering context from prior tasks and related files") val priorContext = getPriorCode(agent.executionState) + val inputFileContext = getInputFileCode() val relatedContext = gatherRelatedFiles() - if (priorContext.isNotBlank() || relatedContext.isNotBlank()) { + if (priorContext.isNotBlank() || inputFileContext.isNotBlank() || relatedContext.isNotBlank()) { + transcriptStream?.write("## Context\n\n$priorContext\n\n$inputFileContext\n\n$relatedContext\n\n---\n\n".toByteArray()) val contextTask = ui.newTask(false) tabs["Context"] = contextTask.placeholder contextTask.add( buildString { appendLine("# Context") appendLine() + if (priorContext.isNotBlank()) { + appendLine("## Input Files") + appendLine() + appendLine(inputFileContext.truncateForDisplay()) + appendLine() + } if (priorContext.isNotBlank()) { appendLine("## Prior Task Results") appendLine() @@ -204,6 +223,7 @@ Provide a clear, structured analysis. appendLine(structureAnalysis) }.renderMarkdown ) + transcriptStream?.write("## System Structure\n\n$structureAnalysis\n\n---\n\n".toByteArray()) task.update() // Step 2: Feedback Loops @@ -240,6 +260,7 @@ Provide the analysis and diagram. 
) val mermaidCode = extractMermaidCode(loopsAnalysis) + transcriptStream?.write("## Feedback Loops\n\n$loopsAnalysis\n\n---\n\n".toByteArray()) loopsTask.add( buildString { appendLine("## Feedback Loops") @@ -297,6 +318,7 @@ Provide specific examples with estimated time scales. appendLine(delaysAnalysis) }.renderMarkdown ) + transcriptStream?.write("## Delays & Accumulations\n\n$delaysAnalysis\n\n---\n\n".toByteArray()) task.update() } @@ -342,6 +364,7 @@ Focus on the most relevant archetypes. appendLine(archetypesAnalysis) }.renderMarkdown ) + transcriptStream?.write("## System Archetypes\n\n$archetypesAnalysis\n\n---\n\n".toByteArray()) task.update() } @@ -378,6 +401,7 @@ Consider both positive and negative emergent behaviors. appendLine(emergentAnalysis) }.renderMarkdown ) + transcriptStream?.write("## Emergent Behavior\n\n$emergentAnalysis\n\n---\n\n".toByteArray()) task.update() } @@ -426,6 +450,7 @@ Focus on the most impactful leverage points. appendLine(leverageAnalysis) }.renderMarkdown ) + transcriptStream?.write("## Leverage Points\n\n$leverageAnalysis\n\n---\n\n".toByteArray()) task.update() } @@ -490,6 +515,7 @@ $simulationAnalysis simulationResults.forEach { appendLine(it) } }.renderMarkdown ) + transcriptStream?.write("## Intervention Simulation\n\n${simulationResults.joinToString("\n\n")}\n\n---\n\n".toByteArray()) task.update() } @@ -529,6 +555,7 @@ $simulationAnalysis appendLine(synthesis) }.renderMarkdown ) + transcriptStream?.write("## Synthesis & Recommendations\n\n$synthesis\n\n---\n\n".toByteArray()) task.update() // Build concise final result @@ -543,16 +570,18 @@ $simulationAnalysis appendLine() appendLine("---") appendLine() - appendLine("**Analysis Components:** ${ - listOfNotNull( - if (executionConfig.identify_feedback_loops) "Feedback Loops" else null, - if (executionConfig.map_delays) "Delays" else null, - if (executionConfig.find_leverage_points) "Leverage Points" else null, - if (executionConfig.identify_archetypes) "Archetypes" else 
null, - if (executionConfig.analyze_emergent_behavior) "Emergent Behavior" else null, - if (interventions.isNotEmpty()) "Intervention Simulation (${interventions.size})" else null - ).joinToString(", ") - }") + appendLine( + "**Analysis Components:** ${ + listOfNotNull( + if (executionConfig.identify_feedback_loops) "Feedback Loops" else null, + if (executionConfig.map_delays) "Delays" else null, + if (executionConfig.find_leverage_points) "Leverage Points" else null, + if (executionConfig.identify_archetypes) "Archetypes" else null, + if (executionConfig.analyze_emergent_behavior) "Emergent Behavior" else null, + if (interventions.isNotEmpty()) "Intervention Simulation (${interventions.size})" else null + ).joinToString(", ") + }" + ) } val duration = System.currentTimeMillis() - startTime @@ -571,16 +600,18 @@ $simulationAnalysis appendLine() appendLine("**Total Time:** ${duration / 1000.0}s") appendLine() - appendLine("**Components Analyzed:** ${ - listOfNotNull( - if (executionConfig.identify_feedback_loops) "Feedback Loops" else null, - if (executionConfig.map_delays) "Delays & Accumulations" else null, - if (executionConfig.find_leverage_points) "Leverage Points" else null, - if (executionConfig.identify_archetypes) "System Archetypes" else null, - if (executionConfig.analyze_emergent_behavior) "Emergent Behavior" else null, - if (interventions.isNotEmpty()) "Intervention Simulation" else null - ).size - }") + appendLine( + "**Components Analyzed:** ${ + listOfNotNull( + if (executionConfig.identify_feedback_loops) "Feedback Loops" else null, + if (executionConfig.map_delays) "Delays & Accumulations" else null, + if (executionConfig.find_leverage_points) "Leverage Points" else null, + if (executionConfig.identify_archetypes) "System Archetypes" else null, + if (executionConfig.analyze_emergent_behavior) "Emergent Behavior" else null, + if (interventions.isNotEmpty()) "Intervention Simulation" else null + ).size + }" + ) appendLine() if 
(interventions.isNotEmpty()) { appendLine("**Interventions Simulated:** ${interventions.size}") @@ -589,13 +620,26 @@ $simulationAnalysis appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") }.renderMarkdown ) + transcriptStream?.write( + "\n\n## Analysis Complete\n\n**Total Time:** ${duration / 1000.0}s\n\n**Completed:** ${ + LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")) + }\n".toByteArray() + ) task.update() - task.safeComplete("Systems thinking analysis completed in ${duration / 1000}s", log) + val (transcriptLink, _) = Pair(task.linkTo("systems_thinking_transcript.md"), task.resolve("systems_thinking_transcript.md")) + task.safeComplete( + "Systems thinking analysis completed in ${duration / 1000}s. " + + "View detailed transcript: markdown " + + "html " + + "pdf", + log + ) resultFn(finalResult.toString()) } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime + transcriptStream?.write("\n\n## Error Occurred\n\n**Error:** ${e.message}\n\n**Type:** ${e.javaClass.simpleName}\n".toByteArray()) log.error("SystemsThinkingTask failed after ${duration}ms for system: $systemDescription", e) task.error(e) @@ -621,6 +665,9 @@ $simulationAnalysis appendLine("**Error:** ${e.message}") } resultFn(errorOutput.toString()) + } finally { + transcriptStream?.flush() + transcriptStream?.close() } } @@ -696,12 +743,69 @@ Provide clear, actionable insights grounded in systems thinking principles. 
}.joinToString("\n\n") } + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun extractMermaidCode(response: String): String { val mermaidBlockRegex = "```mermaid\\s*([\\s\\S]*?)```".toRegex() val match = mermaidBlockRegex.find(response) return match?.groupValues?.get(1)?.trim() ?: "" } + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun initializeTranscript(task: SessionTask): FileOutputStream? 
{ + return try { + val (link, file) = Pair(task.linkTo("systems_thinking_transcript.md"), task.resolve("systems_thinking_transcript.md")) + val transcriptStream = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + log.info("Initialized transcript file: $link") + transcriptStream + } catch (e: Exception) { + log.error("Failed to initialize transcript", e) + null + } + } + + companion object { private val log: Logger = LoggerFactory.getLogger(SystemsThinkingTask::class.java) val SystemsThinking = TaskType( @@ -723,4 +827,4 @@ Provide clear, actionable insights grounded in systems thinking principles. """ ) } -} \ No newline at end of file +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/TemporalReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/TemporalReasoningTask.kt index 82cd8419e..4f471d7dc 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/TemporalReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/TemporalReasoningTask.kt @@ -1,7 +1,7 @@ package com.simiacryptus.cognotik.plan.tools.reasoning -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.util.LoggerFactory @@ -10,6 +10,7 @@ import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.FileOutputStream import java.nio.file.FileSystems import java.time.LocalDate import java.time.format.DateTimeFormatter @@ -31,6 +32,8 @@ class TemporalReasoningTask( val time_range: String? 
= null, @Description("Granularity of analysis: daily, weekly, monthly, quarterly, yearly") val granularity: String = "weekly", + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, @Description("Whether to identify temporal patterns and cycles") val identify_patterns: Boolean = true, @Description("Whether to predict future states") @@ -142,6 +145,7 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return val ui = task.ui + val transcript = transcript(task) try { // Create tabbed display for organized output @@ -165,6 +169,24 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat """.trimMargin(), ui = ui ) ) + transcript?.write( + """ + |# Temporal Reasoning Analysis + | + |**Subject:** $subject + | + |**Time Range:** $timeRange + | + |**Granularity:** ${executionConfig.granularity} + | + |**Started:** ${java.time.LocalDateTime.now()} + | + |--- + | + |## Gathering Temporal Data + | + """.trimMargin().toByteArray() + ) task.update() // Gather temporal data from files @@ -179,6 +201,17 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat val temporalData = gatherTemporalData() log.debug("Temporal data gathered: ${temporalData.length} characters") dataLoading?.clear() + transcript?.write( + """ + | + |### Data Sources Processed: ${executionConfig?.related_files?.size ?: 0} + | + |${temporalData.truncateForDisplay(maxOutputLength)} + | + |--- + | + """.trimMargin().toByteArray() + ) dataTask.add( MarkdownUtil.renderMarkdown( """ @@ -248,6 +281,16 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat val timelineAnalysis = timelineAgent.answer(listOf(timelinePrompt)).obj log.debug("Timeline constructed with ${timelineAnalysis.timeline_events.size} events") + transcript?.write( + """ + | + |## 
Timeline Construction Complete + | + |**Events Identified:** ${timelineAnalysis.timeline_events.size} + | + |${formatTimeline(timelineAnalysis.timeline_events)} + """.trimMargin().toByteArray() + ) timelineLoading?.clear() timelineTask.add( @@ -269,6 +312,16 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat if (executionConfig.identify_patterns && !timelineAnalysis.patterns.isNullOrEmpty()) { log.debug("Analyzing temporal patterns") val patternsTask = ui.newTask(false) + transcript?.write( + """ + | + |## Temporal Patterns Analysis + | + |**Patterns Found:** ${timelineAnalysis.patterns.size} + | + |${formatPatterns(timelineAnalysis.patterns)} + """.trimMargin().toByteArray() + ) tabs["Patterns"] = patternsTask.placeholder patternsTask.add( MarkdownUtil.renderMarkdown( @@ -290,6 +343,15 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat if (executionConfig.analyze_rate_of_change && !timelineAnalysis.rate_of_change_analysis.isNullOrBlank()) { log.debug("Analyzing rate of change") val rateTask = ui.newTask(false) + transcript?.write( + """ + | + |## Rate of Change Analysis + | + |${timelineAnalysis.rate_of_change_analysis} + | + """.trimMargin().toByteArray() + ) tabs["Rate of Change"] = rateTask.placeholder rateTask.add( MarkdownUtil.renderMarkdown( @@ -309,6 +371,14 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat if (executionConfig.identify_transitions && !timelineAnalysis.transition_points.isNullOrEmpty()) { log.debug("Identifying critical transition points") val transitionsTask = ui.newTask(false) + transcript?.write( + """ + | + |## Critical Transition Points + | + |${formatTransitions(timelineAnalysis.transition_points)} + """.trimMargin().toByteArray() + ) tabs["Transition Points"] = transitionsTask.placeholder transitionsTask.add( MarkdownUtil.renderMarkdown( @@ -330,6 +400,14 @@ TemporalReasoning - Analyze how systems evolve over time and predict future 
stat if (executionConfig.predict_future && !timelineAnalysis.future_predictions.isNullOrEmpty()) { log.debug("Generating future predictions") val predictionsTask = ui.newTask(false) + transcript?.write( + """ + | + |## Future State Predictions + | + |${formatPredictions(timelineAnalysis.future_predictions)} + """.trimMargin().toByteArray() + ) tabs["Future Predictions"] = predictionsTask.placeholder predictionsTask.add( MarkdownUtil.renderMarkdown( @@ -368,6 +446,17 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat vizLoading?.clear() if (mermaidCode.isNotEmpty()) { + transcript?.write( + """ + | + |## Timeline Visualization + | + |```mermaid + |$mermaidCode + |``` + | + """.trimMargin().toByteArray() + ) vizTask.add( MarkdownUtil.renderMarkdown( """ @@ -398,6 +487,17 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat // Generate final summary val summary = buildSummary(timelineAnalysis, subject, timeRange) + transcript?.write( + """ + | + |--- + | + |## Summary + | + |$summary + | + """.trimMargin().toByteArray() + ) // Update overview with completion overviewStatus?.clear() @@ -427,6 +527,7 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat val duration = System.currentTimeMillis() - startTime val completionMsg = "Temporal analysis completed for '$subject' over $timeRange" log.info("$completionMsg (duration: ${duration}ms, events: ${timelineAnalysis.timeline_events.size}, patterns: ${timelineAnalysis.patterns?.size ?: 0})") + transcript?.close() task.safeComplete(completionMsg, log) resultFn(summary) @@ -434,6 +535,7 @@ TemporalReasoning - Analyze how systems evolve over time and predict future stat } catch (e: Exception) { val duration = System.currentTimeMillis() - startTime log.error("TemporalReasoning task failed after ${duration}ms for subject: $subject", e) + transcript?.close() task.error(e) val errorTask = ui.newTask(false) @@ -448,6 +550,20 @@ 
TemporalReasoning - Analyze how systems evolve over time and predict future stat } } + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun buildTimelinePrompt( subject: String, timeRange: String, @@ -549,15 +665,16 @@ Generate the Mermaid timeline diagram now: } private fun gatherTemporalData(): String { - val relatedFiles = executionConfig?.related_files ?: emptyList() + val inputFiles = (executionConfig?.input_files ?: emptyList()) + + (executionConfig?.related_files ?: emptyList()) - if (relatedFiles.isEmpty()) { + if (inputFiles.isEmpty()) { return "No specific temporal data files provided." } val maxFileSize = 2000 - return relatedFiles.flatMap { pattern -> + return inputFiles.flatMap { pattern -> val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") root.toFile().walkTopDown() .filter { file -> @@ -577,6 +694,30 @@ Generate the Mermaid timeline diagram now: }.joinToString("\n\n") } + private fun getInputFileCode(): String { + val patterns = (executionConfig?.input_files ?: listOf()) + if (patterns.isEmpty()) { + return "" + } + return patterns.flatMap { pattern -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + root.toFile().walkTopDown() + .filter { file -> file.isFile && matcher.matches(root.relativize(file.toPath())) } + .map { it.toPath() } + .toList() + }.distinct().sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath.toString()) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + } + private fun formatTimeline(events: List): String { return buildString { appendLine() diff --git 
a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/tests.md b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/tests.md deleted file mode 100644 index efb6414b4..000000000 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/tests.md +++ /dev/null @@ -1,1249 +0,0 @@ -# Plain Language Reasoning Prompts for General Audiences - -## 1. AbstractionLadder - -### Prompt 1: Social Media Addiction - -"I want to understand social media addiction by starting with the concrete behavior of checking Instagram every 5 minutes. Take me up the abstraction ladder to -understand the broader patterns, then back down to see other specific examples of the same underlying issue." - -**Configuration hints:** - -- concrete_concept: "Checking Instagram every 5 minutes" -- direction: "both" -- levels: 4 -- identify_patterns: true - -### Prompt 2: Climate Action - -"Start with the specific action of someone choosing to bike to work instead of driving. I want to go up the abstraction ladder to understand what this -represents at higher levels of thinking, and then come back down to see what other concrete actions fit the same pattern." - -**Configuration hints:** - -- concrete_concept: "Choosing to bike to work instead of driving" -- direction: "both" -- levels: 3 -- identify_patterns: true - -### Prompt 3: Political Polarization - -"Begin with the concrete example of someone unfriending a family member on Facebook over political disagreements. Take me up to understand the broader social -patterns this represents, then show me other specific manifestations of the same phenomenon." - -**Configuration hints:** - -- concrete_concept: "Unfriending family members over political disagreements on social media" -- direction: "both" -- levels: 4 -- identify_patterns: true - ---- - -## 2. 
AnalogicalReasoning - -### Prompt 1: Education Reform - -"Use the way Netflix recommends shows based on viewing history as an analogy to help me think about how we could personalize education for students. Find me 3 -different analogies from the entertainment industry that could inspire new approaches to teaching." - -**Configuration hints:** - -- source_domain: "Netflix recommendation algorithms and personalized entertainment" -- target_problem: "Creating personalized education systems that adapt to individual student needs" -- num_analogies: 3 -- validate_mappings: true - -### Prompt 2: Healthcare Access - -"Think about how Uber solved the taxi problem - making rides available on-demand with transparent pricing. Use this and similar analogies to help me explore -solutions for making healthcare more accessible and affordable. Give me 3 different analogies from the transportation or delivery industries." - -**Configuration hints:** - -- source_domain: "Uber and on-demand transportation/delivery services" -- target_problem: "Making healthcare more accessible, affordable, and convenient for everyone" -- num_analogies: 3 -- validate_mappings: true - -### Prompt 3: Democracy and Voting - -"Use how Wikipedia is created and maintained by volunteers as an analogy for thinking about citizen participation in democracy. Find me 3 analogies from -collaborative online platforms that could inspire new ways for citizens to participate in governance." - -**Configuration hints:** - -- source_domain: "Wikipedia and collaborative online platforms" -- target_problem: "Increasing meaningful citizen participation in democratic governance" -- num_analogies: 3 -- validate_mappings: true - ---- - -## 3. CausalInference - -### Prompt 1: Rising Mental Health Issues - -"Young adult depression and anxiety rates have doubled in the last decade. Help me figure out what's actually causing this. 
Consider these potential causes: -social media use, economic uncertainty, academic pressure, reduced in-person socialization, climate anxiety, and pandemic effects. Build a causal graph and -identify which factors are truly driving the increase versus which are just correlated." - -**Configuration hints:** - -- observed_effect: "Depression and anxiety rates in young adults doubled in the last decade" -- -potential_causes: ["Social media use", "Economic uncertainty and job market stress", "Academic pressure and student debt", "Reduced face-to-face socialization", "Climate change anxiety", "COVID-19 pandemic effects"] -- build_causal_graph: true -- identify_confounders: true - -### Prompt 2: Declining Birth Rates - -"Birth rates are falling dramatically in developed countries. What's really causing this? Consider: women's education and career opportunities, cost of -childcare and housing, changing cultural values, economic instability, environmental concerns, and access to contraception. I need to understand which are -actual causes versus which are just things that happen to correlate." - -**Configuration hints:** - -- observed_effect: "Birth rates declining significantly in developed nations" -- -potential_causes: ["Women's increased education and career opportunities", "High cost of childcare and housing", "Shifting cultural values about parenthood", "Economic instability and uncertainty", "Environmental concerns about overpopulation", "Widespread access to contraception"] -- build_causal_graph: true -- identify_confounders: true - -### Prompt 3: Political Polarization - -"Political polarization in America has increased dramatically. What's actually causing people to become more extreme and less willing to compromise? Consider: -social media echo chambers, cable news, economic inequality, geographic sorting, loss of local news, and partisan gerrymandering. Help me distinguish true -causes from things that are just symptoms or correlations." 
- -**Configuration hints:** - -- observed_effect: "Dramatic increase in political polarization and unwillingness to compromise" -- -potential_causes: ["Social media echo chambers and algorithmic filtering", "Partisan cable news networks", "Growing economic inequality", "Geographic sorting (liberals and conservatives living in separate areas)", "Decline of local journalism", "Partisan gerrymandering"] -- build_causal_graph: true -- identify_confounders: true - ---- - -## 4. ChainOfThought - -### Prompt 1: Universal Basic Income - -"Walk me through the reasoning step-by-step: Would implementing a Universal Basic Income of $1,000/month for all adults improve society overall? Consider -economic effects, work incentives, poverty reduction, inflation, funding mechanisms, and social impacts. Validate each step of reasoning before moving to the -next." - -**Configuration hints:** - -- problem_statement: "Would implementing Universal Basic Income of $1,000/month for all adults improve society overall? Consider economic effects, work - incentives, poverty reduction, inflation, funding mechanisms, and social impacts." -- reasoning_depth: null -- validate_steps: true - -### Prompt 2: Artificial Intelligence Regulation - -"Think through this step-by-step: Should governments heavily regulate AI development now, or wait until we better understand the technology? Consider innovation -speed, safety risks, competitive dynamics between nations, unintended consequences of regulation, and the difficulty of regulating something we don't fully -understand yet. Validate your reasoning at each step." - -**Configuration hints:** - -- problem_statement: "Should governments implement heavy regulation of AI development now, or wait until we better understand the technology? Consider - innovation vs. safety, international competition, unintended consequences, and the challenge of regulating emerging technology." 
-- reasoning_depth: null -- validate_steps: true - -### Prompt 3: College Education Value - -"Reason through this carefully: Is a traditional 4-year college degree still worth the cost for most people? Consider: student debt levels ($30k-100k+), -opportunity cost of 4 years, changing job market, alternative education paths, signaling value of degrees, and lifetime earnings differences. Validate each -reasoning step." - -**Configuration hints:** - -- problem_statement: "Is a traditional 4-year college degree still worth the cost for most people? Consider student debt ($30k-100k+), opportunity cost, - changing job market, alternative education, signaling value, and lifetime earnings." -- reasoning_depth: null -- validate_steps: true - ---- - -## 5. ConstraintSatisfaction - -### Prompt 1: Career Change Decision - -"Help me decide on a career change. I must: keep income above $60k/year, stay in my current city, work no more than 45 hours/week, and start within 6 months. -I'd prefer to: maximize work-life balance (weight: 0.9), do meaningful work (weight: 0.85), have growth potential (weight: 0.8), and use my existing skills ( -weight: 0.7). Find the best career path that satisfies these constraints." - -**Configuration hints:** - -- problem_description: "Choosing a new career path" -- hard_constraints: ["Minimum income $60,000/year", "Must stay in current city", "Maximum 45 hours/week", "Can start within 6 months"] -- soft_constraints: {"Maximize work-life balance": 0.9, "Meaningful/purposeful work": 0.85, "Strong growth potential": 0.8, "Leverage existing skills": 0.7} -- search_strategy: "backtracking" - -### Prompt 2: Retirement Location - -"I'm choosing where to retire. Must have: affordable cost of living (under $3k/month), good healthcare access, safe neighborhood, and mild climate (no harsh -winters). 
I'd prefer to: be near family (weight: 0.9), have cultural activities (weight: 0.7), be in a walkable area (weight: 0.8), and have an active senior -community (weight: 0.75). Find the best location." - -**Configuration hints:** - -- problem_description: "Selecting retirement location" -- hard_constraints: ["Cost of living under $3,000/month", "Access to quality healthcare", "Low crime rate", "Mild climate without harsh winters"] -- soft_constraints: {"Proximity to family": 0.9, "Cultural activities and amenities": 0.7, "Walkable neighborhood": 0.8, "Active senior community": 0.75} -- search_strategy: "backtracking" - -### Prompt 3: Family Vacation Planning - -"Plan our family vacation. Must: fit $4,000 budget, accommodate 2 adults and 3 kids (ages 5-12), be reachable in one day of travel, and happen in July. We'd -prefer to: maximize kid-friendly activities (weight: 0.9), have educational value (weight: 0.7), include outdoor activities (weight: 0.8), and have some adult -relaxation time (weight: 0.75). Find the optimal destination and plan." - -**Configuration hints:** - -- problem_description: "Planning family vacation" -- hard_constraints: ["Total budget $4,000", "Suitable for ages 5-12", "Reachable in one day of travel", "Available in July"] -- soft_constraints: {"Kid-friendly activities": 0.9, "Educational value": 0.7, "Outdoor activities": 0.8, "Adult relaxation opportunities": 0.75} -- search_strategy: "forward" - ---- - -## 6. CounterfactualAnalysis - -### Prompt 1: Social Media Impact - -"We've had widespread social media for 15 years now, and we see increased anxiety, polarization, and attention problems. Analyze what would have happened if: 1) -Social media never became popular, 2) Social media remained chronological without algorithms, 3) Social media was age-restricted to 16+, 4) Social media -companies were held liable for content. Keep constant: same technology level, same internet access, same smartphone adoption." 
- -**Configuration hints:** - -- actual_scenario: "15 years of widespread social media with algorithmic feeds, available to all ages, with platform liability protections. Observable effects: - increased anxiety, political polarization, and attention problems." -- -counterfactuals: ["Social media never became popular", "Social media remained chronological without algorithmic curation", "Social media was age-restricted to 16+", "Social media companies held liable for harmful content"] -- compare_outcomes: true -- control_factors: ["Same technology level", "Same internet access", "Same smartphone adoption", "Same time period"] - -### Prompt 2: College Debt Crisis - -"We have $1.7 trillion in student loan debt affecting 45 million Americans. What would have happened if: 1) College remained affordable like in the 1970s, 2) We -had free community college for everyone, 3) Income-share agreements replaced loans, 4) Trade schools were promoted equally to universities. Keep constant: same -number of people seeking higher education, same job market, same technology changes." - -**Configuration hints:** - -- actual_scenario: "$1.7 trillion in student debt affecting 45 million Americans, with college costs rising 8x faster than wages since 1980." -- -counterfactuals: ["College costs remained at 1970s levels relative to income", "Free community college for all students", "Income-share agreements instead of traditional loans", "Trade schools promoted equally to universities"] -- compare_outcomes: true -- control_factors: ["Same number seeking higher education", "Same job market evolution", "Same technological changes"] - -### Prompt 3: Remote Work Revolution - -"COVID-19 forced a massive shift to remote work. Now many companies are mandating return to office. What would have happened if: 1) Remote work never became -widespread, 2) Companies embraced permanent remote-first policies, 3) We adopted hybrid 2-3 days in office, 4) Different industries made different choices. 
Keep -constant: same technology capabilities, same housing costs, same family situations." - -**Configuration hints:** - -- actual_scenario: "COVID-19 forced remote work experiment. Now many companies mandating return to office, creating tension and turnover." -- -counterfactuals: ["Remote work never became widespread (no pandemic)", "Companies embraced permanent remote-first policies", "Industry standard became hybrid 2-3 days in office", "Different industries made different choices based on work nature"] -- compare_outcomes: true -- control_factors: ["Same technology capabilities", "Same housing costs", "Same family situations", "Same worker preferences"] - ---- - -## 7. DecompositionSynthesis - -### Prompt 1: Solving Homelessness - -"Break down the complex problem of solving homelessness in a major city. Consider all the interconnected issues: mental health, addiction, affordable housing -shortage, job access, healthcare, criminal records, family breakdown, and systemic poverty. Decompose this into manageable subproblems, solve each one, then -synthesize a comprehensive solution. Use functional decomposition." - -**Configuration hints:** - -- complex_problem: "Solving homelessness in a major city, addressing mental health, addiction, affordable housing, employment, healthcare access, criminal - records, family breakdown, and systemic poverty" -- decomposition_strategy: "functional" -- max_depth: 3 -- synthesize_solution: true -- validate_coherence: true - -### Prompt 2: Reducing Carbon Emissions - -"Break down the massive challenge of reducing global carbon emissions by 50% in 10 years. This involves transportation, energy production, agriculture, -manufacturing, buildings, and changing consumer behavior across billions of people. Decompose this into solvable pieces, address each one, then synthesize a -coherent global strategy. Use hierarchical decomposition." 
- -**Configuration hints:** - -- complex_problem: "Reducing global carbon emissions by 50% within 10 years, addressing transportation, energy production, agriculture, manufacturing, - buildings, and consumer behavior worldwide" -- decomposition_strategy: "hierarchical" -- max_depth: 4 -- synthesize_solution: true -- validate_coherence: true - -### Prompt 3: Reforming Education System - -"Break down the challenge of reforming the K-12 education system to prepare students for the 21st century. This involves curriculum design, teacher training, -technology integration, assessment methods, equity issues, funding, parental involvement, and adapting to AI. Decompose into manageable parts, solve each, then -synthesize a complete reform plan. Use temporal decomposition." - -**Configuration hints:** - -- complex_problem: "Reforming K-12 education for the 21st century: curriculum, teacher training, technology, assessment, equity, funding, parental involvement, - and AI adaptation" -- decomposition_strategy: "temporal" -- max_depth: 3 -- synthesize_solution: true -- validate_coherence: true - ---- - -## 8. MetaCognitiveReflection - -### Prompt 1: Critique Climate Change Skepticism - -"Reflect on the reasoning behind climate change skepticism. Examine the assumptions, identify cognitive biases, explore what alternative evidence might exist, -assess the confidence levels, check for logical fallacies, and identify gaps in the reasoning. Suggest improvements to the thinking process." - -**Configuration hints:** - -- subject_task_id: "climate_skepticism_reasoning" -- reflection_aspects: ["assumptions", "biases", "alternatives", "confidence", "logic", "completeness"] -- suggest_improvements: true -- identify_gaps: true -- evaluate_confidence: true - -### Prompt 2: Critique Meritocracy Belief - -"Reflect on the belief that 'America is a meritocracy where anyone can succeed through hard work.' 
Examine underlying assumptions, identify biases in this -thinking, explore alternative perspectives, assess how confident we should be in this claim, check the logic, and identify what's missing from this analysis." - -**Configuration hints:** - -- subject_task_id: "meritocracy_belief_analysis" -- reflection_aspects: ["assumptions", "biases", "alternatives", "confidence", "logic", "completeness"] -- suggest_improvements: true -- identify_gaps: true -- evaluate_confidence: true - -### Prompt 3: Critique Free Speech Absolutism - -"Reflect on the position that 'free speech should be absolute with no restrictions.' Examine the assumptions behind this view, identify cognitive biases, -explore alternative frameworks, assess confidence levels, check for logical consistency, and identify gaps in the reasoning." - -**Configuration hints:** - -- subject_task_id: "free_speech_absolutism_position" -- reflection_aspects: ["assumptions", "biases", "alternatives", "confidence", "logic", "completeness"] -- suggest_improvements: true -- identify_gaps: true -- evaluate_confidence: true - ---- - -## 9. MultiPerspectiveAnalysis - -### Prompt 1: Legalizing Marijuana - -"Analyze marijuana legalization from multiple perspectives: public health, criminal justice, personal freedom, economic impact, social equity, and youth -protection. Synthesize these viewpoints into a unified recommendation. Use a consensus threshold of 0.7." 
- -**Configuration hints:** - -- analysis_subject: "Legalizing marijuana nationwide" -- -perspectives: ["public health and addiction", "criminal justice and incarceration", "personal freedom and liberty", "economic impact and tax revenue", "social equity and racial justice", "youth protection and access"] -- synthesize: true -- consensus_threshold: 0.7 - -### Prompt 2: Immigration Policy - -"Analyze immigration policy from these perspectives: economic impact on jobs and wages, humanitarian obligations, national security, cultural integration, -fiscal costs and benefits, and labor market needs. Synthesize into a coherent policy recommendation. Use consensus threshold of 0.65." - -**Configuration hints:** - -- analysis_subject: "Comprehensive immigration reform policy" -- -perspectives: ["economic impact on jobs and wages", "humanitarian obligations and asylum", "national security concerns", "cultural integration and social cohesion", "fiscal costs and tax contributions", "labor market needs and shortages"] -- synthesize: true -- consensus_threshold: 0.65 - -### Prompt 3: Universal Healthcare - -"Analyze universal healthcare from these angles: healthcare outcomes and quality, economic costs and efficiency, personal choice and freedom, business -competitiveness, innovation in medicine, and equity of access. Synthesize these perspectives into a unified conclusion. Use consensus threshold of 0.7." - -**Configuration hints:** - -- analysis_subject: "Implementing universal healthcare system" -- -perspectives: ["healthcare outcomes and quality of care", "economic costs and efficiency", "personal choice and freedom", "business competitiveness and labor mobility", "medical innovation and research", "equity and access to care"] -- synthesize: true -- consensus_threshold: 0.7 - ---- - -## 10. SocraticDialogue - -### Prompt 1: Nature of Happiness - -"Explore through Socratic questioning: What is happiness, and can we choose to be happy? 
Start with the question 'Is happiness something we find or something we -create?' Challenge assumptions about happiness being dependent on external circumstances versus internal mindset. Go 6 exchanges deep." - -**Configuration hints:** - -- initial_question: "Is happiness something we find or something we create?" -- max_depth: 6 -- challenge_assumptions: true -- domain_constraints: ["psychology", "philosophy", "well-being"] - -### Prompt 2: Justice and Fairness - -"Use Socratic dialogue to explore: What makes something 'fair' or 'just'? Start with 'Should everyone get equal outcomes, or equal opportunities?' Challenge -assumptions about equality, merit, need, and desert. Go 7 exchanges deep." - -**Configuration hints:** - -- initial_question: "Should everyone get equal outcomes, or equal opportunities?" -- max_depth: 7 -- challenge_assumptions: true -- domain_constraints: ["ethics", "political philosophy", "social justice"] - -### Prompt 3: Free Will - -"Explore through Socratic questioning: Do we have free will, or are our choices determined by factors beyond our control? Start with 'If our brains are physical -systems following natural laws, how can we have free will?' Challenge assumptions about consciousness, choice, and responsibility. Go 6 exchanges deep." - -**Configuration hints:** - -- initial_question: "If our brains are physical systems following natural laws, how can we have free will?" -- max_depth: 6 -- challenge_assumptions: true -- domain_constraints: ["philosophy", "neuroscience", "ethics"] - ---- - -## 11. Brainstorming - -### Prompt 1: Reducing Food Waste - -"I need creative solutions for reducing food waste in my household. We throw away about 30% of our groceries. Brainstorm 7-10 diverse options ranging from -practical to innovative. Include some unconventional ideas. Analyze each option's pros, cons, feasibility, and impact." 
-**Configuration hints:** - -- problem_statement: "Reducing household food waste - currently throwing away ~30% of groceries" -- target_option_count: 8 -- categories: ["storage solutions", "meal planning", "technology", "behavior change", "community solutions"] -- constraints: ["Must be implementable by average household", "Should not require major lifestyle changes"] -- include_creative_options: true -- analysis_depth: "moderate" - -### Prompt 2: Improving Local Community - -"Our neighborhood feels disconnected and people don't know each other. Brainstorm ways to build community and increase neighborly interaction. Generate 10 -options from simple to ambitious. Include both traditional and creative approaches. Analyze feasibility and potential impact of each." -**Configuration hints:** - -- problem_statement: "Building stronger community connections in a disconnected suburban neighborhood" -- target_option_count: 10 -- categories: ["events and gatherings", "shared spaces", "digital platforms", "regular activities", "infrastructure"] -- constraints: ["Must work in suburban setting", "Should appeal to diverse age groups", "Limited budget available"] -- include_creative_options: true -- analysis_depth: "moderate" - -### Prompt 3: Career Transition Strategy - -"I'm a 35-year-old accountant who wants to transition into a more creative field but need to maintain income. Brainstorm 6-8 realistic transition strategies. -Focus on practical, proven approaches but include one or two innovative options. Analyze risks and requirements for each path." 
-**Configuration hints:** - -- problem_statement: "Career transition from accounting to creative field while maintaining income stability" -- target_option_count: 7 -- categories: ["gradual transition", "education/training", "freelance/side hustle", "industry pivot", "entrepreneurship"] -- constraints: ["Must maintain current income level", "Prefer transition within 2-3 years", "Limited time for retraining"] -- include_creative_options: false -- analysis_depth: "detailed" - ---- - -## 12. GameTheory - -### Prompt 1: Salary Negotiation - -"I'm negotiating salary for a new job. The company has a budget but wants to pay less. I want maximum salary but don't want to lose the offer. Both sides have -incomplete information about the other's limits. Analyze this as a game theory problem. What's my optimal strategy? Should I reveal my current salary? Make the -first offer? What are the Nash equilibria?" -**Configuration hints:** - -- game_scenario: "Job salary negotiation between candidate and employer, both trying to maximize their outcome while reaching agreement" -- players: ["Job Candidate", "Hiring Manager/Company"] -- game_type: "non-cooperative" -- build_payoff_matrix: true -- find_nash_equilibria: true -- analyze_dominant_strategies: true -- provide_recommendations: true -- additional_context: "Incomplete information game - neither party knows the other's true reservation price. Candidate risks losing offer if demands too high. - Company risks losing candidate if offers too low." - -### Prompt 2: Climate Change Cooperation - -"Countries face a climate change dilemma: everyone benefits if all reduce emissions, but each country benefits most by not reducing while others do (free-rider -problem). Analyze this as a game theory problem. What are the Nash equilibria? How can cooperation be sustained? What role do repeated interactions and -reputation play?" 
-**Configuration hints:** - -- game_scenario: "International climate cooperation - countries must decide whether to reduce emissions (costly) or continue polluting (beneficial short-term)" -- players: ["Developed Nations", "Developing Nations", "Major Polluters"] -- game_type: "non-cooperative" -- build_payoff_matrix: true -- find_nash_equilibria: true -- find_pareto_optimal: true -- repeated_game_analysis: true -- iterations: 20 -- provide_recommendations: true -- additional_context: "Classic tragedy of the commons. Individual incentive to defect but collective benefit from cooperation. Repeated game with reputation - effects." - -### Prompt 3: Social Media Platform Competition - -"Two social media platforms (like Twitter and Threads) compete for users. Users prefer platforms where their friends are (network effects). Each platform -decides whether to allow easy data portability or lock users in. Analyze the strategic dynamics. What are the equilibria? Should platforms cooperate on -interoperability?" -**Configuration hints:** - -- game_scenario: "Social media platform competition with network effects - platforms decide on data portability and interoperability strategies" -- players: ["Incumbent Platform", "New Challenger Platform"] -- player_strategies: { - "Incumbent Platform": ["Allow data portability", "Lock-in users", "Selective interoperability"], - "New Challenger Platform": ["Full interoperability", "Closed ecosystem", "Gradual opening"] - } -- game_type: "non-cooperative" -- build_payoff_matrix: true -- find_nash_equilibria: true -- analyze_dominant_strategies: true -- provide_recommendations: true -- additional_context: "Strong network effects create winner-take-all dynamics. First-mover advantage vs. late-mover learning. Regulatory pressure for - interoperability." - ---- - -## 13. FiniteStateMachine - -### Prompt 1: Online Dating Journey - -"Model the journey of someone using a dating app as a finite state machine. 
Start from creating a profile through various stages of matching, messaging, dating, -and potential outcomes. Include error states like ghosting, catfishing, or burnout. Identify all possible states and transitions. Generate test scenarios for -different user journeys." -**Configuration hints:** - -- concept_to_model: "User journey through online dating app from profile creation to relationship outcome" -- domain_context: "Online dating and relationship formation" -- initial_states: ["No Profile"] -- known_events: ["Create profile", "Get match", "Send message", "Receive reply", "Schedule date", "Go on date", "Get ghosted", "Delete app"] -- identify_edge_cases: true -- validate_properties: true -- generate_test_scenarios: true - -### Prompt 2: Job Application Process - -"Model the job application process as a finite state machine. From seeing a job posting through applying, interviewing, negotiating, and final outcomes. Include -rejection states, ghosting, offer rescinding, and candidate withdrawal. Identify all states and transitions. Validate that the FSM is complete and handles all -edge cases." -**Configuration hints:** - -- concept_to_model: "Job application and hiring process from candidate perspective" -- domain_context: "Employment and recruitment" -- initial_states: ["Job Seeker"] -- -known_events: ["See job posting", "Submit application", "Get screening call", "Complete interview", "Receive offer", "Get rejected", "Withdraw application", "Negotiate offer"] -- identify_edge_cases: true -- validate_properties: true -- generate_test_scenarios: true - -### Prompt 3: Subscription Service Lifecycle - -"Model a customer's lifecycle with a subscription service (like Netflix or Spotify) as a finite state machine. Include trial periods, active subscription, -payment failures, pausing, cancellation, win-back attempts, and reactivation. Identify all states, transitions, and edge cases. Generate test scenarios for -different customer journeys." 
-**Configuration hints:** - -- concept_to_model: "Customer lifecycle with subscription service from trial to cancellation and potential reactivation" -- domain_context: "Subscription business model and customer retention" -- initial_states: ["Prospect"] -- -known_events: ["Start trial", "Convert to paid", "Payment succeeds", "Payment fails", "Pause subscription", "Cancel subscription", "Reactivate", "Receive win-back offer"] -- identify_edge_cases: true -- validate_properties: true -- generate_test_scenarios: true - - -# Test Prompts for New Reasoning Types - -## 1. TemporalReasoningTask - -### Prompt 1: Technical Debt Accumulation - -"I want to understand how technical debt has accumulated in our codebase over the past 2 years. Analyze the period from January 2022 to December 2023, looking at weekly changes. Identify patterns in when debt increases (after releases? during crunch time?), calculate the rate of accumulation, find critical transition points where debt spiked, and predict where we'll be in 6 months if trends continue." - -**Configuration hints:** -- subject: "Technical debt in main codebase" -- time_range: "2022-01-01 to 2023-12-31" -- granularity: "weekly" -- identify_patterns: true -- predict_future: true -- prediction_horizon: "6 months" -- analyze_rate_of_change: true -- identify_transitions: true -- related_files: ["**/git-log.txt", "**/code-quality-metrics.csv"] - -### Prompt 2: Performance Degradation Timeline - -"Our API response times have gotten worse over the last year. Analyze the timeline from Q1 2023 to Q4 2023 on a monthly basis. I need to see when performance started degrading, identify any sudden drops, understand if there are cyclical patterns (worse during business hours? end of month?), and predict when we'll hit our SLA limits if this continues." 
- -**Configuration hints:** -- subject: "API response time performance" -- time_range: "2023-01-01 to 2023-12-31" -- granularity: "monthly" -- identify_patterns: true -- predict_future: true -- prediction_horizon: "3 months" -- analyze_rate_of_change: true -- identify_transitions: true -- critical_events: ["Major releases", "Infrastructure changes", "Traffic spikes"] - -### Prompt 3: Team Velocity Evolution - -"Track how our development team's velocity has changed over the past 18 months. Look at sprint-by-sprint data from January 2022 to June 2023. Find patterns in productivity (seasonal? after hiring? after process changes?), identify when velocity accelerated or decelerated, and predict future capacity for planning purposes." - -**Configuration hints:** -- subject: "Development team sprint velocity" -- time_range: "2022-01-01 to 2023-06-30" -- granularity: "weekly" -- identify_patterns: true -- predict_future: true -- prediction_horizon: "6 months" -- analyze_rate_of_change: true -- identify_transitions: true -- critical_events: ["New hires", "Process changes", "Tool migrations"] - ---- - -## 2. SystemsThinkingTask - -### Prompt 1: CI/CD Pipeline Dynamics - -"Analyze our CI/CD pipeline as a system. We have feedback loops between build failures and developer behavior, delays between code commit and production deployment, and accumulating technical debt. Identify reinforcing loops (more failures → more fixes → more changes → more failures), balancing loops (monitoring → alerts → fixes → stability), find leverage points where small changes could have big impacts, and simulate what happens if we: 1) Add more automated tests, 2) Reduce batch sizes, 3) Implement feature flags." 
- -**Configuration hints:** -- system_description: "CI/CD pipeline with build, test, and deployment stages, including developer feedback loops and technical debt accumulation" -- identify_feedback_loops: true -- map_delays: true -- find_leverage_points: true -- simulate_interventions: ["Add 50% more automated tests", "Reduce deployment batch size by 75%", "Implement feature flags for all new features"] -- time_horizon: "6 months" -- identify_archetypes: true -- analyze_emergent_behavior: true - -### Prompt 2: On-Call Rotation System - -"Analyze our on-call rotation system. Engineers get burned out from too many alerts (vicious cycle), but reducing alerts means missing real issues. There are delays between implementing fixes and seeing alert reduction. Identify feedback loops, find the 'Shifting the Burden' archetype (are we treating symptoms instead of root causes?), locate leverage points, and simulate: 1) Doubling the on-call rotation size, 2) Implementing alert fatigue metrics, 3) Requiring root cause analysis for all pages." - -**Configuration hints:** -- system_description: "On-call rotation system with alert fatigue, burnout, incident response, and technical debt dynamics" -- identify_feedback_loops: true -- map_delays: true -- find_leverage_points: true -- simulate_interventions: ["Double on-call rotation size", "Implement alert fatigue tracking and thresholds", "Require documented root cause analysis for all pages"] -- time_horizon: "1 year" -- identify_archetypes: true -- analyze_emergent_behavior: true - -### Prompt 3: Code Review Process - -"Analyze our code review process as a system. Fast reviews encourage more PRs, but rushed reviews miss bugs. Thorough reviews improve quality but create bottlenecks. There are delays between review feedback and learning. Identify feedback loops, find leverage points, and simulate: 1) Requiring 2 reviewers instead of 1, 2) Setting 24-hour review SLA, 3) Implementing automated review tools." 
- -**Configuration hints:** -- system_description: "Code review process with quality vs. speed tradeoffs, learning delays, bottlenecks, and knowledge sharing dynamics" -- identify_feedback_loops: true -- map_delays: true -- find_leverage_points: true -- simulate_interventions: ["Require 2 reviewers for all PRs", "Implement 24-hour review SLA", "Deploy automated code review tools for 80% of checks"] -- time_horizon: "6 months" -- identify_archetypes: true -- analyze_emergent_behavior: true - ---- - -## 3. ProbabilisticReasoningTask - -### Prompt 1: Production Outage Root Cause - -"We had a production outage last night. Here's what we know: API latency spiked to 10 seconds, database CPU hit 100%, error rate jumped to 15%, and it happened during a deployment. What caused it? Consider these hypotheses with prior probabilities: Bad deployment (0.4), Database query regression (0.25), Sudden traffic spike (0.15), Infrastructure failure (0.1), DDoS attack (0.05), Memory leak (0.05). Calculate expected values for investigation time and downtime risk for each. Identify which uncertainties matter most. Suggest experiments to narrow it down." - -**Configuration hints:** -- hypotheses: { - "Bad deployment introduced performance regression": 0.4, - "Database query regression from recent schema change": 0.25, - "Sudden legitimate traffic spike": 0.15, - "Infrastructure failure (AWS issue)": 0.1, - "DDoS attack": 0.05, - "Memory leak in new feature": 0.05 - } -- evidence: ["API latency spiked to 10s", "Database CPU at 100%", "Error rate 15%", "Occurred during deployment window", "No AWS status page issues", "Traffic volume normal"] -- calculate_expected_value: true -- identify_key_uncertainties: true -- suggest_experiments: true -- risk_tolerance: "low" -- decision_context: "Production outage root cause analysis - need to prevent recurrence" - -### Prompt 2: Feature Adoption Prediction - -"We're launching a new feature. Will it succeed? 
Consider: Strong adoption (0.3), Moderate adoption (0.4), Weak adoption (0.2), Complete failure (0.1). Evidence: Beta users gave 4.2/5 rating, 60% of beta users activated it, similar features at competitors have 40% adoption, our last 3 features had mixed results (one hit, two misses). Calculate expected value of continued investment vs. cutting losses. What uncertainties matter most? What experiments would reduce uncertainty?" - -**Configuration hints:** -- hypotheses: { - "Strong adoption (>50% of users within 3 months)": 0.3, - "Moderate adoption (20-50% of users)": 0.4, - "Weak adoption (5-20% of users)": 0.2, - "Complete failure (<5% adoption)": 0.1 - } -- evidence: ["Beta users rated 4.2/5", "60% of beta users activated feature", "Competitor similar features have 40% adoption", "Our last 3 features: 1 success, 2 failures", "Feature requires behavior change", "Marketing budget is limited"] -- calculate_expected_value: true -- identify_key_uncertainties: true -- suggest_experiments: true -- risk_tolerance: "medium" -- decision_context: "Feature launch decision - invest more or cut losses?" - -### Prompt 3: Security Vulnerability Assessment - -"We discovered a potential security vulnerability. How serious is it? Consider: Critical exploitable vulnerability (0.15), High severity but hard to exploit (0.25), Medium severity (0.35), Low severity (0.2), False positive (0.05). Evidence: Static analysis flagged it, affects authentication code, requires specific conditions to trigger, no known exploits in the wild, similar pattern was exploited in another product last year. Calculate expected value of immediate hotfix vs. scheduled fix. What should we investigate first?" 
- -**Configuration hints:** -- hypotheses: { - "Critical - easily exploitable, high impact": 0.15, - "High severity but requires specific conditions": 0.25, - "Medium severity - limited impact or hard to exploit": 0.35, - "Low severity - minimal risk": 0.2, - "False positive - not actually exploitable": 0.05 - } -- evidence: ["Static analysis tool flagged it", "Affects authentication code path", "Requires specific race condition", "No known exploits in wild", "Similar pattern exploited in competitor product", "Affects 30% of user base"] -- calculate_expected_value: true -- identify_key_uncertainties: true -- suggest_experiments: true -- risk_tolerance: "low" -- decision_context: "Security vulnerability triage - immediate hotfix or scheduled fix?" - ---- - -## 4. NarrativeReasoningTask - -### Prompt 1: User Journey Through Onboarding - -"Analyze the user journey through our onboarding process as a narrative. Characters: new user (motivated but confused), onboarding flow (the guide), support team (helpers), product complexity (antagonist). Setting: first 30 minutes after signup. Conflict: user wants to accomplish their goal but faces friction. Timeline: signup → email verification → tutorial → first action → success or abandonment. Construct the narrative, identify plot points (inciting incident, obstacles, climax, resolution), analyze user motivations, predict 3 alternative outcomes, and find inconsistencies in the experience." 
- -**Configuration hints:** -- subject: "User onboarding journey from signup to first successful action" -- narrative_elements: { - "characters": ["New user", "Onboarding flow", "Support team", "Product complexity"], - "setting": "First 30 minutes after account creation", - "conflict": "User wants to accomplish goal but faces friction and confusion", - "timeline": "Signup → Email verification → Tutorial → First action attempt → Resolution" - } -- construct_narrative: true -- identify_plot_points: true -- predict_outcomes: true -- alternative_narratives: 3 -- analyze_motivations: true -- find_inconsistencies: true - -### Prompt 2: Technical Debt Story Arc - -"Tell the story of how technical debt accumulated in our payment processing system. Characters: original developers (heroes who shipped fast), new team members (inheritors of legacy), product managers (pushing for features), technical debt (growing antagonist), customers (affected by bugs). Timeline: Initial launch (2020) → rapid growth (2021) → scaling pains (2022) → crisis point (2023) → current state. Construct the narrative arc, identify key plot points, analyze character motivations (why did each group make their choices?), predict 3 possible endings, and find inconsistencies in our approach." - -**Configuration hints:** -- subject: "Evolution of technical debt in payment processing system from 2020 to present" -- narrative_elements: { - "characters": ["Original developers", "New team members", "Product managers", "Technical debt", "Customers"], - "setting": "Payment processing system over 4 years", - "conflict": "Speed vs. quality, short-term wins vs. 
long-term sustainability", - "timeline": "2020 launch → 2021 growth → 2022 scaling issues → 2023 crisis → 2024 present" - } -- construct_narrative: true -- identify_plot_points: true -- predict_outcomes: true -- alternative_narratives: 3 -- analyze_motivations: true -- find_inconsistencies: true - -### Prompt 3: Migration Project Narrative - -"Analyze our database migration project as a story. Characters: migration team (protagonists), legacy database (old world), new database (promised land), production traffic (constant pressure), stakeholders (demanding timeline). Setting: 6-month migration window. Conflict: need zero downtime but systems are tightly coupled. Timeline: planning → dual-write implementation → data sync → validation → cutover → cleanup. Construct narrative, identify plot points, analyze team motivations, predict 3 alternative outcomes (success, partial success, failure), find inconsistencies in the plan." - -**Configuration hints:** -- subject: "Zero-downtime database migration project" -- narrative_elements: { - "characters": ["Migration team", "Legacy database", "New database", "Production traffic", "Stakeholders"], - "setting": "6-month migration window with zero downtime requirement", - "conflict": "Need seamless migration but systems are tightly coupled and complex", - "timeline": "Planning → Dual-write → Data sync → Validation → Cutover → Cleanup" - } -- construct_narrative: true -- identify_plot_points: true -- predict_outcomes: true -- alternative_narratives: 3 -- analyze_motivations: true -- find_inconsistencies: true - ---- - -## 5. LateralThinkingTask - -### Prompt 1: Reducing Build Times - -"Our CI builds take 45 minutes and it's killing productivity. Apply lateral thinking to find unconventional solutions. 
Use these techniques: reversal (what if we made builds slower?), random stimulus (apply concepts from fast food restaurants), challenge assumptions (what if we didn't need to build everything?), exaggeration (what if builds took 10 seconds? 10 hours?), escape (ignore the constraint that we need reproducible builds). Generate 5 alternatives per technique. Evaluate feasibility. I want creative, breakthrough ideas, not just 'add more caching.'" - -**Configuration hints:** -- problem: "CI build pipeline takes 45 minutes, blocking developer productivity and slowing iteration" -- techniques: ["reversal", "random_stimulus", "challenge_assumptions", "exaggeration", "escape"] -- num_alternatives: 5 -- evaluate_feasibility: true -- domain_context: "Software CI/CD pipeline with Docker builds, tests, and deployments" -- constraints: ["Must maintain build reproducibility", "Limited infrastructure budget", "Can't break existing workflows"] - -### Prompt 2: Improving Code Review Quality - -"Code reviews are either too slow or too shallow. Apply lateral thinking: reversal (what if we eliminated code reviews?), random stimulus (apply concepts from peer review in academic publishing), challenge assumptions (what if reviewers didn't need to understand the whole codebase?), metaphor (code review as a restaurant kitchen inspection), provocation (Po: code reviews should happen before code is written). Generate creative alternatives. Evaluate feasibility." 
- -**Configuration hints:** -- problem: "Code reviews are either too slow (blocking PRs) or too shallow (missing bugs), can't find the right balance" -- techniques: ["reversal", "random_stimulus", "challenge_assumptions", "metaphor", "provocation"] -- num_alternatives: 5 -- evaluate_feasibility: true -- domain_context: "Software development team with 20 engineers, GitHub PRs, async communication" -- constraints: ["Must maintain code quality", "Can't add significant overhead", "Team is distributed across timezones"] - -### Prompt 3: Reducing Alert Fatigue - -"We get 500 alerts per day and ignore most of them. Apply lateral thinking: reversal (what if we had zero alerts?), random stimulus (apply concepts from email spam filters), challenge assumptions (what if alerts didn't go to humans?), exaggeration (what if we had 10,000 alerts? 1 alert?), escape (ignore the constraint that we need to know about every issue). Generate unconventional solutions. Evaluate feasibility." - -**Configuration hints:** -- problem: "Alert fatigue - 500 alerts/day, most ignored, real issues get missed in the noise" -- techniques: ["reversal", "random_stimulus", "challenge_assumptions", "exaggeration", "escape"] -- num_alternatives: 5 -- evaluate_feasibility: true -- domain_context: "Production monitoring with Datadog, PagerDuty, multiple microservices" -- constraints: ["Can't miss critical issues", "Limited time to tune alerts", "Multiple teams own different services"] - ---- - -## 6. DialecticalReasoningTask - -### Prompt 1: Microservices vs. Monolith - -"Resolve the tension between microservices and monolith architectures. Thesis: 'Microservices enable independent scaling, deployment, and team autonomy.' Antithesis: 'Monoliths are simpler, easier to debug, and avoid distributed system complexity.' Context: mid-size company with 50 engineers, growing product. Iterate through 3 synthesis levels to find a higher-level understanding that transcends this opposition. 
Preserve strengths from both sides." - -**Configuration hints:** -- thesis: "Microservices architecture enables independent scaling, deployment, and team autonomy, allowing faster iteration" -- antithesis: "Monolithic architecture is simpler, easier to debug, avoids distributed system complexity, and has better performance" -- context: "Mid-size company with 50 engineers, growing product, currently has a monolith but considering microservices" -- synthesis_levels: 3 -- preserve_strengths: true - -### Prompt 2: Type Safety vs. Flexibility - -"Resolve the tension between static typing and dynamic typing. Thesis: 'Static typing catches bugs at compile time, enables better tooling, and serves as documentation.' Antithesis: 'Dynamic typing enables faster prototyping, more flexible code, and less boilerplate.' Context: team choosing language for new service. Iterate through 3 synthesis levels. Preserve strengths from both." - -**Configuration hints:** -- thesis: "Static typing catches bugs at compile time, enables superior IDE support and refactoring, and serves as living documentation" -- antithesis: "Dynamic typing enables rapid prototyping, more flexible and concise code, and easier metaprogramming" -- context: "Engineering team choosing language/framework for new high-traffic API service, team has experience with both paradigms" -- synthesis_levels: 3 -- preserve_strengths: true - -### Prompt 3: Move Fast vs. Stability - -"Resolve the tension between moving fast and maintaining stability. Thesis: 'Move fast and break things - rapid iteration and experimentation drive innovation.' Antithesis: 'Stability and reliability are paramount - breaking production erodes user trust.' Context: SaaS company with paying customers but facing competitive pressure. Iterate through 4 synthesis levels. Preserve strengths." 
- -**Configuration hints:** -- thesis: "Move fast and break things - rapid iteration, experimentation, and risk-taking drive innovation and competitive advantage" -- antithesis: "Stability and reliability are paramount - production incidents erode user trust, cost money, and damage reputation" -- context: "SaaS company with 10,000 paying customers, facing aggressive competition, need to innovate but can't afford downtime" -- synthesis_levels: 4 -- preserve_strengths: true - ---- - -## 7. ConstraintRelaxationTask - -### Prompt 1: API Design Under Constraints - -"Design a new API endpoint with these constraints: Must be RESTful (0.9), Must return in <100ms (0.95), Must be backward compatible (1.0), Must support pagination (0.7), Must include HATEOAS links (0.4), Must use JSON (0.8), Must support filtering (0.6), Must be idempotent (0.85). Use progressive relaxation strategy, reintroduce by priority, find creative ways to satisfy multiple constraints simultaneously. Max 5 iterations." - -**Configuration hints:** -- problem: "Design new API endpoint for retrieving user activity history with complex requirements" -- constraints: { - "Must be backward compatible with existing API": 1.0, - "Must return in under 100ms": 0.95, - "Must follow RESTful principles": 0.9, - "Must be idempotent": 0.85, - "Must use JSON format": 0.8, - "Must support pagination": 0.7, - "Must support filtering and sorting": 0.6, - "Must include HATEOAS links": 0.4 - } -- relaxation_strategy: "progressive" -- reintroduction_order: "by_priority" -- find_creative_satisfactions: true -- max_iterations: 5 - -### Prompt 2: Database Schema Migration - -"Design a database schema migration with constraints: Zero downtime (1.0), No data loss (1.0), Rollback-able (0.95), Complete in <1 hour (0.8), No application changes (0.6), Maintain referential integrity (0.9), Support both old and new schemas (0.7), Minimal storage overhead (0.5). 
Use selective relaxation, reintroduce by difficulty, find creative solutions. Max 6 iterations." - -**Configuration hints:** -- problem: "Migrate database schema from single table to normalized structure with 3 tables, production system with 24/7 uptime requirement" -- constraints: { - "Zero downtime during migration": 1.0, - "No data loss": 1.0, - "Must be rollback-able": 0.95, - "Maintain referential integrity": 0.9, - "Complete migration in under 1 hour": 0.8, - "Support both old and new schemas during transition": 0.7, - "No application code changes": 0.6, - "Minimal storage overhead": 0.5 - } -- relaxation_strategy: "selective" -- reintroduction_order: "by_difficulty" -- find_creative_satisfactions: true -- max_iterations: 6 - -### Prompt 3: Monitoring System Design - -"Design a monitoring system with constraints: Real-time alerts (0.95), No false positives (0.9), Covers all services (1.0), Low overhead (<1% CPU) (0.85), Easy to configure (0.7), Supports custom metrics (0.6), Historical data for 90 days (0.75), Auto-remediation (0.5). Use hierarchical relaxation, reintroduce by dependency, find creative satisfactions. Max 5 iterations." - -**Configuration hints:** -- problem: "Design comprehensive monitoring system for microservices architecture with 50+ services" -- constraints: { - "Must cover all services": 1.0, - "Real-time alerting (<1 minute latency)": 0.95, - "No false positives (high precision)": 0.9, - "Low overhead (<1% CPU, <100MB memory per service)": 0.85, - "Historical data retention for 90 days": 0.75, - "Easy to configure and maintain": 0.7, - "Support custom application metrics": 0.6, - "Auto-remediation capabilities": 0.5 - } -- relaxation_strategy: "hierarchical" -- reintroduction_order: "by_dependency" -- find_creative_satisfactions: true -- max_iterations: 5 - ---- - -## 8. AdversarialReasoningTask - -### Prompt 1: Authentication System Red Team - -"Red team our authentication system. Attack vectors: security, logic, performance. 
Adversary capability: advanced. The system uses JWT tokens, OAuth2, rate limiting, and MFA. Generate detailed exploit scenarios. Suggest mitigations. Challenge these assumptions: 'JWTs are secure', 'Rate limiting prevents brute force', 'MFA makes accounts unbreakable'. Find 5 vulnerabilities per vector." - -**Configuration hints:** -- target_system: "Authentication system using JWT tokens, OAuth2 flows, rate limiting, and optional MFA" -- attack_vectors: ["security", "logic", "performance"] -- adversary_capability: "advanced" -- generate_exploits: true -- suggest_mitigations: true -- challenge_assumptions: [ - "JWT tokens are inherently secure", - "Rate limiting effectively prevents brute force attacks", - "MFA makes accounts effectively unbreakable", - "OAuth2 flows are foolproof" - ] -- max_vulnerabilities_per_vector: 5 - -### Prompt 2: API Gateway Red Team - -"Red team our API gateway. Attack vectors: security, performance, business logic. Adversary capability: intermediate. The gateway handles rate limiting, authentication, request routing, and caching. Don't generate detailed exploits (just describe vulnerabilities). Suggest mitigations. Challenge assumptions: 'Rate limits are per-user', 'Caching is safe', 'Routing logic is secure'. Find 5 vulnerabilities per vector." - -**Configuration hints:** -- target_system: "API gateway handling rate limiting, authentication, request routing, response caching, and load balancing" -- attack_vectors: ["security", "performance", "business"] -- adversary_capability: "intermediate" -- generate_exploits: false -- suggest_mitigations: true -- challenge_assumptions: [ - "Rate limits are properly enforced per user", - "Response caching doesn't leak sensitive data", - "Request routing logic is secure", - "Load balancing is fair and can't be gamed" - ] -- max_vulnerabilities_per_vector: 5 - -### Prompt 3: Payment Processing Red Team - -"Red team our payment processing flow. Attack vectors: security, logic, compliance, business. 
Adversary capability: nation-state. The system handles credit cards, refunds, subscriptions, and fraud detection. Generate detailed exploits. Suggest mitigations. Challenge assumptions: 'PCI compliance means we're secure', 'Fraud detection catches everything', 'Refund logic is bulletproof'. Find 5 vulnerabilities per vector." - -**Configuration hints:** -- target_system: "Payment processing system handling credit cards, refunds, subscriptions, fraud detection, and PCI compliance" -- attack_vectors: ["security", "logic", "compliance", "business"] -- adversary_capability: "nation-state" -- generate_exploits: true -- suggest_mitigations: true -- challenge_assumptions: [ - "PCI compliance certification means the system is secure", - "Fraud detection catches all fraudulent transactions", - "Refund logic is bulletproof and can't be exploited", - "Subscription billing is tamper-proof" - ] -- max_vulnerabilities_per_vector: 5 - ---- - -## 9. AbductiveReasoningTask - -### Prompt 1: Memory Leak Investigation - -"We have a memory leak. Observations: 1) Memory usage grows 100MB/hour, 2) Happens only in production, not staging, 3) Started after last deployment, 4) Garbage collection runs but doesn't help, 5) Heap dumps show growing array of user sessions, 6) CPU usage is normal. Generate 5 hypotheses explaining ALL these observations. Evaluate explanatory power, simplicity, testability, and prior probability. Suggest tests to validate top hypotheses." 
- -**Configuration hints:** -- observations: [ - "Memory usage grows steadily at ~100MB per hour", - "Issue only occurs in production, not in staging environment", - "Started immediately after last deployment (3 days ago)", - "Garbage collection runs but doesn't reclaim memory", - "Heap dumps show growing array of user session objects", - "CPU usage remains normal", - "No obvious memory leaks in new code" - ] -- generate_hypotheses: true -- max_hypotheses: 5 -- evaluate_criteria: ["explanatory_power", "simplicity", "testability", "prior_probability"] -- suggest_tests: true -- domain_context: "Node.js microservice handling user sessions, deployed on Kubernetes" - -### Prompt 2: Intermittent Test Failures - -"Tests fail randomly. Observations: 1) Fails 5% of the time, 2) Always the same 3 tests, 3) Failures are non-deterministic, 4) Happens more often on CI than locally, 5) Error messages vary (timeout, assertion failure, null pointer), 6) Tests pass when run individually, 7) Started 2 weeks ago. Generate hypotheses. Evaluate them. Suggest tests." - -**Configuration hints:** -- observations: [ - "Test suite fails randomly about 5% of the time", - "Always the same 3 tests that fail", - "Failures are non-deterministic - same test passes then fails", - "Happens more frequently on CI than on local machines", - "Error messages vary: timeouts, assertion failures, null pointers", - "Tests pass reliably when run individually", - "Issue started approximately 2 weeks ago", - "No obvious code changes in the failing tests" - ] -- generate_hypotheses: true -- max_hypotheses: 5 -- evaluate_criteria: ["explanatory_power", "simplicity", "testability", "prior_probability"] -- suggest_tests: true -- domain_context: "Python test suite using pytest, running on GitHub Actions CI" - -### Prompt 3: API Latency Spikes - -"API has latency spikes. 
Observations: 1) P99 latency spikes to 5 seconds every 10 minutes, 2) P50 latency stays normal at 50ms, 3) Spikes affect all endpoints equally, 4) Database queries are fast during spikes, 5) Happens at regular intervals, 6) CPU and memory are normal, 7) Network metrics show no issues. Generate hypotheses. Evaluate. Suggest tests." - -**Configuration hints:** -- observations: [ - "P99 latency spikes to 5 seconds approximately every 10 minutes", - "P50 latency remains normal at ~50ms during spikes", - "Spikes affect all API endpoints equally", - "Database query times remain fast during spikes", - "Spikes occur at very regular intervals", - "CPU and memory utilization are normal", - "Network metrics show no packet loss or bandwidth issues", - "Application logs show no errors during spikes" - ] -- generate_hypotheses: true -- max_hypotheses: 5 -- evaluate_criteria: ["explanatory_power", "simplicity", "testability", "prior_probability"] -- suggest_tests: true -- domain_context: "REST API built with Express.js, deployed on AWS ECS, using PostgreSQL database" - ---- - -## Usage Notes - -These prompts are designed to: - -1. **Be Realistic** - Based on actual software engineering scenarios -2. **Be Specific** - Include concrete details and constraints -3. **Be Challenging** - Require sophisticated reasoning, not simple answers -4. **Be Parseable** - Written in natural language that can be converted to configuration -5. 
**Demonstrate Tool Value** - Show how each reasoning type provides unique insights - -Each prompt includes: -- Natural language description suitable for speaking to an AI -- Specific configuration hints showing how to map to tool parameters -- Realistic context from software development -- Clear success criteria - -The prompts cover common software engineering challenges: -- **Temporal**: Performance degradation, technical debt, team dynamics -- **Systems**: CI/CD, on-call, code review processes -- **Probabilistic**: Outage analysis, feature adoption, security triage -- **Narrative**: User journeys, technical debt stories, migration projects -- **Lateral**: Build times, code review, alert fatigue -- **Dialectical**: Architecture debates, type systems, speed vs. stability -- **Constraint Relaxation**: API design, migrations, monitoring systems -- **Adversarial**: Authentication, API gateway, payment security -- **Abductive**: Memory leaks, test failures, latency spikes - ---- - -## 10. GeneticOptimization - -### Prompt 1: Optimize API Error Message - -"I have this error message that users find confusing: 'Request failed with status code 422: Unprocessable Entity. The server understood the request but was unable to process the contained instructions.' I want to optimize it for clarity and helpfulness. Run 4 generations with population size 6, using strategies: rephrase, simplify, elaborate. Evaluate based on: clarity (40%), helpfulness (35%), conciseness (25%). Constraint: must remain under 100 characters." - -**Configuration hints:** -- initial_text: "Request failed with status code 422: Unprocessable Entity. The server understood the request but was unable to process the contained instructions." 
-- optimization_goal: "Make error message clear, helpful, and actionable for end users" -- num_generations: 4 -- population_size: 6 -- selection_size: 2 -- mutation_strategies: ["rephrase", "simplify", "elaborate"] -- enable_crossover: true -- evaluation_weights: {"clarity": 0.4, "helpfulness": 0.35, "conciseness": 0.25} -- constraints: ["Must be under 100 characters", "Must indicate what user should do next"] - -### Prompt 2: Perfect Marketing Tagline - -"Optimize this product tagline: 'Our software helps teams collaborate better and get more done faster with less effort.' Goal is maximum impact and memorability. Run 5 generations with population 8. Use strategies: rephrase, simplify, emphasize. Evaluate on: impact (40%), memorability (30%), clarity (20%), brevity (10%). Enable crossover. Constraints: under 60 characters, no jargon, must mention 'teams'." - -**Configuration hints:** -- initial_text: "Our software helps teams collaborate better and get more done faster with less effort." -- optimization_goal: "Create a punchy, memorable tagline that captures the product value proposition" -- num_generations: 5 -- population_size: 8 -- selection_size: 3 -- mutation_strategies: ["rephrase", "simplify", "emphasize"] -- enable_crossover: true -- evaluation_weights: {"impact": 0.4, "memorability": 0.3, "clarity": 0.2, "brevity": 0.1} -- constraints: ["Under 60 characters", "No technical jargon", "Must mention 'teams'", "Should evoke emotion"] - -### Prompt 3: Refine Technical Documentation - -"Improve this API documentation intro: 'This endpoint allows you to retrieve user data. You need to provide authentication credentials in the header. The response will contain user information in JSON format. Rate limits apply.' Optimize for technical accuracy and developer experience. Run 6 generations, population 6. Strategies: elaborate, restructure, simplify. Evaluate: accuracy (35%), clarity (30%), completeness (20%), developer-friendliness (15%). 
Constraints: must mention authentication, rate limits, and response format." - -**Configuration hints:** -- initial_text: "This endpoint allows you to retrieve user data. You need to provide authentication credentials in the header. The response will contain user information in JSON format. Rate limits apply." -- optimization_goal: "Create clear, accurate, developer-friendly API documentation that covers all essential information" -- num_generations: 6 -- population_size: 6 -- selection_size: 2 -- mutation_strategies: ["elaborate", "restructure", "simplify"] -- enable_crossover: true -- evaluation_weights: {"technical_accuracy": 0.35, "clarity": 0.3, "completeness": 0.2, "developer_friendliness": 0.15} -- constraints: ["Must mention authentication method", "Must mention rate limits", "Must describe response format", "Should include example or next steps"] - -### Prompt 4: Optimize Prompt Engineering - -"Evolve this AI prompt: 'Write a summary of the following text. Make it concise but include all important points. Use clear language.' Goal is to get better, more consistent AI outputs. Run 5 generations with population 8. Use all strategies: rephrase, simplify, elaborate, restructure, emphasize. Evaluate: effectiveness (40%), clarity (25%), specificity (20%), consistency (15%). Enable crossover. Constraint: must specify output format." - -**Configuration hints:** -- initial_text: "Write a summary of the following text. Make it concise but include all important points. Use clear language." 
-- optimization_goal: "Create a prompt that produces high-quality, consistent summaries from AI models" -- num_generations: 5 -- population_size: 8 -- selection_size: 3 -- mutation_strategies: ["rephrase", "simplify", "elaborate", "restructure", "emphasize"] -- enable_crossover: true -- evaluation_weights: {"effectiveness": 0.4, "clarity": 0.25, "specificity": 0.2, "consistency": 0.15} -- constraints: ["Must specify desired output format", "Should include length guidance", "Should define 'important points'"] - -### Prompt 5: Improve Email Subject Line - -"Optimize this email subject: 'Important: Please review and approve the Q4 budget proposal by end of week.' Goal is to maximize open rate and urgency while remaining professional. Run 4 generations, population 6. Strategies: rephrase, emphasize, simplify. Evaluate: urgency (35%), professionalism (30%), clarity (20%), open-rate-potential (15%). Constraints: under 60 characters, must mention Q4 and deadline." - -**Configuration hints:** -- initial_text: "Important: Please review and approve the Q4 budget proposal by end of week." -- optimization_goal: "Create compelling email subject line that drives opens and action while maintaining professionalism" -- num_generations: 4 -- population_size: 6 -- selection_size: 2 -- mutation_strategies: ["rephrase", "emphasize", "simplify"] -- enable_crossover: true -- evaluation_weights: {"urgency": 0.35, "professionalism": 0.3, "clarity": 0.2, "open_rate_potential": 0.15} -- constraints: ["Under 60 characters", "Must mention Q4", "Must mention deadline", "Professional tone"] - -### Prompt 6: Refine Code Comment - -"Evolve this code comment: '// This function processes the input data and returns the result.' Goal is maximum clarity and usefulness for developers. Run 3 generations, population 6. Strategies: elaborate, restructure, simplify. Evaluate: clarity (40%), usefulness (35%), conciseness (25%). 
Constraints: single line comment, under 120 characters, must describe what and why." - -**Configuration hints:** -- initial_text: "// This function processes the input data and returns the result." -- optimization_goal: "Create informative code comment that helps developers understand purpose and usage" -- num_generations: 3 -- population_size: 6 -- selection_size: 2 -- mutation_strategies: ["elaborate", "restructure", "simplify"] -- enable_crossover: false -- evaluation_weights: {"clarity": 0.4, "usefulness": 0.35, "conciseness": 0.25} -- constraints: ["Single line comment", "Under 120 characters", "Must describe what function does", "Should hint at why it exists"] - ---- - -## Additional Usage Notes for New Tools - -### Brainstorming Tool - -- Best for: Generating diverse solution options when stuck or need fresh perspectives -- Works well with: Problems that have multiple possible approaches -- Tip: Set `include_creative_options: true` for innovation, `false` for proven approaches -- Output: Structured list of options with independent analysis of each - -### GameTheory Tool - -- Best for: Strategic situations with multiple parties and conflicting interests -- Works well with: Negotiations, competition, cooperation dilemmas -- Tip: Use `repeated_game_analysis: true` for ongoing relationships -- Output: Payoff matrices, Nash equilibria, strategic recommendations - -### FiniteStateMachine Tool - -- Best for: Understanding processes, workflows, and state-dependent systems -- Works well with: User journeys, business processes, protocols -- Tip: Enable all validation options to catch edge cases and missing transitions -- Output: State diagram, transition table, test scenarios, validation report - These tools complement the existing reasoning tools by adding: -- **Brainstorming**: Divergent thinking and option generation -- **GameTheory**: Strategic interaction analysis -- **FiniteStateMachine**: Process modeling and validation - ---- - -## Usage Notes - -These prompts 
are designed to: - -1. **Be accessible** - No technical jargon or specialized knowledge required -2. **Be controversial** - Touch on real debates people care about -3. **Be standalone** - Require only general knowledge, no fictional documents -4. **Be interesting** - Explore questions that matter to people's lives -5. **Be parseable** - Written in natural language that a planner agent can convert to configuration - -Each prompt can be spoken naturally to an AI assistant, which would then parse it into the appropriate tool configuration. The prompts cover topics like: - -- Social issues (polarization, mental health, education) -- Economic questions (UBI, student debt, healthcare) -- Personal decisions (career, retirement, family) -- Philosophical questions (happiness, justice, free will) -- Policy debates (immigration, climate, legalization) - -These are designed to produce genuinely interesting, thought-provoking analyses that demonstrate the power of structured reasoning tools for everyday questions people actually care about. ---- \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/CommandSessionTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/CommandSessionTask.kt index 4b747e0d5..3f842d2de 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/CommandSessionTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/CommandSessionTask.kt @@ -5,6 +5,7 @@ import com.simiacryptus.cognotik.plan.* import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.webui.session.SessionTask import java.io.BufferedReader +import java.io.FileOutputStream import java.io.InputStreamReader import java.io.PrintWriter import java.util.concurrent.ConcurrentHashMap @@ -99,6 +100,7 @@ class CommandSessionTask( } task.add(uiOutput.toString()) task.update() + val transcript = transcript(task) var process: Process? 
= null try { cleanupInactiveSessions() @@ -120,6 +122,7 @@ class CommandSessionTask( uiOutput.appendLine("```") uiOutput.appendLine(input) uiOutput.appendLine("```") + transcript?.write(uiOutput.toString().toByteArray()) task.add(uiOutput.toString()) task.update() val output = try { @@ -134,6 +137,7 @@ class CommandSessionTask( uiOutput.appendLine("```") uiOutput.appendLine(output.take(10000)) uiOutput.appendLine("```") + transcript?.write(uiOutput.toString().toByteArray()) task.add(uiOutput.toString()) task.update() } @@ -144,8 +148,10 @@ class CommandSessionTask( task.error(e) val errorResult = "Error in CommandSessionTask: ${e.message}" task.add(uiOutput.append("\n\n**ERROR:** $errorResult").toString()) + transcript?.write(uiOutput.toString().toByteArray()) resultFn(errorResult) } finally { + transcript?.close() if ((executionConfig.sessionId == null || executionConfig.closeSession) && process != null) { try { process.destroy() @@ -186,4 +192,15 @@ class CommandSessionTask( } return outputBuffer.toString() } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/RunShellCommandTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/RunShellCommandTask.kt index 664dbc78a..e2b23ac8b 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/RunShellCommandTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/RunShellCommandTask.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.plan.tools.session -import com.simiacryptus.cognotik.actors.CodeAgent +import com.simiacryptus.cognotik.agents.CodeAgent import com.simiacryptus.cognotik.apps.code.CodingTask import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.interpreter.ProcessCodeRuntime @@ -11,201 +11,229 @@ import com.simiacryptus.cognotik.util.ValidatedObject import com.simiacryptus.cognotik.webui.session.SessionTask import com.simiacryptus.cognotik.webui.session.getChildClient import java.io.File +import java.io.FileOutputStream import java.util.concurrent.Semaphore import java.util.concurrent.atomic.AtomicInteger import kotlin.reflect.KClass class RunShellCommandTask( - orchestrationConfig: OrchestrationConfig, - planTask: RunShellCommandTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: RunShellCommandTaskExecutionConfigData? ) : AbstractTask(orchestrationConfig, planTask) { - class RunShellCommandTaskExecutionConfigData( - @Description("The shell command to be executed") - val command: String? = null, - @Description("The relative file path of the working directory") - val workingDir: String? = null, - @Description("Timeout in minutes for command execution (default: 15)") - val timeoutMinutes: Long? = null, - task_description: String? 
= null, - task_dependencies: List? = null, - state: TaskState? = null - ) : ValidatedObject, TaskExecutionConfig( - task_type = RunShellCommand.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ) { - override fun validate(): String? { - // Validate command is not null or blank - if (command.isNullOrBlank()) { - return "Command cannot be null or blank" - } - - // Validate timeout is positive if specified - if (timeoutMinutes != null && timeoutMinutes <= 0) { - return "Timeout must be a positive number of minutes" - } - - // Call parent validation - return ValidatedObject.validateFields(this) - } + class RunShellCommandTaskExecutionConfigData( + @Description("The shell command to be executed") + val command: String? = null, + @Description("The relative file path of the working directory") + val workingDir: String? = null, + @Description("Timeout in minutes for command execution (default: 15)") + val timeoutMinutes: Long? = null, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null + ) : ValidatedObject, TaskExecutionConfig( + task_type = RunShellCommand.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ) { + override fun validate(): String? 
{ + // Validate command is not null or blank + if (command.isNullOrBlank()) { + return "Command cannot be null or blank" + } + + // Validate timeout is positive if specified + if (timeoutMinutes != null && timeoutMinutes <= 0) { + return "Timeout must be a positive number of minutes" + } + + // Call parent validation + return ValidatedObject.validateFields(this) } + } - override fun promptSegment() = """ + override fun promptSegment() = """ RunShellCommand - Execute ${orchestrationConfig.language ?: "bash"} shell commands and provide the output ** Specify the command to be executed, or describe the task to be performed ** Optionally specify a working directory for the command execution ** Optionally specify a timeout in minutes (default: 15) """.trimIndent() - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val autoRunCounter = AtomicInteger(0) - val semaphore = Semaphore(0) - val typeConfig = typeConfig ?: throw RuntimeException() - val chatter = (typeConfig.model?.let { this.orchestrationConfig.instance(it) } - ?: this.orchestrationConfig.defaultChatter).getChildClient(task) - val planTask = this.executionConfig - val shellCommandActor = CodeAgent( - name = "RunShellCommand", - codeRuntimeClass = ProcessCodeRuntime::class, - details = """ + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val autoRunCounter = AtomicInteger(0) + val semaphore = Semaphore(0) + val markdownTranscript = transcript(task) + val typeConfig = typeConfig ?: throw RuntimeException() + val chatter = (typeConfig.model?.let { this.orchestrationConfig.instance(it) } + ?: this.orchestrationConfig.defaultChatter).getChildClient(task) + val planTask = this.executionConfig + val shellCommandActor = CodeAgent( + name = "RunShellCommand", + codeRuntimeClass = 
ProcessCodeRuntime::class, + details = """ Execute the following shell command(s) and provide the output. Ensure to handle any errors or exceptions gracefully. Note: This task is for running simple and safe commands. Avoid executing commands that can cause harm to the system or compromise security. """.trimIndent(), - symbols = mapOf( - "env" to (this.orchestrationConfig.env ?: emptyMap()), - "workingDir" to ((planTask?.workingDir?.let { File(it).absolutePath } - ?: File(this.orchestrationConfig.absoluteWorkingDir ?: ".").absolutePath) - ?.let { a -> this.orchestrationConfig.absoluteWorkingDir?.let { b -> File(b).resolve(a) } } - ?: this.orchestrationConfig.absoluteWorkingDir ?: "."), - "language" to (this.orchestrationConfig.language ?: "bash"), - "command" to (this.orchestrationConfig.shellCmd), - "timeoutMinutes" to (planTask?.timeoutMinutes ?: 15L), - ), - model = chatter, - temperature = this.orchestrationConfig.temperature, - fallbackModel = chatter - ) - val codingAgent = object : CodingTask( - dataStorage = agent.dataStorage, - session = agent.session, - user = agent.user, - ui = task.ui, - interpreter = shellCommandActor.codeRuntimeClass as KClass, - symbols = shellCommandActor.symbols, - temperature = shellCommandActor.temperature, - details = shellCommandActor.details, - model = shellCommandActor.model, - mainTask = task, - retryable = false, - ) { - override fun execute( - task: SessionTask, - response: CodeAgent.CodeResult - ): String { - val result = super.execute(task, response) // Runs the interpreter, updates response.result - if (orchestrationConfig.autoFix) { - val resultString = - "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n" + - "## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" + // STDOUT - "## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" // STDERR - resultFn(resultString) - semaphore.release() - } - return result - } + symbols = mapOf( + "env" to 
(this.orchestrationConfig.env ?: emptyMap()), + "workingDir" to ((planTask?.workingDir?.let { File(it).absolutePath } + ?: File(this.orchestrationConfig.absoluteWorkingDir ?: ".").absolutePath) + ?.let { a -> this.orchestrationConfig.absoluteWorkingDir?.let { b -> File(b).resolve(a) } } + ?: this.orchestrationConfig.absoluteWorkingDir ?: "."), + "language" to (this.orchestrationConfig.language ?: "bash"), + "command" to (this.orchestrationConfig.shellCmd), + "timeoutMinutes" to (planTask?.timeoutMinutes ?: 15L), + ), + model = chatter, + temperature = this.orchestrationConfig.temperature, + fallbackModel = chatter + ) + val codingAgent = object : CodingTask( + dataStorage = agent.dataStorage, + session = agent.session, + user = agent.user, + ui = task.ui, + interpreter = shellCommandActor.codeRuntimeClass as KClass, + symbols = shellCommandActor.symbols, + temperature = shellCommandActor.temperature, + details = shellCommandActor.details, + model = shellCommandActor.model, + mainTask = task, + retryable = false, + ) { + override fun execute( + task: SessionTask, + response: CodeAgent.CodeResult + ): String { + val result = super.execute(task, response) // Runs the interpreter, updates response.result + if (orchestrationConfig.autoFix) { + val resultString = + "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n" + + "## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" + // STDOUT + "## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" // STDERR + markdownTranscript?.write(resultString.toByteArray()) + markdownTranscript?.flush() + resultFn(resultString) + "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n" + + "## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" + // STDOUT + "## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" // STDERR + resultFn(resultString) + semaphore.release() + } + return result + } - override fun displayFeedback( - task: 
SessionTask, - request: CodeAgent.CodeRequest, - response: CodeAgent.CodeResult - ) { - if (orchestrationConfig.autoFix && autoRunCounter.incrementAndGet() <= 1) { - super.responseAction(task, "Running...", null, StringBuilder()) { - this.execute(task, response, request) // Calls the overridden execute - } - } else if (!orchestrationConfig.autoFix) { - // Manual feedback UI - val formText = StringBuilder() - var formHandle: StringBuilder? = null - @Suppress("AssignedValueIsNeverRead") - formHandle = task.add( - "
    \n${ - if (!super.canPlay) "" else super.playButton( - task, - request, - response, - formText - ) { formHandle!! } - }\n${ - acceptButton( - response, - task - ) - }\n
    \n${ // Pass task to acceptButton if needed for consistency, or ensure response is sufficient - super.ui.textInput { feedback -> - super.responseAction( - task, - "Revising...", formHandle!!, formText - ) { - super.feedback( - task, feedback, request, response - ) - } - } - }", additionalClasses = "reply-message" - ) - // Omitted potentially problematic lines: - // formText.append(formHandle.toString()) - // formHandle.toString() + override fun displayFeedback( + task: SessionTask, + request: CodeAgent.CodeRequest, + response: CodeAgent.CodeResult + ) { + if (orchestrationConfig.autoFix && autoRunCounter.incrementAndGet() <= 1) { + super.responseAction(task, "Running...", null, StringBuilder()) { + this.execute(task, response, request) // Calls the overridden execute + } + } else if (!orchestrationConfig.autoFix) { + // Manual feedback UI + val formText = StringBuilder() + var formHandle: StringBuilder? = null + @Suppress("AssignedValueIsNeverRead") + formHandle = task.add( + "
    \n${ + if (!super.canPlay) "" else super.playButton( + task, + request, + response, + formText + ) { formHandle!! } + }\n${ + acceptButton( + response, + task + ) + }\n
    \n${ // Pass task to acceptButton if needed for consistency, or ensure response is sufficient + super.ui.textInput { feedback -> + super.responseAction( + task, + "Revising...", formHandle!!, formText + ) { + super.feedback( + task, feedback, request, response + ) } - task.complete() - } + } + }", additionalClasses = "reply-message" + ) + // Omitted potentially problematic lines: + // formText.append(formHandle.toString()) + // formHandle.toString() + } + task.complete() + } - fun acceptButton( - response: CodeAgent.CodeResult, - task: SessionTask // Added task param for potential future use or consistency - ): String { - return ui.hrefLink("Accept", "href-link play-button") { - response.let { - "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n" + - "## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" + - "## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" - }.apply { resultFn(this) } - semaphore.release() - } - } + fun acceptButton( + response: CodeAgent.CodeResult, + task: SessionTask // Added task param for potential future use or consistency + ): String { + return ui.hrefLink("Accept", "href-link play-button") { + response.let { + "## Command\n\n$TRIPLE_TILDE\n${response.code}\n$TRIPLE_TILDE\n" + + "## Result\n$TRIPLE_TILDE\n${response.result.resultValue}\n$TRIPLE_TILDE\n" + + "## Output\n$TRIPLE_TILDE\n${response.result.resultOutput}\n$TRIPLE_TILDE\n" + }.apply { + markdownTranscript?.write(this.toByteArray()) + markdownTranscript?.flush() + resultFn(this) + } + semaphore.release() } - codingAgent.start( - codingAgent.codeRequest( - messages.map { it to ModelSchema.Role.user } + - listOfNotNull( - this.executionConfig?.command?.takeIf { it.isNotBlank() }?.let { it to ModelSchema.Role.user } - ) + } + } + codingAgent.start( + codingAgent.codeRequest( + messages.map { it to ModelSchema.Role.user } + + listOfNotNull( + this.executionConfig?.command?.takeIf { it.isNotBlank() }?.let { it to 
ModelSchema.Role.user } ) - ) - try { - semaphore.acquire() - } catch (e: Throwable) { - log.warn("Error", e) - } + ) + ) + try { + semaphore.acquire() + } catch (e: Throwable) { + log.warn("Error", e) + } finally { + markdownTranscript?.close() } + } - companion object { - private val log = LoggerFactory.getLogger(RunShellCommandTask::class.java) - val RunShellCommand = TaskType( - "RunShellCommand", - RunShellCommandTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Execute shell commands safely", - """ + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object { + private val log = LoggerFactory.getLogger(RunShellCommandTask::class.java) + val RunShellCommand = TaskType( + "RunShellCommand", + RunShellCommandTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Execute shell commands safely", + """ Executes shell commands in a controlled environment.
    • Safe command execution handling
    • @@ -215,7 +243,7 @@ class RunShellCommandTask(
    • Interactive result review
    """ - ) + ) - } + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/SeleniumSessionTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/SeleniumSessionTask.kt index 0bb3ac603..11a6a00b4 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/SeleniumSessionTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/session/SeleniumSessionTask.kt @@ -11,27 +11,28 @@ import org.openqa.selenium.chrome.ChromeOptions import org.openqa.selenium.devtools.v136.log.Log import org.openqa.selenium.devtools.v136.network.Network import org.openqa.selenium.remote.RemoteWebDriver +import java.io.FileOutputStream import java.util.* import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.ExecutorService class SeleniumSessionTask( - orchestrationConfig: OrchestrationConfig, - planTask: SeleniumSessionTaskExecutionConfigData? + orchestrationConfig: OrchestrationConfig, + planTask: SeleniumSessionTaskExecutionConfigData? 
) : AbstractTask(orchestrationConfig, planTask) { - companion object { - private val log = LoggerFactory.getLogger(SeleniumSessionTask::class.java) - private val activeSessions = ConcurrentHashMap() - private const val TIMEOUT_MS = 30000L + companion object { + private val log = LoggerFactory.getLogger(SeleniumSessionTask::class.java) + private val activeSessions = ConcurrentHashMap() + private const val TIMEOUT_MS = 30000L - private const val MAX_SESSIONS = 10 + private const val MAX_SESSIONS = 10 - val SeleniumSession = TaskType( - "SeleniumSession", - SeleniumSessionTask.SeleniumSessionTaskExecutionConfigData::class.java, - TaskTypeConfig::class.java, - "Automate browser interactions with Selenium", - """ + val SeleniumSession = TaskType( + "SeleniumSession", + SeleniumSessionTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Automate browser interactions with Selenium", + """ Automates browser interactions using Selenium WebDriver.
    • Headless Chrome browser automation
    • @@ -41,74 +42,76 @@ class SeleniumSessionTask(
    • Detailed execution results
    """ - ) + ) - } + } - private fun cleanupInactiveSessions() { - activeSessions.entries.removeIf { (id, session) -> - try { - if (!session.isAlive()) { - log.info("Removing inactive session $id") - session.quit() - true - } else false - } catch (e: Exception) { - log.warn("Error checking session $id, removing", e) - try { - session.forceQuit() - } catch (e2: Exception) { - log.error("Failed to force quit session $id", e2) - } - true - } + private fun cleanupInactiveSessions() { + activeSessions.entries.removeIf { (id, session) -> + try { + if (!session.isAlive()) { + log.info("Removing inactive session $id") + session.quit() + true + } else false + } catch (e: Exception) { + log.warn("Error checking session $id, removing", e) + try { + session.forceQuit() + } catch (e2: Exception) { + log.error("Failed to force quit session $id", e2) } + true + } } + } - class SeleniumSessionTaskExecutionConfigData( - @Description("The URL to navigate to (optional if reusing existing session)") - val url: String = "", - @Description("JavaScript commands to execute") - val commands: List = listOf(), - @Description("Session ID for reusing existing sessions") - val sessionId: String? = null, - @Description("Timeout in milliseconds for commands") - val timeout: Long = TIMEOUT_MS, - @Description("Whether to close the session after execution") - val closeSession: Boolean = false, - task_description: String? = null, - task_dependencies: List? = null, - state: TaskState? = null, - @Description("Include CSS data in page source: styles, classes, etc.") - val includeCssData: Boolean? 
= null, - @Description("Whether to simplify the HTML structure by combining nested elements") - val simplifyStructure: Boolean = true, - @Description("Whether to keep object IDs in the HTML output") - val keepObjectIds: Boolean = false, - @Description("Whether to preserve whitespace in text nodes") - val preserveWhitespace: Boolean = false, - ) : TaskExecutionConfig( - task_type = SeleniumSession.name, - task_description = task_description, - task_dependencies = task_dependencies?.toMutableList(), - state = state - ), ValidatedObject { - override fun validate(): String? { - if (url.isBlank() && sessionId == null) { - return "Either 'url' must be provided or 'sessionId' must be specified to reuse an existing session" - } - if (timeout <= 0) { - return "Timeout must be greater than 0" - } - if (commands.isEmpty() && url.isBlank() && sessionId == null) { - return "At least one command must be provided, or a URL/sessionId must be specified" - } - return ValidatedObject.validateFields(this) - } + class SeleniumSessionTaskExecutionConfigData( + @Description("The URL to navigate to (optional if reusing existing session)") + val url: String = "", + @Description("JavaScript commands to execute") + val commands: List = listOf(), + @Description("Session ID for reusing existing sessions") + val sessionId: String? = null, + @Description("Timeout in milliseconds for commands") + val timeout: Long = TIMEOUT_MS, + @Description("Whether to close the session after execution") + val closeSession: Boolean = false, + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = null, + @Description("Include CSS data in page source: styles, classes, etc.") + val includeCssData: Boolean? 
= null, + @Description("Whether to simplify the HTML structure by combining nested elements") + val simplifyStructure: Boolean = true, + @Description("Whether to keep object IDs in the HTML output") + val keepObjectIds: Boolean = false, + @Description("Whether to preserve whitespace in text nodes") + val preserveWhitespace: Boolean = false, + @Description("Whether to create a transcript file of the session") + val createTranscript: Boolean = false, + ) : TaskExecutionConfig( + task_type = SeleniumSession.name, + task_description = task_description, + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (url.isBlank() && sessionId == null) { + return "Either 'url' must be provided or 'sessionId' must be specified to reuse an existing session" + } + if (timeout <= 0) { + return "Timeout must be greater than 0" + } + if (commands.isEmpty() && url.isBlank() && sessionId == null) { + return "At least one command must be provided, or a URL/sessionId must be specified" + } + return ValidatedObject.validateFields(this) } + } - override fun promptSegment() = """ + override fun promptSegment() = """ SeleniumSession - Create and manage a stateful Selenium browser session * Specify the URL to navigate to * Provide JavaScript commands to execute in sequence through Selenium's executeScript method @@ -129,171 +132,200 @@ class SeleniumSessionTask( Active Sessions: """.trimIndent() + activeSessions.entries.joinToString("\n") { (id, session: Selenium) -> - buildString { - append(" ** Session $id:\n") - append(" URL: ${session.getCurrentUrl()}\n") - try { - append(" Title: ${session.executeScript("return document.title;")}\n") - val logs = session.getLogs() - if (logs.isNotEmpty()) { - append(" Recent Logs:\n") - logs.takeLast(3).forEach { log -> - append(" - $log\n") - } - } - } catch (e: Exception) { - append(" Error getting session details: ${e.message}\n") - } + buildString { + append(" ** 
Session $id:\n") + append(" URL: ${session.getCurrentUrl()}\n") + try { + append(" Title: ${session.executeScript("return document.title;")}\n") + val logs = session.getLogs() + if (logs.isNotEmpty()) { + append(" Recent Logs:\n") + logs.takeLast(3).forEach { log -> + append(" - $log\n") + } } + } catch (e: Exception) { + append(" Error getting session details: ${e.message}\n") + } } + } - override fun run( - agent: TaskOrchestrator, - messages: List, - task: SessionTask, - resultFn: (String) -> Unit, - orchestrationConfig: OrchestrationConfig - ) { - val seleniumFactory: (pool: ExecutorService, cookies: Array?) -> Selenium = - { pool, cookies -> - try { - Selenium2S3( - pool = pool, - cookies = cookies, - driver = driver() - ) - } catch (e: Exception) { - throw IllegalStateException("Failed to initialize Selenium", e) - } - } - requireNotNull(executionConfig) { "SeleniumSessionTaskData is required" } - var selenium: Selenium? = null + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val seleniumFactory: (pool: ExecutorService, cookies: Array?) -> Selenium = + { pool, cookies -> try { + Selenium2S3( + pool = pool, + cookies = cookies, + driver = driver() + ) + } catch (e: Exception) { + throw IllegalStateException("Failed to initialize Selenium", e) + } + } + requireNotNull(executionConfig) { "SeleniumSessionTaskData is required" } + var selenium: Selenium? = null + var transcriptStream: FileOutputStream? 
= null + try { - cleanupInactiveSessions() + cleanupInactiveSessions() - if (activeSessions.size >= MAX_SESSIONS && executionConfig.sessionId == null) { - throw IllegalStateException("Maximum number of concurrent sessions ($MAX_SESSIONS) reached") - } - selenium = executionConfig.sessionId?.let { id -> activeSessions[id] } - ?: seleniumFactory(agent.pool, null).also { newSession -> - executionConfig.sessionId?.let { id -> activeSessions[id] = newSession } - } - log.info("Starting Selenium session ${executionConfig.sessionId ?: "temporary"} for URL: ${executionConfig.url} with timeout ${executionConfig.timeout}ms") - selenium.setScriptTimeout(executionConfig.timeout) + if (activeSessions.size >= MAX_SESSIONS && executionConfig.sessionId == null) { + throw IllegalStateException("Maximum number of concurrent sessions ($MAX_SESSIONS) reached") + } + selenium = executionConfig.sessionId?.let { id -> activeSessions[id] } + ?: seleniumFactory(agent.pool, null).also { newSession -> + executionConfig.sessionId?.let { id -> activeSessions[id] = newSession } + } + if (executionConfig.createTranscript) { + transcriptStream = createTranscript(task) + transcriptStream?.write("# Selenium Session Transcript\n\n".toByteArray()) + } + log.info("Starting Selenium session ${executionConfig.sessionId ?: "temporary"} for URL: ${executionConfig.url} with timeout ${executionConfig.timeout}ms") + selenium.setScriptTimeout(executionConfig.timeout) - if (executionConfig.url.isNotBlank()) { - selenium.navigate(executionConfig.url) - } + if (executionConfig.url.isNotBlank()) { + selenium.navigate(executionConfig.url) + transcriptStream?.write("## Navigation\nNavigated to: ${executionConfig.url}\n\n".toByteArray()) + } - val results = executionConfig.commands.map { command -> - try { - log.debug("Executing command: $command") - val startTime = System.currentTimeMillis() - val result = selenium.executeScript(command)?.toString() ?: "null" - val duration = System.currentTimeMillis() - startTime - 
log.debug("Command completed in ${duration}ms") - result - } catch (e: Exception) { - task.error(e) - log.error("Error executing command: $command", e) - e.message ?: "Error executing command" - } - } - val result = formatResults(executionConfig, selenium, results) - task.add(MarkdownUtil.renderMarkdown(result)) - resultFn(result) - } finally { + val results = executionConfig.commands.map { command -> + try { + log.debug("Executing command: $command") + transcriptStream?.write("### Executing Command\n```javascript\n$command\n```\n\n".toByteArray()) + val startTime = System.currentTimeMillis() + val result = selenium.executeScript(command)?.toString() ?: "null" + val duration = System.currentTimeMillis() - startTime + log.debug("Command completed in ${duration}ms") + transcriptStream?.write("**Duration:** ${duration}ms\n\n".toByteArray()) + transcriptStream?.write("**Result:**\n```\n${result.take(1000)}\n```\n\n".toByteArray()) + result + } catch (e: Exception) { + task.error(e) + log.error("Error executing command: $command", e) + transcriptStream?.write("**Error:** ${e.message}\n\n".toByteArray()) + e.message ?: "Error executing command" + } + } + transcriptStream?.write("## Final State\n".toByteArray()) + transcriptStream?.write("**Final URL:** ${selenium.getCurrentUrl()}\n\n".toByteArray()) - if ((executionConfig.sessionId == null || executionConfig.closeSession) && selenium != null) { - log.info("Closing temporary session") - try { - selenium.quit() - if (executionConfig.sessionId != null) { - activeSessions.remove(executionConfig.sessionId) - } - } catch (e: Exception) { - log.error("Error closing temporary session", e) - selenium.forceQuit() - if (executionConfig.sessionId != null) { - activeSessions.remove(executionConfig.sessionId) - } - } - } + val result = formatResults(executionConfig, selenium, results) + task.add(MarkdownUtil.renderMarkdown(result)) + resultFn(result) + transcriptStream?.flush() + transcriptStream?.close() + } catch (e: Exception) { + 
transcriptStream?.close() + throw e + } finally { + + if ((executionConfig.sessionId == null || executionConfig.closeSession) && selenium != null) { + log.info("Closing temporary session") + try { + selenium.quit() + if (executionConfig.sessionId != null) { + activeSessions.remove(executionConfig.sessionId) + } + } catch (e: Exception) { + log.error("Error closing temporary session", e) + selenium.forceQuit() + if (executionConfig.sessionId != null) { + activeSessions.remove(executionConfig.sessionId) + } } + } } + } - val chromeDriver: WebDriverManager by lazy { WebDriverManager.chromedriver().apply { setup() } } - fun driver(): RemoteWebDriver { - requireNotNull(chromeDriver) - val driver = ChromeDriver(ChromeOptions().apply { - addArguments("--headless") - addArguments("--disable-gpu") - addArguments("--no-sandbox") - addArguments("--disable-dev-shm-usage") - }) + val chromeDriver: WebDriverManager by lazy { WebDriverManager.chromedriver().apply { setup() } } + fun driver(): RemoteWebDriver { + requireNotNull(chromeDriver) + val driver = ChromeDriver(ChromeOptions().apply { + addArguments("--headless") + addArguments("--disable-gpu") + addArguments("--no-sandbox") + addArguments("--disable-dev-shm-usage") + }) - val devTools = driver.devTools - devTools.createSession() + val devTools = driver.devTools + devTools.createSession() - devTools.send(Network.enable(Optional.empty(), Optional.empty(), Optional.empty())) - devTools.addListener(Network.requestWillBeSent()) { request -> - println("Request URL: " + request.request.url) - } + devTools.send(Network.enable(Optional.empty(), Optional.empty(), Optional.empty())) + devTools.addListener(Network.requestWillBeSent()) { request -> + println("Request URL: " + request.request.url) + } - devTools.send(Log.enable()) - devTools.addListener(Log.entryAdded()) { logEntry -> - println("Console: " + logEntry.text) - } - return driver + devTools.send(Log.enable()) + devTools.addListener(Log.entryAdded()) { logEntry -> + 
println("Console: " + logEntry.text) } + return driver + } - private fun formatResults( - planTask: SeleniumSessionTaskExecutionConfigData, - selenium: Selenium, - results: List - ): String = buildString(capacity = 163840) { + private fun formatResults( + planTask: SeleniumSessionTaskExecutionConfigData, + selenium: Selenium, + results: List + ): String = buildString(capacity = 163840) { - appendLine("## Selenium Session Results") - if (planTask.url.isNotBlank()) { - appendLine("Initial URL: ${planTask.url}") - } - appendLine("Session ID: ${planTask.sessionId ?: "temporary"}") - appendLine("Final URL: ${selenium.getCurrentUrl()}") - appendLine("Timeout: ${planTask.timeout}ms") - appendLine("Browser Info: ${selenium.getBrowserInfo()}") - appendLine("\nCommand Results:") - results.forEachIndexed { index, result -> - appendLine("### Command ${index + 1}") - appendLine("```javascript") - appendLine(planTask.commands[index]) - appendLine("```") - if (result != "null") { - appendLine("Result:") - appendLine("```") - appendLine(result.take(5000)) + appendLine("## Selenium Session Results") + if (planTask.url.isNotBlank()) { + appendLine("Initial URL: ${planTask.url}") + } + appendLine("Session ID: ${planTask.sessionId ?: "temporary"}") + appendLine("Final URL: ${selenium.getCurrentUrl()}") + appendLine("Timeout: ${planTask.timeout}ms") + appendLine("Browser Info: ${selenium.getBrowserInfo()}") + appendLine("\nCommand Results:") + results.forEachIndexed { index, result -> + appendLine("### Command ${index + 1}") + appendLine("```javascript") + appendLine(planTask.commands[index]) + appendLine("```") + if (result != "null") { + appendLine("Result:") + appendLine("```") + appendLine(result.take(5000)) - appendLine("```") - } - } - try { - appendLine("\nFinal Page Source:") - appendLine("```html") - appendLine( - HtmlSimplifier.scrubHtml( - str = selenium.getPageSource(), - baseUrl = selenium.getCurrentUrl(), - includeCssData = executionConfig?.includeCssData ?: false, - 
simplifyStructure = executionConfig?.simplifyStructure ?: true, - keepObjectIds = executionConfig?.keepObjectIds ?: false, - preserveWhitespace = executionConfig?.preserveWhitespace ?: false - ) - ) + appendLine("```") + } + } + try { + appendLine("\nFinal Page Source:") + appendLine("```html") + appendLine( + HtmlSimplifier.scrubHtml( + str = selenium.getPageSource(), + baseUrl = selenium.getCurrentUrl(), + includeCssData = executionConfig?.includeCssData ?: false, + simplifyStructure = executionConfig?.simplifyStructure ?: true, + keepObjectIds = executionConfig?.keepObjectIds ?: false, + preserveWhitespace = executionConfig?.preserveWhitespace ?: false + ) + ) - appendLine("```") - } catch (e: Exception) { - appendLine("\nError getting page source: ${e.message}") - } + appendLine("```") + } catch (e: Exception) { + appendLine("\nError getting page source: ${e.message}") } + } + + private fun createTranscript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link " + + "html " + + "pdf" + ) + return markdownTranscript + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ArticleGenerationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ArticleGenerationTask.kt new file mode 100644 index 000000000..d64834359 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ArticleGenerationTask.kt @@ -0,0 +1,813 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.OrchestrationConfig +import 
com.simiacryptus.cognotik.plan.TaskOrchestrator +import com.simiacryptus.cognotik.plan.TaskType +import com.simiacryptus.cognotik.plan.TaskTypeConfig +import com.simiacryptus.cognotik.plan.tools.file.AnalysisTask.Companion.extractDocumentContent +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class ArticleGenerationTask( + orchestrationConfig: OrchestrationConfig, + planTask: ArticleGenerationTaskExecutionConfigData? +) : JournalismReasoningTask( + orchestrationConfig, + planTask +) { + + class ArticleGenerationTaskExecutionConfigData( + @Description("The story topic or event to write about") + story_topic: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + input_files: List? = null, + + @Description("Journalism elements to consider (who, what, when, where, why, how)") + journalism_elements: Map? 
= null, + + @Description("Target word count for the article") + val target_word_count: Int = 1000, + + @Description("Article format (e.g., 'news', 'feature', 'investigative', 'opinion', 'profile')") + val article_format: String = "news", + + @Description("Writing style (e.g., 'AP style', 'narrative', 'analytical', 'conversational')") + val writing_style: String = "AP style", + + @Description("Target publication or audience (affects tone and depth)") + val target_publication: String = "general news", + + @Description("Whether to include quotes from sources") + val include_quotes: Boolean = true, + + @Description("Whether to include data and statistics") + val include_data: Boolean = true, + + @Description("Whether to include expert analysis") + val include_expert_analysis: Boolean = true, + + @Description("Whether to include related context and background") + val include_context: Boolean = true, + + @Description("Number of revision passes for quality improvement") + val revision_passes: Int = 1, + + @Description("Whether to generate headline and subheadline") + val generate_headlines: Boolean = true, + + @Description("Whether to generate social media snippets") + val generate_social_snippets: Boolean = false, + + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : JournalismReasoningTaskExecutionConfigData( + story_topic = story_topic, + journalism_elements = journalism_elements, + verify_facts = true, + identify_perspectives = true, + analyze_context = true, + identify_biases = true, + find_gaps = true, + alternative_angles = 1, + assess_newsworthiness = true, + task_dependencies = task_dependencies, + state = state, + input_files = input_files + ) { + override val task_type: String = ArticleGeneration.name + override var task_description: String? = "Generate $article_format article about '$story_topic'" + override fun validate(): String? 
{ + // First validate parent class + super.validate()?.let { return it } + // Validate target_word_count + if (target_word_count <= 0) { + return "target_word_count must be positive, got: $target_word_count" + } + // Validate revision_passes + if (revision_passes < 0) { + return "revision_passes cannot be negative, got: $revision_passes" + } + return null + } + } + + data class ArticleStructure( + val headline: String = "", + val subheadline: String = "", + val lede: String = "", + val sections: List = emptyList(), + val conclusion: String = "", + val estimated_word_count: Int = 0 + ) + + data class ArticleSection( + val section_title: String? = null, + val purpose: String = "", + val key_points: List = emptyList(), + val sources_to_include: List = emptyList(), + val estimated_word_count: Int = 0 + ) + + data class GeneratedArticle( + val headline: String = "", + val subheadline: String = "", + val byline: String = "", + val dateline: String = "", + val content: String = "", + val word_count: Int = 0, + val key_facts: List = emptyList(), + val sources_cited: List = emptyList() + ) + + data class SocialSnippets( + val twitter: String = "", + val facebook: String = "", + val linkedin: String = "" + ) + + override fun promptSegment(): String { + return """ +ArticleGeneration - Generate complete journalistic articles from investigation and analysis + ** Extends JournalismReasoning with full article writing + ** Specify the story topic to write about + ** Define journalism elements: who, what, when, where, why, how + ** Set target word count and article format (news, feature, investigative, etc.) 
+ ** Configure writing style and target publication + ** Enable quotes, data, expert analysis, and context + ** Performs investigation, creates structure, then writes article + ** Optional revision passes for quality improvement + ** Can generate headlines and social media snippets + ** Produces publication-ready articles with proper journalistic structure + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + val genConfig = executionConfig as? ArticleGenerationTaskExecutionConfigData + log.info("Starting ArticleGenerationTask for story: '${genConfig?.story_topic}'") + + if (genConfig == null) { + log.error("Invalid configuration type for ArticleGenerationTask") + task.safeComplete("CONFIGURATION ERROR: Invalid configuration type", log) + resultFn("CONFIGURATION ERROR: Invalid configuration type") + return + } + + val storyTopic = genConfig.story_topic + if (storyTopic.isNullOrBlank()) { + log.error("No story topic specified for article generation") + task.safeComplete("CONFIGURATION ERROR: No story topic specified", log) + resultFn("CONFIGURATION ERROR: No story topic specified") + return + } + val inputFileContent = getInputFileCode() + val messageContent = messages.filter { it.isNotBlank() }.joinToString("\n\n") + val combinedInput = listOfNotNull( + messageContent.takeIf { it.isNotBlank() }, + inputFileContent.takeIf { it.isNotBlank() } + ).joinToString("\n\n---\n\n") + + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + // Create transcript file + val transcript = transcript(task) + transcript?.let { out -> + out.write("# Article Generation Transcript\n\n".toByteArray()) + out.write("**Story Topic:** $storyTopic\n\n".toByteArray()) + out.write("**Started:** 
${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + out.write("---\n\n".toByteArray()) + } + + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Article Generation") + appendLine() + if (combinedInput.isNotBlank()) { + appendLine("**Input Context:** ${combinedInput.take(200)}...") + appendLine() + } + appendLine("**Story Topic:** $storyTopic") + appendLine() + appendLine("## Configuration") + appendLine("- Target Word Count: ${genConfig.target_word_count}") + appendLine("- Article Format: ${genConfig.article_format}") + appendLine("- Writing Style: ${genConfig.writing_style}") + appendLine("- Target Publication: ${genConfig.target_publication}") + appendLine("- Include Quotes: ${if (genConfig.include_quotes) "✓" else "✗"}") + appendLine("- Include Data: ${if (genConfig.include_data) "✓" else "✗"}") + appendLine("- Include Expert Analysis: ${if (genConfig.include_expert_analysis) "✓" else "✗"}") + appendLine("- Include Context: ${if (genConfig.include_context) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Journalism Investigation") + appendLine("*Running comprehensive journalism analysis...*") + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + transcript?.write(overviewContent.toByteArray()) + + + val resultBuilder = StringBuilder() + resultBuilder.append("# Generated Article: $storyTopic\n\n") + + try { + // Phase 1: Run the base journalism investigation + log.info("Phase 1: Running journalism investigation") + val investigationResult = StringBuilder() + + super.run(agent, messages, task, { result -> + investigationResult.append(result) + transcript?.write("\n## 
Investigation Results\n\n".toByteArray()) + transcript?.write(result.toByteArray()) + transcript?.write("\n\n".toByteArray()) + }, orchestrationConfig) + + overviewTask.add("\n✅ Phase 1 Complete: Investigation finished\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Article Structure\n*Creating article outline and structure...*\n".renderMarkdown) + task.update() + + // Phase 2: Generate article structure + log.info("Phase 2: Generating article structure") + val structureTask = task.ui.newTask(false) + tabs["Article Structure"] = structureTask.placeholder + + structureTask.add( + buildString { + appendLine("# Article Structure") + appendLine() + appendLine("**Status:** Planning article organization...") + appendLine() + }.renderMarkdown + ) + task.update() + + val structureAgent = ParsedAgent( + resultClass = ArticleStructure::class.java, + prompt = """ +You are an experienced news editor. Create a detailed structure for this article. + +${if (combinedInput.isNotBlank()) "Reference Material:\n$combinedInput\n\n" else ""} + +Story Topic: $storyTopic + +Investigation Results: +${investigationResult.toString().truncateForDisplay(8000)} + +Journalism Elements: +${genConfig.journalism_elements?.entries?.joinToString("\n") { (key, value) -> "- $key: $value" } ?: ""} + +Article Specifications: +- Format: ${genConfig.article_format} +- Style: ${genConfig.writing_style} +- Target: ${genConfig.target_publication} +- Word Count: ${genConfig.target_word_count} + +Create a structure with: +- Compelling headline and subheadline +- Strong lede (opening paragraph) that hooks readers +- 3-5 main sections with clear purposes +- Logical flow from most to least important (inverted pyramid for news) +- Conclusion that provides context or forward-looking perspective + +For each section, specify: +- Section title (if applicable) +- Purpose (what this section accomplishes) +- Key points to cover +- Sources or quotes to include +- Estimated word count + +Ensure the structure: +- 
Follows ${genConfig.article_format} format conventions +- Matches ${genConfig.writing_style} style guidelines +- Serves the ${genConfig.target_publication} audience +- Totals approximately ${genConfig.target_word_count} words + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val structure = structureAgent.answer(listOf("Generate structure")).obj + log.info("Generated structure: ${structure.sections.size} sections, ${structure.estimated_word_count} words") + + val structureContent = buildString { + appendLine("## ${structure.headline}") + appendLine() + appendLine("**Subheadline:** ${structure.subheadline}") + appendLine() + appendLine("**Estimated Word Count:** ${structure.estimated_word_count}") + appendLine() + appendLine("---") + appendLine() + appendLine("### Lede") + appendLine(structure.lede) + appendLine() + appendLine("---") + appendLine() + structure.sections.forEachIndexed { index, section -> + appendLine("### Section ${index + 1}${section.section_title?.let { ": $it" } ?: ""}") + appendLine() + appendLine("**Purpose:** ${section.purpose}") + appendLine() + appendLine("**Key Points:**") + section.key_points.forEach { point -> + appendLine("- $point") + } + appendLine() + if (section.sources_to_include.isNotEmpty()) { + appendLine("**Sources to Include:**") + section.sources_to_include.forEach { source -> + appendLine("- $source") + } + appendLine() + } + appendLine("**Est. 
Words:** ${section.estimated_word_count}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("### Conclusion") + appendLine(structure.conclusion) + appendLine() + appendLine("**Status:** ✅ Complete") + } + structureTask.add(structureContent.renderMarkdown) + task.update() + transcript?.write("\n## Article Structure\n\n".toByteArray()) + transcript?.write(structureContent.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + + overviewTask.add("✅ Phase 2 Complete: Structure created (${structure.sections.size} sections)\n".renderMarkdown) + overviewTask.add("\n### Phase 3: Article Writing\n*Writing full article...*\n".renderMarkdown) + task.update() + + // Phase 3: Write the article + log.info("Phase 3: Writing article") + val writingTask = task.ui.newTask(false) + tabs["Article Draft"] = writingTask.placeholder + + writingTask.add( + buildString { + appendLine("# Article Draft") + appendLine() + appendLine("**Status:** Writing article...") + appendLine() + }.renderMarkdown + ) + task.update() + + val writingPrompt = """ +You are a professional journalist writing for ${genConfig.target_publication}. Write the complete article. 
+ +${if (combinedInput.isNotBlank()) "Reference Material:\n$combinedInput\n\n" else ""} + +Story Topic: $storyTopic + +Article Structure: +${structureContent} + +Investigation Findings: +${investigationResult.toString().truncateForDisplay(6000)} + +Writing Guidelines: +- Format: ${genConfig.article_format} +- Style: ${genConfig.writing_style} +- Target Word Count: ${genConfig.target_word_count} +${if (genConfig.include_quotes) "- Include direct quotes from sources" else "- Minimize direct quotes"} +${if (genConfig.include_data) "- Include relevant data and statistics" else "- Focus on narrative over data"} +${if (genConfig.include_expert_analysis) "- Include expert analysis and interpretation" else "- Stick to factual reporting"} +${if (genConfig.include_context) "- Provide historical context and background" else "- Focus on current events"} + +Write the complete article with: +- Compelling headline and subheadline +- Byline and dateline +- Strong lede that captures the essence +- Clear, concise body following the structure +- Proper attribution for all claims and quotes +- Smooth transitions between sections +- Engaging conclusion +- Approximately ${genConfig.target_word_count} words + +Follow journalistic best practices: +- Active voice and strong verbs +- Short paragraphs (2-3 sentences) +- Concrete details and specific examples +- Proper AP style (or specified style) +- Objective tone (unless opinion piece) +- Fact-based reporting + +After writing, provide: +- The complete article content +- Actual word count +- Key facts covered +- Sources cited + """.trimIndent() + + val articleAgent = ParsedAgent( + resultClass = GeneratedArticle::class.java, + prompt = writingPrompt, + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var article = articleAgent.answer(listOf("Write the article")).obj + + // Optional revision passes + if (genConfig.revision_passes > 0) { + repeat(genConfig.revision_passes) { revisionNum -> + 
log.debug("Revision pass ${revisionNum + 1}") + + val revisionAgent = ChatAgent( + prompt = """ +You are a senior editor. Review and improve this article while maintaining its factual accuracy. + +Article: +${article.content} + +Improve: +- Clarity and readability +- Flow and transitions +- Lead strength and hook +- Quote integration +- Fact presentation +- Conclusion impact + +Maintain: +- All verified facts +- Source attributions +- Word count (currently ${article.word_count}, target ${genConfig.target_word_count}) +- Journalistic standards +- ${genConfig.writing_style} style + +Provide the revised article content only. + """.trimIndent(), + model = api, + temperature = 0.6 + ) + + val revisedContent = revisionAgent.answer(listOf("Revise the article")) + article = article.copy( + content = revisedContent, + word_count = revisedContent.split("\\s+".toRegex()).size + ) + } + } + + val articleContent = buildString { + appendLine("# ${article.headline}") + appendLine() + appendLine("## ${article.subheadline}") + appendLine() + appendLine("**${article.byline}**") + appendLine() + appendLine("*${article.dateline}*") + appendLine() + appendLine("---") + appendLine() + appendLine(article.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${article.word_count}") + appendLine() + appendLine("**Key Facts:**") + article.key_facts.forEach { fact -> + appendLine("- $fact") + } + appendLine() + appendLine("**Sources Cited:**") + article.sources_cited.forEach { source -> + appendLine("- $source") + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + writingTask.add(articleContent.renderMarkdown) + task.update() + transcript?.write("\n## Generated Article\n\n".toByteArray()) + transcript?.write(articleContent.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + + resultBuilder.append("# ${article.headline}\n\n") + resultBuilder.append("## ${article.subheadline}\n\n") + resultBuilder.append("${article.byline}\n\n") + 
resultBuilder.append("${article.dateline}\n\n") + resultBuilder.append("---\n\n") + resultBuilder.append(article.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ Phase 3 Complete: Article written (${article.word_count} words)\n".renderMarkdown) + task.update() + + // Phase 4: Generate social snippets if requested + if (genConfig.generate_social_snippets) { + log.info("Phase 4: Generating social media snippets") + overviewTask.add("\n### Phase 4: Social Media\n*Creating social snippets...*\n".renderMarkdown) + task.update() + + val socialTask = task.ui.newTask(false) + tabs["Social Media"] = socialTask.placeholder + + socialTask.add( + buildString { + appendLine("# Social Media Snippets") + appendLine() + appendLine("**Status:** Creating platform-specific content...") + appendLine() + }.renderMarkdown + ) + task.update() + + val socialAgent = ParsedAgent( + resultClass = SocialSnippets::class.java, + prompt = """ +You are a social media editor. Create engaging snippets for different platforms. 
+ +Article Headline: ${article.headline} +Article Summary: ${article.content.take(500)} + +Create platform-specific snippets: +- Twitter: 280 characters max, engaging hook, relevant hashtags +- Facebook: 2-3 sentences, conversational tone, question or call-to-action +- LinkedIn: Professional tone, business angle, 2-3 sentences + +Make each snippet: +- Platform-appropriate in tone and length +- Compelling and clickworthy +- Accurate to the article content +- Include relevant hashtags where appropriate + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val socialSnippets = socialAgent.answer(listOf("Generate snippets")).obj + + val socialContent = buildString { + appendLine("## Platform Snippets") + appendLine() + appendLine("### Twitter") + appendLine("```") + appendLine(socialSnippets.twitter) + appendLine("```") + appendLine() + appendLine("### Facebook") + appendLine("```") + appendLine(socialSnippets.facebook) + appendLine("```") + appendLine() + appendLine("### LinkedIn") + appendLine("```") + appendLine(socialSnippets.linkedin) + appendLine("```") + appendLine() + appendLine("**Status:** ✅ Complete") + } + socialTask.add(socialContent.renderMarkdown) + task.update() + transcript?.write("\n## Social Media Snippets\n\n".toByteArray()) + transcript?.write(socialContent.toByteArray()) + transcript?.write("\n\n".toByteArray()) + + + overviewTask.add("✅ Phase 4 Complete: Social snippets created\n".renderMarkdown) + task.update() + } + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + val targetAccuracy = (article.word_count.toFloat() / genConfig.target_word_count * 100).toInt() + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Article Format: ${genConfig.article_format}") + appendLine("- Word Count: ${article.word_count}") + 
appendLine("- Target Word Count: ${genConfig.target_word_count}") + appendLine("- Target Accuracy: $targetAccuracy%") + appendLine("- Sources Cited: ${article.sources_cited.size}") + appendLine("- Key Facts: ${article.key_facts.size}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + task.update() + transcript?.write("\n## Final Statistics\n\n".toByteArray()) + transcript?.write("- Article Format: ${genConfig.article_format}\n".toByteArray()) + transcript?.write("- Word Count: ${article.word_count}\n".toByteArray()) + transcript?.write("- Target Word Count: ${genConfig.target_word_count}\n".toByteArray()) + transcript?.write("- Target Accuracy: $targetAccuracy%\n".toByteArray()) + transcript?.write("- Sources Cited: ${article.sources_cited.size}\n".toByteArray()) + transcript?.write("- Key Facts: ${article.key_facts.size}\n".toByteArray()) + transcript?.write("- Total Time: ${totalTime / 1000.0}s\n".toByteArray()) + transcript?.write("\n**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) + + + // Per best practices, the final result passed to resultFn should be a concise summary + val finalResult = buildString { + appendLine("# Article Generation Summary") + appendLine() + appendLine("A complete **${genConfig.article_format}** article of **${article.word_count} words** was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("## ${article.headline}") + appendLine() + appendLine("### ${article.subheadline}") + appendLine() + appendLine("> The full article is available in the Article Draft tab for review.") + appendLine() + appendLine("**Key Metrics:**") + appendLine("- Sources: ${article.sources_cited.size}") + appendLine("- Key Facts: ${article.key_facts.size}") + appendLine("- Target Accuracy: $targetAccuracy%") + } + + 
log.info("ArticleGenerationTask completed: words=${article.word_count}, sources=${article.sources_cited.size}, time=${totalTime}ms") + transcript?.close() + + task.safeComplete("Article generation complete: ${article.word_count} words in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during article generation", e) + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + transcript?.write("\n## Error\n\n".toByteArray()) + transcript?.write("**Error:** ${e.message}\n".toByteArray()) + transcript?.write("**Type:** ${e.javaClass.simpleName}\n".toByteArray()) + transcript?.close() + + + val errorOutput = buildString { + appendLine("# Error in Article Generation") + appendLine() + appendLine("**Story:** $storyTopic") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + file.readText() + } + "# 
$relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: File): Boolean { + return textExtensions.contains(file.extension.lowercase()) + } + + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object { + private val log: Logger = LoggerFactory.getLogger(ArticleGenerationTask::class.java) + private val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" + ) + + val ArticleGeneration = TaskType( + "ArticleGeneration", + ArticleGenerationTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate complete journalistic articles from investigation and analysis", + """ + Extends JournalismReasoning to generate publication-ready articles. +
+
+    • Performs comprehensive journalism investigation (inherited from JournalismReasoning)
+    • Creates detailed article structure and outline
+    • Writes complete article following journalistic standards
+    • Supports multiple formats (news, feature, investigative, opinion, profile)
+    • Configurable style, tone, and target publication
+    • Includes quotes, data, expert analysis, and context as configured
+    • Optional revision passes for quality improvement
+    • Can generate headlines and social media snippets
+    • Produces publication-ready articles with proper structure and attribution
+    • Ideal for news writing, content creation, journalism training
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/BusinessProposalTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/BusinessProposalTask.kt new file mode 100644 index 000000000..d46caf995 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/BusinessProposalTask.kt @@ -0,0 +1,1705 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class BusinessProposalTask( + orchestrationConfig: OrchestrationConfig, + planTask: BusinessProposalTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + class BusinessProposalTaskExecutionConfigData( + @Description("The title or name of the proposal") + val proposal_title: String? = null, + + @Description("The type of proposal (e.g., 'project', 'investment', 'grant', 'partnership', 'rfp_response')") + val proposal_type: String = "project", + + @Description("The primary objective or goal of the proposal") + val objective: String? 
= null, + + @Description("The organization or individual submitting the proposal") + val proposing_organization: String? = null, + + @Description("The target audience or decision-makers who will evaluate the proposal") + val decision_makers: List? = null, + + @Description("Budget range or financial scope (e.g., '$50,000-$100,000', 'under $1M')") + val budget_range: String? = null, + + @Description("Project timeline or duration (e.g., '6 months', '2024-2025', 'Q1-Q3')") + val timeline: String? = null, + + @Description("Key stakeholders and their interests") + val stakeholders: Map? = null, + + @Description("Whether to include detailed ROI calculations and financial projections") + val include_roi_analysis: Boolean = true, + + @Description("Whether to include risk assessment and mitigation strategies") + val include_risk_assessment: Boolean = true, + + @Description("Whether to include competitive analysis or alternatives comparison") + val include_competitive_analysis: Boolean = true, + + @Description("Whether to include detailed timeline with milestones") + val include_timeline_milestones: Boolean = true, + + @Description("Whether to include team/resource requirements") + val include_resource_requirements: Boolean = true, + + @Description("Whether to include appendices and supporting documents") + val include_appendices: Boolean = true, + + @Description("Urgency level of the opportunity (e.g., 'critical', 'high', 'moderate', 'low')") + val urgency_level: String = "moderate", + + @Description("Tone of the proposal (e.g., 'formal', 'professional', 'persuasive', 'collaborative')") + val tone: String = "professional", + + @Description("Target word count for the complete proposal") + val target_word_count: Int = 3000, + + @Description("Number of revision passes for quality improvement") + val revision_passes: Int = 1, + + @Description("Related files or research to incorporate") + val related_files: List? = null, + @Description("The specific files (or file patterns, e.g. 
**/*.kt) to be used as input for the task") + val input_files: List? = null, + + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = BusinessProposal.name, + task_description = task_description ?: "Generate business proposal: '$proposal_title'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (proposal_title.isNullOrBlank()) { + return "proposal_title must not be null or blank" + } + if (objective.isNullOrBlank()) { + return "objective must not be null or blank" + } + if (target_word_count <= 0) { + return "target_word_count must be positive, got: $target_word_count" + } + if (revision_passes < 0 || revision_passes > 5) { + return "revision_passes must be between 0 and 5, got: $revision_passes" + } + if (proposal_type.isBlank()) { + return "proposal_type must not be blank" + } + if (urgency_level.isBlank()) { + return "urgency_level must not be blank" + } + if (tone.isBlank()) { + return "tone must not be blank" + } + return ValidatedObject.validateFields(this) + } + } + + data class ProposalOutline( + @Description("The proposal title") + val title: String = "", + @Description("Executive summary overview") + val executive_summary: String = "", + @Description("Problem statement or opportunity") + val problem_statement: String = "", + @Description("Proposed solution overview") + val solution_overview: String = "", + @Description("Main sections of the proposal") + val sections: List = emptyList(), + @Description("Key success metrics") + val success_metrics: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? 
{ + if (title.isBlank()) return "title must not be blank" + if (executive_summary.isBlank()) return "executive_summary must not be blank" + if (problem_statement.isBlank()) return "problem_statement must not be blank" + if (sections.isEmpty()) return "sections must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class ProposalSection( + @Description("Section title") + val title: String = "", + @Description("Section purpose") + val purpose: String = "", + @Description("Key points to cover") + val key_points: List = emptyList(), + @Description("Estimated word count") + val estimated_word_count: Int = 0 + ) : ValidatedObject { + override fun validate(): String? { + if (title.isBlank()) return "title must not be blank" + if (purpose.isBlank()) return "purpose must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class StakeholderAnalysis( + @Description("Stakeholder analyses") + val stakeholders: List = emptyList() + ) : ValidatedObject + + data class StakeholderProfile( + @Description("Stakeholder name or role") + val name: String = "", + @Description("Their primary interests") + val interests: List = emptyList(), + @Description("Their concerns or objections") + val concerns: List = emptyList(), + @Description("How to address their needs") + val addressing_strategy: String = "", + @Description("Their influence level") + val influence_level: String = "" + ) : ValidatedObject { + override fun validate(): String? 
{ + if (name.isBlank()) return "name must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class ROIAnalysis( + @Description("Financial projections") + val financial_projections: FinancialProjections = FinancialProjections(), + @Description("Cost breakdown") + val cost_breakdown: List = emptyList(), + @Description("Expected benefits") + val expected_benefits: List = emptyList(), + @Description("ROI calculation summary") + val roi_summary: String = "", + @Description("Payback period") + val payback_period: String = "" + ) : ValidatedObject + + data class FinancialProjections( + @Description("Total investment required") + val total_investment: String = "", + @Description("Year 1 projected return") + val year_1_return: String = "", + @Description("Year 2 projected return") + val year_2_return: String = "", + @Description("Year 3 projected return") + val year_3_return: String = "", + @Description("Break-even point") + val break_even_point: String = "" + ) : ValidatedObject + + data class CostItem( + @Description("Cost category") + val category: String = "", + @Description("Amount") + val amount: String = "", + @Description("Justification") + val justification: String = "" + ) : ValidatedObject + + data class Benefit( + @Description("Benefit type") + val type: String = "", + @Description("Description") + val description: String = "", + @Description("Quantifiable value") + val quantifiable_value: String = "", + @Description("Timeline to realize") + val timeline: String = "" + ) : ValidatedObject + + data class RiskAssessment( + @Description("Identified risks") + val risks: List = emptyList(), + @Description("Overall risk level") + val overall_risk_level: String = "" + ) : ValidatedObject + + data class Risk( + @Description("Risk category") + val category: String = "", + @Description("Risk description") + val description: String = "", + @Description("Probability") + val probability: String = "", + @Description("Impact level") + val impact: 
String = "", + @Description("Mitigation strategy") + val mitigation_strategy: String = "" + ) : ValidatedObject { + override fun validate(): String? { + if (description.isBlank()) return "description must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class CompetitiveAnalysis( + @Description("Alternative approaches") + val alternatives: List = emptyList(), + @Description("Competitive advantages") + val competitive_advantages: List = emptyList(), + @Description("Why this proposal is superior") + val superiority_statement: String = "" + ) : ValidatedObject + + data class Alternative( + @Description("Alternative name") + val name: String = "", + @Description("Description") + val description: String = "", + @Description("Pros") + val pros: List = emptyList(), + @Description("Cons") + val cons: List = emptyList(), + @Description("Why our proposal is better") + val comparison: String = "" + ) : ValidatedObject + + data class TimelineMilestones( + @Description("Project phases") + val phases: List = emptyList(), + @Description("Critical path items") + val critical_path: List = emptyList() + ) : ValidatedObject + + data class ProjectPhase( + @Description("Phase name") + val name: String = "", + @Description("Duration") + val duration: String = "", + @Description("Key deliverables") + val deliverables: List = emptyList(), + @Description("Dependencies") + val dependencies: List = emptyList() + ) : ValidatedObject + + data class ProposalContent( + @Description("Section title") + val section_title: String = "", + @Description("Section content") + val content: String = "", + @Description("Word count") + val word_count: Int = 0, + @Description("Key messages") + val key_messages: List = emptyList() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ +BusinessProposal - Generate comprehensive business proposals with ROI analysis and risk assessment + ** Specify the proposal title and objective + ** Define proposal type 
(project, investment, grant, partnership, RFP response) + ** Identify decision-makers and stakeholders + ** Set budget range and timeline + ** Enable ROI calculations and financial projections + ** Include risk assessment and mitigation strategies + ** Add competitive analysis and alternatives comparison + ** Generate timeline with milestones + ** Specify resource requirements + ** Produces complete, persuasive business proposal + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + // Create transcript file + val transcriptStream = transcript(task) + val proposalStream = proposalFile(task) + transcriptStream?.let { stream -> + stream.write("# Business Proposal Generation Transcript\n\n".toByteArray()) + stream.write("**Proposal:** ${executionConfig?.proposal_title}\n".toByteArray()) + stream.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + stream.write("---\n\n".toByteArray()) + } + fun logToTranscript(message: String) { + transcriptStream?.write("$message\n".toByteArray()) + } + + fun writeToProposal(message: String) { + proposalStream?.write("$message\n".toByteArray()) + } + + val startTime = System.currentTimeMillis() + log.info("Starting BusinessProposalTask for: '${executionConfig?.proposal_title}'") + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + logToTranscript("## Configuration Validation Failed\n\n$validationError\n") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + resultFn("CONFIGURATION ERROR: $validationError") + return + } + + val proposalTitle = executionConfig?.proposal_title + if (proposalTitle.isNullOrBlank()) { + log.error("No proposal 
title specified") + logToTranscript("## Error: No Proposal Title\n") + task.safeComplete("CONFIGURATION ERROR: No proposal title specified", log) + resultFn("CONFIGURATION ERROR: No proposal title specified") + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Business Proposal Generation") + appendLine() + appendLine("**Proposal:** $proposalTitle") + appendLine() + appendLine("## Configuration") + appendLine("- Type: ${executionConfig.proposal_type}") + appendLine("- Objective: ${executionConfig.objective}") + appendLine("- Proposing Organization: ${executionConfig.proposing_organization ?: "Not specified"}") + appendLine("- Budget Range: ${executionConfig.budget_range ?: "Not specified"}") + appendLine("- Timeline: ${executionConfig.timeline ?: "Not specified"}") + appendLine("- Urgency: ${executionConfig.urgency_level}") + appendLine("- Tone: ${executionConfig.tone}") + appendLine("- Target Word Count: ${executionConfig.target_word_count}") + appendLine() + appendLine("## Analysis Components") + appendLine("- ROI Analysis: ${if (executionConfig.include_roi_analysis) "✓" else "✗"}") + appendLine("- Risk Assessment: ${if (executionConfig.include_risk_assessment) "✓" else "✗"}") + appendLine("- Competitive Analysis: ${if (executionConfig.include_competitive_analysis) "✓" else "✗"}") + appendLine("- Timeline & Milestones: ${if (executionConfig.include_timeline_milestones) "✓" else "✗"}") + appendLine("- Resource Requirements: ${if (executionConfig.include_resource_requirements) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + 
appendLine("### Phase 1: Strategic Analysis") + appendLine("*Analyzing stakeholders and strategic positioning...*") + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + + val resultBuilder = StringBuilder() + resultBuilder.append("# Business Proposal: $proposalTitle\n\n") + // Load input files if specified + val inputFileContent = getInputFileContent() + val messagesWithContext = if (inputFileContent.isNotBlank()) { + messages + listOf( + "## Input Files Context\n\n$inputFileContent" + ) + } else { + messages + } + // Include messages in context + val messagesContext = if (messagesWithContext.isNotEmpty()) { + buildString { + appendLine("## User Input") + appendLine() + messagesWithContext.forEach { msg -> + appendLine(msg) + appendLine() + } + } + } else { + "" + } + + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) + val contextFiles = getContextFiles() + + if (priorContext.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Research Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Research Context") + appendLine() + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Related Files") + appendLine(contextFiles.truncateForDisplay(2000)) + } + if (messagesContext.isNotBlank()) { + appendLine() + appendLine(messagesContext.truncateForDisplay(2000)) + } + }.renderMarkdown + ) + task.update() + } + + // Phase 1: Stakeholder Analysis + log.info("Phase 1: Analyzing stakeholders") + logToTranscript("## Phase 1: Stakeholder Analysis\n\n") + val stakeholderTask = task.ui.newTask(false) + tabs["Stakeholder Analysis"] = stakeholderTask.placeholder + + stakeholderTask.add( + buildString { + 
appendLine("# Stakeholder Analysis") + appendLine() + appendLine("**Status:** Analyzing decision-makers and stakeholders...") + appendLine() + }.renderMarkdown + ) + task.update() + + val stakeholderAgent = ParsedAgent( + resultClass = StakeholderAnalysis::class.java, + prompt = """ +You are a strategic business analyst. Analyze the stakeholders for this business proposal. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} + +Decision Makers: ${executionConfig.decision_makers?.joinToString(", ") ?: "Not specified"} +Known Stakeholders: ${executionConfig.stakeholders?.entries?.joinToString("\n") { (name, interest) -> "- $name: $interest" } ?: "Not specified"} + +${if (priorContext.isNotBlank()) "Context:\n${priorContext.truncateForDisplay(2000)}\n" else ""} + +For each key stakeholder (decision-makers and influencers), provide: +- Name or role +- Their primary interests and priorities +- Potential concerns or objections they might have +- Strategy for addressing their needs in the proposal +- Their influence level (High/Medium/Low) + +Consider: +- What motivates each stakeholder? +- What are their success criteria? +- What risks or concerns might they have? +- How can the proposal align with their goals? + +Identify 3-5 key stakeholders who will influence the decision. 
+ """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val stakeholderAnalysis = stakeholderAgent.answer(listOf("Analyze stakeholders")).obj + log.debug("Analyzed ${stakeholderAnalysis.stakeholders.size} stakeholders") + logToTranscript("Identified ${stakeholderAnalysis.stakeholders.size} key stakeholders\n\n") + writeToProposal("## Key Stakeholders\n\n") + + val stakeholderContent = buildString { + appendLine("## Key Stakeholders") + appendLine() + stakeholderAnalysis.stakeholders.forEach { stakeholder -> + val influenceIcon = when (stakeholder.influence_level.lowercase()) { + "high" -> "🔴" + "medium" -> "🟡" + else -> "🟢" + } + appendLine("### $influenceIcon ${stakeholder.name}") + appendLine() + appendLine("**Influence Level:** ${stakeholder.influence_level}") + appendLine() + appendLine("**Interests:**") + stakeholder.interests.forEach { interest -> + appendLine("- $interest") + } + appendLine() + if (stakeholder.concerns.isNotEmpty()) { + appendLine("**Concerns:**") + stakeholder.concerns.forEach { concern -> + appendLine("- $concern") + } + appendLine() + } + appendLine("**Addressing Strategy:** ${stakeholder.addressing_strategy}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + stakeholderTask.add(stakeholderContent.renderMarkdown) + task.update() + writeToProposal(stakeholderContent) + + overviewTask.add("✅ Phase 1 Complete: Stakeholder analysis finished\n".renderMarkdown) + + // Phase 2: ROI Analysis (if enabled) + var roiAnalysis: ROIAnalysis? 
= null + if (executionConfig.include_roi_analysis) { + logToTranscript("## Phase 2: ROI Analysis\n\n") + overviewTask.add("\n### Phase 2: ROI Analysis\n*Calculating financial projections and ROI...*\n".renderMarkdown) + task.update() + + log.info("Phase 2: Performing ROI analysis") + val roiTask = task.ui.newTask(false) + tabs["ROI Analysis"] = roiTask.placeholder + + roiTask.add( + buildString { + appendLine("# ROI Analysis") + appendLine() + appendLine("**Status:** Calculating financial projections...") + appendLine() + }.renderMarkdown + ) + task.update() + + val roiAgent = ParsedAgent( + resultClass = ROIAnalysis::class.java, + prompt = """ +You are a financial analyst. Create a comprehensive ROI analysis for this business proposal. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} +Budget Range: ${executionConfig.budget_range ?: "Not specified"} +Timeline: ${executionConfig.timeline ?: "Not specified"} + +${if (priorContext.isNotBlank()) "Context:\n${priorContext.truncateForDisplay(2000)}\n" else ""} + +Provide: +1. Financial Projections: + - Total investment required + - Year 1, 2, and 3 projected returns + - Break-even point + +2. Cost Breakdown: + - Major cost categories (personnel, technology, operations, etc.) + - Amount for each category + - Justification for each cost + +3. Expected Benefits: + - Quantifiable benefits (revenue increase, cost savings, efficiency gains) + - Timeline to realize each benefit + - Both tangible and intangible benefits + +4. ROI Summary: + - Overall ROI calculation + - Payback period + - Key financial metrics + +Be realistic and conservative in projections. Include assumptions. +If specific numbers aren't provided, use reasonable estimates based on the proposal type and industry standards. 
+ """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + roiAnalysis = roiAgent.answer(listOf("Perform ROI analysis")).obj + log.debug("ROI analysis complete") + logToTranscript("ROI Analysis complete: ${roiAnalysis.roi_summary.take(200)}\n\n") + + val roiContent = buildString { + appendLine("## Financial Projections") + appendLine() + appendLine("| Metric | Value |") + appendLine("|--------|-------|") + appendLine("| Total Investment | ${roiAnalysis.financial_projections.total_investment} |") + appendLine("| Year 1 Return | ${roiAnalysis.financial_projections.year_1_return} |") + appendLine("| Year 2 Return | ${roiAnalysis.financial_projections.year_2_return} |") + appendLine("| Year 3 Return | ${roiAnalysis.financial_projections.year_3_return} |") + appendLine("| Break-Even Point | ${roiAnalysis.financial_projections.break_even_point} |") + appendLine() + appendLine("### Cost Breakdown") + appendLine() + roiAnalysis.cost_breakdown.forEach { cost -> + appendLine("**${cost.category}:** ${cost.amount}") + appendLine("- ${cost.justification}") + appendLine() + } + appendLine("### Expected Benefits") + appendLine() + roiAnalysis.expected_benefits.forEach { benefit -> + appendLine("**${benefit.type}**") + appendLine("- Description: ${benefit.description}") + appendLine("- Value: ${benefit.quantifiable_value}") + appendLine("- Timeline: ${benefit.timeline}") + appendLine() + } + appendLine("### ROI Summary") + appendLine() + appendLine(roiAnalysis.roi_summary) + appendLine() + appendLine("**Payback Period:** ${roiAnalysis.payback_period}") + appendLine() + appendLine("**Status:** ✅ Complete") + } + roiTask.add(roiContent.renderMarkdown) + task.update() + writeToProposal(roiContent) + + overviewTask.add("✅ Phase 2 Complete: ROI analysis finished\n".renderMarkdown) + } + + // Phase 3: Risk Assessment (if enabled) + var riskAssessment: RiskAssessment? 
= null + if (executionConfig.include_risk_assessment) { + logToTranscript("## Phase 3: Risk Assessment\n\n") + overviewTask.add("\n### Phase 3: Risk Assessment\n*Identifying and mitigating risks...*\n".renderMarkdown) + task.update() + + log.info("Phase 3: Performing risk assessment") + val riskTask = task.ui.newTask(false) + tabs["Risk Assessment"] = riskTask.placeholder + + riskTask.add( + buildString { + appendLine("# Risk Assessment") + appendLine() + appendLine("**Status:** Identifying risks and mitigation strategies...") + appendLine() + }.renderMarkdown + ) + task.update() + + val riskAgent = ParsedAgent( + resultClass = RiskAssessment::class.java, + prompt = """ +You are a risk management expert. Identify and assess risks for this business proposal. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} +Timeline: ${executionConfig.timeline ?: "Not specified"} + +${if (priorContext.isNotBlank()) "Context:\n${priorContext.truncateForDisplay(2000)}\n" else ""} + +Identify 5-7 key risks across categories: +- Technical risks +- Financial risks +- Operational risks +- Market/competitive risks +- Organizational/people risks +- Timeline/schedule risks + +For each risk, provide: +- Category +- Clear description of the risk +- Probability (High/Medium/Low) +- Impact level (High/Medium/Low) +- Specific mitigation strategy + +Also provide an overall risk level assessment (Low/Moderate/High/Critical). + +Be realistic but not alarmist. Focus on actionable mitigation strategies. + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + riskAssessment = riskAgent.answer(listOf("Assess risks")).obj + log.debug("Identified ${riskAssessment.risks.size} risks") + logToTranscript("Identified ${riskAssessment.risks.size} risks. 
Overall risk level: ${riskAssessment.overall_risk_level}\n\n") + + val riskContent = buildString { + appendLine("## Overall Risk Level: ${riskAssessment.overall_risk_level}") + appendLine() + appendLine("## Identified Risks") + appendLine() + riskAssessment.risks.forEach { risk -> + val riskIcon = when { + risk.probability.lowercase() == "high" && risk.impact.lowercase() == "high" -> "🔴" + risk.probability.lowercase() == "high" || risk.impact.lowercase() == "high" -> "🟡" + else -> "🟢" + } + appendLine("### $riskIcon ${risk.category}") + appendLine() + appendLine("**Description:** ${risk.description}") + appendLine() + appendLine("**Probability:** ${risk.probability} | **Impact:** ${risk.impact}") + appendLine() + appendLine("**Mitigation Strategy:**") + appendLine(risk.mitigation_strategy) + appendLine() + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + riskTask.add(riskContent.renderMarkdown) + task.update() + writeToProposal(riskContent) + + overviewTask.add("✅ Phase 3 Complete: Risk assessment finished\n".renderMarkdown) + } + + // Phase 4: Competitive Analysis (if enabled) + var competitiveAnalysis: CompetitiveAnalysis? = null + if (executionConfig.include_competitive_analysis) { + logToTranscript("## Phase 4: Competitive Analysis\n\n") + overviewTask.add("\n### Phase 4: Competitive Analysis\n*Analyzing alternatives and competitive advantages...*\n".renderMarkdown) + task.update() + + log.info("Phase 4: Performing competitive analysis") + val competitiveTask = task.ui.newTask(false) + tabs["Competitive Analysis"] = competitiveTask.placeholder + + competitiveTask.add( + buildString { + appendLine("# Competitive Analysis") + appendLine() + appendLine("**Status:** Analyzing alternatives and positioning...") + appendLine() + }.renderMarkdown + ) + task.update() + + val competitiveAgent = ParsedAgent( + resultClass = CompetitiveAnalysis::class.java, + prompt = """ +You are a competitive strategy analyst. 
Analyze alternatives and competitive positioning for this proposal. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} + +${if (priorContext.isNotBlank()) "Context:\n${priorContext.truncateForDisplay(2000)}\n" else ""} + +Identify 3-4 alternative approaches or competing solutions, including: +- Status quo (doing nothing) +- Alternative vendors or approaches +- In-house vs. outsourced options +- Different implementation strategies + +For each alternative: +- Name and brief description +- Pros (advantages) +- Cons (disadvantages) +- Why our proposal is better (specific comparison) + +Also provide: +- List of competitive advantages of this proposal +- A clear superiority statement explaining why this proposal is the best choice + +Be fair to alternatives but make a compelling case for this proposal. + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + competitiveAnalysis = competitiveAgent.answer(listOf("Analyze competition")).obj + log.debug("Analyzed ${competitiveAnalysis.alternatives.size} alternatives") + logToTranscript("Analyzed ${competitiveAnalysis.alternatives.size} alternative approaches\n\n") + + val competitiveContent = buildString { + appendLine("## Competitive Advantages") + appendLine() + competitiveAnalysis.competitive_advantages.forEach { advantage -> + appendLine("- $advantage") + } + appendLine() + appendLine("## Alternative Approaches") + appendLine() + competitiveAnalysis.alternatives.forEach { alt -> + appendLine("### ${alt.name}") + appendLine() + appendLine(alt.description) + appendLine() + appendLine("**Pros:**") + alt.pros.forEach { pro -> + appendLine("- $pro") + } + appendLine() + appendLine("**Cons:**") + alt.cons.forEach { con -> + appendLine("- $con") + } + appendLine() + appendLine("**Why Our Proposal is Better:**") + appendLine(alt.comparison) + appendLine() + appendLine("---") + appendLine() + } + appendLine("## 
Why This Proposal is Superior") + appendLine() + appendLine(competitiveAnalysis.superiority_statement) + appendLine() + appendLine("**Status:** ✅ Complete") + } + competitiveTask.add(competitiveContent.renderMarkdown) + task.update() + writeToProposal(competitiveContent) + + overviewTask.add("✅ Phase 4 Complete: Competitive analysis finished\n".renderMarkdown) + } + + // Phase 5: Timeline & Milestones (if enabled) + var timelineMilestones: TimelineMilestones? = null + if (executionConfig.include_timeline_milestones) { + logToTranscript("## Phase 5: Timeline & Milestones\n\n") + overviewTask.add("\n### Phase 5: Timeline & Milestones\n*Creating project timeline...*\n".renderMarkdown) + task.update() + + log.info("Phase 5: Creating timeline and milestones") + val timelineTask = task.ui.newTask(false) + tabs["Timeline & Milestones"] = timelineTask.placeholder + + timelineTask.add( + buildString { + appendLine("# Timeline & Milestones") + appendLine() + appendLine("**Status:** Creating project timeline...") + appendLine() + }.renderMarkdown + ) + task.update() + + val timelineAgent = ParsedAgent( + resultClass = TimelineMilestones::class.java, + prompt = """ +You are a project management expert. Create a detailed timeline with milestones for this proposal. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} +Timeline: ${executionConfig.timeline ?: "Not specified"} + +${if (priorContext.isNotBlank()) "Context:\n${priorContext.truncateForDisplay(2000)}\n" else ""} + +Create a project timeline with: +1. 4-6 major phases (e.g., Planning, Design, Implementation, Testing, Launch, Optimization) +2. For each phase: + - Name + - Duration + - Key deliverables + - Dependencies on other phases + +3. Critical path items (tasks that must be completed on time to avoid delays) + +Be realistic about timelines. Include buffer time for unexpected issues. +Ensure phases flow logically and dependencies are clear. 
+ """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + timelineMilestones = timelineAgent.answer(listOf("Create timeline")).obj + log.debug("Created timeline with ${timelineMilestones.phases.size} phases") + logToTranscript("Created project timeline with ${timelineMilestones.phases.size} phases\n\n") + + val timelineContent = buildString { + appendLine("## Project Phases") + appendLine() + timelineMilestones.phases.forEachIndexed { index, phase -> + appendLine("### Phase ${index + 1}: ${phase.name}") + appendLine() + appendLine("**Duration:** ${phase.duration}") + appendLine() + appendLine("**Key Deliverables:**") + phase.deliverables.forEach { deliverable -> + appendLine("- $deliverable") + } + appendLine() + if (phase.dependencies.isNotEmpty()) { + appendLine("**Dependencies:**") + phase.dependencies.forEach { dep -> + appendLine("- $dep") + } + appendLine() + } + appendLine("---") + appendLine() + } + appendLine("## Critical Path") + appendLine() + timelineMilestones.critical_path.forEach { item -> + appendLine("- $item") + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + timelineTask.add(timelineContent.renderMarkdown) + task.update() + writeToProposal(timelineContent) + + overviewTask.add("✅ Phase 5 Complete: Timeline created\n".renderMarkdown) + } + + // Phase 6: Create Proposal Outline + logToTranscript("## Phase 6: Proposal Structure\n\n") + overviewTask.add("\n### Phase 6: Proposal Structure\n*Creating detailed outline...*\n".renderMarkdown) + task.update() + + log.info("Phase 6: Creating proposal outline") + val outlineTask = task.ui.newTask(false) + tabs["Proposal Outline"] = outlineTask.placeholder + + outlineTask.add( + buildString { + appendLine("# Proposal Outline") + appendLine() + appendLine("**Status:** Creating detailed structure...") + appendLine() + }.renderMarkdown + ) + task.update() + + val wordsPerSection = executionConfig.target_word_count / 8 // Rough estimate for 
8 main sections + + val outlineAgent = ParsedAgent( + resultClass = ProposalOutline::class.java, + prompt = """ +You are a business proposal expert. Create a detailed outline for this proposal. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} +Target Word Count: ${executionConfig.target_word_count} + +Stakeholder Analysis Summary: +${stakeholderAnalysis.stakeholders.take(3).joinToString("\n") { "- ${it.name}: ${it.interests.firstOrNull() ?: ""}" }} + +${if (roiAnalysis != null) "ROI Summary: ${roiAnalysis.roi_summary.take(200)}" else ""} +${if (riskAssessment != null) "Risk Level: ${riskAssessment.overall_risk_level}" else ""} + +Create a comprehensive outline with: +1. Title +2. Executive Summary (compelling 1-paragraph overview) +3. Problem Statement (what opportunity or challenge this addresses) +4. Solution Overview (high-level description of the proposal) +5. Main sections (6-8 sections covering): + - Background/Context + - Proposed Solution (detailed) + - Implementation Approach + ${if (roiAnalysis != null) "- Financial Analysis" else ""} + ${if (riskAssessment != null) "- Risk Management" else ""} + ${if (competitiveAnalysis != null) "- Competitive Positioning" else ""} + ${if (timelineMilestones != null) "- Timeline & Milestones" else ""} + ${if (executionConfig.include_resource_requirements) "- Resource Requirements" else ""} + - Conclusion & Next Steps + +For each section: +- Clear title +- Purpose (what this section accomplishes) +- 3-5 key points to cover +- Estimated word count (~$wordsPerSection words per section) + +6. Success metrics (how success will be measured) + +Tailor the outline to the ${executionConfig.proposal_type} proposal type and ${executionConfig.tone} tone. 
+ """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val outline = outlineAgent.answer(listOf("Create outline")).obj + log.debug("Outline created with ${outline.sections.size} sections") + logToTranscript("Created outline with ${outline.sections.size} main sections\n\n") + + val outlineContent = buildString { + appendLine("## ${outline.title}") + appendLine() + appendLine("### Executive Summary") + appendLine(outline.executive_summary) + appendLine() + appendLine("### Problem Statement") + appendLine(outline.problem_statement) + appendLine() + appendLine("### Solution Overview") + appendLine(outline.solution_overview) + appendLine() + appendLine("---") + appendLine() + appendLine("### Main Sections") + appendLine() + outline.sections.forEach { section -> + appendLine("#### ${section.title}") + appendLine() + appendLine("**Purpose:** ${section.purpose}") + appendLine() + appendLine("**Key Points:**") + section.key_points.forEach { point -> + appendLine("- $point") + } + appendLine() + appendLine("**Est. 
Words:** ${section.estimated_word_count}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("### Success Metrics") + appendLine() + outline.success_metrics.forEach { metric -> + appendLine("- $metric") + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + outlineTask.add(outlineContent.renderMarkdown) + task.update() + writeToProposal(outlineContent) + + overviewTask.add("✅ Phase 6 Complete: Outline created\n".renderMarkdown) + + // Phase 7: Write Proposal Sections + logToTranscript("## Phase 7: Content Generation\n\n") + overviewTask.add("\n### Phase 7: Content Generation\n*Writing proposal sections...*\n".renderMarkdown) + task.update() + + log.info("Phase 7: Writing proposal sections") + val proposalSections = mutableListOf() + var cumulativeWordCount = 0 + + // Write Executive Summary + val execSummaryTask = task.ui.newTask(false) + tabs["Executive Summary"] = execSummaryTask.placeholder + + execSummaryTask.add( + buildString { + appendLine("# Executive Summary") + appendLine() + appendLine("**Status:** Writing executive summary...") + appendLine() + }.renderMarkdown + ) + task.update() + + val execSummaryAgent = ParsedAgent( + resultClass = ProposalContent::class.java, + prompt = """ +You are a business proposal writer. Write a compelling executive summary. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} +Tone: ${executionConfig.tone} + +Outline Summary: ${outline.executive_summary} + +Write an executive summary (300-400 words) that: +1. Opens with a strong hook that captures attention +2. Clearly states the problem or opportunity +3. Presents the proposed solution at a high level +4. Highlights key benefits and ROI +5. Mentions critical success factors +6. Ends with a clear call to action or next steps + +Make it compelling and persuasive. Decision-makers should understand the value immediately. 
+Target audience: ${executionConfig.decision_makers?.joinToString(", ") ?: "Senior executives"} + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var execSummary = execSummaryAgent.answer(listOf("Write executive summary")).obj + proposalSections.add(execSummary) + cumulativeWordCount += execSummary.word_count + logToTranscript("Executive Summary written: ${execSummary.word_count} words\n") + + val execSummaryContent = buildString { + appendLine("## Executive Summary") + appendLine() + appendLine(execSummary.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${execSummary.word_count}") + appendLine() + appendLine("**Status:** ✅ Complete") + } + execSummaryTask.add( + execSummaryContent.renderMarkdown + ) + task.update() + writeToProposal(execSummaryContent) + + resultBuilder.append("## Executive Summary\n\n") + resultBuilder.append(execSummary.content) + resultBuilder.append("\n\n") + + overviewTask.add("- Executive Summary ✅ (${execSummary.word_count} words)\n".renderMarkdown) + task.update() + + // Write each main section + outline.sections.forEachIndexed { index, sectionOutline -> + log.info("Writing section ${index + 1}/${outline.sections.size}: ${sectionOutline.title}") + logToTranscript("Writing section: ${sectionOutline.title}\n") + + overviewTask.add("- ${sectionOutline.title} ".renderMarkdown) + task.update() + + val sectionTask = task.ui.newTask(false) + tabs[sectionOutline.title] = sectionTask.placeholder + + sectionTask.add( + buildString { + appendLine("# ${sectionOutline.title}") + appendLine() + appendLine("**Status:** Writing section...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Build context from previous sections + val previousContext = if (proposalSections.isNotEmpty()) { + buildString { + appendLine("## Previous Sections Summary") + proposalSections.takeLast(2).forEach { prevSection -> + 
appendLine("**${prevSection.section_title}:** ${prevSection.key_messages.firstOrNull() ?: ""}") + appendLine() + } + } + } else { + "This is the first main section after the executive summary." + } + + // Determine if this section should incorporate analysis results + val analysisContext = buildString { + when { + sectionOutline.title.contains("Financial", ignoreCase = true) && roiAnalysis != null -> { + appendLine("## ROI Analysis to Incorporate") + appendLine("ROI Summary: ${roiAnalysis.roi_summary}") + appendLine("Payback Period: ${roiAnalysis.payback_period}") + appendLine() + } + + sectionOutline.title.contains("Risk", ignoreCase = true) && riskAssessment != null -> { + appendLine("## Risk Assessment to Incorporate") + appendLine("Overall Risk Level: ${riskAssessment.overall_risk_level}") + riskAssessment.risks.take(3).forEach { risk -> + appendLine("- ${risk.category}: ${risk.description.take(100)}") + } + appendLine() + } + + sectionOutline.title.contains("Competitive", ignoreCase = true) && competitiveAnalysis != null -> { + appendLine("## Competitive Analysis to Incorporate") + appendLine(competitiveAnalysis.superiority_statement.take(200)) + appendLine() + } + + sectionOutline.title.contains("Timeline", ignoreCase = true) && timelineMilestones != null -> { + appendLine("## Timeline to Incorporate") + timelineMilestones.phases.take(3).forEach { phase -> + appendLine("- ${phase.name}: ${phase.duration}") + } + appendLine() + } + } + } + + val sectionAgent = ParsedAgent( + resultClass = ProposalContent::class.java, + prompt = """ +You are a business proposal writer. Write the "${sectionOutline.title}" section. 
+ +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Tone: ${executionConfig.tone} + +Section Purpose: ${sectionOutline.purpose} + +Key Points to Cover: +${sectionOutline.key_points.joinToString("\n") { "- $it" }} + +Target Word Count: ${sectionOutline.estimated_word_count} + +$previousContext + +$analysisContext + +${if (contextFiles.isNotBlank()) "Additional Context:\n${contextFiles.truncateForDisplay(1000)}\n" else ""} + +Write a well-structured section that: +1. Opens with a clear topic statement +2. Develops each key point with supporting details +3. Uses concrete examples and data where appropriate +4. Maintains a ${executionConfig.tone} tone +5. Connects to the overall proposal objective +6. Transitions smoothly to the next section + +Make it persuasive and professional. Use clear, concise language. +Aim for approximately ${sectionOutline.estimated_word_count} words. + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var sectionContent = sectionAgent.answer(listOf("Write section")).obj + proposalSections.add(sectionContent) + cumulativeWordCount += sectionContent.word_count + logToTranscript("Section '${sectionOutline.title}' completed: ${sectionContent.word_count} words\n") + + sectionTask.add( + buildString { + appendLine("## ${sectionOutline.title}") + appendLine() + appendLine(sectionContent.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${sectionContent.word_count}") + if (sectionContent.key_messages.isNotEmpty()) { + appendLine() + appendLine("**Key Messages:**") + sectionContent.key_messages.forEach { msg -> + appendLine("- $msg") + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + writeToProposal(sectionContent.content) + + resultBuilder.append("## ${sectionOutline.title}\n\n") + resultBuilder.append(sectionContent.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ 
(${sectionContent.word_count} words)\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 7 Complete: All sections written\n".renderMarkdown) + + // Phase 8: Conclusion & Next Steps + logToTranscript("\n## Phase 8: Conclusion & Next Steps\n\n") + overviewTask.add("\n### Phase 8: Conclusion\n*Writing conclusion and next steps...*\n".renderMarkdown) + task.update() + + log.info("Phase 8: Writing conclusion") + val conclusionTask = task.ui.newTask(false) + tabs["Conclusion"] = conclusionTask.placeholder + + conclusionTask.add( + buildString { + appendLine("# Conclusion & Next Steps") + appendLine() + appendLine("**Status:** Writing conclusion...") + appendLine() + }.renderMarkdown + ) + task.update() + + val conclusionAgent = ParsedAgent( + resultClass = ProposalContent::class.java, + prompt = """ +You are a business proposal writer. Write a compelling conclusion and next steps section. + +Proposal: $proposalTitle +Type: ${executionConfig.proposal_type} +Objective: ${executionConfig.objective} +Urgency Level: ${executionConfig.urgency_level} + +Success Metrics: +${outline.success_metrics.joinToString("\n") { "- $it" }} + +Write a conclusion (200-300 words) that: +1. Summarizes the key value proposition +2. Reinforces why this proposal is the best choice +3. Reiterates the urgency (${executionConfig.urgency_level} urgency) +4. Provides clear, specific next steps +5. Includes a call to action +6. Expresses confidence and readiness to proceed + +Make it action-oriented and compelling. The reader should feel motivated to move forward. 
+ """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var conclusion = conclusionAgent.answer(listOf("Write conclusion")).obj + cumulativeWordCount += conclusion.word_count + logToTranscript("Conclusion written: ${conclusion.word_count} words\n\n") + + val conclusionContent = buildString { + appendLine("## Conclusion & Next Steps") + appendLine() + appendLine(conclusion.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${conclusion.word_count}") + appendLine() + appendLine("**Status:** ✅ Complete") + } + conclusionTask.add( + conclusionContent.renderMarkdown + ) + task.update() + writeToProposal(conclusionContent) + + resultBuilder.append("## Conclusion & Next Steps\n\n") + resultBuilder.append(conclusion.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ Phase 8 Complete: Conclusion written (${conclusion.word_count} words)\n".renderMarkdown) + + // Phase 9: Revision (if enabled) + if (executionConfig.revision_passes > 0) { + logToTranscript("## Phase 9: Revision Process\n\n") + overviewTask.add("\n### Phase 9: Revision\n*Refining and polishing...*\n".renderMarkdown) + task.update() + + log.info("Phase 9: Performing ${executionConfig.revision_passes} revision pass(es)") + val revisionTask = task.ui.newTask(false) + tabs["Revision"] = revisionTask.placeholder + + revisionTask.add( + buildString { + appendLine("# Revision Process") + appendLine() + appendLine("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...") + appendLine() + }.renderMarkdown + ) + task.update() + + val fullProposal = resultBuilder.toString() + + repeat(executionConfig.revision_passes) { passNum -> + log.debug("Revision pass ${passNum + 1}/${executionConfig.revision_passes}") + logToTranscript("Performing revision pass ${passNum + 1}/${executionConfig.revision_passes}\n") + + val revisionAgent = ChatAgent( + prompt = """ +You are an expert business 
proposal editor. Review and improve this proposal. + +Current Proposal: +$fullProposal + +Focus on: +1. Strengthening persuasive language and value proposition +2. Ensuring logical flow and coherence +3. Improving clarity and conciseness +4. Verifying alignment with ${executionConfig.tone} tone +5. Enhancing professional presentation +6. Ensuring all stakeholder concerns are addressed +7. Maximizing impact on decision-makers + +Maintain: +- All key points and data +- The proposal structure +- Approximate word count ($cumulativeWordCount words) +- The ${executionConfig.tone} tone + +Provide the complete revised proposal. + """.trimIndent(), + model = api, + temperature = 0.6 + ) + + val revisedProposal = revisionAgent.answer(listOf("Revise the proposal")) + resultBuilder.clear() + resultBuilder.append(revisedProposal) + + revisionTask.add( + buildString { + appendLine("## Revision Pass ${passNum + 1}") + appendLine() + appendLine("✅ Complete") + appendLine() + }.renderMarkdown + ) + task.update() + } + + overviewTask.add("✅ Phase 9 Complete: ${executionConfig.revision_passes} revision pass(es) completed\n".renderMarkdown) + } + + // Phase 10: Final Assembly + logToTranscript("\n## Phase 10: Final Assembly\n\n") + overviewTask.add("\n### Phase 10: Final Assembly\n*Compiling complete proposal...*\n".renderMarkdown) + task.update() + + log.info("Phase 10: Assembling final proposal") + val finalTask = task.ui.newTask(false) + tabs["Complete Proposal"] = finalTask.placeholder + + val finalProposal = buildString { + appendLine("# ${outline.title}") + appendLine() + appendLine("**Prepared by:** ${executionConfig.proposing_organization ?: "Your Organization"}") + appendLine() + appendLine("**Date:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("MMMM d, yyyy"))}") + appendLine() + appendLine("---") + appendLine() + appendLine(resultBuilder.toString()) + appendLine() + if (executionConfig.include_appendices) { + appendLine("---") + appendLine() + appendLine("## 
Appendices") + appendLine() + appendLine("### Appendix A: Detailed Financial Projections") + appendLine("*[Include detailed spreadsheets and financial models]*") + appendLine() + appendLine("### Appendix B: Technical Specifications") + appendLine("*[Include technical documentation and specifications]*") + appendLine() + appendLine("### Appendix C: Team Biographies") + appendLine("*[Include key team member profiles and qualifications]*") + appendLine() + appendLine("### Appendix D: References and Case Studies") + appendLine("*[Include relevant case studies and client references]*") + appendLine() + } + appendLine("---") + appendLine() + appendLine("**Total Word Count:** $cumulativeWordCount") + appendLine() + appendLine("**Target Word Count:** ${executionConfig.target_word_count}") + appendLine() + appendLine("**Completion:** ${(cumulativeWordCount.toFloat() / executionConfig.target_word_count * 100).toInt()}%") + } + + finalTask.add(finalProposal.renderMarkdown) + task.update() + writeToProposal(finalProposal) + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + logToTranscript("\n## Generation Complete\n\nTotal time: ${totalTime / 1000.0}s\nTotal words: $cumulativeWordCount\n") + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Total Word Count: $cumulativeWordCount") + appendLine("- Target Word Count: ${executionConfig.target_word_count}") + appendLine("- Completion: ${(cumulativeWordCount.toFloat() / executionConfig.target_word_count * 100).toInt()}%") + appendLine("- Number of Sections: ${proposalSections.size}") + appendLine("- Stakeholders Analyzed: ${stakeholderAnalysis.stakeholders.size}") + if (roiAnalysis != null) appendLine("- ROI Analysis: ✓ Included") + if (riskAssessment != null) appendLine("- Risk Assessment: ✓ Included (${riskAssessment.risks.size} risks)") + if 
(competitiveAnalysis != null) appendLine("- Competitive Analysis: ✓ Included") + if (timelineMilestones != null) appendLine("- Timeline: ✓ Included (${timelineMilestones.phases.size} phases)") + appendLine("- Revision Passes: ${executionConfig.revision_passes}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + task.update() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# Business Proposal Summary: ${outline.title}") + appendLine() + appendLine("A complete business proposal of **$cumulativeWordCount words** was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("**Objective:** ${executionConfig.objective}") + appendLine() + appendLine("## Output Files") + appendLine() + val (proposalLink, _) = Pair(task.linkTo("proposal.md"), task.resolve("proposal.md")) + val (transcriptLink, _) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + appendLine("- **Complete Proposal:** [View](${proposalLink}) | [HTML](${proposalLink.removeSuffix(".md")}.html) | [PDF](${proposalLink.removeSuffix(".md")}.pdf)") + appendLine("- **Transcript:** [View](${transcriptLink}) | [HTML](${transcriptLink.removeSuffix(".md")}.html) | [PDF](${transcriptLink.removeSuffix(".md")}.pdf)") + appendLine() + appendLine("**Key Components:**") + appendLine("- Executive Summary") + appendLine("- ${outline.sections.size} main sections") + if (roiAnalysis != null) appendLine("- ROI Analysis with financial projections") + if (riskAssessment != null) appendLine("- Risk Assessment (${riskAssessment.overall_risk_level} risk level)") + if (competitiveAnalysis != null) appendLine("- Competitive Analysis") + if (timelineMilestones != null) appendLine("- Timeline with ${timelineMilestones.phases.size} phases") + appendLine("- Conclusion with next steps") + appendLine() + 
appendLine("**Statistics:**") + appendLine("- Total Word Count: $cumulativeWordCount / ${executionConfig.target_word_count}") + appendLine("- Sections: ${proposalSections.size}") + appendLine("- Generation Time: ${totalTime / 1000.0}s") + } + + log.info("BusinessProposalTask completed: words=$cumulativeWordCount, sections=${proposalSections.size}, time=${totalTime}ms") + + task.safeComplete("Business proposal generation complete: $cumulativeWordCount words in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during business proposal generation", e) + logToTranscript("\n## Error Occurred\n\n${e.message}\n") + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + + val errorOutput = buildString { + appendLine("# Error in Business Proposal Generation") + appendLine() + appendLine("**Proposal:** $proposalTitle") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + proposalStream?.close() + } + + private fun getContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Research Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } 
else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun getInputFileContent(): String { + val inputFiles = executionConfig?.input_files ?: return "" + if (inputFiles.isEmpty()) return "" + log.debug("Loading ${inputFiles.size} input files") + return inputFiles + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .filterNotNull() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun proposalFile(task: SessionTask): FileOutputStream? 
{ + val (link, file) = Pair(task.linkTo("proposal.md"), task.resolve("proposal.md")) + val proposalStream = file?.outputStream() + log.info("Initialized proposal file: $link") + return proposalStream + } + + + companion object { + private val log: Logger = LoggerFactory.getLogger(BusinessProposalTask::class.java) + val BusinessProposal = TaskType( + "BusinessProposal", + BusinessProposalTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate comprehensive business proposals with ROI analysis and risk assessment", + """ + Generates complete, professional business proposals for various purposes. +
      +
    • Performs stakeholder analysis to understand decision-makers
    • +
    • Creates detailed ROI analysis with financial projections
    • +
    • Conducts risk assessment with mitigation strategies
    • +
    • Analyzes competitive alternatives and positioning
    • +
    • Develops timeline with milestones and dependencies
    • +
    • Writes compelling executive summary and sections
    • +
    • Includes optional revision passes for quality
    • +
    • Supports multiple proposal types (project, investment, grant, partnership, RFP)
    • +
    • Ideal for project proposals, funding requests, vendor responses, and business plans
    • +
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/EmailCampaignTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/EmailCampaignTask.kt new file mode 100644 index 000000000..1c5def8b0 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/EmailCampaignTask.kt @@ -0,0 +1,1178 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class EmailCampaignTask( + orchestrationConfig: OrchestrationConfig, + planTask: EmailCampaignTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + class EmailCampaignTaskExecutionConfigData( + @Description("The goal or purpose of the email campaign") + val campaign_goal: String? = null, + + @Description("The product, service, or topic being promoted") + val subject_matter: String? 
= null, + + @Description("Target audience description (demographics, role, pain points)") + val target_audience: String = "general audience", + + @Description("Campaign type (e.g., 'welcome_series', 'nurture', 'sales', 're_engagement', 'newsletter', 'event_promotion')") + val campaign_type: String = "nurture", + + @Description("Number of emails in the sequence") + val num_emails: Int = 3, + + @Description("Recommended days between emails (e.g., [1, 3, 7] for day 1, day 4, day 11)") + val send_intervals: List? = null, + + @Description("Brand voice and tone (e.g., 'professional', 'friendly', 'casual', 'authoritative', 'playful')") + val brand_voice: String = "professional", + + @Description("Primary call-to-action (e.g., 'schedule_demo', 'download_resource', 'make_purchase', 'register_event')") + val primary_cta: String = "learn_more", + + @Description("Whether to generate A/B test variants for subject lines") + val generate_subject_variants: Boolean = true, + + @Description("Number of subject line variants per email (if enabled)") + val subject_variants_count: Int = 3, + + @Description("Whether to include personalization tokens (e.g., {{first_name}}, {{company}})") + val include_personalization: Boolean = true, + + @Description("Whether to include preview text (the snippet shown in inbox)") + val include_preview_text: Boolean = true, + + @Description("Whether to include emoji in subject lines") + val use_emoji: Boolean = false, + + @Description("Maximum subject line length in characters") + val max_subject_length: Int = 60, + + @Description("Target email body length (MUST BE on of: 'short' <150 words, 'medium' 150-300, 'long' >300)") + val body_length: String = "medium", + + @Description("Whether to include PS (postscript) sections") + val include_ps: Boolean = true, + + @Description("Number of revision passes for quality improvement") + val revision_passes: Int = 1, + @Description("The specific files (or file patterns, e.g. 
**/*.kt) to be used as brand context for the task") + val input_files: List? = null, + + + @Description("Related files or brand guidelines to incorporate") + val related_files: List? = null, + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = EmailCampaign.name, + task_description = task_description ?: "Generate email campaign for: '$campaign_goal'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (campaign_goal.isNullOrBlank()) { + return "campaign_goal must not be null or blank" + } + if (subject_matter.isNullOrBlank()) { + return "subject_matter must not be null or blank" + } + if (num_emails < 1 || num_emails > 10) { + return "num_emails must be between 1 and 10, got: $num_emails" + } + if (subject_variants_count < 1 || subject_variants_count > 5) { + return "subject_variants_count must be between 1 and 5, got: $subject_variants_count" + } + if (max_subject_length < 20 || max_subject_length > 100) { + return "max_subject_length must be between 20 and 100, got: $max_subject_length" + } + if (revision_passes < 0 || revision_passes > 5) { + return "revision_passes must be between 0 and 5, got: $revision_passes" + } + if (campaign_type.isBlank()) { + return "campaign_type must not be blank" + } + val validBodyLengths = setOf("short", "medium", "long") + if (body_length.lowercase() !in validBodyLengths) { + return "body_length must be one of: ${validBodyLengths.joinToString(", ")}, got: $body_length" + } + send_intervals?.let { intervals -> + if (intervals.size != num_emails - 1) { + return "send_intervals must have ${num_emails - 1} values (one less than num_emails), got: ${intervals.size}" + } + if (intervals.any { it < 0 }) { + return "send_intervals must all be non-negative" + } + } + return ValidatedObject.validateFields(this) + } + } + + data class CampaignStrategy( + 
@Description("Overall campaign strategy and approach") + val strategy: String = "", + @Description("Key messages to communicate across the sequence") + val key_messages: List = emptyList(), + @Description("Progression logic (how emails build on each other)") + val progression_logic: String = "", + @Description("Audience pain points to address") + val pain_points: List = emptyList(), + @Description("Value propositions to emphasize") + val value_propositions: List = emptyList(), + @Description("Recommended send timing") + val timing_recommendations: String = "" + ) : ValidatedObject { + override fun validate(): String? { + if (strategy.isBlank()) return "strategy must not be blank" + if (key_messages.isEmpty()) return "key_messages must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class EmailOutline( + @Description("Email number in sequence") + val email_number: Int = 1, + @Description("Email purpose and goal") + val purpose: String = "", + @Description("Main message or theme") + val main_message: String = "", + @Description("Key points to cover") + val key_points: List = emptyList(), + @Description("Call-to-action for this email") + val cta: String = "", + @Description("Emotional tone for this email") + val emotional_tone: String = "", + @Description("Connection to previous email (if applicable)") + val connection_to_previous: String = "", + @Description("Estimated word count") + val estimated_word_count: Int = 0 + ) : ValidatedObject { + override fun validate(): String? 
{ + if (email_number < 1) return "email_number must be positive" + if (purpose.isBlank()) return "purpose must not be blank" + if (main_message.isBlank()) return "main_message must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class SubjectLineVariants( + @Description("List of subject line options") + val variants: List = emptyList() + ) : ValidatedObject + + data class SubjectLine( + @Description("The subject line text") + val text: String = "", + @Description("Approach or technique used (e.g., 'curiosity', 'urgency', 'benefit-focused')") + val approach: String = "", + @Description("Character count") + val character_count: Int = 0, + @Description("Whether it includes personalization tokens") + val has_personalization: Boolean = false + ) : ValidatedObject { + override fun validate(): String? { + if (text.isBlank()) return "text must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class EmailContent( + @Description("Email number in sequence") + val email_number: Int = 1, + @Description("Selected subject line") + val subject_line: String = "", + @Description("Preview text (inbox snippet)") + val preview_text: String = "", + @Description("Email body content") + val body: String = "", + @Description("Call-to-action text") + val cta_text: String = "", + @Description("CTA button/link text") + val cta_button: String = "", + @Description("PS section (if applicable)") + val ps_section: String = "", + @Description("Word count") + val word_count: Int = 0, + @Description("Personalization tokens used") + val personalization_tokens: List = emptyList(), + @Description("Key persuasive elements") + val persuasive_elements: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? 
{ + if (email_number < 1) return "email_number must be positive" + if (subject_line.isBlank()) return "subject_line must not be blank" + if (body.isBlank()) return "body must not be blank" + return ValidatedObject.validateFields(this) + } + } + + override fun promptSegment(): String { + return """ +EmailCampaign - Generate complete email sequences for marketing, sales, or outreach + ** Specify the campaign goal and subject matter + ** Define target audience and campaign type + ** Set number of emails and send intervals + ** Configure brand voice and primary CTA + ** Enable A/B test variants for subject lines + ** Include personalization tokens and preview text + ** Control body length and formatting options + ** Performs strategy planning, outlining, and email generation + ** Produces complete, ready-to-use email sequence + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + log.info("Starting EmailCampaignTask for goal: '${executionConfig?.campaign_goal}'") + val transcript = transcript(task) + + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + resultFn("CONFIGURATION ERROR: $validationError") + transcript?.close() + return + } + + val campaignGoal = executionConfig?.campaign_goal + if (campaignGoal.isNullOrBlank()) { + log.error("No campaign goal specified") + task.safeComplete("CONFIGURATION ERROR: No campaign goal specified", log) + resultFn("CONFIGURATION ERROR: No campaign goal specified") + transcript?.close() + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: run { + transcript?.close() + return 
+ } + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Email Campaign Generation") + appendLine() + appendLine("**Generated:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("**Campaign Goal:** $campaignGoal") + appendLine() + appendLine("**Subject Matter:** ${executionConfig.subject_matter}") + appendLine() + appendLine("## Configuration") + appendLine("- Campaign Type: ${executionConfig.campaign_type}") + appendLine("- Target Audience: ${executionConfig.target_audience}") + appendLine("- Number of Emails: ${executionConfig.num_emails}") + appendLine("- Brand Voice: ${executionConfig.brand_voice}") + appendLine("- Primary CTA: ${executionConfig.primary_cta}") + appendLine("- Body Length: ${executionConfig.body_length}") + appendLine("- Subject Variants: ${if (executionConfig.generate_subject_variants) "${executionConfig.subject_variants_count} per email" else "Single per email"}") + appendLine("- Personalization: ${if (executionConfig.include_personalization) "✓" else "✗"}") + appendLine("- Preview Text: ${if (executionConfig.include_preview_text) "✓" else "✗"}") + appendLine("- Emoji: ${if (executionConfig.use_emoji) "✓" else "✗"}") + appendLine() + if (executionConfig.send_intervals != null) { + appendLine("**Send Schedule:**") + appendLine("- Email 1: Day 0 (immediate)") + executionConfig.send_intervals.forEachIndexed { index, interval -> + val cumulativeDays = executionConfig.send_intervals.take(index + 1).sum() + appendLine("- Email ${index + 2}: Day $cumulativeDays (+$interval days)") + } + appendLine() + } + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Campaign 
Strategy") + appendLine("*Developing overall campaign approach...*") + } + overviewTask.add(overviewContent.renderMarkdown) + transcript?.write(overviewContent.toByteArray(Charsets.UTF_8)) + task.update() + + val resultBuilder = StringBuilder() + resultBuilder.append("# Email Campaign: $campaignGoal\n\n") + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) ?: "" + val contextFiles = getContextFiles() + + if (priorContext.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Brand Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Brand & Campaign Context") + appendLine() + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Brand Guidelines") + appendLine(contextFiles.truncateForDisplay(2000)) + } + }.renderMarkdown + ) + transcript?.write(contextTask.placeholder.toString().toByteArray(Charsets.UTF_8)) + task.update() + } + + // Phase 1: Develop campaign strategy + log.info("Phase 1: Developing campaign strategy") + val strategyTask = task.ui.newTask(false) + tabs["Strategy"] = strategyTask.placeholder + + strategyTask.add( + buildString { + appendLine("# Campaign Strategy") + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** Analyzing audience and developing approach...") + appendLine() + }.renderMarkdown + ) + task.update() + + val targetWordCount = when (executionConfig.body_length.lowercase()) { + "short" -> 125 + "medium" -> 225 + "long" -> 400 + else -> 225 + } + + val strategyAgent = ParsedAgent( + resultClass = CampaignStrategy::class.java, + prompt = """ +You are an expert email marketing strategist. Develop a comprehensive strategy for this email campaign. 
+ +Campaign Goal: $campaignGoal +Subject Matter: ${executionConfig.subject_matter} +Campaign Type: ${executionConfig.campaign_type} +Target Audience: ${executionConfig.target_audience} +Number of Emails: ${executionConfig.num_emails} +Brand Voice: ${executionConfig.brand_voice} +Primary CTA: ${executionConfig.primary_cta} + +${if (priorContext.isNotBlank()) "Brand Context:\n${priorContext.truncateForDisplay(2000)}\n" else ""} +${if (contextFiles.isNotBlank()) "Brand Guidelines:\n${contextFiles.truncateForDisplay(2000)}\n" else ""} + +Create a strategy that includes: +1. Overall approach and positioning +2. 3-5 key messages to communicate across the sequence +3. How emails will build on each other (progression logic) +4. Audience pain points to address +5. Value propositions to emphasize +6. Timing recommendations for maximum engagement + +Consider: +- The ${executionConfig.campaign_type} campaign type requires specific pacing and messaging +- The ${executionConfig.target_audience} has specific needs and preferences +- Each email should move the recipient closer to the ${executionConfig.primary_cta} +- Maintain ${executionConfig.brand_voice} voice throughout +- Build trust and value before asking for action + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val strategy = strategyAgent.answer(listOf("Develop strategy")).obj + log.info("Campaign strategy developed: ${strategy.key_messages.size} key messages") + + val strategyContent = buildString { + appendLine("## Campaign Approach") + appendLine() + appendLine(strategy.strategy) + appendLine() + appendLine("---") + appendLine() + appendLine("### Key Messages") + strategy.key_messages.forEachIndexed { index, message -> + appendLine("${index + 1}. 
$message") + } + appendLine() + appendLine("### Progression Logic") + appendLine(strategy.progression_logic) + appendLine() + appendLine("---") + appendLine() + appendLine("### Audience Pain Points") + strategy.pain_points.forEach { pain -> + appendLine("- $pain") + } + appendLine() + appendLine("### Value Propositions") + strategy.value_propositions.forEach { value -> + appendLine("- $value") + } + appendLine() + appendLine("---") + appendLine() + appendLine("### Timing Recommendations") + appendLine(strategy.timing_recommendations) + appendLine() + appendLine("**Status:** ✅ Complete") + } + strategyTask.add(strategyContent.renderMarkdown) + transcript?.write(("\n\n" + strategyContent).toByteArray(Charsets.UTF_8)) + task.update() + + overviewTask.add("✅ Phase 1 Complete: Strategy developed\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Email Sequence Outline\n*Creating detailed outline for each email...*\n".renderMarkdown) + task.update() + + // Phase 2: Create email outlines + log.info("Phase 2: Creating email sequence outline") + val outlineTask = task.ui.newTask(false) + tabs["Sequence Outline"] = outlineTask.placeholder + + outlineTask.add( + buildString { + appendLine("# Email Sequence Outline") + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** Planning ${executionConfig.num_emails} emails...") + appendLine() + }.renderMarkdown + ) + task.update() + + val outlines = mutableListOf() + for (emailNum in 1..executionConfig.num_emails) { + log.debug("Creating outline for email $emailNum") + + val previousOutlines = if (outlines.isNotEmpty()) { + buildString { + appendLine("Previous Emails:") + outlines.forEach { prev -> + appendLine("Email ${prev.email_number}: ${prev.main_message}") + } + } + } else "" + + val outlineAgent = ParsedAgent( + resultClass = EmailOutline::class.java, + prompt = """ +You are an email marketing expert. Create a detailed outline for Email $emailNum of ${executionConfig.num_emails}. 
+ +Campaign Strategy: +${strategy.strategy} + +Key Messages: ${strategy.key_messages.joinToString("; ")} +Progression Logic: ${strategy.progression_logic} + +$previousOutlines + +For Email $emailNum, specify: +- Purpose and goal of this specific email +- Main message or theme +- 3-5 key points to cover +- Specific call-to-action +- Emotional tone (e.g., 'welcoming', 'educational', 'urgent', 'supportive') +- How it connects to the previous email (if applicable) +- Estimated word count (~$targetWordCount words) + +Email $emailNum should: +${ + when (emailNum) { + 1 -> "- Establish connection and set expectations\n- Introduce the value proposition\n- Build initial trust" + executionConfig.num_emails -> "- Reinforce key benefits\n- Create urgency or final push\n- Make the primary CTA compelling" + else -> "- Build on previous email's message\n- Deepen engagement\n- Move closer to conversion" + } + } + +Maintain ${executionConfig.brand_voice} voice and address ${executionConfig.target_audience}. + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val outline = outlineAgent.answer(listOf("Create outline")).obj + outlines.add(outline) + } + + val outlineContent = buildString { + appendLine("## Email Sequence Plan") + appendLine() + outlines.forEach { outline -> + appendLine("### Email ${outline.email_number}: ${outline.main_message}") + appendLine() + appendLine("**Purpose:** ${outline.purpose}") + appendLine() + appendLine("**Emotional Tone:** ${outline.emotional_tone}") + appendLine() + appendLine("**Key Points:**") + outline.key_points.forEach { point -> + appendLine("- $point") + } + appendLine() + appendLine("**Call-to-Action:** ${outline.cta}") + appendLine() + if (outline.connection_to_previous.isNotBlank()) { + appendLine("**Connection to Previous:** ${outline.connection_to_previous}") + appendLine() + } + appendLine("**Est. 
Words:** ${outline.estimated_word_count}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + outlineTask.add(outlineContent.renderMarkdown) + transcript?.write(("\n\n" + outlineContent).toByteArray(Charsets.UTF_8)) + task.update() + + overviewTask.add("✅ Phase 2 Complete: ${outlines.size} emails outlined\n".renderMarkdown) + overviewTask.add("\n### Phase 3: Email Generation\n*Writing emails with subject lines...*\n".renderMarkdown) + task.update() + + // Phase 3: Generate each email + log.info("Phase 3: Generating emails") + val generatedEmails = mutableListOf() + val allSubjectVariants = mutableMapOf>() + + outlines.forEach { outline -> + log.info("Generating email ${outline.email_number}/${executionConfig.num_emails}") + + overviewTask.add("- Email ${outline.email_number}: ${outline.main_message.truncateForDisplay(50)} ".renderMarkdown) + task.update() + + val emailTask = task.ui.newTask(false) + tabs["Email ${outline.email_number}"] = emailTask.placeholder + + emailTask.add( + buildString { + appendLine("# Email ${outline.email_number}") + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** Generating content...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Generate subject line variants + val subjectVariants = if (executionConfig.generate_subject_variants) { + log.debug("Generating ${executionConfig.subject_variants_count} subject line variants") + + val subjectAgent = ParsedAgent( + resultClass = SubjectLineVariants::class.java, + prompt = """ +You are an expert at writing compelling email subject lines. Generate ${executionConfig.subject_variants_count} different subject line options. 
+ +Email Purpose: ${outline.purpose} +Main Message: ${outline.main_message} +Target Audience: ${executionConfig.target_audience} +Brand Voice: ${executionConfig.brand_voice} +Max Length: ${executionConfig.max_subject_length} characters + +Create ${executionConfig.subject_variants_count} variants using different approaches: +- Curiosity-driven (make them want to know more) +- Benefit-focused (highlight the value) +- Urgency/scarcity (create FOMO) +- Question-based (engage their thinking) +- Direct/clear (straightforward value) + +Requirements: +- Each must be under ${executionConfig.max_subject_length} characters +- Match ${executionConfig.brand_voice} voice +- Be specific and relevant to ${outline.main_message} +${if (executionConfig.include_personalization) "- Include personalization tokens like {{first_name}} where appropriate" else ""} +${if (executionConfig.use_emoji) "- Consider using relevant emoji (but don't overdo it)" else "- Do NOT use emoji"} +- Avoid spam trigger words (FREE, !!!, ALL CAPS) +- Make each variant distinctly different in approach + +For each variant, specify the approach used and character count. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + subjectAgent.answer(listOf("Generate subject lines")).obj.variants + } else { + // Generate single subject line + val subjectAgent = ParsedAgent( + resultClass = SubjectLineVariants::class.java, + prompt = """ +You are an expert at writing compelling email subject lines. Generate 1 subject line. 
+ +Email Purpose: ${outline.purpose} +Main Message: ${outline.main_message} +Target Audience: ${executionConfig.target_audience} +Brand Voice: ${executionConfig.brand_voice} +Max Length: ${executionConfig.max_subject_length} characters + +Create a subject line that: +- Is under ${executionConfig.max_subject_length} characters +- Matches ${executionConfig.brand_voice} voice +- Is specific and relevant to ${outline.main_message} +${if (executionConfig.include_personalization) "- Includes personalization tokens like {{first_name}} where appropriate" else ""} +${if (executionConfig.use_emoji) "- Uses relevant emoji if appropriate" else "- Does NOT use emoji"} +- Avoids spam trigger words + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + subjectAgent.answer(listOf("Generate subject line")).obj.variants + } + + allSubjectVariants[outline.email_number] = subjectVariants + log.debug("Generated ${subjectVariants.size} subject line variants") + + // Generate email body + val previousContext = if (generatedEmails.isNotEmpty()) { + buildString { + appendLine("Previous Email Context:") + val lastEmail = generatedEmails.last() + appendLine("Email ${lastEmail.email_number} Subject: ${lastEmail.subject_line}") + appendLine("Key CTA: ${lastEmail.cta_text}") + appendLine("Ending: ${lastEmail.body.takeLast(200)}") + } + } else { + "This is the first email in the sequence." + } + + val emailAgent = ParsedAgent( + resultClass = EmailContent::class.java, + prompt = """ +You are an expert email copywriter. Write Email ${outline.email_number} of the campaign. 
+ +Campaign Goal: $campaignGoal +Subject Matter: ${executionConfig.subject_matter} +Target Audience: ${executionConfig.target_audience} +Brand Voice: ${executionConfig.brand_voice} + +Email Outline: +Purpose: ${outline.purpose} +Main Message: ${outline.main_message} +Key Points: ${outline.key_points.joinToString("; ")} +CTA: ${outline.cta} +Emotional Tone: ${outline.emotional_tone} +Target Words: ${outline.estimated_word_count} + +Selected Subject Line: ${subjectVariants.first().text} + +$previousContext + +Write the complete email including: +1. ${if (executionConfig.include_preview_text) "Preview text (40-90 characters that appear in inbox)" else ""} +2. Email body (~${outline.estimated_word_count} words) +3. Clear call-to-action section +4. CTA button text +${if (executionConfig.include_ps) "5. PS section (optional but recommended for key point or urgency)" else ""} + +Email Body Guidelines: +- Open with a ${if (outline.email_number == 1) "warm greeting" else "reference to previous email"} +- Use short paragraphs (2-3 sentences max) +- Include white space for readability +- Write in ${executionConfig.brand_voice} voice +- Address ${executionConfig.target_audience} directly +- Focus on benefits, not features +- Use "you" language (not "we") +${if (executionConfig.include_personalization) "- Include personalization tokens: {{first_name}}, {{company}}, etc." 
else ""} +- Build to the CTA naturally +- Make the CTA specific and action-oriented + +Length: ${executionConfig.body_length} (~${outline.estimated_word_count} words) + +Provide: +- The complete email body +- CTA text and button text +- Preview text +${if (executionConfig.include_ps) "- PS section" else ""} +- List of personalization tokens used +- Key persuasive elements employed + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var emailContent = emailAgent.answer(listOf("Write email")).obj.copy( + email_number = outline.email_number, + subject_line = subjectVariants.first().text + ) + + generatedEmails.add(emailContent) + + // Display email + val emailDisplay = buildString { + appendLine("## Email ${outline.email_number}: ${outline.main_message}") + appendLine() + appendLine("### Subject Line Options") + subjectVariants.forEachIndexed { index, variant -> + val badge = if (index == 0) "**[SELECTED]** " else "" + appendLine("${index + 1}. 
$badge**${variant.text}** (${variant.character_count} chars)") + appendLine(" - *Approach: ${variant.approach}*") + if (variant.has_personalization) { + appendLine(" - *Includes personalization*") + } + appendLine() + } + appendLine("---") + appendLine() + if (executionConfig.include_preview_text && emailContent.preview_text.isNotBlank()) { + appendLine("### Preview Text") + appendLine("> ${emailContent.preview_text}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("### Email Body") + appendLine() + appendLine(emailContent.body) + appendLine() + appendLine("---") + appendLine() + appendLine("### Call-to-Action") + appendLine() + appendLine("**CTA Text:** ${emailContent.cta_text}") + appendLine() + appendLine("**Button:** `${emailContent.cta_button}`") + appendLine() + if (executionConfig.include_ps && emailContent.ps_section.isNotBlank()) { + appendLine("---") + appendLine() + appendLine("### P.S.") + appendLine(emailContent.ps_section) + appendLine() + } + appendLine("---") + appendLine() + appendLine("**Word Count:** ${emailContent.word_count}") + if (emailContent.personalization_tokens.isNotEmpty()) { + appendLine() + appendLine("**Personalization Tokens:** ${emailContent.personalization_tokens.joinToString(", ")}") + } + if (emailContent.persuasive_elements.isNotEmpty()) { + appendLine() + appendLine("**Persuasive Elements:** ${emailContent.persuasive_elements.joinToString(", ")}") + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + emailTask.add(emailDisplay.renderMarkdown) + transcript?.write(("\n\n" + emailDisplay).toByteArray(Charsets.UTF_8)) + task.update() + + overviewTask.add("✅ (${emailContent.word_count} words)\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 3 Complete: All emails generated\n".renderMarkdown) + + // Phase 4: Revision (if enabled) + if (executionConfig.revision_passes > 0) { + overviewTask.add("\n### Phase 4: Revision\n*Refining email sequence...*\n".renderMarkdown) + task.update() + 
+ log.info("Phase 4: Performing ${executionConfig.revision_passes} revision pass(es)") + val revisionTask = task.ui.newTask(false) + tabs["Revision"] = revisionTask.placeholder + + revisionTask.add( + buildString { + appendLine("# Revision Process") + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...") + appendLine() + }.renderMarkdown + ) + task.update() + + repeat(executionConfig.revision_passes) { passNum -> + log.debug("Revision pass ${passNum + 1}/${executionConfig.revision_passes}") + transcript?.write(("\n\n## Revision Pass ${passNum + 1}\n✅ All ${generatedEmails.size} emails revised\n").toByteArray(Charsets.UTF_8)) + + generatedEmails.forEachIndexed { index, email -> + val revisionAgent = ChatAgent( + prompt = """ +You are an expert email editor. Review and improve this email while maintaining its core message and structure. + +Email ${email.email_number} of ${executionConfig.num_emails} +Subject: ${email.subject_line} + +Current Body: +${email.body} + +CTA: ${email.cta_text} +${if (email.ps_section.isNotBlank()) "PS: ${email.ps_section}" else ""} + +Improve: +1. Clarity and conciseness +2. Persuasive impact +3. Flow and transitions +4. Call-to-action strength +5. Emotional resonance with ${executionConfig.target_audience} +6. ${executionConfig.brand_voice} voice consistency + +Maintain: +- All key points and messages +- Word count (~${email.word_count} words) +- Personalization tokens +- Overall structure + +Provide the complete revised email body only. 
+ """.trimIndent(), + model = api, + temperature = 0.6 + ) + + val revisedBody = revisionAgent.answer(listOf("Revise email")) + generatedEmails[index] = email.copy( + body = revisedBody, + word_count = revisedBody.split("\\s+".toRegex()).size + ) + } + + revisionTask.add( + buildString { + appendLine("## Revision Pass ${passNum + 1}") + appendLine() + appendLine("✅ All ${generatedEmails.size} emails revised") + appendLine() + }.renderMarkdown + ) + task.update() + } + + overviewTask.add("✅ Phase 4 Complete: ${executionConfig.revision_passes} revision pass(es) completed\n".renderMarkdown) + } + + // Phase 5: Final Assembly + overviewTask.add("\n### Phase 5: Final Assembly\n*Compiling complete campaign...*\n".renderMarkdown) + task.update() + + log.info("Phase 5: Assembling final campaign") + val finalTask = task.ui.newTask(false) + tabs["Complete Campaign"] = finalTask.placeholder + + val finalCampaign = buildString { + appendLine("# Email Campaign: $campaignGoal") + appendLine() + appendLine("## Campaign Overview") + appendLine() + appendLine("**Subject Matter:** ${executionConfig.subject_matter}") + appendLine("**Target Audience:** ${executionConfig.target_audience}") + appendLine("**Campaign Type:** ${executionConfig.campaign_type}") + appendLine("**Number of Emails:** ${executionConfig.num_emails}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Campaign Strategy") + appendLine() + appendLine(strategy.strategy) + appendLine() + appendLine("**Key Messages:**") + strategy.key_messages.forEach { message -> + appendLine("- $message") + } + appendLine() + appendLine("---") + appendLine() + + generatedEmails.forEachIndexed { index, email -> + val daysSinceStart = if (index == 0) 0 else executionConfig.send_intervals?.take(index)?.sum() ?: 0 + + appendLine("## Email ${email.email_number} - Day $daysSinceStart") + appendLine() + + // Show all subject line variants + val variants = allSubjectVariants[email.email_number] ?: emptyList() + if 
(variants.size > 1) { + appendLine("### Subject Line Options (A/B Test)") + variants.forEachIndexed { variantIndex, variant -> + val badge = if (variantIndex == 0) "**[A]** " else "**[B${if (variants.size > 2) "${variantIndex}" else ""}]** " + appendLine("$badge${variant.text}") + appendLine() + } + } else { + appendLine("**Subject:** ${email.subject_line}") + appendLine() + } + + if (email.preview_text.isNotBlank()) { + appendLine("**Preview:** ${email.preview_text}") + appendLine() + } + + appendLine("---") + appendLine() + appendLine(email.body) + appendLine() + appendLine("**${email.cta_button}**") + appendLine() + if (email.ps_section.isNotBlank()) { + appendLine("*P.S. ${email.ps_section}*") + appendLine() + } + appendLine("---") + appendLine() + } + + appendLine() + appendLine("## Campaign Metrics") + appendLine() + val totalWords = generatedEmails.sumOf { it.word_count } + val avgWords = totalWords / generatedEmails.size + appendLine("- Total Emails: ${generatedEmails.size}") + appendLine("- Total Word Count: $totalWords") + appendLine("- Average Words per Email: $avgWords") + if (executionConfig.send_intervals != null) { + val totalDays = executionConfig.send_intervals.sum() + appendLine("- Campaign Duration: $totalDays days") + } + appendLine() + appendLine("## Implementation Notes") + appendLine() + appendLine("1. **Personalization Tokens:** Ensure your email platform supports the tokens used") + appendLine("2. **A/B Testing:** Test subject line variants to optimize open rates") + appendLine("3. **Timing:** Send emails at optimal times for your audience (typically 10am-2pm)") + appendLine("4. **Mobile Optimization:** Preview on mobile devices before sending") + appendLine("5. **Unsubscribe Link:** Always include an easy unsubscribe option") + appendLine("6. **Tracking:** Set up UTM parameters for link tracking") + appendLine("7. 
**Compliance:** Ensure compliance with CAN-SPAM, GDPR, or relevant regulations") + } + + finalTask.add(finalCampaign.renderMarkdown) + transcript?.write(("\n\n" + finalCampaign).toByteArray(Charsets.UTF_8)) + task.update() + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + val totalWords = generatedEmails.sumOf { it.word_count } + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Campaign Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Emails Generated: ${generatedEmails.size}") + appendLine("- Total Word Count: $totalWords") + appendLine("- Average Words per Email: ${totalWords / generatedEmails.size}") + appendLine("- Subject Line Variants: ${allSubjectVariants.values.sumOf { it.size }}") + appendLine("- Revision Passes: ${executionConfig.revision_passes}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + transcript?.write( + ("\n\n---\n\n## Campaign Complete\n\n**Statistics:**\n- Emails: ${generatedEmails.size}\n- Words: $totalWords\n- Time: ${totalTime / 1000.0}s\n").toByteArray( + Charsets.UTF_8 + ) + ) + task.update() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# Email Campaign Summary: $campaignGoal") + appendLine() + appendLine("A complete email campaign of **${generatedEmails.size} emails** with **$totalWords total words** was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("**Campaign Type:** ${executionConfig.campaign_type}") + appendLine("**Target Audience:** ${executionConfig.target_audience}") + appendLine() + appendLine("**Email Sequence:**") + generatedEmails.forEach { email -> + appendLine("${email.email_number}. 
${email.subject_line} (${email.word_count} words)") + } + appendLine() + appendLine("> The complete campaign with all subject line variants and implementation notes is available in the Complete Campaign tab.") + } + + log.info("EmailCampaignTask completed: emails=${generatedEmails.size}, words=$totalWords, time=${totalTime}ms") + transcript?.close() + + val (transcriptLink, _) = Pair(task.linkTo("campaign_summary.md"), task.resolve("campaign_summary.md")) + task.safeComplete( + "Email campaign generation complete: ${generatedEmails.size} emails, $totalWords words in ${totalTime / 1000}s. Full details: transcript", + log + ) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during email campaign generation", e) + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + transcript?.close() + + val errorOutput = buildString { + appendLine("# Error in Email Campaign Generation") + appendLine() + appendLine("**Campaign Goal:** $campaignGoal") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } + + private fun getInputFileCode(): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> 
+ val file = root.toFile().resolve(relativePath) + try { + "# $relativePath\n\n```\n${file.readText()}\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun getContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Brand Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object { + private val log: Logger = LoggerFactory.getLogger(EmailCampaignTask::class.java) + val EmailCampaign = TaskType( + "EmailCampaign", + EmailCampaignTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate complete email sequences for marketing, sales, or outreach", + """ + Generates complete, ready-to-use email campaigns with strategic planning. +
    • Develops comprehensive campaign strategy and messaging
    • Creates detailed outline for each email in the sequence
    • Generates A/B test variants for subject lines
    • Writes complete email bodies with CTAs
    • Includes personalization tokens and preview text
    • Supports multiple campaign types (welcome, nurture, sales, etc.)
    • Configurable brand voice, tone, and length
    • Optional revision passes for quality improvement
    • Provides implementation notes and best practices
    • Ideal for marketing automation, sales outreach, and customer engagement
    + """ + ) + } +} + diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/InteractiveStoryTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/InteractiveStoryTask.kt new file mode 100644 index 000000000..3abdc0a3f --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/InteractiveStoryTask.kt @@ -0,0 +1,1294 @@ +package com.simiacryptus.cognotik.plan.tools.writing + + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.nio.file.Path +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class InteractiveStoryTask( + orchestrationConfig: OrchestrationConfig, + planTask: InteractiveStoryTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + protected val codeFiles = mutableMapOf() + + class InteractiveStoryTaskExecutionConfigData( + @Description("The premise or starting scenario for the interactive story") + val premise: String? 
= null, + + @Description("The genre of the story (e.g., 'fantasy', 'sci-fi', 'mystery', 'horror', 'romance')") + val genre: String = "fantasy", + + @Description("The target audience (e.g., 'children', 'young_adult', 'adult')") + val target_audience: String = "young_adult", + + @Description("The tone of the story (e.g., 'lighthearted', 'serious', 'dark', 'humorous')") + val tone: String = "serious", + + @Description("Number of major decision points in the story") + val num_decision_points: Int = 5, + + @Description("Number of choices at each decision point") + val choices_per_decision: Int = 3, + + @Description("Whether to track state variables (inventory, relationships, stats)") + val track_state_variables: Boolean = true, + + @Description("State variables to track (e.g., 'health', 'reputation', 'gold', 'ally_trust')") + val state_variables: List? = null, + + @Description("Whether to ensure all paths lead to meaningful endings") + val prevent_dead_ends: Boolean = true, + + @Description("Number of distinct endings to create") + val num_endings: Int = 3, + + @Description("Whether to optimize for replay value with distinct experiences") + val optimize_replay_value: Boolean = true, + + @Description("Average word count per story segment") + val segment_word_count: Int = 300, + + @Description("Whether to include consequence tracking across choices") + val track_consequences: Boolean = true, + + @Description("Writing style (e.g., 'descriptive', 'action-packed', 'dialogue-heavy', 'introspective')") + val writing_style: String = "descriptive", + + @Description("Point of view (e.g., 'second_person', 'first_person', 'third_person')") + val point_of_view: String = "second_person", + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input context for the story") + val input_files: List? = null, + + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? 
= TaskState.Pending, + ) : TaskExecutionConfig( + task_type = InteractiveStory.name, + task_description = task_description ?: "Generate interactive story: '$premise'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (premise.isNullOrBlank()) { + return "premise must not be null or blank" + } + if (num_decision_points < 1 || num_decision_points > 20) { + return "num_decision_points must be between 1 and 20, got: $num_decision_points" + } + if (choices_per_decision < 2 || choices_per_decision > 5) { + return "choices_per_decision must be between 2 and 5, got: $choices_per_decision" + } + if (num_endings < 1 || num_endings > 10) { + return "num_endings must be between 1 and 10, got: $num_endings" + } + if (segment_word_count < 100 || segment_word_count > 1000) { + return "segment_word_count must be between 100 and 1000, got: $segment_word_count" + } + if (genre.isNullOrBlank()) { + return "genre must not be null or blank" + } + if (point_of_view.isBlank()) { + return "point_of_view must not be blank" + } + if (!input_files.isNullOrEmpty()) { + input_files.forEach { pattern -> + if (pattern.isBlank()) { + return "input_files patterns must not be blank" + } + } + } + return ValidatedObject.validateFields(this) + } + } + + data class StoryStructure( + @Description("The story title") + val title: String = "", + @Description("Opening segment that sets the scene") + val opening: String = "", + @Description("Decision points in the story") + val decision_points: List = emptyList(), + @Description("Possible endings") + val endings: List = emptyList(), + @Description("State variables being tracked") + val tracked_variables: Map = emptyMap() + ) : ValidatedObject { + override fun validate(): String? 
{ + if (title.isBlank()) return "title must not be blank" + if (opening.isBlank()) return "opening must not be blank" + if (decision_points.isEmpty()) return "decision_points must not be empty" + if (endings.isEmpty()) return "endings must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class DecisionPoint( + @Description("Unique identifier for this decision point") + val id: String = "", + @Description("The narrative segment leading to this decision") + val narrative: String = "", + @Description("The question or situation requiring a choice") + val decision_prompt: String = "", + @Description("Available choices") + val choices: List = emptyList(), + @Description("Current state variable values at this point") + val state_snapshot: Map = emptyMap() + ) : ValidatedObject { + override fun validate(): String? { + if (id.isBlank()) return "id must not be blank" + if (narrative.isBlank()) return "narrative must not be blank" + if (decision_prompt.isBlank()) return "decision_prompt must not be blank" + if (choices.isEmpty()) return "choices must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class Choice( + @Description("The choice text presented to the reader") + val text: String = "", + @Description("ID of the next decision point or ending this leads to") + val leads_to: String = "", + @Description("State variable changes from this choice") + val state_changes: Map = emptyMap(), + @Description("Immediate consequence description") + val immediate_consequence: String = "", + @Description("Long-term impact on the story") + val long_term_impact: String = "" + ) : ValidatedObject { + override fun validate(): String? 
{ + if (text.isBlank()) return "text must not be blank" + if (leads_to.isBlank()) return "leads_to must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class Ending( + @Description("Unique identifier for this ending") + val id: String = "", + @Description("Type of ending (e.g., 'triumph', 'tragedy', 'bittersweet', 'twist')") + val ending_type: String = "", + @Description("The final narrative segment") + val narrative: String = "", + @Description("Required state conditions to reach this ending") + val required_conditions: Map = emptyMap(), + @Description("Choices that led to this ending") + val path_summary: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? { + if (id.isBlank()) return "id must not be blank" + if (ending_type.isBlank()) return "ending_type must not be blank" + if (narrative.isBlank()) return "narrative must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class StorySegment( + @Description("The segment ID") + val id: String = "", + @Description("The narrative content") + val content: String = "", + @Description("Word count") + val word_count: Int = 0, + @Description("State changes in this segment") + val state_changes: Map = emptyMap() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ + InteractiveStory - Create choose-your-own-adventure narratives with branching paths + ** Optionally, list input files (supports glob patterns) to be examined for context + ** Specify the premise or starting scenario + ** Define genre, tone, and target audience + ** Set number of decision points and choices per decision + ** Enable state variable tracking (health, reputation, inventory, etc.) 
+ ** Prevent dead ends to ensure all paths lead somewhere meaningful + ** Create multiple distinct endings based on player choices + ** Optimize for replay value with different experiences + ** Track consequences across choices for coherent storytelling + ** Produces complete interactive narrative with decision tree + Available files: + ${getAvailableFiles(root).joinToString("\n") { " - $it" }} + """.trimIndent() + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + // Initialize transcript + val transcriptStream = transcript(task) + val transcriptWriter = transcriptStream?.bufferedWriter() + // Gather input context from files and messages + val inputContext = getInputFileCode() + + if (messages.isNotEmpty()) "\n\n## User Input\n\n${messages.joinToString("\n\n")}" else "" + + + log.info("Starting InteractiveStoryTask for premise: '${executionConfig?.premise}'") + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + transcriptWriter?.close() + resultFn("CONFIGURATION ERROR: $validationError") + return + } + + val premise = executionConfig?.premise + if (premise.isNullOrBlank()) { + log.error("No premise specified for interactive story") + task.safeComplete("CONFIGURATION ERROR: No premise specified", log) + transcriptWriter?.close() + resultFn("CONFIGURATION ERROR: No premise specified") + return + } + + 
val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Interactive Story Generation") + appendLine() + appendLine("**Premise:** $premise") + appendLine() + appendLine("## Configuration") + appendLine("- Genre: ${executionConfig.genre}") + appendLine("- Target Audience: ${executionConfig.target_audience}") + appendLine("- Tone: ${executionConfig.tone}") + appendLine("- Point of View: ${executionConfig.point_of_view}") + appendLine("- Writing Style: ${executionConfig.writing_style}") + appendLine("- Decision Points: ${executionConfig.num_decision_points}") + appendLine("- Choices per Decision: ${executionConfig.choices_per_decision}") + appendLine("- Number of Endings: ${executionConfig.num_endings}") + appendLine("- Track State Variables: ${if (executionConfig.track_state_variables) "✓" else "✗"}") + if (executionConfig.track_state_variables && !executionConfig.state_variables.isNullOrEmpty()) { + appendLine("- State Variables: ${executionConfig.state_variables.joinToString(", ")}") + } + appendLine("- Prevent Dead Ends: ${if (executionConfig.prevent_dead_ends) "✓" else "✗"}") + appendLine("- Optimize Replay Value: ${if (executionConfig.optimize_replay_value) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Story Structure Planning") + appendLine("*Creating decision tree and story architecture...*") + } + // Write to transcript + transcriptWriter?.apply { + write("# Interactive Story Generation Transcript\n\n") + write("**Premise:** $premise\n\n") + write("## Configuration\n\n") + write("- Genre: ${executionConfig.genre}\n") + write("- 
Target Audience: ${executionConfig.target_audience}\n") + write("- Tone: ${executionConfig.tone}\n") + write("- Point of View: ${executionConfig.point_of_view}\n") + write("- Writing Style: ${executionConfig.writing_style}\n") + write("- Decision Points: ${executionConfig.num_decision_points}\n") + write("- Choices per Decision: ${executionConfig.choices_per_decision}\n") + write("- Number of Endings: ${executionConfig.num_endings}\n") + write("- Track State Variables: ${if (executionConfig.track_state_variables) "✓" else "✗"}\n") + if (executionConfig.track_state_variables && !executionConfig.state_variables.isNullOrEmpty()) { + write("- State Variables: ${executionConfig.state_variables.joinToString(", ")}\n") + } + write("\n**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n") + write("---\n\n") + flush() + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + + val resultBuilder = StringBuilder() + resultBuilder.append("# Interactive Story: $premise\n\n") + + try { + // Gather context from input files and messages + val priorContext = getPriorCode(agent.executionState) + val combinedContext = (if (inputContext.isNotBlank()) inputContext else "") + + (if (priorContext.isNotBlank()) "\n\n## Prior Context\n\n$priorContext" else "") + + // Gather context + if (priorContext.isNotBlank()) { + transcriptWriter?.apply { + write("## Context from Previous Tasks\n\n") + write(priorContext.truncateForDisplay(2000)) + write("\n\n---\n\n") + flush() + } + log.debug("Found prior context: ${priorContext.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Context from Previous Tasks") + appendLine() + appendLine(priorContext.truncateForDisplay(2000)) + }.renderMarkdown + ) + task.update() + } + + // Phase 1: Create story structure and decision tree + transcriptWriter?.apply { + write("## Phase 1: Story Structure 
Planning\n\n") + write("Creating decision tree and story architecture...\n\n") + flush() + } + log.info("Phase 1: Creating story structure") + val structureTask = task.ui.newTask(false) + tabs["Story Structure"] = structureTask.placeholder + + structureTask.add( + buildString { + appendLine("# Story Structure & Decision Tree") + appendLine() + appendLine("**Status:** Planning narrative branches and decision points...") + appendLine() + }.renderMarkdown + ) + task.update() + + val stateVars = if (executionConfig.track_state_variables) { + executionConfig.state_variables ?: listOf("health", "reputation", "resources") + } else { + emptyList() + } + // First, create a high-level outline + val outlineAgent = ChatAgent( + prompt = """ + You are an expert interactive fiction designer. Create a high-level outline for a branching story. + Premise: $premise + Story Parameters: +- Genre: ${executionConfig.genre} +- Target Audience: ${executionConfig.target_audience} +- Tone: ${executionConfig.tone} +- Decision Points: ${executionConfig.num_decision_points} +- Choices per Decision: ${executionConfig.choices_per_decision} +- Number of Endings: ${executionConfig.num_endings} +${if (combinedContext.isNotBlank()) "Additional Context:\n${combinedContext.truncateForDisplay(1000)}\n" else ""} +Create a brief outline with: +1. A compelling title +2. A one-paragraph opening concept +3. List of ${executionConfig.num_decision_points} decision point IDs and brief descriptions (e.g., "decision_1: Choose path in forest") +4. List of ${executionConfig.num_endings} ending IDs and types (e.g., "ending_triumph: Hero succeeds") +5. A simple flow showing how decisions connect (decision_1 -> decision_2 or ending_1) +Keep it concise - just the structure, not full narratives. 
+ """.trimIndent(), + model = api, + temperature = 0.7 + ) + val outline = outlineAgent.answer(listOf("Create outline")) + log.debug("Generated outline: ${outline.length} chars") + transcriptWriter?.apply { + write("### Story Outline\n\n") + write(outline) + write("\n\n") + flush() + } + structureTask.add( + buildString { + appendLine("## Story Outline") + appendLine() + appendLine(outline) + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** Building detailed structure...") + appendLine() + }.renderMarkdown + ) + task.update() + // Now create the detailed structure in smaller pieces + + val structureAgent = ParsedAgent( + resultClass = StoryStructure::class.java, + prompt = """ +You are an expert interactive fiction designer. Create a detailed story structure based on this outline. + +OUTLINE: +$outline + +Story Parameters: +- Genre: ${executionConfig.genre} +- Target Audience: ${executionConfig.target_audience} +- Tone: ${executionConfig.tone} +${if (executionConfig.track_state_variables) "- State Variables to Track: ${stateVars.joinToString(", ")}" else ""} + + +Expand the outline into a complete structure with: +1. The title from the outline +2. A brief opening description (2-3 sentences, NOT the full narrative) +3. Decision points with: + - A unique ID (e.g., "decision_1", "decision_2") + - A brief narrative description (1-2 sentences) + - A clear decision prompt + - ${executionConfig.choices_per_decision} meaningful choices + - Each choice should lead to another decision point or an ending +4. Endings with unique IDs and types +${if (executionConfig.track_state_variables) "5. State variable definitions and how they're affected by choices" else ""} + + +IMPORTANT: Keep descriptions brief. Full narratives will be written later. +Focus on structure and connections, not detailed prose. 
+ """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val structure = structureAgent.answer(listOf("Create detailed structure from outline")).obj + + // Validate structure + structure.validate()?.let { validationError -> + log.error("Structure validation failed: $validationError") + structureTask.error(ValidatedObject.ValidationError(validationError, structure)) + transcriptWriter?.apply { + write("**ERROR:** Structure validation failed: $validationError\n\n") + flush() + close() + } + task.safeComplete("Structure validation failed: $validationError", log) + resultFn("ERROR: Structure validation failed: $validationError") + return + } + + log.info("Generated structure: ${structure.decision_points.size} decision points, ${structure.endings.size} endings") + transcriptWriter?.apply { + write("### Generated Story Structure\n\n") + write("**Title:** ${structure.title}\n\n") + write("**Opening:** ${structure.opening}\n\n") + write("**Decision Points:** ${structure.decision_points.size}\n\n") + structure.decision_points.forEach { dp -> + write("- ${dp.id}: ${dp.decision_prompt}\n") + dp.choices.forEach { choice -> + write(" - ${choice.text} → ${choice.leads_to}\n") + } + } + write("\n**Endings:** ${structure.endings.size}\n\n") + structure.endings.forEach { ending -> + write("- ${ending.id}: ${ending.ending_type}\n") + } + if (structure.tracked_variables.isNotEmpty()) { + write("\n**Tracked Variables:**\n\n") + structure.tracked_variables.forEach { (name, description) -> + write("- $name: $description\n") + } + } + write("\n---\n\n") + flush() + } + + + val structureContent = buildString { + appendLine("## ${structure.title}") + appendLine() + appendLine("### Opening") + appendLine(structure.opening.truncateForDisplay(500)) + appendLine() + if (structure.tracked_variables.isNotEmpty()) { + appendLine("### Tracked Variables") + structure.tracked_variables.forEach { (name, description) -> + appendLine("- 
**$name:** $description") + } + appendLine() + } + appendLine("---") + appendLine() + appendLine("### Decision Tree") + appendLine() + appendLine("```") + appendLine("START") + appendLine(" ↓") + structure.decision_points.forEachIndexed { index, dp -> + appendLine("${dp.id}: ${dp.decision_prompt.truncateForDisplay(60)}") + dp.choices.forEach { choice -> + appendLine(" → ${choice.text.truncateForDisplay(50)} → ${choice.leads_to}") + } + if (index < structure.decision_points.size - 1) { + appendLine(" ↓") + } + } + appendLine() + appendLine("ENDINGS:") + structure.endings.forEach { ending -> + appendLine(" • ${ending.id}: ${ending.ending_type}") + } + appendLine("```") + appendLine() + appendLine("---") + appendLine() + appendLine("### Decision Points Summary") + structure.decision_points.forEach { dp -> + appendLine("#### ${dp.id}") + appendLine("**Prompt:** ${dp.decision_prompt}") + appendLine() + appendLine("**Choices:**") + dp.choices.forEach { choice -> + appendLine("- ${choice.text}") + if (choice.state_changes.isNotEmpty()) { + appendLine(" - State changes: ${choice.state_changes.entries.joinToString(", ") { "${it.key} ${if (it.value >= 0) "+" else ""}${it.value}" }}") + } + appendLine(" - Leads to: ${choice.leads_to}") + } + appendLine() + } + appendLine("---") + appendLine() + appendLine("### Endings Summary") + structure.endings.forEach { ending -> + appendLine("#### ${ending.id}: ${ending.ending_type}") + if (ending.required_conditions.isNotEmpty()) { + appendLine("**Conditions:** ${ending.required_conditions.entries.joinToString(", ") { "${it.key} ${it.value}" }}") + } + appendLine() + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + structureTask.add(structureContent.renderMarkdown) + task.update() + + overviewTask.add("✅ Phase 1 Complete: Story structure created\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Opening Segment\n*Writing the story opening...*\n".renderMarkdown) + task.update() + + // Phase 2: Write opening segment + 
transcriptWriter?.apply { + write("## Phase 2: Opening Segment\n\n") + write("Writing the story opening...\n\n") + flush() + } + log.info("Phase 2: Writing opening segment") + val openingTask = task.ui.newTask(false) + tabs["Opening"] = openingTask.placeholder + + openingTask.add( + buildString { + appendLine("# Opening Segment") + appendLine() + appendLine("**Status:** Writing opening narrative...") + appendLine() + }.renderMarkdown + ) + task.update() + + val openingAgent = ParsedAgent( + resultClass = StorySegment::class.java, + prompt = """ +You are a skilled ${executionConfig.genre} writer. Write the opening segment of this interactive story. + +Title: ${structure.title} +Premise: $premise + +Story Parameters: +- Genre: ${executionConfig.genre} +- Tone: ${executionConfig.tone} +- Point of View: ${executionConfig.point_of_view} +- Writing Style: ${executionConfig.writing_style} +- Target Audience: ${executionConfig.target_audience} + +Opening Outline: ${structure.opening} + +Write an engaging opening segment (~${executionConfig.segment_word_count} words) that: +1. Immediately hooks the reader +2. Establishes the setting and atmosphere +3. Introduces the protagonist (the reader in ${executionConfig.point_of_view} POV) +4. Sets up the initial situation +5. Creates anticipation for the first decision +6. Matches the ${executionConfig.tone} tone and ${executionConfig.writing_style} style + +${if (executionConfig.track_state_variables) "Initialize state variables: ${stateVars.joinToString(", ")}" else ""} + +Make it immersive and compelling. The reader should feel invested immediately. 
+ """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var openingSegment = openingAgent.answer(listOf("Write opening")).obj + transcriptWriter?.apply { + write("### Opening Segment\n\n") + write(openingSegment.content) + write("\n\n**Word Count:** ${openingSegment.word_count}\n\n") + write("---\n\n") + flush() + } + + openingTask.add( + buildString { + appendLine("## ${structure.title}") + appendLine() + appendLine(openingSegment.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${openingSegment.word_count}") + if (openingSegment.state_changes.isNotEmpty()) { + appendLine() + appendLine("**Initial State:**") + openingSegment.state_changes.forEach { (variable, value) -> + appendLine("- $variable: $value") + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + + resultBuilder.append("## ${structure.title}\n\n") + resultBuilder.append(openingSegment.content) + resultBuilder.append("\n\n---\n\n") + + overviewTask.add("✅ Phase 2 Complete: Opening written (${openingSegment.word_count} words)\n".renderMarkdown) + overviewTask.add("\n### Phase 3: Decision Points\n*Writing branching narrative segments...*\n".renderMarkdown) + task.update() + + // Phase 3: Write each decision point + transcriptWriter?.apply { + write("## Phase 3: Decision Points\n\n") + write("Writing branching narrative segments...\n\n") + flush() + } + log.info("Phase 3: Writing decision points") + val decisionSegments = mutableMapOf() + var cumulativeWordCount = openingSegment.word_count + + structure.decision_points.forEachIndexed { index, decisionPoint -> + log.info("Writing decision point ${index + 1}/${structure.decision_points.size}: ${decisionPoint.id}") + + overviewTask.add("- ${decisionPoint.id}: ${decisionPoint.decision_prompt.truncateForDisplay(50)} ".renderMarkdown) + task.update() + + val dpTask = task.ui.newTask(false) + 
tabs["${decisionPoint.id}"] = dpTask.placeholder + + dpTask.add( + buildString { + appendLine("# ${decisionPoint.id}") + appendLine() + appendLine("**Status:** Writing decision point narrative...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Build context from previous segments + val previousContext = if (decisionSegments.isNotEmpty()) { + buildString { + appendLine("## Previous Story Context") + val recentSegments = decisionSegments.values.toList().takeLast(2) + recentSegments.forEach { seg -> + appendLine("### ${seg.id}") + appendLine(seg.content.takeLast(300)) + appendLine() + } + } + } else { + buildString { + appendLine("## Opening Context") + appendLine(openingSegment.content.takeLast(300)) + } + } + + val decisionAgent = ParsedAgent( + resultClass = StorySegment::class.java, + prompt = """ +You are a skilled ${executionConfig.genre} writer. Write the narrative segment leading to this decision point. + +Title: ${structure.title} +Decision Point: ${decisionPoint.id} + +Decision Outline: +- Narrative: ${decisionPoint.narrative} +- Decision Prompt: ${decisionPoint.decision_prompt} + +Available Choices: +${decisionPoint.choices.joinToString("\n") { "- ${it.text}" }} + +$previousContext + +Story Parameters: +- Genre: ${executionConfig.genre} +- Tone: ${executionConfig.tone} +- Point of View: ${executionConfig.point_of_view} +- Writing Style: ${executionConfig.writing_style} + +Write a narrative segment (~${executionConfig.segment_word_count} words) that: +1. Flows naturally from the previous segment +2. Develops the story and builds tension +3. Presents the situation requiring a decision +4. Makes all choices feel meaningful and distinct +5. Maintains the ${executionConfig.tone} tone +6. 
Ends with the decision prompt clearly presented + +${ + if (executionConfig.track_state_variables && decisionPoint.state_snapshot.isNotEmpty()) { + "Current State: ${decisionPoint.state_snapshot.entries.joinToString(", ") { "${it.key}: ${it.value}" }}" + } else "" + } + +Make the reader feel the weight of their choice. Each option should feel viable but lead to different outcomes. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var segment = decisionAgent.answer(listOf("Write decision point")).obj.copy(id = decisionPoint.id) + decisionSegments[decisionPoint.id] = segment + cumulativeWordCount += segment.word_count + transcriptWriter?.apply { + write("### ${decisionPoint.id}\n\n") + write(segment.content) + write("\n\n**Decision:** ${decisionPoint.decision_prompt}\n\n") + decisionPoint.choices.forEach { choice -> + write("- ${choice.text} → ${choice.leads_to}\n") + } + write("\n**Word Count:** ${segment.word_count}\n\n---\n\n") + flush() + } + + dpTask.add( + buildString { + appendLine("## ${decisionPoint.id}") + appendLine() + appendLine(segment.content) + appendLine() + appendLine("---") + appendLine() + appendLine("### ${decisionPoint.decision_prompt}") + appendLine() + decisionPoint.choices.forEachIndexed { choiceIndex, choice -> + appendLine("**${choiceIndex + 1}. 
${choice.text}**") + if (choice.immediate_consequence.isNotBlank()) { + appendLine(" - *${choice.immediate_consequence}*") + } + if (choice.state_changes.isNotEmpty()) { + appendLine(" - State changes: ${choice.state_changes.entries.joinToString(", ") { "${it.key} ${if (it.value >= 0) "+" else ""}${it.value}" }}") + } + appendLine() + } + appendLine("---") + appendLine() + appendLine("**Word Count:** ${segment.word_count}") + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + + resultBuilder.append("## ${decisionPoint.id}\n\n") + resultBuilder.append(segment.content) + resultBuilder.append("\n\n") + resultBuilder.append("### ${decisionPoint.decision_prompt}\n\n") + decisionPoint.choices.forEachIndexed { choiceIndex, choice -> + resultBuilder.append("${choiceIndex + 1}. ${choice.text}\n") + } + resultBuilder.append("\n---\n\n") + + overviewTask.add("✅ (${segment.word_count} words)\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 3 Complete: All decision points written\n".renderMarkdown) + overviewTask.add("\n### Phase 4: Endings\n*Writing story conclusions...*\n".renderMarkdown) + task.update() + + // Phase 4: Write endings + transcriptWriter?.apply { + write("## Phase 4: Endings\n\n") + write("Writing story conclusions...\n\n") + flush() + } + log.info("Phase 4: Writing endings") + val endingSegments = mutableMapOf() + + structure.endings.forEachIndexed { index, ending -> + log.info("Writing ending ${index + 1}/${structure.endings.size}: ${ending.id}") + + overviewTask.add("- ${ending.id}: ${ending.ending_type} ".renderMarkdown) + task.update() + + val endingTask = task.ui.newTask(false) + tabs["${ending.id}"] = endingTask.placeholder + + endingTask.add( + buildString { + appendLine("# ${ending.id}") + appendLine() + appendLine("**Status:** Writing ending narrative...") + appendLine() + }.renderMarkdown + ) + task.update() + + val endingAgent = ParsedAgent( + resultClass = StorySegment::class.java, + 
prompt = """ +You are a skilled ${executionConfig.genre} writer. Write a satisfying ending for this interactive story. + +Title: ${structure.title} +Ending: ${ending.id} +Ending Type: ${ending.ending_type} + +Ending Outline: ${ending.narrative} + +${ + if (ending.required_conditions.isNotEmpty()) { + "This ending is reached when: ${ending.required_conditions.entries.joinToString(", ") { "${it.key} ${it.value}" }}" + } else "" + } + +${ + if (ending.path_summary.isNotEmpty()) { + "Key choices that led here:\n${ending.path_summary.joinToString("\n") { "- $it" }}" + } else "" + } + +Story Parameters: +- Genre: ${executionConfig.genre} +- Tone: ${executionConfig.tone} +- Point of View: ${executionConfig.point_of_view} +- Writing Style: ${executionConfig.writing_style} + +Write an ending segment (~${executionConfig.segment_word_count} words) that: +1. Provides a satisfying conclusion to the story +2. Reflects the consequences of the reader's choices +3. Matches the ${ending.ending_type} ending type +4. Maintains the ${executionConfig.tone} tone +5. Gives a sense of closure while honoring the journey +6. Makes the reader feel their choices mattered + +Make this ending feel earned and meaningful. It should resonate with the path taken. 
+ """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var endingSegment = endingAgent.answer(listOf("Write ending")).obj.copy(id = ending.id) + endingSegments[ending.id] = endingSegment + cumulativeWordCount += endingSegment.word_count + transcriptWriter?.apply { + write("### ${ending.id}: ${ending.ending_type}\n\n") + write(endingSegment.content) + write("\n\n**Word Count:** ${endingSegment.word_count}\n\n---\n\n") + flush() + } + + endingTask.add( + buildString { + appendLine("## ${ending.id}: ${ending.ending_type}") + appendLine() + appendLine(endingSegment.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**THE END**") + appendLine() + appendLine("**Word Count:** ${endingSegment.word_count}") + if (ending.required_conditions.isNotEmpty()) { + appendLine() + appendLine("**Conditions to Reach:**") + ending.required_conditions.forEach { (condition, value) -> + appendLine("- $condition: $value") + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + + resultBuilder.append("## ${ending.id}: ${ending.ending_type}\n\n") + resultBuilder.append(endingSegment.content) + resultBuilder.append("\n\n**THE END**\n\n---\n\n") + + overviewTask.add("✅ (${endingSegment.word_count} words)\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 4 Complete: All endings written\n".renderMarkdown) + overviewTask.add("\n### Phase 5: Interactive Map\n*Generating playable story map...*\n".renderMarkdown) + task.update() + + // Phase 5: Create interactive story map + log.info("Phase 5: Creating interactive story map") + val mapTask = task.ui.newTask(false) + tabs["Story Map"] = mapTask.placeholder + + val storyMap = buildString { + appendLine("# ${structure.title} - Interactive Story Map") + appendLine() + appendLine("## How to Play") + appendLine("1. Start with the Opening segment") + appendLine("2. 
At each decision point, choose one of the available options") + appendLine("3. Follow the path indicated by your choice") + appendLine("4. Continue until you reach an ending") + appendLine("5. Try different choices to discover all ${structure.endings.size} endings!") + appendLine() + if (structure.tracked_variables.isNotEmpty()) { + appendLine("## Tracked Variables") + structure.tracked_variables.forEach { (name, description) -> + appendLine("- **$name:** $description") + } + appendLine() + } + appendLine("---") + appendLine() + appendLine("## START: Opening") + appendLine() + appendLine(openingSegment.content) + appendLine() + appendLine("**→ Continue to: ${structure.decision_points.firstOrNull()?.id ?: "ending"}**") + appendLine() + appendLine("---") + appendLine() + + structure.decision_points.forEach { dp -> + val segment = decisionSegments[dp.id] + appendLine("## ${dp.id}") + appendLine() + if (segment != null) { + appendLine(segment.content) + appendLine() + } + appendLine("### ${dp.decision_prompt}") + appendLine() + dp.choices.forEachIndexed { index, choice -> + appendLine("**Choice ${index + 1}: ${choice.text}**") + if (choice.immediate_consequence.isNotBlank()) { + appendLine() + appendLine("*${choice.immediate_consequence}*") + } + if (choice.state_changes.isNotEmpty()) { + appendLine() + appendLine("State changes: ${choice.state_changes.entries.joinToString(", ") { "${it.key} ${if (it.value >= 0) "+" else ""}${it.value}" }}") + } + appendLine() + appendLine("**→ Continue to: ${choice.leads_to}**") + appendLine() + } + appendLine("---") + appendLine() + } + + structure.endings.forEach { ending -> + val segment = endingSegments[ending.id] + appendLine("## ${ending.id}: ${ending.ending_type}") + appendLine() + if (segment != null) { + appendLine(segment.content) + appendLine() + } + appendLine("**THE END**") + appendLine() + if (ending.required_conditions.isNotEmpty()) { + appendLine("*This ending is reached when: 
${ending.required_conditions.entries.joinToString(", ") { "${it.key} ${it.value}" }}*") + appendLine() + } + appendLine("---") + appendLine() + } + + appendLine("## Story Statistics") + appendLine() + appendLine("- Total Word Count: $cumulativeWordCount") + appendLine("- Decision Points: ${structure.decision_points.size}") + appendLine("- Total Choices: ${structure.decision_points.sumOf { it.choices.size }}") + appendLine("- Possible Endings: ${structure.endings.size}") + appendLine("- Unique Paths: ~${calculateUniquePaths(structure)}") + } + + mapTask.add(storyMap.renderMarkdown) + task.update() + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + transcriptWriter?.apply { + write("## Generation Complete\n\n") + write("**Statistics:**\n\n") + write("- Total Word Count: $cumulativeWordCount\n") + write("- Decision Points: ${structure.decision_points.size}\n") + write("- Total Choices: ${structure.decision_points.sumOf { it.choices.size }}\n") + write("- Endings: ${structure.endings.size}\n") + write("- Estimated Unique Paths: ~${calculateUniquePaths(structure)}\n") + write("- Total Time: ${totalTime / 1000.0}s\n\n") + write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n") + flush() + close() + } + + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Total Word Count: $cumulativeWordCount") + appendLine("- Decision Points: ${structure.decision_points.size}") + appendLine("- Total Choices: ${structure.decision_points.sumOf { it.choices.size }}") + appendLine("- Endings: ${structure.endings.size}") + appendLine("- Estimated Unique Paths: ~${calculateUniquePaths(structure)}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd 
HH:mm:ss"))}") + }.renderMarkdown + ) + task.update() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# Interactive Story Summary: ${structure.title}") + appendLine() + appendLine("A complete interactive story of **$cumulativeWordCount words** with **${structure.decision_points.size} decision points** and **${structure.endings.size} endings** was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("**Structure:**") + appendLine("- Opening segment") + appendLine("- ${structure.decision_points.size} branching decision points") + appendLine("- ${structure.decision_points.sumOf { it.choices.size }} total choices") + appendLine("- ${structure.endings.size} distinct endings") + appendLine("- Estimated ${calculateUniquePaths(structure)} unique story paths") + appendLine() + appendLine("> The complete interactive story map is available in the Story Map tab for play-through.") + } + + log.info("InteractiveStoryTask completed: words=$cumulativeWordCount, decisions=${structure.decision_points.size}, endings=${structure.endings.size}, time=${totalTime}ms") + + task.safeComplete( + "Interactive story generation complete: $cumulativeWordCount words, ${structure.decision_points.size} decisions, ${structure.endings.size} endings in ${totalTime / 1000}s", + log + ) + resultFn(buildFinalResultWithLinks(task, finalResult, storyMap, cumulativeWordCount, structure, totalTime)) + + } catch (e: Exception) { + log.error("Error during interactive story generation", e) + transcriptWriter?.close() + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + + val errorOutput = buildString { + appendLine("# Error in Interactive Story Generation") + appendLine() + appendLine("**Premise:** $premise") + 
appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } + + private fun buildFinalResultWithLinks( + task: SessionTask, + summary: String, + storyMap: String, + wordCount: Int, + structure: StoryStructure, + totalTime: Long + ): String { + return try { + // Save story map to file + val (mapLink, mapFile) = task.createFile("story_map.md") + mapFile?.writeText(storyMap) + // Save summary to file + val (summaryLink, summaryFile) = task.createFile("story_summary.md") + summaryFile?.writeText(summary) + buildString { + appendLine("# Interactive Story Generation Complete") + appendLine() + appendLine("**Story:** ${structure.title}") + appendLine("**Word Count:** $wordCount") + appendLine("**Decision Points:** ${structure.decision_points.size}") + appendLine("**Endings:** ${structure.endings.size}") + appendLine("**Generation Time:** ${totalTime / 1000.0}s") + appendLine() + appendLine("## Output Files") + appendLine() + appendLine("- [Story Map (Interactive)]($mapLink) - Complete playable story with all paths") + appendLine(" - [HTML](${mapLink.removeSuffix(".md")}.html)") + appendLine(" - [PDF](${mapLink.removeSuffix(".md")}.pdf)") + appendLine() + appendLine("- [Story Summary]($summaryLink) - Generation summary and statistics") + appendLine(" - [HTML](${summaryLink.removeSuffix(".md")}.html)") + appendLine(" - [PDF](${summaryLink.removeSuffix(".md")}.pdf)") + appendLine() + appendLine("## Quick Stats") + appendLine() + appendLine("- Total Choices: ${structure.decision_points.sumOf { it.choices.size }}") + appendLine("- Unique Paths: ~${calculateUniquePaths(structure)}") + appendLine("- Tracked Variables: ${structure.tracked_variables.size}") + appendLine() + appendLine("---") + appendLine() + appendLine(summary) + } + } catch (e: Exception) { + log.error("Failed to create output files", e) + 
buildString { + appendLine("# Interactive Story Generation Complete") + appendLine() + appendLine("**Note:** Could not save detailed output files, but story was generated successfully.") + appendLine() + appendLine(summary) + } + } + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + } + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = codeFiles[file.toPath()] ?: file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + + private fun calculateUniquePaths(structure: StoryStructure): Int { + // Simple estimation: multiply choices at each decision point + // This is an upper bound; actual paths may converge + val choicesPerDecision = structure.decision_points.map { it.choices.size } + return if (choicesPerDecision.isEmpty()) { + 1 + } else { + choicesPerDecision.fold(1) { acc, choices -> + (acc * choices).coerceAtMost(1000) // Cap at 1000 to avoid overflow + } + } + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(InteractiveStoryTask::class.java) + val InteractiveStory = TaskType( + "InteractiveStory", + InteractiveStoryTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Create choose-your-own-adventure narratives with branching paths", + """ + Generates complete interactive stories with meaningful choices and multiple endings. +
      +
    • Creates detailed story structure with decision tree
    • Writes opening segment that hooks the reader
    • Develops branching narrative segments for each decision point
    • Generates multiple distinct endings based on player choices
    • Tracks state variables (health, reputation, inventory, etc.)
    • Ensures all paths lead to meaningful endings (no dead ends)
    • Optimizes for replay value with significantly different experiences
    • Tracks consequences across choices for coherent storytelling
    • Produces complete playable interactive story map
    • Ideal for interactive fiction, training scenarios, educational content, and games
    + """ + ) + + fun getAvailableFiles( + path: Path, + treatDocumentsAsText: Boolean = false, + ): List { + return try { + listOf(FileSelectionUtils.filteredWalkAsciiTree(path.toFile(), 20, treatDocumentsAsText = treatDocumentsAsText)) + } catch (e: Exception) { + log.error("Error listing available files", e) + listOf("Error listing files: ${e.message}") + } + } + + private val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", "h", "hpp", + "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + + fun isTextFile(file: File): Boolean { + return textExtensions.contains(file.extension.lowercase()) + } + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/JournalismReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/JournalismReasoningTask.kt new file mode 100644 index 000000000..822b30bc2 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/JournalismReasoningTask.kt @@ -0,0 +1,1208 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import 
java.nio.charset.StandardCharsets +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +open class JournalismReasoningTask( + orchestrationConfig: OrchestrationConfig, + planTask: T? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + val maxDescriptionLength = 100 + + companion object { + private val log: Logger = LoggerFactory.getLogger(JournalismReasoningTask::class.java) + val JournalismReasoning = TaskType( + "JournalismReasoning", + JournalismReasoningTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Investigate stories through journalistic principles and methods", + """ + Analyzes stories using professional journalism standards and practices. +
      +
    • Verifies facts and checks claims against evidence
    • Identifies multiple perspectives and source credibility
    • Analyzes context, background, and broader implications
    • Detects potential biases and conflicts of interest
    • Finds information gaps and unanswered questions
    • Explores alternative story angles and approaches
    • Assesses newsworthiness and public interest
    • Useful for investigative reporting, fact-checking, editorial planning
    • Generates structured journalistic analysis with verified facts
    + """ + ) + } + + open class JournalismReasoningTaskExecutionConfigData( + @Description("The story topic or event to investigate") + val story_topic: String? = null, + + @Description("Input files or documents to inform the investigation (glob patterns)") + val input_files: List? = null, + + @Description("Journalism elements to consider (who, what, when, where, why, how)") + val journalism_elements: Map? = null, + + @Description("Whether to identify and verify key facts") + val verify_facts: Boolean = true, + + @Description("Whether to identify multiple perspectives and sources") + val identify_perspectives: Boolean = true, + + @Description("Whether to analyze context and background") + val analyze_context: Boolean = true, + + @Description("Whether to identify potential biases and conflicts of interest") + val identify_biases: Boolean = true, + + @Description("Whether to check for missing information or unanswered questions") + val find_gaps: Boolean = true, + + @Description("Number of alternative angles to explore") + val alternative_angles: Int = 3, + + @Description("Whether to assess newsworthiness and public interest") + val assess_newsworthiness: Boolean = true, + + @Description("Whether to include file content in the analysis") + val include_file_content: Boolean = false, + + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = JournalismReasoning.name, + task_description = "Investigate '$story_topic' through journalistic analysis", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? 
{ + if (story_topic.isNullOrBlank()) { + return "Story topic must not be null or blank" + } + if (alternative_angles < 1 || alternative_angles > 10) { + return "Alternative angles must be between 1 and 10, got: $alternative_angles" + } + return ValidatedObject.validateFields(this) + } + } + + data class FactCheck( + val claim: String = "", + val source: String = "", + val verification_status: String = "", + val supporting_evidence: List = emptyList(), + val contradicting_evidence: List = emptyList(), + val confidence_level: String = "" + ) : ValidatedObject { + override fun validate(): String? { + if (claim.isBlank()) { + return "Fact claim must not be blank" + } + return ValidatedObject.validateFields(this) + } + } + + data class FactChecks( + val facts: List = emptyList() + ) : ValidatedObject + + data class SourcePerspective( + val source_name: String = "", + val role: String = "", + val perspective: String = "", + val key_quotes: List = emptyList(), + val potential_bias: String = "", + val credibility_assessment: String = "" + ) : ValidatedObject { + override fun validate(): String? { + if (source_name.isBlank()) { + return "Source name must not be blank" + } + if (perspective.isBlank()) { + return "Source perspective must not be blank" + } + return ValidatedObject.validateFields(this) + } + } + + data class SourcePerspectives( + val sources: List = emptyList() + ) : ValidatedObject + + data class ContextAnalysis( + val historical_background: String = "", + val relevant_trends: List = emptyList(), + val related_events: List = emptyList(), + val broader_implications: List = emptyList(), + val key_stakeholders: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? 
{ + if (historical_background.isBlank()) { + return "Historical background must not be blank" + } + return ValidatedObject.validateFields(this) + } + } + + data class BiasAnalysis( + val potential_biases: List = emptyList(), + val conflicts_of_interest: List = emptyList(), + val missing_voices: List = emptyList(), + val framing_issues: List = emptyList(), + val balance_assessment: String = "" + ) : ValidatedObject { + override fun validate(): String? { + if (balance_assessment.isBlank()) { + return "Balance assessment must not be blank" + } + return ValidatedObject.validateFields(this) + } + } + + data class StoryAngle( + val angle_title: String = "", + val focus: String = "", + val target_audience: String = "", + val key_questions: List = emptyList(), + val unique_value: String = "", + val newsworthiness_score: Double = 0.0 + ) : ValidatedObject { + override fun validate(): String? { + if (angle_title.isBlank()) { + return "Angle title must not be blank" + } + if (newsworthiness_score < 0.0 || newsworthiness_score > 1.0) { + return "Newsworthiness score must be between 0.0 and 1.0" + } + return ValidatedObject.validateFields(this) + } + } + + data class StoryAngles( + val angles: List = emptyList() + ) : ValidatedObject + + data class InformationGap( + val question: String = "", + val importance: String = "", + val potential_sources: List = emptyList(), + val research_approach: String = "" + ) : ValidatedObject { + override fun validate(): String? 
{ + if (question.isBlank()) { + return "Gap question must not be blank" + } + val validImportance = setOf("critical", "important", "minor") + if (importance.isNotBlank() && importance.lowercase() !in validImportance) { + return "Importance must be one of: ${validImportance.joinToString(", ")}" + } + return ValidatedObject.validateFields(this) + } + } + + data class InformationGaps( + val gaps: List = emptyList() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ +JournalismReasoning - Investigate stories through journalistic principles and methods + ** Specify the story topic or event to investigate + ** Define journalism elements: who, what, when, where, why, how + ** Enable fact verification and source checking + ** Identify multiple perspectives and stakeholder voices + ** Analyze context, background, and broader implications + ** Detect potential biases and conflicts of interest + ** Find information gaps and unanswered questions + ** Explore alternative story angles + ** Assess newsworthiness and public interest + ** Produces structured journalistic analysis with verified facts + """.trimIndent() + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("journalism_transcript.md"), task.resolve("journalism_transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun getInputFileContent(): String { + val inputFiles = executionConfig?.input_files ?: return "" + if (inputFiles.isEmpty() || !executionConfig?.include_file_content!!) 
{ + return "" + } + return inputFiles + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { file -> + "# ${root.relativize(file.toPath())}\n\n```\n${file.readText()}\n```" + } + } + + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + log.info("Starting JournalismReasoningTask for story: '${executionConfig?.story_topic}'") + // Initialize detailed output file + val transcriptStream = transcript(task) + val transcript = transcriptStream?.bufferedWriter() + transcript?.let { writer -> + + val storyTopic = executionConfig?.story_topic + if (storyTopic.isNullOrBlank()) { + log.error("No story topic specified for journalism reasoning") + task.safeComplete("CONFIGURATION ERROR: No story topic specified", log) + resultFn("CONFIGURATION ERROR: No story topic specified") + return + } + + val journalismElements = executionConfig.journalism_elements ?: emptyMap() + val verifyFacts = executionConfig.verify_facts + val identifyPerspectives = executionConfig.identify_perspectives + val analyzeContext = executionConfig.analyze_context + val identifyBiases = executionConfig.identify_biases + val findGaps = executionConfig.find_gaps + val alternativeAngles = executionConfig.alternative_angles.coerceIn(1, 10) + val assessNewsworthiness = executionConfig.assess_newsworthiness + + log.info( + "Configuration: verifyFacts=$verifyFacts, identifyPerspectives=$identifyPerspectives, " + + "analyzeContext=$analyzeContext, 
identifyBiases=$identifyBiases, findGaps=$findGaps, " + + "alternativeAngles=$alternativeAngles, assessNewsworthiness=$assessNewsworthiness" + ) + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Journalism Investigation") + appendLine() + appendLine("**Story Topic:** $storyTopic") + appendLine() + appendLine("## Journalism Elements") + journalismElements.forEach { (key, value) -> + appendLine("- **${key.capitalize()}:** $value") + } + appendLine() + appendLine("## Investigation Configuration") + appendLine("- Verify Facts: ${if (verifyFacts) "✓" else "✗"}") + appendLine("- Identify Perspectives: ${if (identifyPerspectives) "✓" else "✗"}") + appendLine("- Analyze Context: ${if (analyzeContext) "✓" else "✗"}") + appendLine("- Identify Biases: ${if (identifyBiases) "✓" else "✗"}") + appendLine("- Find Information Gaps: ${if (findGaps) "✓" else "✗"}") + appendLine("- Alternative Angles: $alternativeAngles") + appendLine("- Assess Newsworthiness: ${if (assessNewsworthiness) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("*Initializing investigation...*") + } + // Write to transcript + writer.appendLine("# Journalism Investigation Transcript") + writer.appendLine() + writer.appendLine("**Story Topic:** $storyTopic") + writer.appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + writer.appendLine() + writer.flush() + // Include file content if requested + val fileContent = getInputFileContent() + if (fileContent.isNotBlank()) { + writer.appendLine("## Input Files") + writer.appendLine() + 
writer.appendLine(fileContent) + writer.appendLine() + writer.flush() + } + + overviewTask.add(overviewContent.renderMarkdown) + task.update() + + val priorContext = getPriorCode(agent.executionState) + if (priorContext.isNotBlank()) { + log.debug("Found prior context: ${priorContext.length} characters") + val contextTask = task.ui.newTask(false) + tabs["Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Context from Previous Tasks") + appendLine() + appendLine(priorContext.truncateForDisplay()) + }.renderMarkdown + ) + task.update() + } + + val resultBuilder = StringBuilder() + resultBuilder.append("# Journalism Investigation: $storyTopic\n\n") + + try { + // Step 1: Verify facts + if (verifyFacts) { + log.info("Step 1: Verifying facts") + overviewTask.add("\n✅ Verifying facts and claims...\n".renderMarkdown) + task.update() + + val factsTask = task.ui.newTask(false) + tabs["Fact Verification"] = factsTask.placeholder + + factsTask.add( + buildString { + appendLine("# Fact Verification") + appendLine() + appendLine("**Status:** Checking claims and evidence...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 1: Fact Verification") + writer.appendLine() + writer.flush() + + val factAgent = ParsedAgent( + resultClass = FactChecks::class.java, + prompt = """ +You are an expert fact-checker and investigative journalist. Verify the key facts and claims in this story. 
+ +Story Topic: $storyTopic + +Journalism Elements: +${journalismElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} + +${if (priorContext.isNotBlank()) "Additional Context:\n$priorContext\n" else ""} + +Identify and verify 5-10 key factual claims, including: +- Core facts about the event or topic +- Statistical claims or data points +- Attributions and quotes +- Timeline elements +- Causal relationships + +For each fact, provide: +- The specific claim +- The source of the claim +- Verification status (verified, unverified, disputed, false, partially true) +- Supporting evidence +- Any contradicting evidence +- Confidence level in the verification + +Apply rigorous journalistic standards. Be skeptical but fair. + """.trimIndent(), + model = api, + temperature = 0.3, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val factChecks = factAgent.answer(listOf("Verify facts")).obj.facts + log.debug("Verified ${factChecks.size} facts") + + val factsContent = buildString { + appendLine("## Verified Facts") + appendLine() + factChecks.forEachIndexed { index, fact -> + val statusIcon = when (fact.verification_status.lowercase()) { + "verified" -> "✅" + "partially true" -> "⚠️" + "disputed" -> "❓" + "false" -> "❌" + else -> "⏳" + } + appendLine("### $statusIcon ${index + 1}. 
${fact.claim.truncateForDisplay(80)}") + appendLine() + appendLine("**Status:** ${fact.verification_status}") + appendLine() + appendLine("**Source:** ${fact.source}") + appendLine() + appendLine("**Confidence:** ${fact.confidence_level}") + appendLine() + if (fact.supporting_evidence.isNotEmpty()) { + appendLine("**Supporting Evidence:**") + fact.supporting_evidence.forEach { evidence -> + appendLine("- $evidence") + } + appendLine() + } + if (fact.contradicting_evidence.isNotEmpty()) { + appendLine("**Contradicting Evidence:**") + fact.contradicting_evidence.forEach { evidence -> + appendLine("- $evidence") + } + appendLine() + } + if (index < factChecks.size - 1) { + appendLine("---") + appendLine() + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + // Write facts to transcript + writer.appendLine("### Verified Facts (${factChecks.size} total)") + writer.appendLine() + factChecks.forEach { fact -> + writer.appendLine("- **${fact.verification_status}**: ${fact.claim}") + writer.appendLine(" - Source: ${fact.source}") + writer.appendLine(" - Confidence: ${fact.confidence_level}") + writer.appendLine() + } + writer.flush() + + factsTask.add(factsContent.renderMarkdown) + task.update() + + resultBuilder.append("## Key Facts\n") + factChecks.take(3).forEach { fact -> + resultBuilder.append("- ${fact.verification_status.uppercase()}: ${fact.claim.truncateForDisplay(maxDescriptionLength)}\n") + } + resultBuilder.append("\n") + + overviewTask.add("✅ Facts verified (${factChecks.size} claims checked)\n".renderMarkdown) + task.update() + } + + // Step 2: Identify perspectives + if (identifyPerspectives) { + log.info("Step 2: Identifying source perspectives") + overviewTask.add("✅ Identifying perspectives and sources...\n".renderMarkdown) + task.update() + + val perspectivesTask = task.ui.newTask(false) + tabs["Perspectives"] = perspectivesTask.placeholder + + perspectivesTask.add( + buildString { + appendLine("# Source Perspectives") + appendLine() + 
appendLine("**Status:** Analyzing viewpoints and sources...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 2: Source Perspectives") + writer.appendLine() + writer.flush() + + val perspectiveAgent = ParsedAgent( + resultClass = SourcePerspectives::class.java, + prompt = """ +You are an expert journalist skilled at identifying diverse perspectives. Analyze the different viewpoints on this story. + +Story Topic: $storyTopic + +Journalism Elements: +${journalismElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} + +Identify 4-6 key sources or stakeholder perspectives, including: +- Primary sources directly involved +- Expert opinions +- Affected parties +- Opposing viewpoints +- Official statements + +For each source, provide: +- Name/identification +- Role or relationship to the story +- Their perspective or position +- Key quotes or statements (if available) +- Potential biases or interests +- Credibility assessment + +Ensure balanced representation of different viewpoints. 
+ """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val perspectives = perspectiveAgent.answer(listOf("Identify perspectives")).obj.sources + log.debug("Identified ${perspectives.size} perspectives") + + val perspectivesContent = buildString { + appendLine("## Source Perspectives") + appendLine() + perspectives.forEach { source -> + appendLine("### ${source.source_name}") + appendLine() + appendLine("**Role:** ${source.role}") + appendLine() + appendLine("**Perspective:** ${source.perspective}") + appendLine() + if (source.key_quotes.isNotEmpty()) { + appendLine("**Key Quotes:**") + source.key_quotes.forEach { quote -> + appendLine("> \"$quote\"") + } + appendLine() + } + appendLine("**Potential Bias:** ${source.potential_bias}") + appendLine() + appendLine("**Credibility:** ${source.credibility_assessment}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + // Write perspectives to transcript + writer.appendLine("### Source Perspectives (${perspectives.size} total)") + writer.appendLine() + perspectives.forEach { source -> + writer.appendLine("- **${source.source_name}** (${source.role})") + writer.appendLine(" - ${source.perspective}") + writer.appendLine() + } + writer.flush() + + perspectivesTask.add(perspectivesContent.renderMarkdown) + task.update() + + resultBuilder.append("## Key Perspectives\n") + perspectives.take(3).forEach { source -> + resultBuilder.append("- **${source.source_name}** (${source.role}): ${source.perspective.truncateForDisplay(maxDescriptionLength)}\n") + } + resultBuilder.append("\n") + + overviewTask.add("✅ Perspectives identified (${perspectives.size} sources)\n".renderMarkdown) + task.update() + } + + // Step 3: Analyze context + if (analyzeContext) { + log.info("Step 3: Analyzing context and background") + overviewTask.add("✅ Analyzing context and background...\n".renderMarkdown) + task.update() + + val contextTask = 
task.ui.newTask(false) + tabs["Context Analysis"] = contextTask.placeholder + + contextTask.add( + buildString { + appendLine("# Context Analysis") + appendLine() + appendLine("**Status:** Researching background and implications...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 3: Context Analysis") + writer.appendLine() + writer.flush() + + val contextAgent = ParsedAgent( + resultClass = ContextAnalysis::class.java, + prompt = """ +You are an expert journalist skilled at providing context. Analyze the broader context of this story. + +Story Topic: $storyTopic + +Journalism Elements: +${journalismElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} + +Provide comprehensive context including: +- Historical background (what led to this) +- Relevant trends or patterns +- Related events or precedents +- Broader implications (social, political, economic, etc.) +- Key stakeholders and their interests + +Help readers understand why this story matters and how it fits into the bigger picture. 
+ """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val context = contextAgent.answer(listOf("Analyze context")).obj + log.debug("Context analysis complete") + + val contextContent = buildString { + appendLine("## Background and Context") + appendLine() + appendLine("### Historical Background") + appendLine(context.historical_background) + appendLine() + appendLine("### Relevant Trends") + context.relevant_trends.forEach { trend -> + appendLine("- $trend") + } + appendLine() + appendLine("### Related Events") + context.related_events.forEach { event -> + appendLine("- $event") + } + appendLine() + appendLine("### Broader Implications") + context.broader_implications.forEach { implication -> + appendLine("- $implication") + } + appendLine() + appendLine("### Key Stakeholders") + context.key_stakeholders.forEach { stakeholder -> + appendLine("- $stakeholder") + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + // Write context to transcript + writer.appendLine("### Historical Background") + writer.appendLine(context.historical_background) + writer.appendLine() + writer.flush() + + contextTask.add(contextContent.renderMarkdown) + task.update() + + resultBuilder.append("## Context\n") + resultBuilder.append("${context.historical_background.truncateForDisplay(200)}\n\n") + + overviewTask.add("✅ Context analyzed\n".renderMarkdown) + task.update() + } + + // Step 4: Identify biases + if (identifyBiases) { + log.info("Step 4: Identifying biases and balance issues") + overviewTask.add("✅ Checking for biases and balance...\n".renderMarkdown) + task.update() + + val biasTask = task.ui.newTask(false) + tabs["Bias Analysis"] = biasTask.placeholder + + biasTask.add( + buildString { + appendLine("# Bias Analysis") + appendLine() + appendLine("**Status:** Examining potential biases...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 4: Bias 
Analysis") + writer.appendLine() + writer.flush() + + val biasAgent = ParsedAgent( + resultClass = BiasAnalysis::class.java, + prompt = """ +You are an expert media critic and journalism ethics specialist. Analyze potential biases in this story coverage. + +Story Topic: $storyTopic + +Journalism Elements: +${journalismElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} + +Examine: +- Potential biases in framing or language +- Conflicts of interest (sources, reporters, outlets) +- Missing or underrepresented voices +- Framing issues (what's emphasized vs. downplayed) +- Overall balance assessment + +Be thorough but fair. Distinguish between legitimate perspective and problematic bias. + """.trimIndent(), + model = api, + temperature = 0.4, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val biasAnalysis = biasAgent.answer(listOf("Analyze biases")).obj + log.debug("Bias analysis complete") + + val biasContent = buildString { + appendLine("## Bias and Balance Assessment") + appendLine() + if (biasAnalysis.potential_biases.isNotEmpty()) { + appendLine("### Potential Biases") + biasAnalysis.potential_biases.forEach { bias -> + appendLine("- $bias") + } + appendLine() + } + if (biasAnalysis.conflicts_of_interest.isNotEmpty()) { + appendLine("### Conflicts of Interest") + biasAnalysis.conflicts_of_interest.forEach { conflict -> + appendLine("- $conflict") + } + appendLine() + } + if (biasAnalysis.missing_voices.isNotEmpty()) { + appendLine("### Missing Voices") + biasAnalysis.missing_voices.forEach { voice -> + appendLine("- $voice") + } + appendLine() + } + if (biasAnalysis.framing_issues.isNotEmpty()) { + appendLine("### Framing Issues") + biasAnalysis.framing_issues.forEach { issue -> + appendLine("- $issue") + } + appendLine() + } + appendLine("### Overall Balance Assessment") + appendLine(biasAnalysis.balance_assessment) + appendLine() + appendLine("**Status:** ✅ Complete") + } + // Write bias analysis to transcript + 
writer.appendLine("### Balance Assessment") + writer.appendLine(biasAnalysis.balance_assessment) + writer.appendLine() + writer.flush() + + biasTask.add(biasContent.renderMarkdown) + task.update() + + resultBuilder.append("## Balance Assessment\n") + resultBuilder.append("${biasAnalysis.balance_assessment.truncateForDisplay(200)}\n\n") + + overviewTask.add("✅ Bias analysis complete\n".renderMarkdown) + task.update() + } + + // Step 5: Explore alternative angles + if (alternativeAngles > 0) { + log.info("Step 5: Exploring alternative story angles") + overviewTask.add("✅ Exploring alternative angles...\n".renderMarkdown) + task.update() + + val anglesTask = task.ui.newTask(false) + tabs["Story Angles"] = anglesTask.placeholder + + anglesTask.add( + buildString { + appendLine("# Alternative Story Angles") + appendLine() + appendLine("**Status:** Identifying different approaches...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 5: Alternative Story Angles") + writer.appendLine() + writer.flush() + + val anglesAgent = ParsedAgent( + resultClass = StoryAngles::class.java, + prompt = """ +You are a creative news editor. Identify $alternativeAngles different angles for covering this story. + +Story Topic: $storyTopic + +Journalism Elements: +${journalismElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} + +For each angle, provide: +- Compelling title/headline +- Focus (what aspect to emphasize) +- Target audience +- Key questions to answer +- Unique value (what makes this angle distinctive) +- Newsworthiness score (0-1) + +Consider angles that: +- Appeal to different audiences +- Emphasize different aspects (human interest, policy, impact, etc.) 
+- Offer fresh perspectives +- Have strong news value + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val angles = anglesAgent.answer(listOf("Explore angles")).obj.angles + log.debug("Identified ${angles.size} story angles") + + val anglesContent = buildString { + appendLine("## Story Angles") + appendLine() + angles.sortedByDescending { it.newsworthiness_score }.forEachIndexed { index, angle -> + appendLine("### ${index + 1}. ${angle.angle_title}") + appendLine() + appendLine("**Newsworthiness:** ${String.format("%.1f%%", angle.newsworthiness_score * 100)}") + appendLine() + appendLine("**Focus:** ${angle.focus}") + appendLine() + appendLine("**Target Audience:** ${angle.target_audience}") + appendLine() + appendLine("**Key Questions:**") + angle.key_questions.forEach { question -> + appendLine("- $question") + } + appendLine() + appendLine("**Unique Value:** ${angle.unique_value}") + appendLine() + if (index < angles.size - 1) { + appendLine("---") + appendLine() + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + // Write angles to transcript + writer.appendLine("### Story Angles (${angles.size} total)") + writer.appendLine() + angles.sortedByDescending { it.newsworthiness_score }.forEach { angle -> + writer.appendLine("- **${angle.angle_title}** (${String.format("%.1f%%", angle.newsworthiness_score * 100)})") + writer.appendLine(" - ${angle.focus}") + writer.appendLine() + } + writer.flush() + + anglesTask.add(anglesContent.renderMarkdown) + task.update() + + resultBuilder.append("## Story Angles\n") + angles.sortedByDescending { it.newsworthiness_score }.take(2).forEach { angle -> + resultBuilder.append("- **${angle.angle_title}**: ${angle.focus.truncateForDisplay(maxDescriptionLength)}\n") + } + resultBuilder.append("\n") + + overviewTask.add("✅ Story angles explored (${angles.size} angles)\n".renderMarkdown) + task.update() + } + + // Step 6: Find information gaps + if 
(findGaps) { + log.info("Step 6: Identifying information gaps") + overviewTask.add("✅ Identifying information gaps...\n".renderMarkdown) + task.update() + + val gapsTask = task.ui.newTask(false) + tabs["Information Gaps"] = gapsTask.placeholder + + gapsTask.add( + buildString { + appendLine("# Information Gaps") + appendLine() + appendLine("**Status:** Finding unanswered questions...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 6: Information Gaps") + writer.appendLine() + writer.flush() + + val gapsAgent = ParsedAgent( + resultClass = InformationGaps::class.java, + prompt = """ +You are an investigative journalist. Identify missing information and unanswered questions in this story. + +Story Topic: $storyTopic + +Journalism Elements: +${journalismElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} + +Identify 5-8 key information gaps, including: +- Unanswered questions +- Missing data or evidence +- Unclear causation or timeline +- Unverified claims needing follow-up +- Perspectives not yet represented + +For each gap, provide: +- The specific question or missing information +- Importance level (critical, important, minor) +- Potential sources to fill the gap +- Suggested research approach + +Prioritize gaps that are most important for understanding the full story. 
+ """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val gaps = gapsAgent.answer(listOf("Find gaps")).obj.gaps + log.debug("Found ${gaps.size} information gaps") + + val gapsContent = buildString { + if (gaps.isEmpty()) { + appendLine("## ✅ No Significant Information Gaps") + appendLine() + appendLine("The available information appears comprehensive for the current story scope.") + } else { + appendLine("## Identified Information Gaps") + appendLine() + gaps.sortedBy { gap -> + when (gap.importance.lowercase()) { + "critical" -> 0 + "important" -> 1 + else -> 2 + } + }.forEachIndexed { index, gap -> + val importanceIcon = when (gap.importance.lowercase()) { + "critical" -> "🔴" + "important" -> "🟡" + else -> "🟢" + } + appendLine("### $importanceIcon ${index + 1}. ${gap.question}") + appendLine() + appendLine("**Importance:** ${gap.importance}") + appendLine() + if (gap.potential_sources.isNotEmpty()) { + appendLine("**Potential Sources:**") + gap.potential_sources.forEach { source -> + appendLine("- $source") + } + appendLine() + } + appendLine("**Research Approach:** ${gap.research_approach}") + appendLine() + if (index < gaps.size - 1) { + appendLine("---") + appendLine() + } + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + // Write gaps to transcript + if (gaps.isEmpty()) { + writer.appendLine("### No significant information gaps identified") + } else { + writer.appendLine("### Information Gaps (${gaps.size} total)") + writer.appendLine() + gaps.forEach { gap -> + writer.appendLine("- **${gap.importance.uppercase()}**: ${gap.question}") + writer.appendLine() + } + } + writer.flush() + + gapsTask.add(gapsContent.renderMarkdown) + task.update() + + if (gaps.isNotEmpty()) { + resultBuilder.append("## Information Gaps\n") + gaps.take(3).forEach { gap -> + resultBuilder.append("- ${gap.importance.uppercase()}: ${gap.question.truncateForDisplay(maxDescriptionLength)}\n") + } + 
resultBuilder.append("\n") + } + + overviewTask.add("✅ Information gaps identified (${gaps.size} found)\n".renderMarkdown) + task.update() + } + + // Step 7: Generate editorial synthesis + log.info("Step 7: Generating editorial synthesis") + overviewTask.add("✅ Generating editorial synthesis...\n".renderMarkdown) + task.update() + + val synthesisTask = task.ui.newTask(false) + tabs["Editorial Synthesis"] = synthesisTask.placeholder + + synthesisTask.add( + buildString { + appendLine("# Editorial Synthesis") + appendLine() + appendLine("**Status:** Synthesizing findings...") + appendLine() + }.renderMarkdown + ) + task.update() + // Write to transcript + writer.appendLine("## Step 7: Editorial Synthesis") + writer.appendLine() + writer.flush() + + val synthesisAgent = ChatAgent( + prompt = """ +You are a senior news editor. Provide an editorial synthesis of this journalism investigation. + +Story Topic: $storyTopic + +Summarize: +1. The core story and its significance +2. Key verified facts and findings +3. Most important perspectives and voices +4. Critical context readers need +5. Remaining questions and next steps +6. Recommended editorial approach +7. Public interest assessment + +Be concise, authoritative, and focused on journalistic value. 
+ """.trimIndent(), + model = api, + temperature = 0.5 + ) + + val synthesis = synthesisAgent.answer(listOf("Generate synthesis")) + log.debug("Synthesis generated: ${synthesis.length} characters") + // Write synthesis to transcript + writer.appendLine(synthesis) + writer.appendLine() + writer.flush() + + + synthesisTask.add( + buildString { + appendLine("## Editorial Assessment") + appendLine() + appendLine(synthesis) + appendLine() + appendLine("---") + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + + resultBuilder.append("## Editorial Synthesis\n") + resultBuilder.append(synthesis) + resultBuilder.append("\n\n") + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + resultBuilder.append("---\n\n") + resultBuilder.append("**Investigation Time:** ${totalTime / 1000}s | ") + resultBuilder.append("**Story:** $storyTopic\n") + // Write final statistics to transcript + writer.appendLine("---") + writer.appendLine() + writer.appendLine("**Investigation completed in ${totalTime / 1000.0}s**") + writer.appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + writer.flush() + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Investigation Complete") + appendLine() + appendLine("**Total Time:** ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + task.update() + + val finalResult = resultBuilder.toString() + log.info("JournalismReasoningTask completed: total_time=${totalTime}ms, output_size=${finalResult.length} chars") + + // Write full analysis to file + val (link, file) = Pair(task.linkTo("journalism_analysis.md"), task.resolve("journalism_analysis.md")) + file?.writeText(finalResult, StandardCharsets.UTF_8) + + val summaryMessage = buildString { + appendLine("✅ 
Journalism investigation complete in ${totalTime / 1000}s") + appendLine() + appendLine("📄 Full analysis: $link") + appendLine("📊 Transcript: transcript") + } + + task.safeComplete(summaryMessage, log) + resultFn(summaryMessage) + + } catch (e: Exception) { + log.error("Error during journalism reasoning", e) + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + // Write error to transcript + writer.appendLine() + writer.appendLine("---") + writer.appendLine("## ❌ Error Occurred") + writer.appendLine("**Error:** ${e.message}") + writer.flush() + + val errorOutput = buildString { + appendLine("# Error in Journalism Investigation") + appendLine() + appendLine("**Story:** $storyTopic") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } // Close writer.use + transcriptStream?.close() + } + +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/NarrativeGenerationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/NarrativeGenerationTask.kt similarity index 69% rename from webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/NarrativeGenerationTask.kt rename to webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/NarrativeGenerationTask.kt index 1e9bd453c..02f3ac555 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/NarrativeGenerationTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/NarrativeGenerationTask.kt @@ -1,19 +1,30 @@ -package 
com.simiacryptus.cognotik.plan.tools.reasoning +package com.simiacryptus.cognotik.plan.tools.writing -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ImageAndText +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.agents.ImageModificationAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.OrchestrationConfig import com.simiacryptus.cognotik.plan.TaskOrchestrator import com.simiacryptus.cognotik.plan.TaskType import com.simiacryptus.cognotik.plan.TaskTypeConfig +import com.simiacryptus.cognotik.plan.tools.file.AnalysisTask +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.webui.chat.transcriptFilter import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.BufferedWriter +import java.io.File import java.time.LocalDateTime import java.time.format.DateTimeFormatter +import javax.imageio.ImageIO class NarrativeGenerationTask( orchestrationConfig: OrchestrationConfig, @@ -26,6 +37,8 @@ class NarrativeGenerationTask( class NarrativeGenerationTaskExecutionConfigData( @Description("The subject or scenario to develop into a full narrative") subject: String? = null, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input context for the narrative") + input_files: List? = null, @Description("Narrative elements to consider (characters, setting, conflict, timeline, etc.)") narrative_elements: Map? 
= null, @@ -59,6 +72,17 @@ class NarrativeGenerationTask( @Description("Number of revision passes for each scene") val revision_passes: Int = 1, + @Description("Whether to generate images for each scene") + val generate_scene_images: Boolean = false, + @Description("Whether to generate a cover image for the narrative") + val generate_cover_image: Boolean = false, + @Description("Image generation model to use (e.g., 'DallE3', 'DallE2')") + image_model: String = "DallE3", + @Description("Width of generated images in pixels") + image_width: Int = 1024, + @Description("Height of generated images in pixels") + image_height: Int = 1024, + task_dependencies: List? = null, state: TaskState? = TaskState.Pending, @@ -68,15 +92,21 @@ class NarrativeGenerationTask( construct_narrative = true, identify_plot_points = true, predict_outcomes = true, - alternative_narratives = 1, + alternatives = 1, analyze_motivations = true, find_inconsistencies = true, + generate_images = generate_scene_images || generate_cover_image, + image_model = image_model, + image_width = image_width, + image_height = image_height, task_dependencies = task_dependencies, - state = state + state = state, + input_files = input_files, ) { override val task_type: String = NarrativeGeneration.name override var task_description: String? = "Generate full narrative for '$subject'" override fun validate(): String? { + // First validate parent class // First validate parent class super.validate()?.let { return it } // Validate target_word_count @@ -156,8 +186,10 @@ NarrativeGeneration - Generate complete narratives from analysis and outlines orchestrationConfig: OrchestrationConfig ) { val startTime = System.currentTimeMillis() - val genConfig = executionConfig as? 
NarrativeGenerationTaskExecutionConfigData + val transcript = transcript(task) + val genConfig = executionConfig log.info("Starting NarrativeGenerationTask for subject: '${genConfig?.subject}'") + transcript?.write("# Narrative Generation Task\n\n") if (genConfig == null) { log.error("Invalid configuration type for NarrativeGenerationTask") @@ -177,6 +209,17 @@ NarrativeGeneration - Generate complete narratives from analysis and outlines val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return val tabs = TabbedDisplay(task) + // Get input file context + val inputFileContext = getInputFileCode(agent.root.toFile()) + if (inputFileContext.isNotBlank()) { + transcript?.write("## Input Files Context\n\n$inputFileContext\n\n") + transcript?.flush() + } + // Combine messages with input files + val combinedMessages = messages + listOf(inputFileContext).filter { it.isNotBlank() } + transcript?.write("## Input Messages\n\n${combinedMessages.joinToString("\n\n")}\n\n") + transcript?.flush() + // Overview tab val overviewTask = task.ui.newTask(false) @@ -207,6 +250,8 @@ NarrativeGeneration - Generate complete narratives from analysis and outlines appendLine("*Running base narrative reasoning analysis...*") } overviewTask.add(overviewContent.renderMarkdown) + transcript?.write("\n## Overview\n\n$overviewContent\n\n") + transcript?.flush() task.update() val resultBuilder = StringBuilder() @@ -219,6 +264,8 @@ NarrativeGeneration - Generate complete narratives from analysis and outlines super.run(agent, messages, task, { result -> analysisResult.append(result) + transcript?.write(result) + transcript?.flush() }, orchestrationConfig) overviewTask.add("\n✅ Phase 1 Complete: Narrative analysis finished\n".renderMarkdown) @@ -320,6 +367,8 @@ Ensure the outline: appendLine("**Status:** ✅ Complete") } outlineTask.add(outlineContent.renderMarkdown) + transcript?.write("\n## Outline\n\n$outlineContent\n\n") + transcript?.flush() task.update() 
resultBuilder.append("## ${outline.title}\n\n") @@ -329,6 +378,18 @@ Ensure the outline: overviewTask.add("✅ Phase 2 Complete: Outline created (${outline.acts.sumOf { it.scenes?.size ?: 0 }} scenes)\n".renderMarkdown) overviewTask.add("\n### Phase 3: Scene Generation\n*Writing scenes iteratively with context...*\n".renderMarkdown) task.update() + // Generate cover image if enabled + if (genConfig.generate_cover_image) { + generateCoverImage( + task = task, + tabs = tabs, + title = outline.title, + premise = outline.premise, + transcriptWriter = transcript, + orchestrationConfig = orchestrationConfig + ) + } + // Phase 3: Generate each scene iteratively log.info("Phase 3: Generating scenes") @@ -508,6 +569,8 @@ Provide the revised scene content only. appendLine("**Status:** ✅ Complete") } sceneTask.add(sceneContent.renderMarkdown) + transcript?.write("\n## $sceneContent\n\n") + transcript?.flush() task.update() // Add to result @@ -517,6 +580,19 @@ Provide the revised scene content only. overviewTask.add("✅ (${generatedScene.word_count} words)\n".renderMarkdown) task.update() + // Generate scene image if enabled + if (genConfig.generate_scene_images) { + generateSceneImage( + task = task, + tabs = tabs, + sceneNumber = sceneOutline.scene_number, + sceneTitle = sceneOutline.title, + sceneContent = generatedScene.content, + setting = sceneOutline.setting, + transcriptWriter = transcript, + orchestrationConfig = orchestrationConfig + ) + } } overviewTask.add("\n✅ Phase 3 Complete: All scenes generated\n".renderMarkdown) @@ -588,6 +664,8 @@ Provide the revised scene content only. }.renderMarkdown ) task.update() + transcript?.write("\n## Final Statistics\n\n- Total Scenes: ${generatedScenes.size}\n- Total Word Count: $cumulativeWordCount\n- Time: ${totalTime / 1000.0}s\n\n") + transcript?.close() // Per best practices, the final result passed to resultFn should be a concise summary, @@ -596,7 +674,7 @@ Provide the revised scene content only. 
appendLine("# Narrative Generation Summary: ${outline.title}") appendLine() appendLine("A complete narrative of **$cumulativeWordCount words** across **${generatedScenes.size} scenes** was generated in **${totalTime / 1000.0}s**.") - appendLine("> The full text is available in the UI for detailed review.") + appendLine("> The full narrative and detailed transcript are available in the UI tabs for review.") appendLine() appendLine(outlineContent.substringBeforeLast("\n**Status:**").trim()) } @@ -641,6 +719,191 @@ Provide the revised scene content only. } } + private fun transcript(task: SessionTask): BufferedWriter? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return java.io.BufferedWriter(markdownTranscript?.let { java.io.OutputStreamWriter(it) }) + } + + private fun getInputFileCode(rootFile: File): String { + val executionConfig = executionConfig as? 
NarrativeGenerationTaskExecutionConfigData ?: return "" + return (executionConfig.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = java.nio.file.FileSystems.getDefault().getPathMatcher("glob:$pattern") + (com.simiacryptus.cognotik.util.FileSelectionUtils.filteredWalk(rootFile) { + when { + com.simiacryptus.cognotik.util.FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(rootFile.toPath().relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = rootFile.resolve(relativePath.name) + try { + val content = if (!AnalysisTask.isTextFile(file)) { + AnalysisTask.extractDocumentContent(file) + } else { + file.readText() + } + "# ${relativePath.name}\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: ${relativePath.name}", e) + "" + } + } + } + + private fun generateCoverImage( + task: SessionTask, + tabs: TabbedDisplay, + title: String, + premise: String, + transcriptWriter: java.io.BufferedWriter?, + orchestrationConfig: OrchestrationConfig + ) { + try { + log.info("Generating cover image for: $title") + val coverTask = task.ui.newTask(false) + tabs["Cover Image"] = coverTask.placeholder + coverTask.add( + buildString { + appendLine("# Cover Image") + appendLine() + appendLine("**Status:** Generating cover image...") + appendLine() + }.renderMarkdown + ) + task.update() + val imageAgent = ImageModificationAgent( + prompt = "Create a compelling book cover image that captures the essence of this narrative", + model = orchestrationConfig.imageChatChatter, + temperature = 0.8, + ) + val coverPrompt = "$title: $premise" + val result = imageAgent.answer(listOf(ImageAndText(coverPrompt))) + val image = result.image + // Save image + val imageFile = task.resolve("00_cover_image.png")!! 
+ ImageIO.write(image, "png", imageFile) + log.debug("Saved cover image to: ${imageFile.absolutePath}") + // Create display link + val link = task.linkTo("00_cover_image.png") + val imageHtml = """ +
    +

    $title

    +

    $premise

    +

    Image Prompt: ${result.text}

    + + Cover + +
    + """.trimIndent() + coverTask.add(imageHtml.renderMarkdown) + task.update() + // Write to transcript + transcriptWriter?.appendLine("## Cover Image") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("**Prompt:** ${result.text}") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("![Cover Image]($link)".transcriptFilter()) + transcriptWriter?.appendLine() + transcriptWriter?.flush() + coverTask.add("\n**Status:** ✅ Complete\n".renderMarkdown) + task.update() + } catch (e: Exception) { + log.error("Failed to generate cover image", e) + transcriptWriter?.appendLine("**Cover Image Generation Failed:** ${e.message}") + transcriptWriter?.appendLine() + } + } + + private fun generateSceneImage( + task: SessionTask, + tabs: TabbedDisplay, + sceneNumber: Int, + sceneTitle: String, + sceneContent: String, + setting: String, + transcriptWriter: java.io.BufferedWriter?, + orchestrationConfig: OrchestrationConfig + ) { + try { + log.info("Generating image for scene $sceneNumber: $sceneTitle") + val sceneImageTask = task.ui.newTask(false) + tabs["Scene $sceneNumber Image"] = sceneImageTask.placeholder + sceneImageTask.add( + buildString { + appendLine("# Scene $sceneNumber Image") + appendLine() + appendLine("**Status:** Generating scene visualization...") + appendLine() + }.renderMarkdown + ) + task.update() + val imageAgent = ImageModificationAgent( + prompt = "Create a cinematic scene illustration that captures the key moment and atmosphere", + model = orchestrationConfig.imageChatChatter, + temperature = 0.7, + ) + // Extract key visual elements from scene + val scenePrompt = buildString { + append("Scene: $sceneTitle. ") + append("Setting: $setting. 
") + // Take first 500 chars of scene content for context + append(sceneContent.take(500)) + } + val result = imageAgent.answer(listOf(ImageAndText(scenePrompt))) + val image = result.image + // Save image + val relativePath = "scene_${sceneNumber}_image.png" + val imageFile = task.resolve(relativePath)!! + ImageIO.write(image, "png", imageFile) + log.debug("Saved scene image to: ${imageFile.absolutePath}") + // Create display link + val link = task.linkTo(relativePath) + val imageHtml = """ +
    +

    Scene $sceneNumber: $sceneTitle

    +

    Setting: $setting

    +

    Image Prompt: ${result.text}

    + + Scene $sceneNumber + +
    + """.trimIndent() + sceneImageTask.add(imageHtml.renderMarkdown) + task.update() + // Write to transcript + transcriptWriter?.appendLine("#### Scene $sceneNumber Image") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("**Prompt:** ${result.text}") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("![Scene $sceneNumber]($link)".transcriptFilter()) + transcriptWriter?.appendLine() + transcriptWriter?.flush() + sceneImageTask.add("\n**Status:** ✅ Complete\n".renderMarkdown) + task.update() + } catch (e: Exception) { + log.error("Failed to generate scene image for scene $sceneNumber", e) + transcriptWriter?.appendLine("**Scene Image Generation Failed:** ${e.message}") + transcriptWriter?.appendLine() + } + } + + companion object { private val log: Logger = LoggerFactory.getLogger(NarrativeGenerationTask::class.java) val NarrativeGeneration = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/NarrativeReasoningTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/NarrativeReasoningTask.kt similarity index 64% rename from webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/NarrativeReasoningTask.kt rename to webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/NarrativeReasoningTask.kt index 5be905d88..34b582a26 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/reasoning/NarrativeReasoningTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/NarrativeReasoningTask.kt @@ -1,17 +1,31 @@ -package com.simiacryptus.cognotik.plan.tools.reasoning +package com.simiacryptus.cognotik.plan.tools.writing -import com.simiacryptus.cognotik.actors.ChatAgent -import com.simiacryptus.cognotik.actors.ParsedAgent + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.ImageAndText +import 
com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.agents.ImageModificationAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.describe.Description import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils import com.simiacryptus.cognotik.util.LoggerFactory import com.simiacryptus.cognotik.util.TabbedDisplay import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.chat.transcriptFilter import com.simiacryptus.cognotik.webui.session.SessionTask import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets +import java.nio.file.FileSystems import java.time.LocalDateTime import java.time.format.DateTimeFormatter +import javax.imageio.ImageIO private const val i = 100 @@ -28,6 +42,10 @@ open class NarrativeReasoningTask? = null, + @Description("Additional context or questions to guide the narrative analysis") + val additional_context: String? = null, @Description("Narrative elements to consider (characters, setting, conflict, timeline, etc.)") val narrative_elements: Map? = null, @@ -41,14 +59,23 @@ open class NarrativeReasoningTask? = null, state: TaskState? 
= TaskState.Pending, @@ -62,8 +89,14 @@ open class NarrativeReasoningTask 10) { - return "Alternative narratives must be between 1 and 10, got: $alternative_narratives" + if (alternatives < 1 || alternatives > 10) { + alternatives = alternatives.coerceIn(1, 10) + } + if (image_width < 256 || image_width > 2048) { + return "Image width must be between 256 and 2048, got: $image_width" + } + if (image_height < 256 || image_height > 2048) { + return "Image height must be between 256 and 2048, got: $image_height" } return ValidatedObject.validateFields(this) } @@ -151,7 +184,7 @@ open class NarrativeReasoningTask = emptyList() ) : ValidatedObject @@ -174,7 +207,7 @@ open class NarrativeReasoningTask = emptyList() ) : ValidatedObject @@ -201,7 +234,7 @@ open class NarrativeReasoningTask = emptyList() ) : ValidatedObject @@ -221,6 +254,20 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str """.trimIndent() } + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("narrative_transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + override fun run( agent: TaskOrchestrator, messages: List, @@ -230,6 +277,17 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str ) { val startTime = System.currentTimeMillis() log.info("Starting NarrativeReasoningTask for subject: '${executionConfig?.subject}'") + // Create output directory for detailed results + val narrativeDir = File(agent.root.toFile(), ".narrative_analysis") + if (!narrativeDir.exists()) { + if (!narrativeDir.mkdirs()) { + log.error("Failed to create narrative analysis directory: ${narrativeDir.absolutePath}") + resultFn("Error: Failed to create output directory") + return + } + log.debug("Created narrative analysis directory: ${narrativeDir.absolutePath}") + } + val subject = executionConfig?.subject if (subject.isNullOrBlank()) { @@ -238,23 +296,51 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str resultFn("CONFIGURATION ERROR: No subject specified") return } + // Read input files if specified + val inputFileContent = getInputFileContent() + val messageContent = messages.joinToString("\n\n") + val additionalContext = buildString { + if (messageContent.isNotBlank()) { + appendLine("## User Input") + appendLine(messageContent) + } + if (inputFileContent.isNotBlank()) { + appendLine("\n## Input Files") + appendLine(inputFileContent) + } + if (executionConfig.additional_context?.isNotBlank() == true) { + appendLine("\n## Additional Context") + appendLine(executionConfig.additional_context) + } + } + val narrativeElements = executionConfig.narrative_elements ?: emptyMap() val constructNarrative = executionConfig.construct_narrative val identifyPlotPoints = executionConfig.identify_plot_points val predictOutcomes = executionConfig.predict_outcomes - val alternativeNarratives 
= executionConfig.alternative_narratives.coerceIn(1, 10) + val alternativeNarratives = executionConfig.alternatives.coerceIn(1, 10) val analyzeMotivations = executionConfig.analyze_motivations val findInconsistencies = executionConfig.find_inconsistencies - log.info("Configuration: constructNarrative=$constructNarrative, identifyPlotPoints=$identifyPlotPoints, " + - "predictOutcomes=$predictOutcomes, alternativeNarratives=$alternativeNarratives, " + - "analyzeMotivations=$analyzeMotivations, findInconsistencies=$findInconsistencies") + log.info( + "Configuration: constructNarrative=$constructNarrative, identifyPlotPoints=$identifyPlotPoints, " + + "predictOutcomes=$predictOutcomes, alternativeNarratives=$alternativeNarratives, " + + "analyzeMotivations=$analyzeMotivations, findInconsistencies=$findInconsistencies" + ) val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return - val ui = task.ui + task.ui val tabs = TabbedDisplay(task) + // Initialize transcript + val transcriptStream = transcript(task) + val transcriptWriter = transcriptStream?.bufferedWriter() + transcriptWriter?.appendLine("# Narrative Reasoning Analysis Transcript") + transcriptWriter?.appendLine("**Subject:** $subject") + transcriptWriter?.appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + transcriptWriter?.appendLine() + // Overview tab val overviewTask = task.ui.newTask(false) @@ -279,6 +365,11 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str appendLine("- Find Inconsistencies: ${if (findInconsistencies) "✓" else "✗"}") appendLine() appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + if (additionalContext.isNotBlank()) { + appendLine() + appendLine("## Input Context") + appendLine(additionalContext.take(500) + if (additionalContext.length > 500) "..." 
else "") + } appendLine() appendLine("---") appendLine() @@ -306,6 +397,16 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str val resultBuilder = StringBuilder() resultBuilder.append("# Narrative Reasoning Analysis: $subject\n\n") + // Write configuration to transcript + transcriptWriter?.appendLine("## Configuration") + transcriptWriter?.appendLine("- Construct Narrative: $constructNarrative") + transcriptWriter?.appendLine("- Identify Plot Points: $identifyPlotPoints") + transcriptWriter?.appendLine("- Predict Outcomes: $predictOutcomes") + transcriptWriter?.appendLine("- Alternative Narratives: $alternativeNarratives") + transcriptWriter?.appendLine("- Analyze Motivations: $analyzeMotivations") + transcriptWriter?.appendLine("- Find Inconsistencies: $findInconsistencies") + transcriptWriter?.appendLine() + try { // Step 1: Construct the main narrative @@ -316,6 +417,9 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str val narrativeTask = task.ui.newTask(false) tabs["Main Narrative"] = narrativeTask.placeholder + transcriptWriter?.appendLine("## Step 1: Main Narrative Construction") + transcriptWriter?.appendLine() + narrativeTask.add( buildString { @@ -330,16 +434,18 @@ NarrativeReasoning - Understand scenarios through storytelling and narrative str val narrativeAgent = ParsedAgent( resultClass = ParsedNarrative::class.java, prompt = """ -You are an expert narrative analyst and storyteller. Construct a coherent narrative from the given elements. + You are an expert narrative analyst and storyteller. Construct a coherent narrative from the given elements. 
-Subject: $subject + Subject: $subject + + Narrative Elements: + ${narrativeElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} +${if (additionalContext.isNotBlank()) "Additional Context:\n$additionalContext\n" else ""} -Narrative Elements: -${narrativeElements.entries.joinToString("\n") { (key, value) -> "- $key: $value" }} -${if (priorContext.isNotBlank()) "Additional Context:\n$priorContext\n" else ""} + ${if (priorContext.isNotBlank()) "Additional Context:\n$priorContext\n" else ""} -Create a structured narrative with: + Create a structured narrative with: 1. A compelling title 2. A concise summary (2-3 sentences) 3. Three acts with key events and character developments @@ -399,6 +505,23 @@ Focus on clarity, coherence, and emotional resonance. } narrativeTask.add(narrativeContent.renderMarkdown) task.update() + // Save narrative to file + saveAnalysisToFile(narrativeDir, "01_main_narrative.md", narrativeContent) + + // Write to transcript + transcriptWriter?.appendLine("### ${narrative.title}") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("**Summary:** ${narrative.summary}") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("**Themes:** ${narrative.themes.joinToString(", ")}") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("**Tone:** ${narrative.tone}") + transcriptWriter?.appendLine() + narrative.acts.forEach { act -> + transcriptWriter?.appendLine("- **Act ${act.act_number}:** ${act.title}") + } + transcriptWriter?.appendLine() + resultBuilder.append("## Main Narrative: ${narrative.title}\n") resultBuilder.append("${narrative.summary}\n\n") @@ -406,6 +529,19 @@ Focus on clarity, coherence, and emotional resonance. 
overviewTask.add("✅ Main narrative constructed\n".renderMarkdown) task.update() + // Generate image for main narrative if enabled + if (executionConfig.generate_images) { + generateNarrativeImage( + task = task, + tabs = tabs, + title = "Main Narrative Visualization", + description = "${narrative.title}: ${narrative.summary}", + imageDir = narrativeDir, + filename = "01_main_narrative_image.png", + transcriptWriter = transcriptWriter, + orchestrationConfig = orchestrationConfig + ) + } } // Step 2: Identify plot points @@ -416,6 +552,9 @@ Focus on clarity, coherence, and emotional resonance. val plotPointsTask = task.ui.newTask(false) tabs["Plot Points"] = plotPointsTask.placeholder + transcriptWriter?.appendLine("## Step 2: Plot Points Analysis") + transcriptWriter?.appendLine() + plotPointsTask.add( buildString { @@ -487,6 +626,16 @@ Be specific and concrete. } plotPointsTask.add(plotPointsContent.renderMarkdown) task.update() + // Save plot points to file + saveAnalysisToFile(narrativeDir, "02_plot_points.md", plotPointsContent) + + // Write to transcript + transcriptWriter?.appendLine("### Identified ${plotPoints.size} Plot Points") + plotPoints.forEach { point -> + transcriptWriter?.appendLine("- **${point.type}:** ${point.description}") + } + transcriptWriter?.appendLine() + resultBuilder.append("## Key Plot Points\n") plotPoints.take(3).forEach { point -> @@ -496,6 +645,20 @@ Be specific and concrete. 
overviewTask.add("✅ Plot points identified (${plotPoints.size} points)\n".renderMarkdown) task.update() + // Generate images for key plot points if enabled + if (executionConfig.generate_images && plotPoints.isNotEmpty()) { + val keyPlotPoint = plotPoints.first() + generateNarrativeImage( + task = task, + tabs = tabs, + title = "Key Plot Point: ${keyPlotPoint.type}", + description = keyPlotPoint.description, + imageDir = narrativeDir, + filename = "02_plot_point_image.png", + transcriptWriter = transcriptWriter, + orchestrationConfig = orchestrationConfig + ) + } } // Step 3: Analyze character motivations @@ -506,6 +669,9 @@ Be specific and concrete. val charactersTask = task.ui.newTask(false) tabs["Characters"] = charactersTask.placeholder + transcriptWriter?.appendLine("## Step 3: Character Analysis") + transcriptWriter?.appendLine() + charactersTask.add( buildString { @@ -581,6 +747,17 @@ Consider stakeholder perspectives if analyzing organizational scenarios. } charactersTask.add(charactersContent.renderMarkdown) task.update() + // Save character analysis to file + saveAnalysisToFile(narrativeDir, "03_character_analysis.md", charactersContent) + + // Write to transcript + transcriptWriter?.appendLine("### Analyzed ${characterAnalyses.size} Characters") + characterAnalyses.forEach { char -> + transcriptWriter?.appendLine("- **${char.name}** (${char.role})") + transcriptWriter?.appendLine(" - Motivations: ${char.motivations.joinToString("; ")}") + } + transcriptWriter?.appendLine() + resultBuilder.append("## Character Motivations\n") characterAnalyses.take(2).forEach { char -> @@ -590,6 +767,21 @@ Consider stakeholder perspectives if analyzing organizational scenarios. 
overviewTask.add("✅ Character motivations analyzed (${characterAnalyses.size} characters)\n".renderMarkdown) task.update() + // Generate character portraits if enabled + if (executionConfig.generate_images && characterAnalyses.isNotEmpty()) { + characterAnalyses.take(2).forEachIndexed { index, char -> + generateNarrativeImage( + task = task, + tabs = tabs, + title = "Character: ${char.name}", + description = "${char.name}, ${char.role}. ${char.motivations.firstOrNull() ?: ""}", + imageDir = narrativeDir, + filename = "03_character_${index + 1}_${char.name.replace(" ", "_")}.png", + transcriptWriter = transcriptWriter, + orchestrationConfig = orchestrationConfig + ) + } + } } // Step 4: Predict outcomes @@ -600,6 +792,9 @@ Consider stakeholder perspectives if analyzing organizational scenarios. val outcomesTask = task.ui.newTask(false) tabs["Predicted Outcomes"] = outcomesTask.placeholder + transcriptWriter?.appendLine("## Step 4: Predicted Outcomes") + transcriptWriter?.appendLine() + outcomesTask.add( buildString { @@ -674,6 +869,16 @@ Be realistic and consider multiple perspectives. } outcomesTask.add(outcomesContent.renderMarkdown) task.update() + // Save outcomes to file + saveAnalysisToFile(narrativeDir, "04_predicted_outcomes.md", outcomesContent) + + // Write to transcript + transcriptWriter?.appendLine("### Predicted ${outcomes.size} Outcomes") + outcomes.forEach { outcome -> + transcriptWriter?.appendLine("- **${outcome.scenario}** (${outcome.probability})") + } + transcriptWriter?.appendLine() + resultBuilder.append("## Predicted Outcomes\n") outcomes.forEach { outcome -> @@ -693,6 +898,9 @@ Be realistic and consider multiple perspectives. 
val inconsistenciesTask = task.ui.newTask(false) tabs["Inconsistencies"] = inconsistenciesTask.placeholder + transcriptWriter?.appendLine("## Step 5: Inconsistency Analysis") + transcriptWriter?.appendLine() + inconsistenciesTask.add( buildString { @@ -774,6 +982,20 @@ For each inconsistency, provide: } inconsistenciesTask.add(inconsistenciesContent.renderMarkdown) task.update() + // Save inconsistencies to file + saveAnalysisToFile(narrativeDir, "05_inconsistencies.md", inconsistenciesContent) + + // Write to transcript + if (inconsistencies.isEmpty()) { + transcriptWriter?.appendLine("### No significant inconsistencies found") + } else { + transcriptWriter?.appendLine("### Found ${inconsistencies.size} Inconsistencies") + inconsistencies.forEach { inconsistency -> + transcriptWriter?.appendLine("- **${inconsistency.type}** (${inconsistency.severity}): ${inconsistency.description}") + } + } + transcriptWriter?.appendLine() + if (inconsistencies.isNotEmpty()) { resultBuilder.append("## Narrative Inconsistencies\n") @@ -794,6 +1016,9 @@ For each inconsistency, provide: val synthesisTask = task.ui.newTask(false) tabs["Synthesis"] = synthesisTask.placeholder + transcriptWriter?.appendLine("## Step 6: Synthesis") + transcriptWriter?.appendLine() + synthesisTask.add( buildString { @@ -840,6 +1065,12 @@ Be concise but insightful. Focus on actionable insights. }.renderMarkdown ) task.update() + // Save synthesis to file + saveAnalysisToFile(narrativeDir, "06_synthesis.md", synthesis) + + transcriptWriter?.appendLine(synthesis) + transcriptWriter?.appendLine() + resultBuilder.append("## Synthesis\n") resultBuilder.append(synthesis) @@ -850,6 +1081,10 @@ Be concise but insightful. Focus on actionable insights. 
resultBuilder.append("---\n\n") resultBuilder.append("**Analysis Time:** ${totalTime / 1000}s | ") resultBuilder.append("**Subject:** $subject\n") + // Write completion to transcript + transcriptWriter?.appendLine("---") + transcriptWriter?.appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + transcriptWriter?.appendLine("**Total Time:** ${totalTime / 1000.0}s") overviewTask.add( buildString { @@ -868,8 +1103,34 @@ Be concise but insightful. Focus on actionable insights. val finalResult = resultBuilder.toString() log.info("NarrativeReasoningTask completed: total_time=${totalTime}ms, output_size=${finalResult.length} chars") - task.safeComplete("Narrative analysis complete in ${totalTime / 1000}s. Generated ${finalResult.length} characters of analysis.", log) - resultFn(finalResult) + // Create summary message with file links + val summaryMessage = buildString { + appendLine("# Narrative Analysis Complete") + appendLine() + appendLine("**Subject:** $subject") + appendLine("**Time:** ${totalTime / 1000}s") + appendLine() + appendLine("## Detailed Results") + appendLine() + appendLine("Full analysis has been saved to the following files:") + appendLine() + appendLine("- [Main Narrative](${narrativeDir.name}/01_main_narrative.md)") + appendLine("- [Plot Points](${narrativeDir.name}/02_plot_points.md)") + appendLine("- [Character Analysis](${narrativeDir.name}/03_character_analysis.md)") + appendLine("- [Predicted Outcomes](${narrativeDir.name}/04_predicted_outcomes.md)") + appendLine("- [Inconsistencies](${narrativeDir.name}/05_inconsistencies.md)") + appendLine("- [Synthesis](${narrativeDir.name}/06_synthesis.md)") + appendLine() + appendLine("## Summary") + appendLine() + appendLine(finalResult.take(1000) + if (finalResult.length > 1000) "\n\n*See detailed files for complete analysis*" else "") + } + + task.safeComplete("Narrative analysis complete in ${totalTime / 1000}s. 
Results saved to .narrative_analysis directory.", log) + resultFn(summaryMessage) + // Close transcript + transcriptWriter?.flush() + transcriptWriter?.close() } catch (e: Exception) { log.error("Error during narrative reasoning", e) @@ -903,9 +1164,151 @@ Be concise but insightful. Focus on actionable insights. } } resultFn(errorOutput) + // Close transcript on error + transcriptWriter?.appendLine("**Error:** ${e.message}") + transcriptWriter?.flush() + transcriptWriter?.close() + } + } + + private fun getInputFileContent(): String = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun saveAnalysisToFile( + outputDir: File, + filename: String, + content: String + ) { + try { + val outputFile = File(outputDir, filename) + outputFile.writeText(content, StandardCharsets.UTF_8) + log.debug("Saved analysis to file: ${outputFile.absolutePath} (size: ${content.length} chars)") + } catch (e: Exception) { + log.error("Failed to save analysis to file: $filename", e) + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + "cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" + ) + return 
textExtensions.contains(file.extension.lowercase()) + } + + private fun generateNarrativeImage( + task: SessionTask, + tabs: TabbedDisplay, + title: String, + description: String, + imageDir: File, + filename: String, + transcriptWriter: java.io.BufferedWriter?, + orchestrationConfig: OrchestrationConfig + ) { + try { + log.info("Generating image: $title") + val imageTask = task.ui.newTask(false) + tabs["Image: $title"] = imageTask.placeholder + imageTask.add( + buildString { + appendLine("# $title") + appendLine() + appendLine("**Status:** Generating image...") + appendLine() + }.renderMarkdown + ) + task.update() + val imageAgent = ImageModificationAgent( + prompt = "Transform the narrative description into a vivid, cinematic image", + model = orchestrationConfig.imageChatChatter, + temperature = 0.7, + ) + val result = imageAgent.answer(listOf(ImageAndText(""" +Draw an image based on the following narrative description: +${description.indent(" ")} + """))) + val image = result.image + // Save image to file + val imageFile = task.resolve(filename)!! + ImageIO.write(image, "png", imageFile) + log.debug("Saved image to: ${imageFile.absolutePath}") + // Create display link + val link = task.linkTo(filename) + val imageHtml = """ +
    +

    $title

    +

    Prompt: ${result.text}

    + + $title + +
    + """.trimIndent() + imageTask.add(imageHtml.renderMarkdown) + task.update() + // Write to transcript + transcriptWriter?.appendLine("### $title") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("**Prompt:** ${result.text}") + transcriptWriter?.appendLine() + transcriptWriter?.appendLine("![${title}]($link)".transcriptFilter()) + transcriptWriter?.appendLine() + transcriptWriter?.flush() + imageTask.add("\n**Status:** ✅ Complete\n".renderMarkdown) + task.update() + } catch (e: Exception) { + log.error("Failed to generate image: $title", e) + transcriptWriter?.appendLine("**Image Generation Failed:** ${e.message}") + transcriptWriter?.appendLine() } } + companion object { private val log: Logger = LoggerFactory.getLogger(NarrativeReasoningTask::class.java) val NarrativeReasoning = TaskType( diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/PersuasiveEssayTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/PersuasiveEssayTask.kt new file mode 100644 index 000000000..551fbfbff --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/PersuasiveEssayTask.kt @@ -0,0 +1,1129 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import 
com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class PersuasiveEssayTask( + orchestrationConfig: OrchestrationConfig, + planTask: PersuasiveEssayTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + class PersuasiveEssayTaskExecutionConfigData( + @Description("The thesis statement or position to argue for") + val thesis: String? = null, + + @Description("The target audience (e.g., 'general public', 'academics', 'policymakers', 'business leaders')") + val target_audience: String = "general public", + + @Description("The tone of the essay (e.g., 'formal', 'conversational', 'passionate', 'analytical')") + val tone: String = "formal", + + @Description("Target word count for the complete essay") + val target_word_count: Int = 1500, + + @Description("Number of main arguments to develop") + val num_arguments: Int = 3, + + @Description("Whether to include counterarguments and rebuttals") + val include_counterarguments: Boolean = true, + + @Description("Whether to use rhetorical devices (ethos, pathos, logos)") + val use_rhetorical_devices: Boolean = true, + + @Description("Whether to include statistical evidence and citations") + val include_evidence: Boolean = true, + + @Description("Whether to use analogies and examples") + val use_analogies: Boolean = true, + + @Description("Call to action type (MUST BE one of: 'strong', 'moderate', 'reflective', 'none')") + val call_to_action: String = "strong", + + @Description("Number of revision passes for quality improvement") + val revision_passes: Int = 1, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, + + + @Description("Related files or research to incorporate") + val related_files: List? 
= null, + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = PersuasiveEssay.name, + task_description = task_description ?: "Generate persuasive essay for thesis: '$thesis'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (thesis.isNullOrBlank()) { + return "thesis must not be null or blank" + } + if (target_word_count <= 0) { + return "target_word_count must be positive, got: $target_word_count" + } + if (num_arguments < 1 || num_arguments > 10) { + return "num_arguments must be between 1 and 10, got: $num_arguments" + } + if (revision_passes < 0 || revision_passes > 5) { + return "revision_passes must be between 0 and 5, got: $revision_passes" + } + if (target_audience.isBlank()) { + return "target_audience must not be blank" + } + if (tone.isBlank()) { + return "tone must not be blank" + } + val validCallToActions = setOf("strong", "moderate", "reflective", "none") + if (call_to_action.lowercase() !in validCallToActions) { + return "call_to_action must be one of: ${validCallToActions.joinToString(", ")}, got: $call_to_action" + } + return ValidatedObject.validateFields(this) + } + } + + data class EssayOutline( + @Description("The essay title") + val title: String = "", + @Description("Hook or opening statement") + val hook: String = "", + @Description("Background context") + val background: String = "", + @Description("Clear thesis statement") + val thesis_statement: String = "", + @Description("Main arguments to develop") + val arguments: List = emptyList(), + @Description("Counterarguments to address") + val counterarguments: List = emptyList(), + @Description("Conclusion strategy") + val conclusion_strategy: String = "" + ) : ValidatedObject { + override fun validate(): String? 
{ + if (title.isBlank()) return "title must not be blank" + if (thesis_statement.isBlank()) return "thesis_statement must not be blank" + if (arguments.isEmpty()) return "arguments must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class ArgumentOutline( + @Description("Argument number") + val number: Int = 1, + @Description("Main claim of this argument") + val claim: String = "", + @Description("Supporting points") + val supporting_points: List = emptyList(), + @Description("Evidence types to use") + val evidence_types: List = emptyList(), + @Description("Rhetorical approach") + val rhetorical_approach: String = "", + @Description("Estimated word count") + val estimated_word_count: Int = 0 + ) : ValidatedObject + + data class CounterargumentOutline( + @Description("The opposing viewpoint") + val opposing_view: String = "", + @Description("Rebuttal strategy") + val rebuttal_strategy: String = "", + @Description("Estimated word count") + val estimated_word_count: Int = 0 + ) : ValidatedObject + + data class EssaySection( + @Description("Section type") + val section_type: String = "", + @Description("Section content") + val content: String = "", + @Description("Word count") + val word_count: Int = 0, + @Description("Rhetorical devices used") + val rhetorical_devices: List = emptyList(), + @Description("Key persuasive elements") + val persuasive_elements: List = emptyList() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ + PersuasiveEssay - Generate compelling persuasive essays with structured arguments + ** Specify the thesis statement or position to argue + ** Optionally provide input files (supports glob patterns) to incorporate as research + ** Define target audience and tone + ** Set target word count and number of main arguments + ** Enable counterarguments and rebuttals for balanced perspective + ** Use rhetorical devices (ethos, pathos, logos) for persuasive impact + ** Include statistical evidence 
and citations + ** Incorporate analogies and examples for clarity + ** Configure call to action strength + ** Performs outline creation, argument development, and iterative writing + ** Produces complete, well-structured persuasive essay + ** Detailed output saved to files with links in summary + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + log.info("Starting PersuasiveEssayTask for thesis: '${executionConfig?.thesis}', input_files: ${executionConfig?.input_files?.size ?: 0}") + // Create transcript file + val transcript = transcript(task) + transcript?.let { stream -> + stream.write("# Persuasive Essay Generation Transcript\n\n".toByteArray()) + stream.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + stream.write("**Thesis:** ${executionConfig?.thesis}\n\n".toByteArray()) + stream.write("---\n\n".toByteArray()) + stream.flush() + } + + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + resultFn("CONFIGURATION ERROR: $validationError") + return + } + + val thesis = executionConfig?.thesis + if (thesis.isNullOrBlank()) { + log.error("No thesis specified for persuasive essay") + task.safeComplete("CONFIGURATION ERROR: No thesis specified", log) + resultFn("CONFIGURATION ERROR: No thesis specified") + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = 
buildString { + appendLine("# Persuasive Essay Generation") + appendLine() + appendLine("**Thesis:** $thesis") + appendLine() + appendLine("## Configuration") + appendLine("- Target Audience: ${executionConfig.target_audience}") + appendLine("- Tone: ${executionConfig.tone}") + appendLine("- Target Word Count: ${executionConfig.target_word_count}") + appendLine("- Number of Arguments: ${executionConfig.num_arguments}") + appendLine("- Include Counterarguments: ${if (executionConfig.include_counterarguments) "✓" else "✗"}") + appendLine("- Use Rhetorical Devices: ${if (executionConfig.use_rhetorical_devices) "✓" else "✗"}") + appendLine("- Include Evidence: ${if (executionConfig.include_evidence) "✓" else "✗"}") + appendLine("- Use Analogies: ${if (executionConfig.use_analogies) "✓" else "✗"}") + appendLine("- Call to Action: ${executionConfig.call_to_action}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Research & Outline") + appendLine("*Analyzing thesis and creating essay structure...*") + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + transcript?.let { stream -> + stream.write("## Configuration\n\n".toByteArray()) + stream.write(overviewContent.toByteArray()) + stream.write("\n\n".toByteArray()) + stream.flush() + } + + val resultBuilder = StringBuilder() + resultBuilder.append("# Persuasive Essay: $thesis\n\n") + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) + val inputFileContent = getInputFileContent() + val contextFiles = getContextFiles() + + if (priorContext.isNotBlank() || inputFileContent.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + 
tabs["Research Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Research Context") + appendLine() + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + appendLine() + } + if (inputFileContent.isNotBlank()) { + appendLine("## Input Files") + appendLine(inputFileContent.truncateForDisplay(3000)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Related Files") + appendLine(contextFiles.truncateForDisplay(2000)) + } + }.renderMarkdown + ) + task.update() + } + + // Phase 1: Create outline + log.info("Phase 1: Creating essay outline") + val outlineTask = task.ui.newTask(false) + tabs["Outline"] = outlineTask.placeholder + + outlineTask.add( + buildString { + appendLine("# Essay Outline") + appendLine() + appendLine("**Status:** Creating structured outline...") + appendLine() + }.renderMarkdown + ) + task.update() + + val wordsPerArgument = (executionConfig.target_word_count * 0.6).toInt() / executionConfig.num_arguments + val counterargumentWords = if (executionConfig.include_counterarguments) { + (executionConfig.target_word_count * 0.15).toInt() + } else 0 + + val outlineAgent = ParsedAgent( + resultClass = EssayOutline::class.java, + prompt = """ +You are an expert in persuasive writing and rhetoric. Create a detailed outline for a persuasive essay. + +Thesis: $thesis + +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} +Target Word Count: ${executionConfig.target_word_count} +Number of Arguments: ${executionConfig.num_arguments} + +${if (inputFileContent.isNotBlank()) "Input Files:\n${inputFileContent.truncateForDisplay(3000)}\n" else ""} +${if (priorContext.isNotBlank()) "Research Context:\n${priorContext.truncateForDisplay(3000)}\n" else ""} +${if (contextFiles.isNotBlank()) "Additional Research:\n${contextFiles.truncateForDisplay(3000)}\n" else ""} + +Create an outline with: +1. 
A compelling hook that grabs attention +2. Background context (100-150 words) +3. Clear, specific thesis statement +4. ${executionConfig.num_arguments} main arguments (~$wordsPerArgument words each) +${if (executionConfig.include_counterarguments) "5. 2-3 counterarguments with rebuttal strategies (~$counterargumentWords words total)" else ""} +6. Conclusion strategy + +For each argument, specify: +- The main claim +- 3-4 supporting points +- Types of evidence to use (statistics, expert testimony, examples, analogies) +${if (executionConfig.use_rhetorical_devices) "- Rhetorical approach (ethos/pathos/logos emphasis)" else ""} + +Ensure the outline: +- Builds a logical progression of ideas +- Addresses the ${executionConfig.target_audience} effectively +- Maintains a ${executionConfig.tone} tone +- Includes diverse types of support +- Anticipates and addresses objections + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var outline = outlineAgent.answer(listOf("Generate outline")).obj + + // Validate outline + outline.validate()?.let { validationError -> + log.error("Outline validation failed: $validationError") + outlineTask.error(ValidatedObject.ValidationError(validationError, outline)) + task.safeComplete("Outline validation failed: $validationError", log) + resultFn("ERROR: Outline validation failed: $validationError") + return + } + + log.info("Generated outline: ${outline.arguments.size} arguments, ${outline.counterarguments.size} counterarguments") + + val outlineContent = buildString { + appendLine("## ${outline.title}") + appendLine() + appendLine("### Hook") + appendLine(outline.hook) + appendLine() + appendLine("### Background") + appendLine(outline.background) + appendLine() + appendLine("### Thesis Statement") + appendLine("> ${outline.thesis_statement}") + appendLine() + appendLine("---") + appendLine() + appendLine("### Main Arguments") + outline.arguments.forEach { arg -> + 
appendLine("#### Argument ${arg.number}: ${arg.claim}") + appendLine() + appendLine("**Supporting Points:**") + arg.supporting_points.forEach { point -> + appendLine("- $point") + } + appendLine() + appendLine("**Evidence Types:** ${arg.evidence_types.joinToString(", ")}") + appendLine() + if (arg.rhetorical_approach.isNotBlank()) { + appendLine("**Rhetorical Approach:** ${arg.rhetorical_approach}") + appendLine() + } + appendLine("**Est. Words:** ${arg.estimated_word_count}") + appendLine() + appendLine("---") + appendLine() + } + if (outline.counterarguments.isNotEmpty()) { + appendLine("### Counterarguments & Rebuttals") + outline.counterarguments.forEach { counter -> + appendLine("**Opposing View:** ${counter.opposing_view}") + appendLine() + appendLine("**Rebuttal Strategy:** ${counter.rebuttal_strategy}") + appendLine() + appendLine("**Est. Words:** ${counter.estimated_word_count}") + appendLine() + } + appendLine("---") + appendLine() + } + appendLine("### Conclusion Strategy") + appendLine(outline.conclusion_strategy) + appendLine() + appendLine("**Status:** ✅ Complete") + } + outlineTask.add(outlineContent.renderMarkdown) + task.update() + transcript?.let { stream -> + stream.write("## Essay Outline\n\n".toByteArray()) + stream.write(outlineContent.toByteArray()) + stream.write("\n\n".toByteArray()) + stream.flush() + } + + overviewTask.add("✅ Phase 1 Complete: Outline created\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Introduction\n*Writing compelling introduction...*\n".renderMarkdown) + task.update() + + // Phase 2: Write Introduction + log.info("Phase 2: Writing introduction") + val introTask = task.ui.newTask(false) + tabs["Introduction"] = introTask.placeholder + + introTask.add( + buildString { + appendLine("# Introduction") + appendLine() + appendLine("**Status:** Writing introduction...") + appendLine() + }.renderMarkdown + ) + task.update() + + val introAgent = ParsedAgent( + resultClass = EssaySection::class.java, + prompt = """ +You 
are an expert persuasive writer. Write a compelling introduction for this essay. + +Thesis: $thesis +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} + +Outline: +Hook: ${outline.hook} +Background: ${outline.background} +Thesis Statement: ${outline.thesis_statement} + +Write an introduction (200-300 words) that: +1. Opens with the compelling hook +2. Provides necessary background context +3. Establishes credibility and relevance +4. Builds toward the thesis statement +5. Clearly states the thesis +${if (executionConfig.use_rhetorical_devices) "6. Uses appropriate rhetorical devices (ethos to establish credibility)" else ""} + +Make it engaging and set the tone for the entire essay. +Speak directly to the ${executionConfig.target_audience}. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var introduction = introAgent.answer(listOf("Write introduction")).obj + + introTask.add( + buildString { + appendLine("## Introduction") + appendLine() + appendLine(introduction.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${introduction.word_count}") + if (introduction.rhetorical_devices.isNotEmpty()) { + appendLine() + appendLine("**Rhetorical Devices:** ${introduction.rhetorical_devices.joinToString(", ")}") + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("## Introduction\n\n".toByteArray()) + stream.write(introduction.content.toByteArray()) + stream.write("\n\n**Word Count:** ${introduction.word_count}\n\n".toByteArray()) + stream.flush() + } + + + resultBuilder.append(introduction.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ Phase 2 Complete: Introduction written (${introduction.word_count} words)\n".renderMarkdown) + overviewTask.add("\n### Phase 3: Body Arguments\n*Developing main arguments...*\n".renderMarkdown) + 
task.update() + + // Phase 3: Write each argument + log.info("Phase 3: Writing body arguments") + val argumentSections = mutableListOf() + var cumulativeWordCount = introduction.word_count + + outline.arguments.forEachIndexed { index, argOutline -> + log.info("Writing argument ${index + 1}/${outline.arguments.size}: ${argOutline.claim}") + + overviewTask.add("- Argument ${index + 1}: ${argOutline.claim.truncateForDisplay(50)} ".renderMarkdown) + task.update() + + val argTask = task.ui.newTask(false) + tabs["Argument ${index + 1}"] = argTask.placeholder + + argTask.add( + buildString { + appendLine("# Argument ${index + 1}") + appendLine() + appendLine("**Status:** Writing argument...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Build context from previous arguments + val previousContext = if (argumentSections.isNotEmpty()) { + buildString { + appendLine("## Previous Arguments Summary") + argumentSections.takeLast(1).forEach { prevArg -> + appendLine("**Previous Claim:** ${prevArg.persuasive_elements.firstOrNull() ?: ""}") + appendLine("**Key Points:** ${prevArg.content.take(200)}...") + appendLine() + } + } + } else { + "This is the first argument." + } + + val argumentAgent = ParsedAgent( + resultClass = EssaySection::class.java, + prompt = """ +You are an expert persuasive writer. Write a compelling body paragraph for this argument. + +Overall Thesis: $thesis +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} + +Argument to Develop: +Claim: ${argOutline.claim} +Supporting Points: ${argOutline.supporting_points.joinToString("; ")} +Evidence Types: ${argOutline.evidence_types.joinToString(", ")} +${if (argOutline.rhetorical_approach.isNotBlank()) "Rhetorical Approach: ${argOutline.rhetorical_approach}" else ""} +Target Words: ${argOutline.estimated_word_count} + +$previousContext + +Write a well-developed argument paragraph that: +1. Opens with a clear topic sentence stating the claim +2. 
Provides detailed supporting points +${if (executionConfig.include_evidence) "3. Includes specific evidence (statistics, expert quotes, examples)" else ""} +${if (executionConfig.use_analogies) "4. Uses analogies or concrete examples for clarity" else ""} +${if (executionConfig.use_rhetorical_devices) "5. Employs appropriate rhetorical devices (${argOutline.rhetorical_approach})" else ""} +6. Connects back to the thesis +7. Transitions smoothly to the next point + +Make it persuasive, logical, and engaging. +Aim for approximately ${argOutline.estimated_word_count} words. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var argumentSection = argumentAgent.answer(listOf("Write argument")).obj + argumentSections.add(argumentSection) + cumulativeWordCount += argumentSection.word_count + + argTask.add( + buildString { + appendLine("## ${argOutline.claim}") + appendLine() + appendLine(argumentSection.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${argumentSection.word_count}") + if (argumentSection.rhetorical_devices.isNotEmpty()) { + appendLine() + appendLine("**Rhetorical Devices:** ${argumentSection.rhetorical_devices.joinToString(", ")}") + } + if (argumentSection.persuasive_elements.isNotEmpty()) { + appendLine() + appendLine("**Persuasive Elements:** ${argumentSection.persuasive_elements.joinToString(", ")}") + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("## Argument ${index + 1}: ${argOutline.claim}\n\n".toByteArray()) + stream.write(argumentSection.content.toByteArray()) + stream.write("\n\n**Word Count:** ${argumentSection.word_count}\n\n".toByteArray()) + stream.flush() + } + + + resultBuilder.append(argumentSection.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ (${argumentSection.word_count} words)\n".renderMarkdown) + task.update() + } + 
+ overviewTask.add("✅ Phase 3 Complete: All arguments written\n".renderMarkdown) + + // Phase 4: Counterarguments (if enabled) + if (executionConfig.include_counterarguments && outline.counterarguments.isNotEmpty()) { + overviewTask.add("\n### Phase 4: Counterarguments\n*Addressing opposing views...*\n".renderMarkdown) + task.update() + + log.info("Phase 4: Writing counterarguments and rebuttals") + val counterTask = task.ui.newTask(false) + tabs["Counterarguments"] = counterTask.placeholder + + counterTask.add( + buildString { + appendLine("# Counterarguments & Rebuttals") + appendLine() + appendLine("**Status:** Writing counterarguments...") + appendLine() + }.renderMarkdown + ) + task.update() + + val counterAgent = ParsedAgent( + resultClass = EssaySection::class.java, + prompt = """ +You are an expert persuasive writer. Write a section addressing counterarguments. + +Overall Thesis: $thesis +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} + +Counterarguments to Address: +${outline.counterarguments.joinToString("\n") { "- ${it.opposing_view}\n Rebuttal: ${it.rebuttal_strategy}" }} + +Write a counterargument section that: +1. Acknowledges opposing viewpoints fairly and respectfully +2. Demonstrates understanding of the other side +3. Provides strong, logical rebuttals +4. Strengthens your original thesis +5. Maintains credibility through balanced treatment + +Use phrases like "While some argue...", "Critics may claim...", "However..." +Show why your position is stronger despite valid concerns. +Aim for approximately $counterargumentWords words. 
+ """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var counterSection = counterAgent.answer(listOf("Write counterarguments")).obj + cumulativeWordCount += counterSection.word_count + + counterTask.add( + buildString { + appendLine("## Addressing Opposing Views") + appendLine() + appendLine(counterSection.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${counterSection.word_count}") + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("## Counterarguments & Rebuttals\n\n".toByteArray()) + stream.write(counterSection.content.toByteArray()) + stream.write("\n\n**Word Count:** ${counterSection.word_count}\n\n".toByteArray()) + stream.flush() + } + + + resultBuilder.append(counterSection.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ Phase 4 Complete: Counterarguments addressed (${counterSection.word_count} words)\n".renderMarkdown) + } + + // Phase 5: Conclusion + overviewTask.add("\n### Phase 5: Conclusion\n*Writing powerful conclusion...*\n".renderMarkdown) + task.update() + + log.info("Phase 5: Writing conclusion") + val conclusionTask = task.ui.newTask(false) + tabs["Conclusion"] = conclusionTask.placeholder + + conclusionTask.add( + buildString { + appendLine("# Conclusion") + appendLine() + appendLine("**Status:** Writing conclusion...") + appendLine() + }.renderMarkdown + ) + task.update() + + val conclusionAgent = ParsedAgent( + resultClass = EssaySection::class.java, + prompt = """ +You are an expert persuasive writer. Write a powerful conclusion for this essay. + +Overall Thesis: $thesis +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} +Call to Action Type: ${executionConfig.call_to_action} + +Main Arguments Presented: +${argumentSections.mapIndexed { i, arg -> "${i + 1}. 
${arg.persuasive_elements.firstOrNull() ?: arg.content.take(100)}" }.joinToString("\n")} + +Conclusion Strategy: ${outline.conclusion_strategy} + +Write a conclusion (200-250 words) that: +1. Restates the thesis in fresh language +2. Synthesizes the main arguments +3. Emphasizes the significance and implications +4. Leaves a lasting impression +${ + when (executionConfig.call_to_action.lowercase()) { + "strong" -> "5. Includes a powerful, specific call to action" + "moderate" -> "5. Suggests concrete next steps or considerations" + "reflective" -> "5. Invites thoughtful reflection on the topic" + else -> "" + } + } +${if (executionConfig.use_rhetorical_devices) "6. Uses rhetorical devices for emotional impact (pathos)" else ""} + +Make it memorable and motivating. +End on a strong note that reinforces your position. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var conclusion = conclusionAgent.answer(listOf("Write conclusion")).obj + cumulativeWordCount += conclusion.word_count + + conclusionTask.add( + buildString { + appendLine("## Conclusion") + appendLine() + appendLine(conclusion.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${conclusion.word_count}") + if (conclusion.rhetorical_devices.isNotEmpty()) { + appendLine() + appendLine("**Rhetorical Devices:** ${conclusion.rhetorical_devices.joinToString(", ")}") + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("## Conclusion\n\n".toByteArray()) + stream.write(conclusion.content.toByteArray()) + stream.write("\n\n**Word Count:** ${conclusion.word_count}\n\n".toByteArray()) + stream.flush() + } + + + resultBuilder.append(conclusion.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ Phase 5 Complete: Conclusion written (${conclusion.word_count} words)\n".renderMarkdown) + + // Phase 6: Revision (if 
enabled) + if (executionConfig.revision_passes > 0) { + overviewTask.add("\n### Phase 6: Revision\n*Refining and polishing...*\n".renderMarkdown) + task.update() + + log.info("Phase 6: Performing ${executionConfig.revision_passes} revision pass(es)") + val revisionTask = task.ui.newTask(false) + tabs["Revision"] = revisionTask.placeholder + + revisionTask.add( + buildString { + appendLine("# Revision Process") + appendLine() + appendLine("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...") + appendLine() + }.renderMarkdown + ) + task.update() + + val fullEssay = resultBuilder.toString() + + repeat(executionConfig.revision_passes) { passNum -> + log.debug("Revision pass ${passNum + 1}/${executionConfig.revision_passes}") + + val revisionAgent = ChatAgent( + prompt = """ +You are an expert editor specializing in persuasive writing. Review and improve this essay. + +Current Essay: +$fullEssay + +Focus on: +1. Strengthening argument logic and flow +2. Enhancing persuasive language and rhetoric +3. Improving transitions between ideas +4. Ensuring consistent tone (${executionConfig.tone}) +5. Polishing sentence structure and word choice +6. Verifying thesis support throughout +7. Maximizing impact on ${executionConfig.target_audience} + +Maintain: +- All key arguments and evidence +- The thesis and main claims +- Approximate word count ($cumulativeWordCount words) +- The ${executionConfig.tone} tone + +Provide the complete revised essay. 
+ """.trimIndent(), + model = api, + temperature = 0.6 + ) + + val revisedEssay = revisionAgent.answer(listOf("Revise the essay")) + resultBuilder.clear() + resultBuilder.append(revisedEssay) + + revisionTask.add( + buildString { + appendLine("## Revision Pass ${passNum + 1}") + appendLine() + appendLine("✅ Complete") + appendLine() + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("### Revision Pass ${passNum + 1}\n\n".toByteArray()) + stream.write("Completed revision pass ${passNum + 1} of ${executionConfig.revision_passes}\n\n".toByteArray()) + stream.flush() + } + } + + overviewTask.add("✅ Phase 6 Complete: ${executionConfig.revision_passes} revision pass(es) completed\n".renderMarkdown) + } + + // Phase 7: Final Assembly + overviewTask.add("\n### Phase 7: Final Assembly\n*Compiling complete essay...*\n".renderMarkdown) + task.update() + + log.info("Phase 7: Assembling final essay") + val finalTask = task.ui.newTask(false) + tabs["Complete Essay"] = finalTask.placeholder + + val finalEssay = buildString { + appendLine("# ${outline.title}") + appendLine() + appendLine(resultBuilder.toString()) + appendLine() + appendLine("---") + appendLine() + appendLine("**Total Word Count:** $cumulativeWordCount") + appendLine() + appendLine("**Target Word Count:** ${executionConfig.target_word_count}") + appendLine() + appendLine("**Completion:** ${(cumulativeWordCount.toFloat() / executionConfig.target_word_count * 100).toInt()}%") + } + // Save complete essay to file + val (essayLink, essayFile) = task.createFile("persuasive_essay.md") + essayFile?.writeText(finalEssay, StandardCharsets.UTF_8) + log.info("Saved complete essay to: $essayLink") + + + finalTask.add(finalEssay.renderMarkdown) + task.update() + // Update transcript with final essay + transcript?.let { stream -> + stream.write("## Complete Essay\n\n".toByteArray()) + stream.write(finalEssay.toByteArray()) + stream.write("\n\n".toByteArray()) + stream.flush() + } + + + // Final 
statistics + val totalTime = System.currentTimeMillis() - startTime + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Total Word Count: $cumulativeWordCount") + appendLine("- Target Word Count: ${executionConfig.target_word_count}") + appendLine("- Completion: ${(cumulativeWordCount.toFloat() / executionConfig.target_word_count * 100).toInt()}%") + appendLine("- Number of Arguments: ${argumentSections.size}") + appendLine("- Counterarguments: ${if (executionConfig.include_counterarguments) "✓ Included" else "✗ Not included"}") + appendLine("- Revision Passes: ${executionConfig.revision_passes}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("---\n\n".toByteArray()) + stream.write("## Generation Complete\n\n".toByteArray()) + stream.write("**Total Word Count:** $cumulativeWordCount\n\n".toByteArray()) + stream.write("**Total Time:** ${totalTime / 1000.0}s\n\n".toByteArray()) + stream.write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + stream.flush() + } + + + // Close transcript and get link + transcript?.close() + val (transcriptLink, _) = task.createFile("transcript.md") + + // Concise summary for resultFn with file links + val finalResult = buildString { + appendLine("# Persuasive Essay Summary: ${outline.title}") + appendLine() + appendLine("A complete persuasive essay of **$cumulativeWordCount words** was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("**Thesis:** $thesis") + appendLine() + appendLine("**Structure:**") + appendLine("- Introduction with compelling hook") + appendLine("- ${argumentSections.size} 
main arguments with evidence") + if (executionConfig.include_counterarguments) { + appendLine("- Counterarguments and rebuttals") + } + appendLine("- Conclusion with ${executionConfig.call_to_action} call to action") + appendLine() + appendLine("## Output Files") + appendLine() + appendLine("- **Complete Essay:** $essayLink") + appendLine(" - HTML") + appendLine(" - PDF") + appendLine() + appendLine("- **Transcript:** $transcriptLink") + appendLine(" - HTML") + appendLine(" - PDF") + } + + log.info("PersuasiveEssayTask completed: words=$cumulativeWordCount, arguments=${argumentSections.size}, time=${totalTime}ms") + + task.safeComplete("Persuasive essay generation complete: $cumulativeWordCount words in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during persuasive essay generation", e) + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + transcript?.let { stream -> + stream.write("---\n\n".toByteArray()) + stream.write("## Error Occurred\n\n".toByteArray()) + stream.write("**Error:** ${e.message}\n\n".toByteArray()) + stream.flush() + } + transcript?.close() + + + val errorOutput = buildString { + appendLine("# Error in Persuasive Essay Generation") + appendLine() + appendLine("**Thesis:** $thesis") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } + + private fun getInputFileContent(): String { + val inputFiles = executionConfig?.input_files ?: return "" + if (inputFiles.isEmpty()) return "" + log.debug("Loading ${inputFiles.size} input files") + return buildString { + 
appendLine("## Input Files Content") + appendLine() + inputFiles.forEach { pattern: String -> + try { + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + val matchedFiles = FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }.filter { it.isFile && it.exists() }.distinct().sortedBy { it } + matchedFiles.forEach { file -> + log.debug("Loading input file: ${file.path}") + appendLine("### ${root.relativize(file.toPath())}") + appendLine("```") + appendLine(file.readText().truncateForDisplay(1000)) + appendLine("```") + appendLine() + } + } catch (e: Exception) { + log.warn("Error reading input files matching pattern: $pattern", e) + } + } + } + } + + + private fun getContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Research Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object { + private val log: Logger = LoggerFactory.getLogger(PersuasiveEssayTask::class.java) + val PersuasiveEssay = TaskType( + "PersuasiveEssay", + PersuasiveEssayTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate compelling persuasive essays with structured arguments", + """ + Generates complete, well-structured persuasive essays using rhetorical techniques. +
      +
    • Creates detailed outline with thesis, arguments, and counterarguments
    • +
    • Writes compelling introduction with hook and background
    • +
    • Develops main arguments with evidence and rhetorical devices
    • +
    • Addresses counterarguments with strong rebuttals
    • +
    • Crafts powerful conclusion with call to action
    • +
    • Supports multiple tones and target audiences
    • +
    • Includes optional revision passes for quality
    • +
    • Uses ethos, pathos, and logos for persuasive impact
    • +
    • Ideal for opinion pieces, proposals, advocacy, and academic arguments
    • +
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ReportGenerationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ReportGenerationTask.kt new file mode 100644 index 000000000..4cee6f417 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ReportGenerationTask.kt @@ -0,0 +1,1285 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.input.getReader +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class ReportGenerationTask( + orchestrationConfig: OrchestrationConfig, + planTask: ReportGenerationTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + class ReportGenerationTaskExecutionConfigData( + @Description("The subject or topic of the report") + val report_topic: String? 
= null, + + @Description("Type of report (e.g., 'status_update', 'quarterly_review', 'incident_report', 'performance_analysis', 'market_research')") + val report_type: String = "status_update", + + @Description("Target audience for the report (e.g., 'executives', 'team_members', 'stakeholders', 'board_of_directors')") + val target_audience: String = "executives", + + @Description("Time period covered by the report (e.g., 'Q1 2024', 'January 2024', 'Last 30 days')") + val time_period: String? = null, + + @Description("Key metrics or KPIs to include in the report") + val key_metrics: List? = null, + + @Description("Data points or statistics to analyze") + val data_points: Map? = null, + + @Description("Whether to include trend analysis comparing to previous periods") + val include_trend_analysis: Boolean = true, + + @Description("Whether to include data visualization descriptions") + val include_visualizations: Boolean = true, + + @Description("Whether to include executive summary/dashboard") + val include_executive_summary: Boolean = true, + + @Description("Whether to include actionable recommendations") + val include_recommendations: Boolean = true, + + @Description("Whether to include comparative analysis (benchmarks, competitors, previous periods)") + val include_comparative_analysis: Boolean = true, + + @Description("Whether to include risk assessment or challenges section") + val include_risk_assessment: Boolean = true, + + @Description("Tone of the report (e.g., 'formal', 'professional', 'analytical', 'conversational')") + val tone: String = "professional", + + @Description("Target word count for the complete report") + val target_word_count: Int = 2000, + + @Description("Number of revision passes for quality improvement") + val revision_passes: Int = 1, + + @Description("Related files or data sources to incorporate") + val related_files: List? = null, + @Description("The specific files (or file patterns, e.g. 
**/*.kt) to be used as input for the task") + val input_files: List? = null, + + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = ReportGeneration.name, + task_description = task_description ?: "Generate report on: '$report_topic'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (report_topic.isNullOrBlank()) { + return "report_topic must not be null or blank" + } + if (target_word_count <= 0) { + return "target_word_count must be positive, got: $target_word_count" + } + if (revision_passes < 0 || revision_passes > 5) { + return "revision_passes must be between 0 and 5, got: $revision_passes" + } + val validReportTypes = setOf( + "status_update", "quarterly_review", "incident_report", + "performance_analysis", "market_research", "post_mortem", + "financial_report", "project_summary" + ) + if (report_type.lowercase() !in validReportTypes) { + return "report_type must be one of: ${validReportTypes.joinToString(", ")}, got: $report_type" + } + val validTones = setOf("formal", "professional", "analytical", "conversational", "technical") + if (tone.lowercase() !in validTones) { + return "tone must be one of: ${validTones.joinToString(", ")}, got: $tone" + } + return ValidatedObject.validateFields(this) + } + } + + data class ReportOutline( + @Description("Report title") + val title: String = "", + @Description("Executive summary or key highlights") + val executive_summary: String = "", + @Description("Main sections of the report") + val sections: List = emptyList(), + @Description("Key findings or takeaways") + val key_findings: List = emptyList(), + @Description("Recommended visualizations") + val visualization_suggestions: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? 
{ + if (title.isBlank()) return "title must not be blank" + if (sections.isEmpty()) return "sections must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class ReportSection( + @Description("Section number") + val section_number: Int = 1, + @Description("Section title") + val title: String = "", + @Description("Section purpose or focus") + val purpose: String = "", + @Description("Key points to cover") + val key_points: List = emptyList(), + @Description("Metrics or data to include") + val metrics: List = emptyList(), + @Description("Estimated word count") + val estimated_word_count: Int = 0 + ) : ValidatedObject { + override fun validate(): String? { + if (section_number < 1) return "section_number must be positive" + if (title.isBlank()) return "title must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class VisualizationSuggestion( + @Description("Type of visualization (e.g., 'line_chart', 'bar_chart', 'pie_chart', 'table', 'heatmap')") + val type: String = "", + @Description("What data to visualize") + val data_description: String = "", + @Description("Purpose of this visualization") + val purpose: String = "", + @Description("Suggested placement in report") + val placement: String = "" + ) : ValidatedObject + + data class DataAnalysis( + @Description("Metric or data point being analyzed") + val metric_name: String = "", + @Description("Current value or status") + val current_value: String = "", + @Description("Comparison to previous period") + val comparison: String = "", + @Description("Trend direction (e.g., 'increasing', 'decreasing', 'stable')") + val trend: String = "", + @Description("Interpretation of the data") + val interpretation: String = "", + @Description("Significance level (e.g., 'critical', 'important', 'notable', 'minor')") + val significance: String = "" + ) : ValidatedObject + + data class DataAnalyses( + val analyses: List = emptyList() + ) : ValidatedObject + + data class 
RecommendationSet( + @Description("Actionable recommendations") + val recommendations: List = emptyList() + ) : ValidatedObject + + data class Recommendation( + @Description("Priority level (e.g., 'high', 'medium', 'low')") + val priority: String = "", + @Description("The recommended action") + val action: String = "", + @Description("Rationale for this recommendation") + val rationale: String = "", + @Description("Expected impact or benefit") + val expected_impact: String = "", + @Description("Implementation timeline") + val timeline: String = "", + @Description("Resources required") + val resources_required: List = emptyList() + ) : ValidatedObject + + data class RiskAssessment( + @Description("Identified risks or challenges") + val risks: List = emptyList() + ) : ValidatedObject + + data class Risk( + @Description("Risk category (e.g., 'operational', 'financial', 'strategic', 'technical')") + val category: String = "", + @Description("Description of the risk") + val description: String = "", + @Description("Likelihood (e.g., 'high', 'medium', 'low')") + val likelihood: String = "", + @Description("Potential impact (e.g., 'high', 'medium', 'low')") + val impact: String = "", + @Description("Mitigation strategies") + val mitigation: String = "" + ) : ValidatedObject + + data class GeneratedSection( + @Description("Section number") + val section_number: Int = 1, + @Description("Section title") + val title: String = "", + @Description("Section content") + val content: String = "", + @Description("Word count") + val word_count: Int = 0, + @Description("Key insights from this section") + val key_insights: List = emptyList() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ +ReportGeneration - Generate comprehensive business reports with data analysis and recommendations + ** Specify the report topic and type (status update, quarterly review, incident report, etc.) 
+ ** Define target audience and time period + ** Provide key metrics, KPIs, and data points to analyze + ** Enable trend analysis, visualizations, and comparative analysis + ** Include executive summary/dashboard for quick insights + ** Generate actionable recommendations based on findings + ** Assess risks and challenges + ** Produces complete, professional report with clear structure + """.trimIndent() + } + + protected val codeFiles = mutableMapOf() + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + log.info("Starting ReportGenerationTask for topic: '${executionConfig?.report_topic}'") + val markdownTranscript = transcript(task) + // Read input from messages parameter + val messageContext = messages.filter { it.isNotBlank() }.joinToString("\n\n") + log.debug("Received ${messages.size} messages with total length: ${messageContext.length}") + // Load input files if specified + val inputFileContent = getInputFileCode() + log.debug("Loaded input files: ${inputFileContent.length} characters") + val fullContext = listOfNotNull(messageContext, inputFileContent).filter { it.isNotBlank() }.joinToString("\n\n---\n\n") + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + resultFn("CONFIGURATION ERROR: $validationError") + return + } + + val reportTopic = executionConfig?.report_topic + if (reportTopic.isNullOrBlank()) { + log.error("No report topic specified") + task.safeComplete("CONFIGURATION ERROR: No report topic specified", log) + resultFn("CONFIGURATION ERROR: No report topic specified") + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, 
resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Report Generation") + appendLine() + appendLine("**Topic:** $reportTopic") + appendLine("**Type:** ${executionConfig.report_type}") + appendLine() + appendLine("## Configuration") + appendLine("- Report Type: ${executionConfig.report_type}") + appendLine("- Target Audience: ${executionConfig.target_audience}") + appendLine("- Time Period: ${executionConfig.time_period ?: "Not specified"}") + appendLine("- Target Word Count: ${executionConfig.target_word_count}") + appendLine("- Tone: ${executionConfig.tone}") + appendLine() + appendLine("## Features") + appendLine("- Executive Summary: ${if (executionConfig.include_executive_summary) "✓" else "✗"}") + appendLine("- Trend Analysis: ${if (executionConfig.include_trend_analysis) "✓" else "✗"}") + appendLine("- Visualizations: ${if (executionConfig.include_visualizations) "✓" else "✗"}") + appendLine("- Recommendations: ${if (executionConfig.include_recommendations) "✓" else "✗"}") + appendLine("- Comparative Analysis: ${if (executionConfig.include_comparative_analysis) "✓" else "✗"}") + appendLine("- Risk Assessment: ${if (executionConfig.include_risk_assessment) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Data Analysis") + appendLine("*Analyzing metrics and data points...*") + } + markdownTranscript?.write(overviewContent.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + overviewTask.add(overviewContent.renderMarkdown) + task.update() + + val resultBuilder = StringBuilder() + resultBuilder.append("# ${executionConfig.report_type.replace("_", " ").capitalize()} Report: 
$reportTopic\n\n") + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) + val contextFiles = getRelatedContextFiles() + + if (priorContext.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Data Sources"] = contextTask.placeholder + val contextContent = buildString { + appendLine("# Data Sources & Context") + appendLine() + if (fullContext.isNotBlank()) { + appendLine("## Input Context") + appendLine(fullContext.truncateForDisplay(3000)) + appendLine() + } + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Related Files") + appendLine(contextFiles.truncateForDisplay(2000)) + } + } + contextTask.add(contextContent.renderMarkdown) + + markdownTranscript?.write(contextContent.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + + task.update() + } + + // Phase 1: Data Analysis + log.info("Phase 1: Analyzing data and metrics") + val dataAnalysisTask = task.ui.newTask(false) + tabs["Data Analysis"] = dataAnalysisTask.placeholder + + dataAnalysisTask.add( + buildString { + appendLine("# Data Analysis") + appendLine() + appendLine() + appendLine("**Status:** Analyzing metrics and trends...") + appendLine() + }.renderMarkdown + ) + task.update() + + val metricsContext = buildString { + if (!executionConfig.key_metrics.isNullOrEmpty()) { + appendLine("Key Metrics to Analyze:") + executionConfig.key_metrics.forEach { metric -> + appendLine("- $metric") + } + appendLine() + } + if (!executionConfig.data_points.isNullOrEmpty()) { + appendLine("Data Points:") + executionConfig.data_points.forEach { (key, value) -> + appendLine("- $key: $value") + } + appendLine() + } + } + + val dataAnalysisAgent = ParsedAgent( + resultClass = 
DataAnalyses::class.java, + prompt = """ +${if (fullContext.isNotBlank()) "Input Context:\n${fullContext.truncateForDisplay(3000)}\n" else ""} +You are a data analyst expert. Analyze the provided metrics and data points for this report. + +Report Topic: $reportTopic +Report Type: ${executionConfig.report_type} +Time Period: ${executionConfig.time_period ?: "Current period"} + +$metricsContext + +${if (priorContext.isNotBlank()) "Additional Context:\n${priorContext.truncateForDisplay(3000)}\n" else ""} +${if (contextFiles.isNotBlank()) "Data Sources:\n${contextFiles.truncateForDisplay(3000)}\n" else ""} + +For each key metric or data point, provide: +- Current value or status +- Comparison to previous period (if applicable) +- Trend direction (increasing, decreasing, stable) +- Interpretation of what the data means +- Significance level (critical, important, notable, minor) + +${if (executionConfig.include_trend_analysis) "Include trend analysis comparing to historical data where possible." else ""} +${if (executionConfig.include_comparative_analysis) "Include comparative analysis against benchmarks or competitors where relevant." else ""} + +Focus on insights that matter to ${executionConfig.target_audience}. +Be specific with numbers and percentages where available. 
+ """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val dataAnalyses = dataAnalysisAgent.answer(listOf("Analyze data")).obj.analyses + log.info("Analyzed ${dataAnalyses.size} metrics") + + val dataAnalysisContent = buildString { + appendLine() + appendLine("## Key Metrics Analysis") + appendLine() + dataAnalyses.forEach { analysis -> + val significanceIcon = when (analysis.significance.lowercase()) { + "critical" -> "🔴" + "important" -> "🟡" + "notable" -> "🔵" + else -> "⚪" + } + appendLine("### $significanceIcon ${analysis.metric_name}") + appendLine() + appendLine("**Current Value:** ${analysis.current_value}") + appendLine() + if (analysis.comparison.isNotBlank()) { + appendLine("**Comparison:** ${analysis.comparison}") + appendLine() + } + appendLine("**Trend:** ${analysis.trend}") + appendLine() + appendLine("**Analysis:** ${analysis.interpretation}") + appendLine() + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + markdownTranscript?.write(dataAnalysisContent.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + dataAnalysisTask.add(dataAnalysisContent.renderMarkdown) + task.update() + + overviewTask.add("✅ Phase 1 Complete: ${dataAnalyses.size} metrics analyzed\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Report Structure\n*Creating report outline...*\n".renderMarkdown) + task.update() + + // Phase 2: Create Report Outline + log.info("Phase 2: Creating report outline") + val outlineTask = task.ui.newTask(false) + tabs["Outline"] = outlineTask.placeholder + + outlineTask.add( + buildString { + appendLine("# Report Outline") + appendLine() + appendLine() + appendLine("**Status:** Structuring report sections...") + appendLine() + }.renderMarkdown + ) + task.update() + + val outlineAgent = ParsedAgent( + resultClass = ReportOutline::class.java, + prompt = """ +You are a business report writing expert. Create a detailed outline for this report. 
+ +Report Topic: $reportTopic +Report Type: ${executionConfig.report_type} +Target Audience: ${executionConfig.target_audience} +Time Period: ${executionConfig.time_period ?: "Current period"} +Target Word Count: ${executionConfig.target_word_count} + +Data Analysis Summary: +${dataAnalyses.take(5).joinToString("\n") { "- ${it.metric_name}: ${it.interpretation.take(100)}" }} + +Create an outline with: +1. A compelling title +${if (executionConfig.include_executive_summary) "2. Executive summary highlighting key findings (150-200 words)" else ""} +3. 4-6 main sections covering: + - Current status/performance + - Key findings from data analysis + ${if (executionConfig.include_trend_analysis) "- Trend analysis and patterns" else ""} + ${if (executionConfig.include_comparative_analysis) "- Comparative analysis" else ""} + ${if (executionConfig.include_risk_assessment) "- Challenges and risks" else ""} + ${if (executionConfig.include_recommendations) "- Recommendations and next steps" else ""} + +For each section, specify: +- Section title and purpose +- Key points to cover +- Relevant metrics to include +- Estimated word count + +${ + if (executionConfig.include_visualizations) { + """Also suggest 3-5 data visualizations: +- Type of chart/graph (line chart, bar chart, pie chart, table, etc.) +- What data to visualize +- Purpose of the visualization +- Where to place it in the report""" + } else "" + } + +Structure should be appropriate for ${executionConfig.target_audience} with a ${executionConfig.tone} tone. 
+ """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val outline = outlineAgent.answer(listOf("Create outline")).obj + log.info("Created outline with ${outline.sections.size} sections") + + val outlineContent = buildString { + appendLine("## ${outline.title}") + appendLine() + if (outline.executive_summary.isNotBlank()) { + appendLine("### Executive Summary") + appendLine(outline.executive_summary) + appendLine() + appendLine("---") + appendLine() + } + appendLine("### Report Sections") + outline.sections.forEach { section -> + appendLine("#### ${section.section_number}. ${section.title}") + appendLine() + appendLine("**Purpose:** ${section.purpose}") + appendLine() + appendLine("**Key Points:**") + section.key_points.forEach { point -> + appendLine("- $point") + } + appendLine() + if (section.metrics.isNotEmpty()) { + appendLine("**Metrics:** ${section.metrics.joinToString(", ")}") + appendLine() + } + appendLine("**Est. Words:** ${section.estimated_word_count}") + appendLine() + appendLine("---") + appendLine() + } + if (outline.visualization_suggestions.isNotEmpty()) { + appendLine("### Suggested Visualizations") + outline.visualization_suggestions.forEach { viz -> + appendLine("- **${viz.type.replace("_", " ").capitalize()}:** ${viz.data_description}") + appendLine(" - Purpose: ${viz.purpose}") + appendLine(" - Placement: ${viz.placement}") + appendLine() + } + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + markdownTranscript?.write(outlineContent.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + outlineTask.add(outlineContent.renderMarkdown) + task.update() + + overviewTask.add("✅ Phase 2 Complete: Outline created (${outline.sections.size} sections)\n".renderMarkdown) + overviewTask.add("\n### Phase 3: Content Generation\n*Writing report sections...*\n".renderMarkdown) + task.update() + + // Phase 3: Generate Each Section + log.info("Phase 3: Generating 
report sections") + val generatedSections = mutableListOf() + var cumulativeWordCount = 0 + + outline.sections.forEachIndexed { index, sectionOutline -> + log.info("Generating section ${index + 1}/${outline.sections.size}: ${sectionOutline.title}") + + overviewTask.add("- Section ${sectionOutline.section_number}: ${sectionOutline.title} ".renderMarkdown) + task.update() + + val sectionTask = task.ui.newTask(false) + tabs["Section ${sectionOutline.section_number}"] = sectionTask.placeholder + + sectionTask.add( + buildString { + appendLine("# Section ${sectionOutline.section_number}: ${sectionOutline.title}") + appendLine() + appendLine("**Status:** Writing section...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Build context from previous sections + val previousContext = if (generatedSections.isNotEmpty()) { + buildString { + appendLine("## Previous Sections Summary") + generatedSections.takeLast(1).forEach { prevSection -> + appendLine("### ${prevSection.title}") + appendLine("Key insights: ${prevSection.key_insights.joinToString("; ")}") + appendLine() + } + } + } else { + "This is the first section." + } + + // Find relevant data analyses for this section + val relevantAnalyses = dataAnalyses.filter { analysis -> + sectionOutline.metrics.any { metric -> + analysis.metric_name.contains(metric, ignoreCase = true) || + metric.contains(analysis.metric_name, ignoreCase = true) + } + } + + val sectionAgent = ParsedAgent( + resultClass = GeneratedSection::class.java, + prompt = """ +You are a professional business report writer. Write Section ${sectionOutline.section_number} of the report. 
+ +Report Title: ${outline.title} +Report Type: ${executionConfig.report_type} +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} + +Section Details: +- Title: ${sectionOutline.title} +- Purpose: ${sectionOutline.purpose} +- Target Word Count: ${sectionOutline.estimated_word_count} + +Key Points to Cover: +${sectionOutline.key_points.joinToString("\n") { "- $it" }} + +Relevant Data Analysis: +${relevantAnalyses.joinToString("\n") { "- ${it.metric_name}: ${it.interpretation}" }} + +$previousContext + +Write a complete section that: +1. Opens with a clear topic statement +2. Presents data and findings clearly +3. Uses specific numbers and metrics +${ + if (executionConfig.include_visualizations && outline.visualization_suggestions.any { + it.placement.contains( + sectionOutline.title, + ignoreCase = true + ) + }) { + "4. References suggested visualizations with [Chart: description] placeholders" + } else "" + } +5. Provides interpretation and context +6. Connects to the overall report narrative +7. Maintains a ${executionConfig.tone} tone appropriate for ${executionConfig.target_audience} + +After writing, provide: +- The section content +- Actual word count +- 3-5 key insights from this section + +Be specific, data-driven, and actionable. 
+ """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var generatedSection = sectionAgent.answer(listOf("Write section")).obj + generatedSections.add(generatedSection) + cumulativeWordCount += generatedSection.word_count + + sectionTask.add( + buildString { + appendLine("## ${sectionOutline.title}") + appendLine() + appendLine(generatedSection.content) + appendLine() + appendLine("---") + appendLine() + appendLine("**Word Count:** ${generatedSection.word_count}") + appendLine() + appendLine("**Key Insights:**") + generatedSection.key_insights.forEach { insight -> + appendLine("- $insight") + } + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + markdownTranscript?.write(sectionTask.toString().toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + + resultBuilder.append("## ${sectionOutline.title}\n\n") + resultBuilder.append(generatedSection.content) + resultBuilder.append("\n\n") + + overviewTask.add("✅ (${generatedSection.word_count} words)\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 3 Complete: All sections written\n".renderMarkdown) + + // Phase 4: Recommendations (if enabled) + if (executionConfig.include_recommendations) { + overviewTask.add("\n### Phase 4: Recommendations\n*Generating actionable recommendations...*\n".renderMarkdown) + task.update() + + log.info("Phase 4: Generating recommendations") + val recommendationsTask = task.ui.newTask(false) + tabs["Recommendations"] = recommendationsTask.placeholder + + recommendationsTask.add( + buildString { + appendLine("# Recommendations") + appendLine() + appendLine("**Status:** Generating actionable recommendations...") + appendLine() + }.renderMarkdown + ) + task.update() + + val recommendationAgent = ParsedAgent( + resultClass = RecommendationSet::class.java, + prompt = """ +You are a strategic business advisor. Based on the report findings, provide actionable recommendations. 
+ +Report Topic: $reportTopic +Report Type: ${executionConfig.report_type} +Target Audience: ${executionConfig.target_audience} + +Key Findings: +${outline.key_findings.joinToString("\n") { "- $it" }} + +Data Analysis Summary: +${dataAnalyses.take(5).joinToString("\n") { "- ${it.metric_name}: ${it.interpretation}" }} + +Section Insights: +${generatedSections.flatMap { it.key_insights }.take(10).joinToString("\n") { "- $it" }} + +Provide 3-5 prioritized recommendations that: +- Are specific and actionable +- Address the key findings and challenges +- Are realistic and achievable +- Have clear expected impact +- Include implementation timeline +- Specify required resources + +For each recommendation, provide: +- Priority level (high, medium, low) +- The specific action to take +- Rationale based on the data +- Expected impact or benefit +- Suggested timeline +- Resources needed + +Tailor recommendations to ${executionConfig.target_audience}. + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val recommendations = recommendationAgent.answer(listOf("Generate recommendations")).obj.recommendations + log.info("Generated ${recommendations.size} recommendations") + + val recommendationsContent = buildString { + appendLine("## Actionable Recommendations") + appendLine() + recommendations.sortedByDescending { + when (it.priority.lowercase()) { + "high" -> 3 + "medium" -> 2 + else -> 1 + } + }.forEach { rec -> + val priorityIcon = when (rec.priority.lowercase()) { + "high" -> "🔴" + "medium" -> "🟡" + else -> "🟢" + } + appendLine("### $priorityIcon ${rec.action}") + appendLine() + appendLine("**Priority:** ${rec.priority}") + appendLine() + appendLine("**Rationale:** ${rec.rationale}") + appendLine() + appendLine("**Expected Impact:** ${rec.expected_impact}") + appendLine() + appendLine("**Timeline:** ${rec.timeline}") + appendLine() + if (rec.resources_required.isNotEmpty()) { + appendLine("**Resources 
Required:**") + rec.resources_required.forEach { resource -> + appendLine("- $resource") + } + appendLine() + } + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + recommendationsTask.add(recommendationsContent.renderMarkdown) + markdownTranscript?.write(recommendationsContent.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + + resultBuilder.append("## Recommendations\n\n") + recommendations.forEach { rec -> + resultBuilder.append("### ${rec.action}\n") + resultBuilder.append("**Priority:** ${rec.priority} | **Timeline:** ${rec.timeline}\n\n") + resultBuilder.append("${rec.rationale}\n\n") + resultBuilder.append("**Expected Impact:** ${rec.expected_impact}\n\n") + } + + overviewTask.add("✅ Phase 4 Complete: ${recommendations.size} recommendations generated\n".renderMarkdown) + } + + // Phase 5: Risk Assessment (if enabled) + if (executionConfig.include_risk_assessment) { + overviewTask.add("\n### Phase 5: Risk Assessment\n*Identifying risks and challenges...*\n".renderMarkdown) + task.update() + + log.info("Phase 5: Generating risk assessment") + val riskTask = task.ui.newTask(false) + tabs["Risk Assessment"] = riskTask.placeholder + + riskTask.add( + buildString { + appendLine("# Risk Assessment") + appendLine() + appendLine("**Status:** Analyzing risks and challenges...") + appendLine() + }.renderMarkdown + ) + task.update() + + val riskAgent = ParsedAgent( + resultClass = RiskAssessment::class.java, + prompt = """ +You are a risk management expert. Identify and assess risks based on the report findings. 
+ +Report Topic: $reportTopic +Report Type: ${executionConfig.report_type} + +Key Findings: +${outline.key_findings.joinToString("\n") { "- $it" }} + +Data Analysis: +${ + dataAnalyses.filter { it.significance.lowercase() in setOf("critical", "important") } + .joinToString("\n") { "- ${it.metric_name}: ${it.interpretation}" } + } + +Identify 3-5 key risks or challenges, including: +- Operational risks +- Financial risks +- Strategic risks +- Technical risks (if applicable) + +For each risk, provide: +- Category (operational, financial, strategic, technical) +- Clear description of the risk +- Likelihood (high, medium, low) +- Potential impact (high, medium, low) +- Mitigation strategies + +Be realistic and specific. Focus on risks that ${executionConfig.target_audience} should be aware of. + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + val riskAssessment = riskAgent.answer(listOf("Assess risks")).obj.risks + log.info("Identified ${riskAssessment.size} risks") + + val riskContent = buildString { + appendLine("## Identified Risks & Challenges") + appendLine() + riskAssessment.sortedByDescending { + val likelihoodScore = when (it.likelihood.lowercase()) { + "high" -> 3 + "medium" -> 2 + else -> 1 + } + val impactScore = when (it.impact.lowercase()) { + "high" -> 3 + "medium" -> 2 + else -> 1 + } + likelihoodScore * impactScore + }.forEach { risk -> + val riskLevel = when { + risk.likelihood.lowercase() == "high" && risk.impact.lowercase() == "high" -> "🔴 Critical" + risk.likelihood.lowercase() == "high" || risk.impact.lowercase() == "high" -> "🟡 Significant" + else -> "🟢 Moderate" + } + appendLine("### $riskLevel - ${risk.category.capitalize()} Risk") + appendLine() + appendLine("**Description:** ${risk.description}") + appendLine() + appendLine("**Likelihood:** ${risk.likelihood} | **Impact:** ${risk.impact}") + appendLine() + appendLine("**Mitigation:** ${risk.mitigation}") + appendLine() + 
appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + riskTask.add(riskContent.renderMarkdown) + markdownTranscript?.write(riskContent.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + + resultBuilder.append("## Risk Assessment\n\n") + riskAssessment.forEach { risk -> + resultBuilder.append("### ${risk.category.capitalize()} Risk: ${risk.description.take(100)}\n") + resultBuilder.append("**Likelihood:** ${risk.likelihood} | **Impact:** ${risk.impact}\n\n") + resultBuilder.append("**Mitigation:** ${risk.mitigation}\n\n") + } + + overviewTask.add("✅ Phase 5 Complete: ${riskAssessment.size} risks identified\n".renderMarkdown) + } + + // Phase 6: Revision (if enabled) + if (executionConfig.revision_passes > 0) { + overviewTask.add("\n### Phase 6: Revision\n*Refining and polishing report...*\n".renderMarkdown) + task.update() + + log.info("Phase 6: Performing ${executionConfig.revision_passes} revision pass(es)") + val revisionTask = task.ui.newTask(false) + tabs["Revision"] = revisionTask.placeholder + + revisionTask.add( + buildString { + appendLine("# Revision Process") + appendLine() + appendLine("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...") + appendLine() + }.renderMarkdown + ) + task.update() + + val fullReport = resultBuilder.toString() + + repeat(executionConfig.revision_passes) { passNum -> + log.debug("Revision pass ${passNum + 1}/${executionConfig.revision_passes}") + + val revisionAgent = ChatAgent( + prompt = """ +You are an expert business report editor. Review and improve this report. + +Current Report: +$fullReport + +Focus on: +1. Clarity and conciseness +2. Data presentation and interpretation +3. Logical flow between sections +4. Consistency in tone (${executionConfig.tone}) +5. Actionability of recommendations +6. Professional formatting and structure +7. 
Appropriateness for ${executionConfig.target_audience} + +Maintain: +- All key data points and metrics +- The core findings and recommendations +- Approximate word count ($cumulativeWordCount words) +- The ${executionConfig.tone} tone + +Provide the complete revised report. + """.trimIndent(), + model = api, + temperature = 0.6 + ) + + val revisedReport = revisionAgent.answer(listOf("Revise the report")) + resultBuilder.clear() + resultBuilder.append(revisedReport) + + revisionTask.add( + buildString { + appendLine("## Revision Pass ${passNum + 1}") + appendLine() + appendLine("✅ Complete") + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("## Revision Pass ${passNum + 1}\n\n✅ Complete\n\n".toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + } + + overviewTask.add("✅ Phase 6 Complete: ${executionConfig.revision_passes} revision pass(es) completed\n".renderMarkdown) + } + + // Phase 7: Final Assembly + overviewTask.add("\n### Phase 7: Final Assembly\n*Compiling complete report...*\n".renderMarkdown) + task.update() + + log.info("Phase 7: Assembling final report") + val finalTask = task.ui.newTask(false) + tabs["Complete Report"] = finalTask.placeholder + + val finalReport = buildString { + appendLine("# ${outline.title}") + appendLine() + appendLine("**Report Type:** ${executionConfig.report_type.replace("_", " ").capitalize()}") + appendLine() + appendLine("**Period:** ${executionConfig.time_period ?: "Current"}") + appendLine() + appendLine("**Prepared for:** ${executionConfig.target_audience.capitalize()}") + appendLine() + appendLine("**Date:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("MMMM d, yyyy"))}") + appendLine() + appendLine("---") + appendLine() + if (executionConfig.include_executive_summary && outline.executive_summary.isNotBlank()) { + appendLine("## Executive Summary") + appendLine() + appendLine(outline.executive_summary) + appendLine() + appendLine("### Key Findings") + 
outline.key_findings.forEach { finding -> + appendLine("- $finding") + } + appendLine() + appendLine("---") + appendLine() + } + appendLine(resultBuilder.toString()) + appendLine() + appendLine("---") + appendLine() + appendLine("**Total Word Count:** $cumulativeWordCount") + appendLine() + appendLine("**Report Generated:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + } + + finalTask.add(finalReport.renderMarkdown) + markdownTranscript?.write(finalReport.toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Total Word Count: $cumulativeWordCount") + appendLine("- Target Word Count: ${executionConfig.target_word_count}") + appendLine("- Completion: ${(cumulativeWordCount.toFloat() / executionConfig.target_word_count * 100).toInt()}%") + appendLine("- Number of Sections: ${generatedSections.size}") + appendLine("- Metrics Analyzed: ${dataAnalyses.size}") + if (executionConfig.include_visualizations) { + appendLine("- Visualizations Suggested: ${outline.visualization_suggestions.size}") + } + appendLine("- Revision Passes: ${executionConfig.revision_passes}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + markdownTranscript?.write(overviewTask.toString().toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# Report Generation Summary: ${outline.title}") + appendLine() + appendLine( + "A complete ${ + executionConfig.report_type.replace( + "_", + " " + ) + } report of **$cumulativeWordCount 
words** was generated in **${totalTime / 1000.0}s**." + ) + appendLine() + appendLine("**Key Highlights:**") + appendLine("- ${dataAnalyses.size} metrics analyzed") + appendLine("- ${generatedSections.size} sections written") + if (executionConfig.include_recommendations) { + appendLine("- Actionable recommendations provided") + } + if (executionConfig.include_risk_assessment) { + appendLine("- Risk assessment completed") + } + appendLine() + appendLine("> The full report is available in the Complete Report tab for detailed review.") + } + + log.info("ReportGenerationTask completed: words=$cumulativeWordCount, sections=${generatedSections.size}, time=${totalTime}ms") + markdownTranscript?.write("\n\n---\n\n# Final Result\n\n${finalResult}".toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + markdownTranscript?.close() + + task.safeComplete("Report generation complete: $cumulativeWordCount words in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during report generation", e) + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + markdownTranscript?.write("\n\n---\n\n# Error\n\n**Error:** ${e.message}\n\n**Type:** ${e.javaClass.simpleName}\n".toByteArray(java.nio.charset.StandardCharsets.UTF_8)) + task.update() + + val errorOutput = buildString { + appendLine("# Error in Report Generation") + appendLine() + appendLine("**Topic:** $reportTopic") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + markdownTranscript?.close() + resultFn(errorOutput) + } + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + 
.flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun getRelatedContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Data Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun isTextFile(file: java.io.File): Boolean { + val textExtensions = setOf( + "txt", "md", "kt", "java", "js", "ts", "py", "rb", "go", "rs", "c", "cpp", "h", "hpp", + "css", "html", "xml", "json", "yaml", "yml", "properties", "gradle", "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + private fun extractDocumentContent(file: java.io.File) = try { + file.getReader().use { reader -> + when (reader) { + is 
com.simiacryptus.cognotik.input.PaginatedDocumentReader -> reader.getText(0, reader.getPageCount()) + else -> reader.getText() + } + } + } catch (e: Exception) { + log.warn("Failed to extract content from ${file.name}, falling back to raw text", e) + try { + file.readText() + } catch (e2: Exception) { + "Error reading file: ${e2.message}" + } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing detailed report to $link html pdf" + ) + return markdownTranscript + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(ReportGenerationTask::class.java) + val ReportGeneration = TaskType( + "ReportGeneration", + ReportGenerationTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate comprehensive business reports with data analysis and recommendations", + """ + Generates complete, professional business reports with structured analysis. +
      +
    • Analyzes metrics and data points with trend analysis +
    • Creates structured report outline with multiple sections +
    • Generates executive summary/dashboard for quick insights +
    • Writes detailed sections with data-driven content +
    • Provides actionable recommendations based on findings +
    • Includes risk assessment and mitigation strategies +
    • Suggests data visualizations (charts, graphs, tables) +
    • Supports multiple report types (status updates, quarterly reviews, incident reports) +
    • Tailors content to target audience (executives, team members, stakeholders) +
    • Optional revision passes for quality improvement +
    • Ideal for business reporting, performance analysis, project summaries +
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ScriptwritingTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ScriptwritingTask.kt new file mode 100644 index 000000000..d49631355 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/ScriptwritingTask.kt @@ -0,0 +1,1247 @@ +package com.simiacryptus.cognotik.plan.tools.writing + + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.File +import java.io.FileOutputStream +import java.nio.file.FileSystems +import java.nio.file.Path +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class ScriptwritingTask( + orchestrationConfig: OrchestrationConfig, + planTask: ScriptwritingTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + protected val codeFiles = mutableMapOf() + + class ScriptwritingTaskExecutionConfigData( + @Description("The topic or subject of the script") + val topic: String? 
= null, + + @Description("The type of script to generate") + val script_type: String = "video", + + @Description("Target duration in minutes") + val target_duration_minutes: Int = 5, + + @Description("The intended audience for the script") + val target_audience: String = "general public", + + @Description("The tone of the script") + val tone: String = "professional", + + @Description("Whether to include visual/scene directions") + val include_directions: Boolean = true, + + @Description("Whether to include timing markers") + val include_timing: Boolean = true, + + @Description("Whether to suggest B-roll or supporting visuals") + val suggest_b_roll: Boolean = true, + + @Description("Whether to include speaker notes or production notes") + val include_notes: Boolean = true, + + @Description("Whether to mark key points for emphasis or graphics") + val mark_key_points: Boolean = true, + + @Description("The pacing style") + val pacing: String = "moderate", + + @Description("Whether to include an opening hook") + val include_hook: Boolean = true, + + @Description("Whether to include a call-to-action") + val include_cta: Boolean = true, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, + + + @Description("Number of revision passes") + val revision_passes: Int = 1, + + @Description("Related files or research to incorporate") + val related_files: List? = null, + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? = TaskState.Pending, + ) : TaskExecutionConfig( + task_type = Scriptwriting.name, + task_description = task_description ?: "Generate script for: '$topic'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? 
{ + if (topic.isNullOrBlank()) { + return "topic must not be null or blank" + } + if (target_duration_minutes <= 0 || target_duration_minutes > 180) { + return "target_duration_minutes must be between 1 and 180, got: $target_duration_minutes" + } + if (script_type.isBlank()) { + return "script_type must not be blank" + } + if (tone.isBlank()) { + return "tone must not be blank" + } + val validPacing = setOf("slow", "moderate", "fast", "dynamic") + if (pacing.lowercase() !in validPacing) { + return "pacing must be one of: ${validPacing.joinToString(", ")}, got: $pacing" + } + if (revision_passes < 0 || revision_passes > 5) { + return "revision_passes must be between 0 and 5, got: $revision_passes" + } + return ValidatedObject.validateFields(this) + } + } + + data class ScriptOutline( + @Description("The script title") + val title: String = "", + @Description("Opening hook or attention grabber") + val hook: String = "", + @Description("Main sections of the script") + val sections: List<ScriptSection> = emptyList(), + @Description("Closing and call-to-action") + val closing: String = "", + @Description("Estimated total duration in seconds") + val estimated_duration_seconds: Int = 0, + @Description("Key messages to convey") + val key_messages: List<String> = emptyList() + ) : ValidatedObject { + override fun validate(): String?
{ + if (title.isBlank()) return "title must not be blank" + if (sections.isEmpty()) return "sections must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class ScriptSection( + @Description("Section number") + val section_number: Int = 1, + @Description("Section title or purpose") + val title: String = "", + @Description("Key points to cover in this section") + val key_points: List<String> = emptyList(), + @Description("Visual elements or B-roll suggestions") + val visual_suggestions: List<String> = emptyList(), + @Description("Estimated duration in seconds") + val estimated_duration_seconds: Int = 0 + ) : ValidatedObject { + override fun validate(): String? { + if (section_number < 1) return "section_number must be positive" + if (title.isBlank()) return "title must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class ScriptSegment( + @Description("Segment type") + val segment_type: String = "", + @Description("The spoken dialogue or narration") + val dialogue: String = "", + @Description("Visual directions or scene description") + val visual_direction: String = "", + @Description("B-roll or supporting visual suggestions") + val b_roll_suggestions: List<String> = emptyList(), + @Description("Production notes or speaker notes") + val notes: String = "", + @Description("Timing marker in MM:SS format") + val timing: String = "", + @Description("Key points marked for emphasis or graphics") + val key_points_marked: List<String> = emptyList(), + @Description("Estimated duration in seconds") + val duration_seconds: Int = 0 + ) : ValidatedObject { + override fun validate(): String?
{ + if (dialogue.isBlank()) return "dialogue must not be blank" + return ValidatedObject.validateFields(this) + } + } + + override fun promptSegment(): String { + return """ + Scriptwriting - Generate complete scripts for videos, podcasts, and presentations + ** Optionally, list input files (supports glob patterns) to be examined when generating the script + ** Available files: + ${getAvailableFiles(root).joinToString("\n") { " - $it" }} + ** Specify the topic and script type (video, podcast, presentation, etc.) + ** Set target duration and audience + ** Configure tone and pacing + ** Specify the topic and script type (video, podcast, presentation, etc.) + ** Set target duration and audience + ** Configure tone and pacing + ** Include visual directions, timing markers, and B-roll suggestions + ** Mark key points for emphasis or graphics + ** Add speaker notes and production notes + ** Performs outline creation, segment writing, and timing calculation + ** Produces complete, production-ready script with all necessary elements + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + log.info("Starting ScriptwritingTask for topic: '${executionConfig?.topic}'") + val markdownTranscript = transcript(task) + + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + markdownTranscript?.close() + resultFn("CONFIGURATION ERROR: $validationError") + markdownTranscript?.close() + return + } + + val topic = executionConfig?.topic + if (topic.isNullOrBlank()) { + log.error("No topic specified for scriptwriting") + task.safeComplete("CONFIGURATION ERROR: No topic 
specified", log) + markdownTranscript?.close() + resultFn("CONFIGURATION ERROR: No topic specified") + markdownTranscript?.close() + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Script Generation") + appendLine() + appendLine("**Topic:** $topic") + appendLine() + } + markdownTranscript?.write(overviewContent.toByteArray()) + markdownTranscript?.write("\n".toByteArray()) + overviewTask.add(overviewContent.renderMarkdown) + task.update() + val overviewContent2 = buildString { + appendLine() + appendLine("## Configuration") + appendLine("- Script Type: ${executionConfig.script_type}") + appendLine("- Target Duration: ${executionConfig.target_duration_minutes} minutes") + appendLine("- Target Audience: ${executionConfig.target_audience}") + appendLine("- Tone: ${executionConfig.tone}") + appendLine("- Pacing: ${executionConfig.pacing}") + appendLine("- Include Directions: ${if (executionConfig.include_directions) "✓" else "✗"}") + appendLine("- Include Timing: ${if (executionConfig.include_timing) "✓" else "✗"}") + appendLine("- Suggest B-Roll: ${if (executionConfig.suggest_b_roll) "✓" else "✗"}") + appendLine("- Include Notes: ${if (executionConfig.include_notes) "✓" else "✗"}") + appendLine("- Mark Key Points: ${if (executionConfig.mark_key_points) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Research & Outline") + appendLine("*Analyzing topic and creating script structure...*") + } + markdownTranscript?.write(overviewContent2.toByteArray()) + markdownTranscript?.write(overviewContent2.toByteArray()) + 
markdownTranscript?.write("\n".toByteArray()) + overviewTask.add(overviewContent2.renderMarkdown) + task.update() + + val resultBuilder = StringBuilder() + markdownTranscript?.write("# Research Context\n\n".toByteArray()) + markdownTranscript?.write("Context loaded from prior tasks and related files.\n\n".toByteArray()) + resultBuilder.append("# Script: $topic\n\n") + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) + val contextFiles = getContextFiles() + + if (priorContext.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Research Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Research Context") + appendLine() + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Related Files") + appendLine(contextFiles.truncateForDisplay(2000)) + } + }.renderMarkdown + ) + task.update() + } + markdownTranscript?.write("# Research Context\n\n".toByteArray()) + + // Phase 1: Create outline + log.info("Phase 1: Creating script outline") + markdownTranscript?.write("# Script Outline\n\n".toByteArray()) + val outlineTask = task.ui.newTask(false) + tabs["Outline"] = outlineTask.placeholder + + outlineTask.add( + buildString { + appendLine("# Script Outline") + appendLine() + appendLine("**Status:** Creating structured outline...") + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("# Script Outline\n\n".toByteArray()) + markdownTranscript?.write("Creating structured outline...\n\n".toByteArray()) + task.update() + + val targetDurationSeconds = executionConfig.target_duration_minutes * 60 + val wordsPerMinute = when (executionConfig.pacing.lowercase()) { + "slow" -> 120 + "moderate" -> 150 + "fast" -> 
180 + "dynamic" -> 160 + else -> 150 + } + val targetWordCount = executionConfig.target_duration_minutes * wordsPerMinute + + val outlineAgent = ParsedAgent( + resultClass = ScriptOutline::class.java, + prompt = """ +You are an expert scriptwriter specializing in ${executionConfig.script_type} scripts. Create a detailed outline for this script. + +Topic: $topic + +Script Type: ${executionConfig.script_type} +Target Duration: ${executionConfig.target_duration_minutes} minutes (~$targetDurationSeconds seconds) +Target Audience: ${executionConfig.target_audience} +Tone: ${executionConfig.tone} +Pacing: ${executionConfig.pacing} (~$wordsPerMinute words per minute) +Target Word Count: ~$targetWordCount words + +${if (priorContext.isNotBlank()) "Research Context:\n${priorContext.truncateForDisplay(3000)}\n" else ""} +${if (contextFiles.isNotBlank()) "Additional Research:\n${contextFiles.truncateForDisplay(3000)}\n" else ""} + +Create an outline with: +1. A compelling title +${if (executionConfig.include_hook) "2. An attention-grabbing opening hook (10-15 seconds)" else ""} +3. 3-5 main sections that logically progress through the topic +4. Key points to cover in each section +${if (executionConfig.suggest_b_roll) "5. Visual suggestions for each section" else ""} +6. Estimated duration for each section +${if (executionConfig.include_cta) "7. A strong closing with call-to-action" else "7. 
A memorable closing"} + +Ensure the outline: +- Flows logically from introduction to conclusion +- Maintains the ${executionConfig.tone} tone throughout +- Fits within the ${executionConfig.target_duration_minutes}-minute timeframe +- Engages the ${executionConfig.target_audience} +- Balances information delivery with entertainment/engagement + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var outline = outlineAgent.answer(listOf("Generate outline")).obj + + // Validate outline + outline.validate()?.let { validationError -> + log.error("Outline validation failed: $validationError") + outlineTask.error(ValidatedObject.ValidationError(validationError, outline)) + task.safeComplete("Outline validation failed: $validationError", log) + resultFn("ERROR: Outline validation failed: $validationError") + return + } + + log.info("Generated outline: ${outline.sections.size} sections, ${outline.estimated_duration_seconds}s estimated") + + val outlineContent = buildString { + appendLine("## ${outline.title}") + appendLine() + if (executionConfig.include_hook && outline.hook.isNotBlank()) { + appendLine("### Opening Hook") + appendLine(outline.hook) + appendLine() + } + appendLine("### Key Messages") + outline.key_messages.forEach { message -> + appendLine("- $message") + } + appendLine() + appendLine("---") + appendLine() + appendLine("### Script Sections") + outline.sections.forEach { section -> + appendLine("#### Section ${section.section_number}: ${section.title}") + appendLine() + appendLine("**Duration:** ~${section.estimated_duration_seconds}s") + appendLine() + appendLine("**Key Points:**") + section.key_points.forEach { point -> + appendLine("- $point") + } + appendLine() + if (section.visual_suggestions.isNotEmpty()) { + appendLine("**Visual Suggestions:**") + section.visual_suggestions.forEach { visual -> + appendLine("- $visual") + } + appendLine() + } + appendLine("---") + appendLine() + } + 
appendLine("### Closing") + appendLine(outline.closing) + appendLine() + appendLine("**Total Estimated Duration:** ${outline.estimated_duration_seconds}s (${outline.estimated_duration_seconds / 60}m ${outline.estimated_duration_seconds % 60}s)") + appendLine() + appendLine("**Status:** ✅ Complete") + } + outlineTask.add(outlineContent.renderMarkdown) + markdownTranscript?.write(outlineContent.toByteArray()) + markdownTranscript?.write(outlineContent.toByteArray()) + markdownTranscript?.write("\n".toByteArray()) + task.update() + + overviewTask.add("✅ Phase 1 Complete: Outline created (${outline.sections.size} sections)\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Script Writing\n*Writing detailed script segments...*\n".renderMarkdown) + task.update() + + // Phase 2: Write script segments + log.info("Phase 2: Writing script segments") + val scriptSegments = mutableListOf() + var cumulativeDuration = 0 + var cumulativeWordCount = 0 + + // Write opening if hook is included + if (executionConfig.include_hook && outline.hook.isNotBlank()) { + log.info("Writing opening hook") + overviewTask.add("- Opening Hook ".renderMarkdown) + task.update() + + val hookTask = task.ui.newTask(false) + tabs["Opening"] = hookTask.placeholder + + hookTask.add( + buildString { + appendLine("# Opening Hook") + appendLine() + appendLine("**Status:** Writing opening...") + appendLine() + }.renderMarkdown + ) + task.update() + + val hookAgent = ParsedAgent( + resultClass = ScriptSegment::class.java, + prompt = """ +You are an expert scriptwriter. Write the opening hook for this ${executionConfig.script_type} script. + +Topic: $topic +Hook Concept: ${outline.hook} +Tone: ${executionConfig.tone} +Target Audience: ${executionConfig.target_audience} +Target Duration: 10-15 seconds + +Write an opening that: +1. Immediately grabs attention +2. Sets the tone for the entire script +3. Hints at what's coming +4. 
Is conversational and natural for spoken delivery +${if (executionConfig.include_directions) "5. Includes visual direction for what the viewer sees" else ""} +${if (executionConfig.suggest_b_roll) "6. Suggests B-roll or supporting visuals" else ""} + +Make it punchy, engaging, and memorable. +Ensure the dialogue sounds natural when spoken aloud. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var hookSegment = hookAgent.answer(listOf("Write opening")).obj + scriptSegments.add(hookSegment) + cumulativeDuration += hookSegment.duration_seconds + cumulativeWordCount += hookSegment.dialogue.split("\\s+".toRegex()).size + + hookTask.add( + buildString { + appendLine("## Opening Hook") + appendLine() + if (executionConfig.include_timing) { + appendLine("**[${formatTiming(0)}]**") + appendLine() + } + if (executionConfig.include_directions && hookSegment.visual_direction.isNotBlank()) { + appendLine("*${hookSegment.visual_direction}*") + appendLine() + } + appendLine(hookSegment.dialogue) + appendLine() + if (executionConfig.suggest_b_roll && hookSegment.b_roll_suggestions.isNotEmpty()) { + appendLine("**B-Roll:**") + hookSegment.b_roll_suggestions.forEach { broll -> + appendLine("- $broll") + } + appendLine() + } + if (executionConfig.include_notes && hookSegment.notes.isNotBlank()) { + appendLine("**Notes:** ${hookSegment.notes}") + appendLine() + } + appendLine("---") + appendLine() + appendLine("**Duration:** ${hookSegment.duration_seconds}s | **Words:** ${hookSegment.dialogue.split("\\s+".toRegex()).size}") + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + markdownTranscript?.write("# Opening Hook\n\n".toByteArray()) + markdownTranscript?.write("# Opening Hook\n\n".toByteArray()) + markdownTranscript?.write(hookSegment.dialogue.toByteArray()) + markdownTranscript?.write("\n\n".toByteArray()) + task.update() + + overviewTask.add("✅\n".renderMarkdown) + task.update() + } + 
+ // Write each section + outline.sections.forEachIndexed { index, sectionOutline -> + log.info("Writing section ${index + 1}/${outline.sections.size}: ${sectionOutline.title}") + + overviewTask.add("- Section ${sectionOutline.section_number}: ${sectionOutline.title} ".renderMarkdown) + task.update() + + val sectionTask = task.ui.newTask(false) + tabs["Section ${sectionOutline.section_number}"] = sectionTask.placeholder + + sectionTask.add( + buildString { + appendLine("# Section ${sectionOutline.section_number}: ${sectionOutline.title}") + appendLine() + appendLine("**Status:** Writing section...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Build context from previous segments + val previousContext = if (scriptSegments.isNotEmpty()) { + buildString { + appendLine("## Previous Script Context") + val lastSegments = scriptSegments.takeLast(2) + lastSegments.forEach { prevSegment -> + appendLine("**Previous Dialogue:**") + appendLine(prevSegment.dialogue.takeLast(200)) + appendLine() + } + appendLine("**Current Duration:** ${cumulativeDuration}s") + } + } else { + "This is the first main section." + } + + val sectionAgent = ParsedAgent( + resultClass = ScriptSegment::class.java, + prompt = """ +You are an expert scriptwriter. Write Section ${sectionOutline.section_number} of this ${executionConfig.script_type} script. + +Overall Topic: $topic +Section Title: ${sectionOutline.title} +Section Purpose: Cover these key points: ${sectionOutline.key_points.joinToString("; ")} +Target Duration: ${sectionOutline.estimated_duration_seconds} seconds +Tone: ${executionConfig.tone} +Pacing: ${executionConfig.pacing} + +$previousContext + +${if (sectionOutline.visual_suggestions.isNotEmpty()) "Visual Suggestions: ${sectionOutline.visual_suggestions.joinToString("; ")}" else ""} + +Write this section with: +1. Natural, conversational dialogue that sounds good when spoken +2. Clear transitions from the previous section +3. Logical flow through the key points +4. 
Appropriate pacing for ${executionConfig.pacing} delivery +${if (executionConfig.include_directions) "5. Visual directions for what appears on screen" else ""} +${if (executionConfig.suggest_b_roll) "6. B-roll suggestions to support the narration" else ""} +${if (executionConfig.include_notes) "7. Production notes for the speaker/director" else ""} +${if (executionConfig.mark_key_points) "8. Mark key points that should be emphasized or shown as graphics" else ""} + +Ensure the dialogue: +- Sounds natural when read aloud +- Uses contractions and conversational language +- Varies sentence length for rhythm +- Includes pauses where appropriate +- Maintains the ${executionConfig.tone} tone +- Engages the ${executionConfig.target_audience} + +Aim for approximately ${sectionOutline.estimated_duration_seconds} seconds of content. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var sectionSegment = sectionAgent.answer(listOf("Write section")).obj + scriptSegments.add(sectionSegment) + cumulativeDuration += sectionSegment.duration_seconds + cumulativeWordCount += sectionSegment.dialogue.split("\\s+".toRegex()).size + + sectionTask.add( + buildString { + appendLine("## ${sectionOutline.title}") + appendLine() + if (executionConfig.include_timing) { + appendLine("**[${formatTiming(cumulativeDuration - sectionSegment.duration_seconds)}]**") + appendLine() + } + if (executionConfig.include_directions && sectionSegment.visual_direction.isNotBlank()) { + appendLine("*${sectionSegment.visual_direction}*") + appendLine() + } + appendLine(sectionSegment.dialogue) + appendLine() + if (executionConfig.suggest_b_roll && sectionSegment.b_roll_suggestions.isNotEmpty()) { + appendLine("**B-Roll:**") + sectionSegment.b_roll_suggestions.forEach { broll -> + appendLine("- $broll") + } + appendLine() + } + if (executionConfig.mark_key_points && sectionSegment.key_points_marked.isNotEmpty()) { + appendLine("**Key Points for 
Graphics:**") + sectionSegment.key_points_marked.forEach { point -> + appendLine("- $point") + } + appendLine() + } + if (executionConfig.include_notes && sectionSegment.notes.isNotBlank()) { + appendLine("**Notes:** ${sectionSegment.notes}") + appendLine() + } + appendLine("---") + appendLine() + appendLine("**Duration:** ${sectionSegment.duration_seconds}s | **Words:** ${sectionSegment.dialogue.split("\\s+".toRegex()).size}") + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + markdownTranscript?.write("## Section ${sectionOutline.section_number}: ${sectionOutline.title}\n\n".toByteArray()) + markdownTranscript?.write("## Section ${sectionOutline.section_number}: ${sectionOutline.title}\n\n".toByteArray()) + markdownTranscript?.write(sectionSegment.dialogue.toByteArray()) + markdownTranscript?.write("\n\n".toByteArray()) + task.update() + + overviewTask.add("✅ (${sectionSegment.duration_seconds}s)\n".renderMarkdown) + task.update() + } + + // Write closing + log.info("Writing closing") + overviewTask.add("- Closing ".renderMarkdown) + task.update() + + val closingTask = task.ui.newTask(false) + tabs["Closing"] = closingTask.placeholder + + closingTask.add( + buildString { + appendLine("# Closing") + appendLine() + appendLine("**Status:** Writing closing...") + appendLine() + }.renderMarkdown + ) + task.update() + + val closingAgent = ParsedAgent( + resultClass = ScriptSegment::class.java, + prompt = """ +You are an expert scriptwriter. Write the closing for this ${executionConfig.script_type} script. + +Topic: $topic +Closing Concept: ${outline.closing} +Tone: ${executionConfig.tone} +Target Audience: ${executionConfig.target_audience} +${if (executionConfig.include_cta) "Include Call-to-Action: Yes" else ""} + +Key Messages Covered: +${outline.key_messages.joinToString("\n") { "- $it" }} + +Previous Script Context: +${scriptSegments.takeLast(1).firstOrNull()?.dialogue?.takeLast(200) ?: ""} + +Write a closing that: +1. 
Summarizes the key takeaways +2. Reinforces the main message +3. Leaves a lasting impression +${if (executionConfig.include_cta) "4. Includes a clear, compelling call-to-action" else "4. Ends on a strong note"} +5. Sounds natural and conversational +${if (executionConfig.include_directions) "6. Includes visual direction for the final shot" else ""} + +Make it memorable and motivating. +Target duration: 15-20 seconds. + """.trimIndent(), + model = api, + temperature = 0.8, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var closingSegment = closingAgent.answer(listOf("Write closing")).obj + scriptSegments.add(closingSegment) + cumulativeDuration += closingSegment.duration_seconds + cumulativeWordCount += closingSegment.dialogue.split("\\s+".toRegex()).size + + closingTask.add( + buildString { + appendLine("## Closing") + appendLine() + if (executionConfig.include_timing) { + appendLine("**[${formatTiming(cumulativeDuration - closingSegment.duration_seconds)}]**") + appendLine() + } + if (executionConfig.include_directions && closingSegment.visual_direction.isNotBlank()) { + appendLine("*${closingSegment.visual_direction}*") + appendLine() + } + appendLine(closingSegment.dialogue) + appendLine() + if (executionConfig.suggest_b_roll && closingSegment.b_roll_suggestions.isNotEmpty()) { + appendLine("**B-Roll:**") + closingSegment.b_roll_suggestions.forEach { broll -> + appendLine("- $broll") + } + appendLine() + } + if (executionConfig.include_notes && closingSegment.notes.isNotBlank()) { + appendLine("**Notes:** ${closingSegment.notes}") + appendLine() + } + appendLine("---") + appendLine() + appendLine("**Duration:** ${closingSegment.duration_seconds}s | **Words:** ${closingSegment.dialogue.split("\\s+".toRegex()).size}") + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + markdownTranscript?.write("# Closing\n\n".toByteArray()) + markdownTranscript?.write("# Closing\n\n".toByteArray()) + 
markdownTranscript?.write(closingSegment.dialogue.toByteArray()) + markdownTranscript?.write("\n\n".toByteArray()) + task.update() + + overviewTask.add("✅\n".renderMarkdown) + overviewTask.add("✅ Phase 2 Complete: All segments written\n".renderMarkdown) + task.update() + + // Phase 3: Revision (if enabled) + if (executionConfig.revision_passes > 0) { + overviewTask.add("\n### Phase 3: Revision\n*Refining script for flow and timing...*\n".renderMarkdown) + task.update() + + log.info("Phase 3: Performing ${executionConfig.revision_passes} revision pass(es)") + val revisionTask = task.ui.newTask(false) + tabs["Revision"] = revisionTask.placeholder + + revisionTask.add( + buildString { + appendLine("# Revision Process") + appendLine() + appendLine("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...") + appendLine() + }.renderMarkdown + ) + task.update() + + val fullScript = buildFullScript(outline, scriptSegments) + + repeat(executionConfig.revision_passes) { passNum -> + log.debug("Revision pass ${passNum + 1}/${executionConfig.revision_passes}") + + val revisionAgent = ChatAgent( + prompt = """ +You are an expert script editor. Review and improve this ${executionConfig.script_type} script. + +Current Script: +$fullScript + +Focus on: +1. Natural dialogue flow and conversational tone +2. Pacing and rhythm (target: ${executionConfig.pacing}) +3. Clarity and conciseness +4. Smooth transitions between sections +5. Timing accuracy (target: ${executionConfig.target_duration_minutes} minutes) +6. Engagement and audience connection +7. Consistency in tone (${executionConfig.tone}) + +Maintain: +- All key messages and content +- The overall structure +- Approximate duration ($cumulativeDuration seconds) +- The ${executionConfig.tone} tone + +Provide the complete revised script with all formatting intact. 
+ """.trimIndent(), + model = api, + temperature = 0.6 + ) + + revisionAgent.answer(listOf("Revise the script")) + + revisionTask.add( + buildString { + appendLine("## Revision Pass ${passNum + 1}") + appendLine() + appendLine("✅ Complete") + appendLine() + }.renderMarkdown + ) + task.update() + } + + overviewTask.add("✅ Phase 3 Complete: ${executionConfig.revision_passes} revision pass(es) completed\n".renderMarkdown) + } + + // Phase 4: Final Assembly + overviewTask.add("\n### Phase 4: Final Assembly\n*Compiling complete script...*\n".renderMarkdown) + task.update() + + log.info("Phase 4: Assembling final script") + val finalTask = task.ui.newTask(false) + tabs["Complete Script"] = finalTask.placeholder + + val finalScript = buildString { + appendLine("# ${outline.title}") + appendLine() + appendLine("**Script Type:** ${executionConfig.script_type.capitalize()}") + appendLine("**Duration:** ${formatTiming(cumulativeDuration)} (${cumulativeDuration}s)") + appendLine("**Word Count:** $cumulativeWordCount") + appendLine("**Tone:** ${executionConfig.tone.capitalize()}") + appendLine("**Target Audience:** ${executionConfig.target_audience}") + appendLine() + appendLine("---") + appendLine() + + var currentTime = 0 + scriptSegments.forEachIndexed { index, segment -> + if (executionConfig.include_timing) { + appendLine("**[${formatTiming(currentTime)}]**") + appendLine() + } + + if (executionConfig.include_directions && segment.visual_direction.isNotBlank()) { + appendLine("*${segment.visual_direction}*") + appendLine() + } + + appendLine(segment.dialogue) + appendLine() + + if (executionConfig.suggest_b_roll && segment.b_roll_suggestions.isNotEmpty()) { + appendLine("**B-Roll:**") + segment.b_roll_suggestions.forEach { broll -> + appendLine("- $broll") + } + appendLine() + } + + if (executionConfig.mark_key_points && segment.key_points_marked.isNotEmpty()) { + appendLine("**Key Points for Graphics:**") + segment.key_points_marked.forEach { point -> + appendLine("- 
$point") + } + appendLine() + } + + if (executionConfig.include_notes && segment.notes.isNotBlank()) { + appendLine("**Notes:** ${segment.notes}") + appendLine() + } + + currentTime += segment.duration_seconds + + if (index < scriptSegments.size - 1) { + appendLine("---") + appendLine() + } + } + + appendLine() + appendLine("---") + appendLine() + appendLine("**END OF SCRIPT**") + appendLine() + appendLine("**Total Duration:** ${formatTiming(cumulativeDuration)}") + appendLine("**Total Word Count:** $cumulativeWordCount") + appendLine("**Average Words Per Minute:** ${(cumulativeWordCount.toFloat() / (cumulativeDuration / 60f)).toInt()}") + } + + finalTask.add(finalScript.renderMarkdown) + markdownTranscript?.write("\n---\n\n# Complete Script\n\n".toByteArray()) + markdownTranscript?.write("\n---\n\n# Complete Script\n\n".toByteArray()) + markdownTranscript?.write(finalScript.toByteArray()) + markdownTranscript?.write("\n".toByteArray()) + task.update() + + // Production notes tab + if (executionConfig.include_notes) { + val productionNotesTask = task.ui.newTask(false) + tabs["Production Notes"] = productionNotesTask.placeholder + + val productionNotes = buildString { + appendLine("# Production Notes") + appendLine() + appendLine("## Script Overview") + appendLine("- **Total Duration:** ${formatTiming(cumulativeDuration)}") + appendLine("- **Total Segments:** ${scriptSegments.size}") + appendLine("- **Word Count:** $cumulativeWordCount") + appendLine("- **Average WPM:** ${(cumulativeWordCount.toFloat() / (cumulativeDuration / 60f)).toInt()}") + appendLine() + appendLine("## Timing Breakdown") + var segmentTime = 0 + scriptSegments.forEachIndexed { index, segment -> + val segmentType = when { + index == 0 && executionConfig.include_hook -> "Opening Hook" + index == scriptSegments.size - 1 -> "Closing" + else -> "Section ${index}" + } + appendLine("- **$segmentType:** ${formatTiming(segmentTime)} - ${formatTiming(segmentTime + segment.duration_seconds)} 
(${segment.duration_seconds}s)") + segmentTime += segment.duration_seconds + } + appendLine() + if (executionConfig.suggest_b_roll) { + appendLine("## B-Roll Requirements") + val allBRoll = scriptSegments.flatMap { it.b_roll_suggestions }.distinct() + if (allBRoll.isNotEmpty()) { + allBRoll.forEach { broll -> + appendLine("- $broll") + } + } else { + appendLine("*No specific B-roll requirements*") + } + appendLine() + } + if (executionConfig.mark_key_points) { + appendLine("## Graphics/Text Overlays") + val allKeyPoints = scriptSegments.flatMap { it.key_points_marked } + if (allKeyPoints.isNotEmpty()) { + allKeyPoints.forEach { point -> + appendLine("- $point") + } + } else { + appendLine("*No specific graphics requirements*") + } + appendLine() + } + appendLine("## Key Messages") + outline.key_messages.forEach { message -> + appendLine("- $message") + } + } + + productionNotesTask.add(productionNotes.renderMarkdown) + markdownTranscript?.write("\n---\n\n".toByteArray()) + markdownTranscript?.write("\n---\n\n".toByteArray()) + markdownTranscript?.write(productionNotes.toByteArray()) + markdownTranscript?.write("\n".toByteArray()) + task.update() + } + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + val targetDurationDiff = cumulativeDuration - targetDurationSeconds + val durationAccuracy = 100 - (Math.abs(targetDurationDiff).toFloat() / targetDurationSeconds * 100) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Total Duration: ${formatTiming(cumulativeDuration)} (${cumulativeDuration}s)") + appendLine("- Target Duration: ${formatTiming(targetDurationSeconds)} (${targetDurationSeconds}s)") + appendLine("- Duration Accuracy: ${durationAccuracy.toInt()}%") + appendLine("- Total Word Count: $cumulativeWordCount") + appendLine("- Average WPM: ${(cumulativeWordCount.toFloat() / 
(cumulativeDuration / 60f)).toInt()}") + appendLine("- Number of Segments: ${scriptSegments.size}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + markdownTranscript?.write("\n---\n\n## Generation Complete\n\n".toByteArray()) + markdownTranscript?.write("\n---\n\n## Generation Complete\n\n".toByteArray()) + markdownTranscript?.write("Script generation completed successfully.\n".toByteArray()) + task.update() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# Script Generation Summary: ${outline.title}") + appendLine() + appendLine("A complete ${executionConfig.script_type} script of **${formatTiming(cumulativeDuration)}** ($cumulativeWordCount words) was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("**Topic:** $topic") + appendLine() + appendLine("**Structure:**") + if (executionConfig.include_hook) { + appendLine("- Opening hook") + } + appendLine("- ${outline.sections.size} main sections") + appendLine("- Closing${if (executionConfig.include_cta) " with call-to-action" else ""}") + appendLine() + appendLine("**Duration Accuracy:** ${durationAccuracy.toInt()}% (target: ${executionConfig.target_duration_minutes}m)") + appendLine() + appendLine("> The complete script with all formatting, timing, and production notes is available in the Complete Script tab.") + } + + log.info("ScriptwritingTask completed: duration=${cumulativeDuration}s, words=$cumulativeWordCount, segments=${scriptSegments.size}, time=${totalTime}ms") + markdownTranscript?.close() + markdownTranscript?.close() + + task.safeComplete("Script generation complete: ${formatTiming(cumulativeDuration)} in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during script generation", e) + task.error(e) + + overviewTask.add( + buildString { + 
appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + markdownTranscript?.close() + + val errorOutput = buildString { + appendLine("# Error in Script Generation") + appendLine() + appendLine("**Topic:** $topic") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + markdownTranscript?.close() + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true + else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = if (!isTextFile(file)) { + extractDocumentContent(file) + } else { + codeFiles[file.toPath()] ?: file.readText() + } + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + private fun isTextFile(file: File): Boolean { + val textExtensions = setOf( + "txt", + "md", + "kt", + "java", + "js", + "ts", + "py", + "rb", + "go", + "rs", + "c", + 
"cpp", + "h", + "hpp", + "css", + "html", + "xml", + "json", + "yaml", + "yml", + "properties", + "gradle", + "maven" + ) + return textExtensions.contains(file.extension.lowercase()) + } + + + private fun getContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Research Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun formatTiming(seconds: Int): String { + val minutes = seconds / 60 + val remainingSeconds = seconds % 60 + return String.format("%02d:%02d", minutes, remainingSeconds) + } + + private fun buildFullScript(outline: ScriptOutline, segments: List): String { + return buildString { + appendLine("# ${outline.title}") + appendLine() + var currentTime = 0 + segments.forEach { segment -> + appendLine("[${formatTiming(currentTime)}]") + if (segment.visual_direction.isNotBlank()) { + appendLine("*${segment.visual_direction}*") + } + appendLine(segment.dialogue) + appendLine() + currentTime += segment.duration_seconds + } + } + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(ScriptwritingTask::class.java) + fun getAvailableFiles( + path: Path, + treatDocumentsAsText: Boolean = false, + ): List { + return try { + listOf(FileSelectionUtils.filteredWalkAsciiTree(path.toFile(), 20, treatDocumentsAsText = treatDocumentsAsText)) + } catch (e: Exception) { + log.error("Error listing available files", e) + 
listOf("Error listing files: ${e.message}") + } + } + + fun extractDocumentContent(file: File) = try { + file.readText() + } catch (e: Exception) { + "Error reading file: ${e.message}" + } + + val Scriptwriting = TaskType( + "Scriptwriting", + ScriptwritingTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Generate complete scripts for videos, podcasts, and presentations", + """ + Generates production-ready scripts with dialogue, timing, and production notes. +
      +
    • Creates detailed script outline with sections and timing
+    • Writes natural, conversational dialogue for spoken delivery
+    • Includes visual directions and scene descriptions
+    • Suggests B-roll and supporting visuals
+    • Marks key points for emphasis or graphics
+    • Provides timing markers and duration estimates
+    • Includes production notes and speaker guidance
+    • Supports multiple script types (video, podcast, presentation, commercial)
+    • Configurable tone, pacing, and audience targeting
+    • Optional revision passes for quality improvement
+    • Ideal for video production, podcasts, presentations, training videos
+    
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/TechnicalExplanationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/TechnicalExplanationTask.kt new file mode 100644 index 000000000..52b9ec293 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/TechnicalExplanationTask.kt @@ -0,0 +1,1082 @@ +package com.simiacryptus.cognotik.plan.tools.writing + + +import com.simiacryptus.cognotik.agents.ChatAgent +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.FileSelectionUtils +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import java.nio.charset.StandardCharsets +import java.nio.file.FileSystems +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + +class TechnicalExplanationTask( + orchestrationConfig: OrchestrationConfig, + planTask: TechnicalExplanationTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + class TechnicalExplanationTaskExecutionConfigData( + @Description("The complex technical subject to explain") + val topic: String? 
= null, + + @Description("Target audience expertise level (e.g., 'layperson', 'beginner', 'intermediate', 'expert', 'manager', 'software_engineer', 'data_scientist')") + val target_audience: String = "intermediate", + + @Description("Level of detail for the explanation (e.g., 'high_level_overview', 'moderate_detail', 'detailed_walkthrough', 'comprehensive')") + val level_of_detail: String = "moderate_detail", + + @Description("Whether to include code examples and snippets") + val include_code_examples: Boolean = true, + + @Description("Explanation format (e.g., 'markdown', 'q_and_a', 'step_by_step', 'narrative', 'tutorial')") + val explanation_format: String = "markdown", + + @Description("Whether to generate analogies and metaphors") + val use_analogies: Boolean = true, + + @Description("Whether to include visual descriptions or diagrams") + val include_visual_descriptions: Boolean = true, + + @Description("Whether to define key terminology") + val define_terminology: Boolean = true, + + @Description("Whether to include practical examples and use cases") + val include_examples: Boolean = true, + + @Description("Whether to provide comparison with related concepts") + val include_comparisons: Boolean = true, + @Description("The specific files (or file patterns, e.g. **/*.kt) to be used as input for the task") + val input_files: List? = null, + + + @Description("Programming language for code examples (if applicable)") + val code_language: String? = null, + + @Description("Number of revision passes for clarity improvement") + val revision_passes: Int = 1, + + @Description("Related files or documentation to reference") + val related_files: List? = null, + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? 
= TaskState.Pending, + ) : TaskExecutionConfig( + task_type = TechnicalExplanation.name, + task_description = task_description ?: "Generate technical explanation for: '$topic'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (topic.isNullOrBlank()) { + return "topic must not be null or blank" + } + val validAudiences = setOf( + "layperson", "beginner", "intermediate", "expert", + "manager", "software_engineer", "data_scientist", "student" + ) + if (target_audience.lowercase() !in validAudiences) { + return "target_audience must be one of: ${validAudiences.joinToString(", ")}, got: $target_audience" + } + val validDetailLevels = setOf( + "high_level_overview", "moderate_detail", "detailed_walkthrough", "comprehensive" + ) + if (level_of_detail.lowercase() !in validDetailLevels) { + return "level_of_detail must be one of: ${validDetailLevels.joinToString(", ")}, got: $level_of_detail" + } + val validFormats = setOf("markdown", "q_and_a", "step_by_step", "narrative", "tutorial") + if (explanation_format.lowercase() !in validFormats) { + return "explanation_format must be one of: ${validFormats.joinToString(", ")}, got: $explanation_format" + } + if (revision_passes < 0 || revision_passes > 5) { + return "revision_passes must be between 0 and 5, got: $revision_passes" + } + if (!input_files.isNullOrEmpty()) { + input_files.forEach { file -> + if (file.isBlank()) { + return "input_files must not contain blank entries" + } + } + } + return ValidatedObject.validateFields(this) + } + } + + data class ExplanationOutline( + @Description("The main topic title") + val title: String = "", + @Description("Brief overview of what will be explained") + val overview: String = "", + @Description("Key concepts to cover in order") + val key_concepts: List = emptyList(), + @Description("Core terminology that needs definition") + val terminology: List = emptyList(), + @Description("Analogies to 
use for complex concepts") + val analogies: List = emptyList(), + @Description("Code examples to include") + val code_examples: List = emptyList(), + @Description("Visual descriptions or diagrams needed") + val visual_descriptions: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? { + if (title.isBlank()) return "title must not be blank" + if (overview.isBlank()) return "overview must not be blank" + if (key_concepts.isEmpty()) return "key_concepts must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class ConceptOutline( + @Description("Concept name") + val concept: String = "", + @Description("Why this concept matters") + val importance: String = "", + @Description("Sub-topics to cover") + val subtopics: List = emptyList(), + @Description("Complexity level") + val complexity: String = "", + @Description("Estimated explanation length") + val estimated_paragraphs: Int = 0 + ) : ValidatedObject + + data class TermDefinition( + @Description("The technical term") + val term: String = "", + @Description("Simple definition") + val definition: String = "", + @Description("Context where it's used") + val context: String = "" + ) : ValidatedObject + + data class AnalogyMapping( + @Description("The technical concept") + val technical_concept: String = "", + @Description("The relatable analogy") + val analogy: String = "", + @Description("How they map to each other") + val mapping_explanation: String = "" + ) : ValidatedObject + + data class CodeExampleOutline( + @Description("What the code demonstrates") + val purpose: String = "", + @Description("Programming language") + val language: String = "", + @Description("Complexity level") + val complexity: String = "", + @Description("Key points to highlight") + val key_points: List = emptyList() + ) : ValidatedObject + + data class ExplanationSection( + @Description("Section title") + val title: String = "", + @Description("Section content") + val content: String = "", + 
@Description("Code snippets in this section") + val code_snippets: List = emptyList(), + @Description("Key takeaways") + val key_takeaways: List = emptyList() + ) : ValidatedObject + + data class CodeSnippet( + @Description("Programming language") + val language: String = "", + @Description("The code") + val code: String = "", + @Description("Explanation of the code") + val explanation: String = "", + @Description("Key points highlighted") + val highlights: List = emptyList() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ +TechnicalExplanation - Break down complex technical subjects into clear, digestible explanations + ** Specify the technical topic to explain + ** Define target audience expertise level + ** Set level of detail (overview to comprehensive) + ** Configure explanation format (markdown, Q&A, step-by-step, etc.) + ** Enable analogies and metaphors for clarity + ** Include code examples with explanations + ** Define key terminology + ** Provide visual descriptions + ** Include practical examples and use cases + ** Compare with related concepts + ** Performs outline creation, content generation, and iterative refinement + ** Produces clear, audience-appropriate technical explanations + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = System.currentTimeMillis() + log.info("Starting TechnicalExplanationTask for topic: '${executionConfig?.topic}'") + val markdownTranscript = transcript(task) + val userMessages = messages.filter { it.isNotBlank() } + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + resultFn("CONFIGURATION ERROR: 
$validationError") + return + } + + val topic = executionConfig?.topic + if (topic.isNullOrBlank()) { + log.error("No topic specified for technical explanation") + task.safeComplete("CONFIGURATION ERROR: No topic specified", log) + resultFn("CONFIGURATION ERROR: No topic specified") + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + // Load input files if specified + val inputFileContent = getInputFileCode() + if (inputFileContent.isNotBlank()) { + log.info("Loaded input files for context") + val inputFilesTask = task.ui.newTask(false) + tabs["Input Files"] = inputFilesTask.placeholder + inputFilesTask.add( + buildString { + appendLine("# Input Files") + appendLine() + appendLine(inputFileContent.truncateForDisplay(3000)) + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("# Input Files\n\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write(inputFileContent.truncateForDisplay(3000).toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write("\n\n".toByteArray(StandardCharsets.UTF_8)) + task.update() + } + // Include user messages in context + if (userMessages.isNotEmpty()) { + log.info("Including ${userMessages.size} user message(s) in context") + } + + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + + val overviewContent = buildString { + appendLine("# Technical Explanation Generation") + appendLine() + appendLine("**Topic:** $topic") + appendLine() + appendLine("## Configuration") + appendLine() + if (userMessages.isNotEmpty()) { + appendLine("### User Input") + appendLine() + userMessages.forEach { message -> + appendLine(message) + appendLine() + } + appendLine("---") + appendLine() + } + } + overviewTask.add(overviewContent.renderMarkdown) + markdownTranscript?.write(overviewContent.toByteArray(StandardCharsets.UTF_8)) + buildString { + appendLine("- Target Audience: 
${executionConfig.target_audience}") + appendLine("- Level of Detail: ${executionConfig.level_of_detail}") + appendLine("- Format: ${executionConfig.explanation_format}") + appendLine("- Include Code Examples: ${if (executionConfig.include_code_examples) "✓" else "✗"}") + appendLine("- Use Analogies: ${if (executionConfig.use_analogies) "✓" else "✗"}") + appendLine("- Define Terminology: ${if (executionConfig.define_terminology) "✓" else "✗"}") + appendLine("- Include Visual Descriptions: ${if (executionConfig.include_visual_descriptions) "✓" else "✗"}") + appendLine("- Include Examples: ${if (executionConfig.include_examples) "✓" else "✗"}") + appendLine("- Include Comparisons: ${if (executionConfig.include_comparisons) "✓" else "✗"}") + if (executionConfig.code_language != null) { + appendLine("- Code Language: ${executionConfig.code_language}") + } + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine() + appendLine("---") + appendLine() + } + buildString { + appendLine("### Phase 1: Analysis & Outline") + appendLine("*Analyzing topic and creating explanation structure...*") + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + + val resultBuilder = StringBuilder() + resultBuilder.append("# Technical Explanation: $topic\n\n") + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) + val contextFiles = getContextFiles() + + if (priorContext.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Reference Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Reference Context") + appendLine() + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + markdownTranscript?.write("\n## 
Prior Context\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write(priorContext.truncateForDisplay(2000).toByteArray(StandardCharsets.UTF_8)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Related Files") + appendLine(contextFiles.truncateForDisplay(2000)) + } + }.renderMarkdown + ) + task.update() + markdownTranscript?.write("\n## Related Files\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write(contextFiles.truncateForDisplay(2000).toByteArray(StandardCharsets.UTF_8)) + } + + // Phase 1: Create outline + log.info("Phase 1: Creating explanation outline") + val outlineTask = task.ui.newTask(false) + tabs["Outline"] = outlineTask.placeholder + + outlineTask.add( + buildString { + appendLine("# Explanation Outline") + appendLine() + appendLine("**Status:** Creating structured outline...") + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("\n# Explanation Outline\n\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write("**Status:** Creating structured outline...\n\n".toByteArray(StandardCharsets.UTF_8)) + task.update() + + val audienceGuidance = when (executionConfig.target_audience.lowercase()) { + "layperson" -> "Assume no technical background. Use everyday language and avoid jargon." + "beginner" -> "Assume basic familiarity with technology but limited domain knowledge." + "intermediate" -> "Assume solid foundation in the field with some practical experience." + "expert" -> "Assume deep technical knowledge. Focus on nuances and advanced concepts." + "manager" -> "Focus on high-level concepts, business value, and practical implications." + "software_engineer" -> "Assume programming knowledge. Include implementation details." + "data_scientist" -> "Assume statistical and algorithmic knowledge. Include mathematical concepts." + else -> "Adjust language to match the audience's technical level." 
+ } + + val detailGuidance = when (executionConfig.level_of_detail.lowercase()) { + "high_level_overview" -> "Provide a bird's-eye view. Focus on the 'what' and 'why'." + "moderate_detail" -> "Balance overview with key details. Cover 'what', 'why', and 'how' at a moderate depth." + "detailed_walkthrough" -> "Provide comprehensive coverage with step-by-step explanations." + "comprehensive" -> "Cover all aspects thoroughly, including edge cases and advanced topics." + else -> "Provide moderate detail with clear explanations." + } + + val outlineAgent = ParsedAgent( + resultClass = ExplanationOutline::class.java, + prompt = """ +You are an expert technical educator and communicator. Create a detailed outline for explaining this topic. + +Topic: $topic + +Target Audience: ${executionConfig.target_audience} +Audience Guidance: $audienceGuidance + +Level of Detail: ${executionConfig.level_of_detail} +Detail Guidance: $detailGuidance + +Format: ${executionConfig.explanation_format} + +${if (priorContext.isNotBlank()) "Reference Context:\n${priorContext.truncateForDisplay(3000)}\n" else ""} +${if (contextFiles.isNotBlank()) "Additional Documentation:\n${contextFiles.truncateForDisplay(3000)}\n" else ""} + +Create an outline that includes: +1. A clear, engaging title +2. Brief overview (2-3 sentences) of what will be explained +3. 3-6 key concepts to cover, ordered logically (simple to complex or general to specific) +4. ${if (executionConfig.define_terminology) "5-10 essential terms that need definition" else "Key terminology (minimal)"} +5. ${if (executionConfig.use_analogies) "2-4 analogies to make complex concepts relatable" else "Analogies (if absolutely necessary)"} +6. ${if (executionConfig.include_code_examples) "3-5 code examples to illustrate concepts" else "Code examples (minimal or none)"} +7. 
${if (executionConfig.include_visual_descriptions) "Descriptions of diagrams or visualizations that would help" else "Visual aids (if critical)"} + +For each key concept, specify: +- The concept name +- Why it's important to understand +- Sub-topics to cover +- Complexity level (basic, intermediate, advanced) +- Estimated paragraphs needed + +Ensure the outline: +- Builds understanding progressively +- Matches the ${executionConfig.target_audience} audience level +- Provides ${executionConfig.level_of_detail} level of detail +- Follows ${executionConfig.explanation_format} format conventions + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var outline = outlineAgent.answer(listOf("Generate outline")).obj + + // Validate outline + outline.validate()?.let { validationError -> + log.error("Outline validation failed: $validationError") + outlineTask.error(ValidatedObject.ValidationError(validationError, outline)) + task.safeComplete("Outline validation failed: $validationError", log) + resultFn("ERROR: Outline validation failed: $validationError") + return + } + + log.info("Generated outline: ${outline.key_concepts.size} concepts, ${outline.terminology.size} terms, ${outline.analogies.size} analogies") + + val outlineContent = buildString { + appendLine("## ${outline.title}") + appendLine() + appendLine("### Overview") + appendLine(outline.overview) + appendLine() + appendLine("---") + appendLine() + appendLine("### Key Concepts") + outline.key_concepts.forEachIndexed { index, concept -> + appendLine("#### ${index + 1}. ${concept.concept}") + appendLine() + appendLine("**Importance:** ${concept.importance}") + appendLine() + appendLine("**Complexity:** ${concept.complexity}") + appendLine() + if (concept.subtopics.isNotEmpty()) { + appendLine("**Subtopics:**") + concept.subtopics.forEach { subtopic -> + appendLine("- $subtopic") + } + appendLine() + } + appendLine("**Est. 
Paragraphs:** ${concept.estimated_paragraphs}") + appendLine() + appendLine("---") + appendLine() + } + if (outline.terminology.isNotEmpty()) { + appendLine("### Key Terminology") + outline.terminology.forEach { term -> + appendLine("**${term.term}:** ${term.definition}") + if (term.context.isNotBlank()) { + appendLine(" - *Context: ${term.context}*") + } + appendLine() + } + appendLine("---") + appendLine() + } + if (outline.analogies.isNotEmpty()) { + appendLine("### Analogies") + outline.analogies.forEach { analogy -> + appendLine("**${analogy.technical_concept}** ≈ ${analogy.analogy}") + appendLine(" - ${analogy.mapping_explanation}") + appendLine() + } + appendLine("---") + appendLine() + } + if (outline.code_examples.isNotEmpty()) { + appendLine("### Code Examples") + outline.code_examples.forEachIndexed { index, example -> + appendLine("${index + 1}. **${example.purpose}** (${example.language})") + appendLine(" - Complexity: ${example.complexity}") + if (example.key_points.isNotEmpty()) { + appendLine(" - Key points: ${example.key_points.joinToString(", ")}") + } + appendLine() + } + appendLine("---") + appendLine() + } + if (outline.visual_descriptions.isNotEmpty()) { + appendLine("### Visual Aids") + outline.visual_descriptions.forEach { visual -> + appendLine("- $visual") + } + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + outlineTask.add(outlineContent.renderMarkdown) + task.update() + markdownTranscript?.write(outlineContent.toByteArray(StandardCharsets.UTF_8)) + + overviewTask.add("✅ Phase 1 Complete: Outline created\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Content Generation\n*Writing explanation sections...*\n".renderMarkdown) + task.update() + + // Phase 2: Generate content for each concept + log.info("Phase 2: Generating explanation content") + val sections = mutableListOf() + + outline.key_concepts.forEachIndexed { index, conceptOutline -> + log.info("Writing section ${index + 1}/${outline.key_concepts.size}: 
${conceptOutline.concept}") + + overviewTask.add("- Section ${index + 1}: ${conceptOutline.concept.truncateForDisplay(50)} ".renderMarkdown) + task.update() + + val sectionTask = task.ui.newTask(false) + tabs["Section ${index + 1}"] = sectionTask.placeholder + + sectionTask.add( + buildString { + appendLine("# ${conceptOutline.concept}") + appendLine() + appendLine("**Status:** Writing section...") + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("\n# ${conceptOutline.concept}\n\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write("**Status:** Writing section...\n\n".toByteArray(StandardCharsets.UTF_8)) + task.update() + + // Build context from previous sections + val previousContext = if (sections.isNotEmpty()) { + buildString { + appendLine("## Previously Covered") + sections.takeLast(2).forEach { prevSection -> + appendLine("**${prevSection.title}:** ${prevSection.content.take(200)}...") + appendLine() + } + } + } else { + "This is the first section." + } + + // Find relevant analogies for this concept + val relevantAnalogies = outline.analogies.filter { + it.technical_concept.contains(conceptOutline.concept, ignoreCase = true) || + conceptOutline.concept.contains(it.technical_concept, ignoreCase = true) + } + + // Find relevant code examples + val relevantCodeExamples = outline.code_examples.filter { + it.purpose.contains(conceptOutline.concept, ignoreCase = true) || + conceptOutline.concept.contains(it.purpose, ignoreCase = true) + } + + val sectionAgent = ParsedAgent( + resultClass = ExplanationSection::class.java, + prompt = """ +You are an expert technical educator. Write a clear, engaging explanation of this concept. 
+ +Overall Topic: $topic +Target Audience: ${executionConfig.target_audience} +Audience Guidance: $audienceGuidance + +Concept to Explain: ${conceptOutline.concept} +Importance: ${conceptOutline.importance} +Subtopics: ${conceptOutline.subtopics.joinToString(", ")} +Complexity: ${conceptOutline.complexity} + +$previousContext + +${ + if (relevantAnalogies.isNotEmpty()) { + "Analogies to Use:\n${relevantAnalogies.joinToString("\n") { "- ${it.analogy}: ${it.mapping_explanation}" }}\n" + } else "" + } + +${ + if (executionConfig.include_code_examples && relevantCodeExamples.isNotEmpty()) { + "Code Examples to Include:\n${relevantCodeExamples.joinToString("\n") { "- ${it.purpose} (${it.language})" }}\n" + } else "" + } + +Write a section that: +1. Opens with a clear introduction to the concept +2. ${if (executionConfig.use_analogies && relevantAnalogies.isNotEmpty()) "Uses the provided analogy to make it relatable" else "Explains clearly without jargon"} +3. Covers all subtopics: ${conceptOutline.subtopics.joinToString(", ")} +4. ${if (executionConfig.include_examples) "Includes practical examples or use cases" else "Focuses on conceptual understanding"} +5. ${if (executionConfig.include_code_examples) "Includes code snippets with clear explanations" else "Avoids code unless absolutely necessary"} +6. ${if (executionConfig.include_visual_descriptions) "Describes visual representations that would help" else "Uses text-based explanations"} +7. Provides 2-4 key takeaways at the end +8. 
Transitions smoothly to the next concept + +Make it: +- Clear and accessible to ${executionConfig.target_audience} +- Engaging and well-structured +- Approximately ${conceptOutline.estimated_paragraphs} paragraphs +- Following ${executionConfig.explanation_format} format + +${ + if (executionConfig.include_code_examples) { + "For code snippets, provide:\n- The code in ${executionConfig.code_language ?: "appropriate language"}\n- Line-by-line or block explanation\n- Key points to highlight\n" + } else "" + } + """.trimIndent(), + model = api, + temperature = 0.7, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var section = sectionAgent.answer(listOf("Write section")).obj + sections.add(section) + + val sectionContent = buildString { + appendLine("## ${section.title}") + appendLine() + appendLine(section.content) + appendLine() + if (section.code_snippets.isNotEmpty()) { + appendLine("---") + appendLine() + appendLine("### Code Examples") + appendLine() + section.code_snippets.forEach { snippet -> + appendLine("**${snippet.explanation}**") + appendLine() + appendLine("```${snippet.language}") + appendLine(snippet.code) + appendLine("```") + appendLine() + if (snippet.highlights.isNotEmpty()) { + appendLine("**Key Points:**") + snippet.highlights.forEach { highlight -> + appendLine("- $highlight") + } + appendLine() + } + } + } + if (section.key_takeaways.isNotEmpty()) { + appendLine("---") + appendLine() + appendLine("### Key Takeaways") + section.key_takeaways.forEach { takeaway -> + appendLine("- $takeaway") + } + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + sectionTask.add(sectionContent.renderMarkdown) + task.update() + markdownTranscript?.write(sectionContent.toByteArray(StandardCharsets.UTF_8)) + + resultBuilder.append("## ${section.title}\n\n") + resultBuilder.append(section.content) + resultBuilder.append("\n\n") + + if (section.code_snippets.isNotEmpty()) { + section.code_snippets.forEach { snippet -> + 
resultBuilder.append("```${snippet.language}\n") + resultBuilder.append(snippet.code) + resultBuilder.append("\n```\n\n") + resultBuilder.append("*${snippet.explanation}*\n\n") + } + } + + overviewTask.add("✅\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 2 Complete: All sections written\n".renderMarkdown) + + // Phase 3: Add comparisons if enabled + if (executionConfig.include_comparisons) { + overviewTask.add("\n### Phase 3: Comparisons\n*Adding comparisons with related concepts...*\n".renderMarkdown) + task.update() + + log.info("Phase 3: Generating comparisons") + val comparisonTask = task.ui.newTask(false) + tabs["Comparisons"] = comparisonTask.placeholder + + comparisonTask.add( + buildString { + appendLine("# Comparisons") + appendLine() + appendLine("**Status:** Comparing with related concepts...") + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("\n# Comparisons\n\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write("**Status:** Comparing with related concepts...\n\n".toByteArray(StandardCharsets.UTF_8)) + task.update() + + val comparisonAgent = ChatAgent( + prompt = """ +You are an expert technical educator. Compare and contrast this topic with related concepts. + +Topic: $topic +Target Audience: ${executionConfig.target_audience} + +Content Covered: +${sections.joinToString("\n") { "- ${it.title}" }} + +Provide comparisons that: +1. Identify 2-3 related or commonly confused concepts +2. Explain key similarities +3. Highlight important differences +4. Clarify when to use each +5. Help readers understand the boundaries and relationships + +Make comparisons clear and helpful for ${executionConfig.target_audience}. 
+ """.trimIndent(), + model = api, + temperature = 0.6 + ) + + val comparisons = comparisonAgent.answer(listOf("Generate comparisons")) + + comparisonTask.add( + buildString { + appendLine("## Related Concepts") + appendLine() + appendLine(comparisons) + appendLine() + appendLine("**Status:** ✅ Complete") + }.renderMarkdown + ) + task.update() + markdownTranscript?.write("\n## Related Concepts\n\n${comparisons}\n\n".toByteArray(StandardCharsets.UTF_8)) + + resultBuilder.append("## Comparisons with Related Concepts\n\n") + resultBuilder.append(comparisons) + resultBuilder.append("\n\n") + + overviewTask.add("✅ Phase 3 Complete: Comparisons added\n".renderMarkdown) + } + + // Phase 4: Revision (if enabled) + if (executionConfig.revision_passes > 0) { + overviewTask.add("\n### Phase 4: Revision\n*Refining for clarity...*\n".renderMarkdown) + task.update() + + log.info("Phase 4: Performing ${executionConfig.revision_passes} revision pass(es)") + val revisionTask = task.ui.newTask(false) + tabs["Revision"] = revisionTask.placeholder + + revisionTask.add( + buildString { + appendLine("# Revision Process") + appendLine() + appendLine("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...") + appendLine() + }.renderMarkdown + ) + markdownTranscript?.write("\n# Revision Process\n\n".toByteArray(StandardCharsets.UTF_8)) + markdownTranscript?.write("**Status:** Performing ${executionConfig.revision_passes} revision pass(es)...\n\n".toByteArray(StandardCharsets.UTF_8)) + task.update() + + val fullExplanation = resultBuilder.toString() + + repeat(executionConfig.revision_passes) { passNum -> + log.debug("Revision pass ${passNum + 1}/${executionConfig.revision_passes}") + + val revisionAgent = ChatAgent( + prompt = """ +You are an expert technical editor. Review and improve this explanation for clarity and effectiveness. 
+ +Current Explanation: +$fullExplanation + +Target Audience: ${executionConfig.target_audience} +Level of Detail: ${executionConfig.level_of_detail} + +Focus on: +1. Clarity and simplicity of language +2. Logical flow and transitions +3. Effectiveness of analogies and examples +4. Accuracy of technical content +5. Appropriateness for ${executionConfig.target_audience} +6. Completeness of coverage +7. Engagement and readability + +Maintain: +- All key concepts and information +- Code examples and their explanations +- Technical accuracy +- Approximate length +- ${executionConfig.explanation_format} format + +Provide the complete revised explanation. + """.trimIndent(), + model = api, + temperature = 0.5 + ) + + val revisedExplanation = revisionAgent.answer(listOf("Revise the explanation")) + resultBuilder.clear() + resultBuilder.append(revisedExplanation) + + revisionTask.add( + buildString { + appendLine("## Revision Pass ${passNum + 1}") + appendLine() + appendLine("✅ Complete") + appendLine() + }.renderMarkdown + ) + task.update() + markdownTranscript?.write("\n## Revision Pass ${passNum + 1}\n\n✅ Complete\n\n".toByteArray(StandardCharsets.UTF_8)) + } + + overviewTask.add("✅ Phase 4 Complete: ${executionConfig.revision_passes} revision pass(es) completed\n".renderMarkdown) + } + + // Phase 5: Final Assembly + overviewTask.add("\n### Phase 5: Final Assembly\n*Compiling complete explanation...*\n".renderMarkdown) + task.update() + + log.info("Phase 5: Assembling final explanation") + val finalTask = task.ui.newTask(false) + tabs["Complete Explanation"] = finalTask.placeholder + + val finalExplanation = buildString { + appendLine("# ${outline.title}") + appendLine() + appendLine("> *Explanation for: ${executionConfig.target_audience}*") + appendLine() + appendLine("## Overview") + appendLine() + appendLine(outline.overview) + appendLine() + if (outline.terminology.isNotEmpty() && executionConfig.define_terminology) { + appendLine("---") + appendLine() + 
appendLine("## Key Terminology") + appendLine() + outline.terminology.forEach { term -> + appendLine("**${term.term}:** ${term.definition}") + appendLine() + } + } + appendLine("---") + appendLine() + appendLine(resultBuilder.toString()) + appendLine() + appendLine("---") + appendLine() + appendLine("## Summary") + appendLine() + appendLine("This explanation covered:") + sections.forEach { section -> + appendLine("- **${section.title}**") + if (section.key_takeaways.isNotEmpty()) { + section.key_takeaways.forEach { takeaway -> + appendLine(" - ${takeaway.truncateForDisplay(100)}") + } + } + } + } + + finalTask.add(finalExplanation.renderMarkdown) + task.update() + markdownTranscript?.write("\n---\n\n${finalExplanation}\n".toByteArray(StandardCharsets.UTF_8)) + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + val wordCount = finalExplanation.split("\\s+".toRegex()).size + val codeExampleCount = sections.sumOf { it.code_snippets.size } + + val statsContent = buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Statistics:**") + appendLine("- Sections: ${sections.size}") + appendLine("- Word Count: $wordCount") + appendLine("- Code Examples: $codeExampleCount") + appendLine("- Analogies Used: ${outline.analogies.size}") + appendLine("- Terms Defined: ${outline.terminology.size}") + appendLine("- Revision Passes: ${executionConfig.revision_passes}") + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + } + overviewTask.add( + statsContent.renderMarkdown + ) + markdownTranscript?.write(statsContent.toByteArray(StandardCharsets.UTF_8)) + task.update() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# Technical Explanation Summary: ${outline.title}") + appendLine() + appendLine("A complete 
technical explanation of **$topic** was generated in **${totalTime / 1000.0}s**.") + appendLine() + appendLine("**Target Audience:** ${executionConfig.target_audience}") + appendLine() + appendLine("**Coverage:**") + appendLine("- ${sections.size} main sections") + appendLine("- $wordCount words") + appendLine("- $codeExampleCount code examples") + appendLine("- ${outline.analogies.size} analogies") + appendLine() + appendLine("> The full explanation is available in the Complete Explanation tab for detailed review.") + } + + log.info("TechnicalExplanationTask completed: sections=${sections.size}, words=$wordCount, time=${totalTime}ms") + markdownTranscript?.close() + + task.safeComplete("Technical explanation generation complete: $wordCount words in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during technical explanation generation", e) + task.error(e) + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + markdownTranscript?.close() + + val errorOutput = buildString { + appendLine("# Error in Technical Explanation Generation") + appendLine() + appendLine("**Topic:** $topic") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } + + private fun getInputFileCode() = (executionConfig?.input_files ?: listOf()) + .flatMap { pattern: String -> + val matcher = FileSystems.getDefault().getPathMatcher("glob:$pattern") + (FileSelectionUtils.filteredWalk(root.toFile()) { + when { + FileSelectionUtils.isLLMIgnored(it.toPath()) -> false + matcher.matches(root.relativize(it.toPath())) -> true + it.isDirectory -> true 
+ else -> false + } + }) + }.filter { file -> + file.isFile && file.exists() + } + .distinct() + .sortedBy { it } + .joinToString("\n\n") { relativePath -> + val file = root.toFile().resolve(relativePath) + try { + val content = file.readText() + "# $relativePath\n\n```\n$content\n```" + } catch (e: Throwable) { + log.warn("Error reading file: $relativePath", e) + "" + } + } + + + private fun getContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Documentation Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + + companion object { + private val log: Logger = LoggerFactory.getLogger(TechnicalExplanationTask::class.java) + val TechnicalExplanation = TaskType( + "TechnicalExplanation", + TechnicalExplanationTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Break down complex technical subjects into clear, digestible explanations", + """ + Generates clear, audience-appropriate explanations of complex technical topics. +
    • Creates structured outline with key concepts and terminology
    • Adjusts language and depth for target audience (layperson to expert)
    • Generates relatable analogies and metaphors
    • Includes code examples with detailed explanations
    • Defines essential terminology in context
    • Provides visual descriptions and diagrams
    • Includes practical examples and use cases
    • Compares with related concepts for clarity
    • Supports multiple formats (markdown, Q&A, step-by-step, tutorial)
    • Optional revision passes for clarity improvement
    • Ideal for documentation, onboarding, education, and knowledge sharing
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/TutorialGenerationTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/TutorialGenerationTask.kt new file mode 100644 index 000000000..bcd888070 --- /dev/null +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/plan/tools/writing/TutorialGenerationTask.kt @@ -0,0 +1,1322 @@ +package com.simiacryptus.cognotik.plan.tools.writing + +import com.simiacryptus.cognotik.agents.ParsedAgent +import com.simiacryptus.cognotik.apps.general.renderMarkdown +import com.simiacryptus.cognotik.describe.Description +import com.simiacryptus.cognotik.plan.* +import com.simiacryptus.cognotik.plan.tools.reasoning.safeComplete +import com.simiacryptus.cognotik.plan.tools.reasoning.truncateForDisplay +import com.simiacryptus.cognotik.plan.tools.reasoning.validateAndGetApi +import com.simiacryptus.cognotik.util.LoggerFactory +import com.simiacryptus.cognotik.util.TabbedDisplay +import com.simiacryptus.cognotik.util.ValidatedObject +import com.simiacryptus.cognotik.webui.session.SessionTask +import org.slf4j.Logger +import java.io.FileOutputStream +import java.time.LocalDateTime +import java.time.format.DateTimeFormatter + + +class TutorialGenerationTask( + orchestrationConfig: OrchestrationConfig, + planTask: TutorialGenerationTaskExecutionConfigData? +) : AbstractTask( + orchestrationConfig, + planTask +) { + + class TutorialGenerationTaskExecutionConfigData( + @Description("The final outcome the user should achieve (e.g., 'deploy a web app to the cloud', 'train a simple machine learning model')") + val goal: String? 
= null, + + @Description("The environment the tutorial is for (e.g., 'Windows', 'Linux', 'macOS', 'VS Code', 'Docker')") + val target_platform: String = "cross-platform", + + @Description("Whether to add placeholders like '[Screenshot of the successful output]' where visuals would be needed") + val include_screenshots_placeholders: Boolean = true, + + @Description("Controls how much explanatory text is included with each step ('concise', 'detailed', 'verbose')") + val verbosity: String = "detailed", + + @Description("Whether to add a common errors and troubleshooting section") + val include_troubleshooting: Boolean = true, + + @Description("Target audience skill level (e.g., 'beginner', 'intermediate', 'advanced')") + val skill_level: String = "beginner", + + @Description("Estimated time to complete the tutorial in minutes") + val estimated_duration: Int = 30, + + @Description("Whether to include code examples and commands") + val include_code_examples: Boolean = true, + + @Description("Whether to include validation steps to verify success") + val include_validation_steps: Boolean = true, + + @Description("Whether to include a 'What You'll Learn' section") + val include_learning_objectives: Boolean = true, + + @Description("Whether to include a 'Next Steps' section for further learning") + val include_next_steps: Boolean = true, + + @Description("Number of main steps to break the tutorial into") + val target_step_count: Int = 7, + + @Description("Related files or documentation to reference") + val related_files: List? = null, + @Description("Optional input files to use as context (supports glob patterns, e.g. **/*.kt)") + val input_files: List? = null, + + + task_description: String? = null, + task_dependencies: List? = null, + state: TaskState? 
= TaskState.Pending, + ) : TaskExecutionConfig( + task_type = TutorialGeneration.name, + task_description = task_description ?: "Generate tutorial for: '$goal'", + task_dependencies = task_dependencies?.toMutableList(), + state = state + ), ValidatedObject { + override fun validate(): String? { + if (goal.isNullOrBlank()) { + return "goal must not be null or blank" + } + if (estimated_duration <= 0) { + return "estimated_duration must be positive, got: $estimated_duration" + } + if (target_step_count < 3 || target_step_count > 20) { + return "target_step_count must be between 3 and 20, got: $target_step_count" + } + if (verbosity.isBlank()) { + return "verbosity must not be blank" + } + if (skill_level.isBlank()) { + return "skill_level must not be blank" + } + return ValidatedObject.validateFields(this) + } + } + + data class TutorialOutline( + @Description("Tutorial title") + val title: String = "", + @Description("Brief description of what the tutorial covers") + val description: String = "", + @Description("Learning objectives") + val learning_objectives: List = emptyList(), + @Description("Prerequisites (tools, software, prior knowledge)") + val prerequisites: List = emptyList(), + @Description("Main tutorial steps") + val steps: List = emptyList(), + @Description("Estimated completion time in minutes") + val estimated_time: Int = 0 + ) : ValidatedObject { + override fun validate(): String? 
{ + if (title.isBlank()) return "title must not be blank" + if (description.isBlank()) return "description must not be blank" + if (steps.isEmpty()) return "steps must not be empty" + if (estimated_time <= 0) return "estimated_time must be positive" + return ValidatedObject.validateFields(this) + } + } + + data class Prerequisite( + @Description("Type of prerequisite (e.g., 'software', 'knowledge', 'account', 'hardware')") + val type: String = "", + @Description("Name of the prerequisite") + val name: String = "", + @Description("Description or installation instructions") + val description: String = "", + @Description("Whether this is required or optional") + val required: Boolean = true, + @Description("Link to download or learn more") + val link: String? = null + ) : ValidatedObject { + override fun validate(): String? { + if (name.isBlank()) return "prerequisite name must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class TutorialStepOutline( + @Description("Step number") + val step_number: Int = 1, + @Description("Step title") + val title: String = "", + @Description("What this step accomplishes") + val purpose: String = "", + @Description("Key actions to perform") + val actions: List = emptyList(), + @Description("Whether this step includes code or commands") + val has_code: Boolean = false, + @Description("Whether this step needs a screenshot placeholder") + val needs_screenshot: Boolean = false, + @Description("Expected outcome or result") + val expected_outcome: String = "", + @Description("Estimated time for this step in minutes") + val estimated_time: Int = 0 + ) : ValidatedObject { + override fun validate(): String? 
{ + if (title.isBlank()) return "step title must not be blank" + if (step_number <= 0) return "step_number must be positive" + return ValidatedObject.validateFields(this) + } + } + + data class TutorialStep( + @Description("Step number") + val step_number: Int = 1, + @Description("Step title") + val title: String = "", + @Description("Detailed explanation") + val explanation: String = "", + @Description("Commands or code to execute") + val code_blocks: List = emptyList(), + @Description("Expected outcome description") + val expected_outcome: String = "", + @Description("Validation steps to verify success") + val validation_steps: List = emptyList(), + @Description("Screenshot placeholder locations") + val screenshot_placeholders: List = emptyList(), + @Description("Common issues for this step") + val common_issues: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? { + if (title.isBlank()) return "step title must not be blank" + if (explanation.isBlank()) return "step explanation must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class CodeBlock( + @Description("Programming language or shell type") + val language: String = "", + @Description("The code or command") + val code: String = "", + @Description("Brief description of what this code does") + val description: String = "", + @Description("Whether this should be run in a specific directory") + val working_directory: String? = null + ) : ValidatedObject { + override fun validate(): String? 
{ + if (code.isBlank()) return "code must not be blank" + return ValidatedObject.validateFields(this) + } + } + + data class TroubleshootingSection( + @Description("Common problems and solutions") + val issues: List = emptyList() + ) : ValidatedObject + + data class TroubleshootingIssue( + @Description("The problem or error") + val problem: String = "", + @Description("Symptoms or error messages") + val symptoms: List = emptyList(), + @Description("Possible causes") + val causes: List = emptyList(), + @Description("Solutions to try") + val solutions: List = emptyList() + ) : ValidatedObject { + override fun validate(): String? { + if (problem.isBlank()) return "problem must not be blank" + if (solutions.isEmpty()) return "solutions must not be empty" + return ValidatedObject.validateFields(this) + } + } + + data class NextSteps( + @Description("Suggestions for further learning") + val suggestions: List = emptyList(), + @Description("Related tutorials or resources") + val related_resources: List = emptyList(), + @Description("Advanced topics to explore") + val advanced_topics: List = emptyList() + ) : ValidatedObject + + override fun promptSegment(): String { + return """ +TutorialGeneration - Create complete, step-by-step tutorials for processes and projects + ** Specify the goal or final outcome to achieve + ** Define target platform and environment + ** Set skill level and estimated duration + ** Enable screenshot placeholders for visual guidance + ** Configure verbosity level (concise, detailed, verbose) + ** Include code examples and commands + ** Add validation steps to verify success + ** Include troubleshooting section for common errors + ** Add learning objectives and next steps + ** Produces publication-ready tutorial with clear, actionable steps + """.trimIndent() + } + + override fun run( + agent: TaskOrchestrator, + messages: List, + task: SessionTask, + resultFn: (String) -> Unit, + orchestrationConfig: OrchestrationConfig + ) { + val startTime = 
System.currentTimeMillis() + log.info("Starting TutorialGenerationTask for goal: '${executionConfig?.goal}'") + val transcript = transcript(task) + val tutorialOutputFile = createTutorialOutputFile(task) + + // Validate configuration + executionConfig?.validate()?.let { validationError -> + log.error("Configuration validation failed: $validationError") + task.safeComplete("CONFIGURATION ERROR: $validationError", log) + task.error(ValidatedObject.ValidationError(validationError, executionConfig)) + resultFn("CONFIGURATION ERROR: $validationError") + return + } + + val goal = executionConfig?.goal + if (goal.isNullOrBlank()) { + log.error("No goal specified for tutorial generation") + task.safeComplete("CONFIGURATION ERROR: No goal specified", log) + resultFn("CONFIGURATION ERROR: No goal specified") + return + } + + val api = validateAndGetApi(orchestrationConfig, task, log, resultFn) ?: return + + val tabs = TabbedDisplay(task) + + // Overview tab + val overviewTask = task.ui.newTask(false) + tabs["Overview"] = overviewTask.placeholder + transcript?.write("# Tutorial Generation Transcript\n\n".toByteArray()) + transcript?.write("**Goal:** $goal\n\n".toByteArray()) + transcript?.write("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + + + val overviewContent = buildString { + appendLine("# Tutorial Generation") + appendLine() + appendLine("**Goal:** $goal") + appendLine() + appendLine("## Configuration") + appendLine("- Target Platform: ${executionConfig.target_platform}") + appendLine("- Skill Level: ${executionConfig.skill_level}") + appendLine("- Estimated Duration: ${executionConfig.estimated_duration} minutes") + appendLine("- Verbosity: ${executionConfig.verbosity}") + appendLine("- Target Steps: ${executionConfig.target_step_count}") + appendLine("- Include Code Examples: ${if (executionConfig.include_code_examples) "✓" else "✗"}") + appendLine("- 
Include Screenshots: ${if (executionConfig.include_screenshots_placeholders) "✓" else "✗"}") + appendLine("- Include Validation: ${if (executionConfig.include_validation_steps) "✓" else "✗"}") + appendLine("- Include Troubleshooting: ${if (executionConfig.include_troubleshooting) "✓" else "✗"}") + appendLine() + appendLine("**Started:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + appendLine("**Input Messages:** ${messages.size}") + appendLine() + appendLine("---") + appendLine() + appendLine("## Progress") + appendLine() + appendLine("### Phase 1: Planning & Outline") + appendLine("*Creating tutorial structure...*") + } + overviewTask.add(overviewContent.renderMarkdown) + task.update() + + val resultBuilder = StringBuilder() + resultBuilder.append("# Tutorial: $goal\n\n") + + try { + // Gather context + val priorContext = getPriorCode(agent.executionState) + val contextFiles = getContextFiles() + val inputFileContent = getInputFileCode() + // Combine all context + buildString { + if (inputFileContent.isNotBlank()) appendLine(inputFileContent) + if (contextFiles.isNotBlank()) appendLine(contextFiles) + } + + if (priorContext.isNotBlank() || contextFiles.isNotBlank()) { + log.debug("Found context: priorContext=${priorContext.length} chars, contextFiles=${contextFiles.length} chars") + val contextTask = task.ui.newTask(false) + tabs["Context"] = contextTask.placeholder + contextTask.add( + buildString { + appendLine("# Context & Resources") + appendLine() + if (inputFileContent.isNotBlank()) { + appendLine("## Input Files") + appendLine(inputFileContent.truncateForDisplay(2000)) + appendLine() + } + if (priorContext.isNotBlank()) { + appendLine("## Prior Context") + appendLine(priorContext.truncateForDisplay(2000)) + appendLine() + } + if (contextFiles.isNotBlank()) { + appendLine("## Related Files") + appendLine(contextFiles.truncateForDisplay(2000)) + } + }.renderMarkdown + ) + task.update() + } + + // Phase 1: Create 
outline + log.info("Phase 1: Creating tutorial outline") + val outlineTask = task.ui.newTask(false) + tabs["Outline"] = outlineTask.placeholder + transcript?.write("## Phase 1: Planning & Outline\n\n".toByteArray()) + transcript?.write("Creating tutorial structure...\n\n".toByteArray()) + transcript?.write("**Configuration:**\n".toByteArray()) + transcript?.write("- Target Steps: ${executionConfig.target_step_count}\n".toByteArray()) + transcript?.write("- Skill Level: ${executionConfig.skill_level}\n\n".toByteArray()) + + outlineTask.add( + buildString { + appendLine("# Tutorial Outline") + appendLine() + appendLine("**Status:** Creating structured outline...") + appendLine() + }.renderMarkdown + ) + task.update() + + val outlineAgent = ParsedAgent( + resultClass = TutorialOutline::class.java, + prompt = """ +You are an expert technical writer and educator. Create a detailed outline for a tutorial. + +Goal: $goal + +Target Platform: ${executionConfig.target_platform} +Skill Level: ${executionConfig.skill_level} +Estimated Duration: ${executionConfig.estimated_duration} minutes +Target Step Count: ${executionConfig.target_step_count} + +${if (inputFileContent.isNotBlank()) "Input Files:\n${inputFileContent.truncateForDisplay(3000)}\n" else ""} +${if (messages.isNotEmpty()) "User Messages:\n${messages.joinToString("\n").truncateForDisplay(2000)}\n" else ""} +${if (priorContext.isNotBlank()) "Context:\n${priorContext.truncateForDisplay(3000)}\n" else ""} +${if (contextFiles.isNotBlank()) "Related Files:\n${contextFiles.truncateForDisplay(3000)}\n" else ""} + +Create an outline with: +1. A clear, descriptive title +2. Brief description of what the tutorial covers +3. ${if (executionConfig.include_learning_objectives) "3-5 specific learning objectives" else ""} +4. Complete list of prerequisites: + - Required software and tools (with versions if relevant) + - Prior knowledge or skills needed + - Accounts or services required + - Hardware requirements if applicable +5. 
${executionConfig.target_step_count} main steps that: + - Follow a logical progression + - Are appropriately sized (not too large or small) + - Build on previous steps + - Lead to the stated goal + - Include time estimates + +For each step, specify: +- Clear, action-oriented title +- Purpose (what this step accomplishes) +- Key actions to perform +- Whether it includes code/commands +- Whether it needs a screenshot +- Expected outcome +- Estimated time (total should be ~${executionConfig.estimated_duration} minutes) + +Ensure the outline: +- Is appropriate for ${executionConfig.skill_level} level +- Works on ${executionConfig.target_platform} +- Follows best practices for technical tutorials +- Has a clear beginning, middle, and end + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var outline = outlineAgent.answer(listOf("Generate outline")).obj + + // Validate outline + outline.validate()?.let { validationError -> + log.error("Outline validation failed: $validationError") + outlineTask.error(ValidatedObject.ValidationError(validationError, outline)) + task.safeComplete("Outline validation failed: $validationError", log) + resultFn("ERROR: Outline validation failed: $validationError") + return + } + + log.info("Generated outline: ${outline.steps.size} steps, ${outline.prerequisites.size} prerequisites") + transcript?.write("### Outline Generated\n\n".toByteArray()) + transcript?.write("**Title:** ${outline.title}\n\n".toByteArray()) + transcript?.write("**Steps:** ${outline.steps.size}\n\n".toByteArray()) + transcript?.write("**Prerequisites:** ${outline.prerequisites.size}\n\n".toByteArray()) + transcript?.write("**Estimated Time:** ${outline.estimated_time} minutes\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + + + val outlineContent = buildString { + appendLine("## ${outline.title}") + appendLine() + appendLine(outline.description) + appendLine() + appendLine("**Estimated 
Time:** ${outline.estimated_time} minutes") + appendLine() + appendLine("---") + appendLine() + if (outline.learning_objectives.isNotEmpty()) { + appendLine("### What You'll Learn") + outline.learning_objectives.forEach { objective -> + appendLine("- $objective") + } + appendLine() + appendLine("---") + appendLine() + } + appendLine("### Prerequisites") + appendLine() + val requiredPrereqs = outline.prerequisites.filter { it.required } + val optionalPrereqs = outline.prerequisites.filter { !it.required } + + if (requiredPrereqs.isNotEmpty()) { + appendLine("#### Required") + requiredPrereqs.forEach { prereq -> + appendLine("**${prereq.name}** (${prereq.type})") + appendLine() + appendLine(prereq.description) + if (prereq.link != null) { + appendLine() + appendLine("Download: ${prereq.link}") + } + appendLine() + } + } + + if (optionalPrereqs.isNotEmpty()) { + appendLine("#### Optional") + optionalPrereqs.forEach { prereq -> + appendLine("**${prereq.name}** (${prereq.type})") + appendLine() + appendLine(prereq.description) + if (prereq.link != null) { + appendLine() + appendLine("Download: ${prereq.link}") + } + appendLine() + } + } + appendLine("---") + appendLine() + appendLine("### Tutorial Steps") + outline.steps.forEach { step -> + appendLine("#### Step ${step.step_number}: ${step.title}") + appendLine() + appendLine("**Purpose:** ${step.purpose}") + appendLine() + appendLine("**Actions:**") + step.actions.forEach { action -> + appendLine("- $action") + } + appendLine() + if (step.has_code) { + appendLine("*Includes code/commands*") + appendLine() + } + if (step.needs_screenshot) { + appendLine("*Screenshot needed*") + appendLine() + } + appendLine("**Expected Outcome:** ${step.expected_outcome}") + appendLine() + appendLine("**Time:** ~${step.estimated_time} min") + appendLine() + appendLine("---") + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + outlineTask.add(outlineContent.renderMarkdown) + task.update() + + overviewTask.add("✅ Phase 1 
Complete: Outline created (${outline.steps.size} steps)\n".renderMarkdown) + overviewTask.add("\n### Phase 2: Writing Steps\n*Developing detailed step-by-step instructions...*\n".renderMarkdown) + task.update() + transcript?.write("## Phase 2: Writing Steps\n\n".toByteArray()) + transcript?.write("Input Context:\n".toByteArray()) + if (messages.isNotEmpty()) { + transcript?.write("**Messages:** ${messages.size} items\n".toByteArray()) + messages.forEach { msg -> + transcript?.write("- ${msg.truncateForDisplay(100)}\n".toByteArray()) + } + transcript?.write("\n".toByteArray()) + } + if (inputFileContent.isNotBlank()) { + transcript?.write("**Input Files Loaded:** ${inputFileContent.length} characters\n\n".toByteArray()) + } + transcript?.write("Developing detailed step-by-step instructions...\n\n".toByteArray()) + + + // Phase 2: Write each step + log.info("Phase 2: Writing detailed steps") + val tutorialSteps = mutableListOf() + + outline.steps.forEachIndexed { index, stepOutline -> + log.info("Writing step ${index + 1}/${outline.steps.size}: ${stepOutline.title}") + + overviewTask.add("- Step ${index + 1}: ${stepOutline.title.truncateForDisplay(50)} ".renderMarkdown) + task.update() + transcript?.write("### Step ${index + 1}: ${stepOutline.title}\n\n".toByteArray()) + transcript?.write("Writing detailed instructions...\n\n".toByteArray()) + + + val stepTask = task.ui.newTask(false) + tabs["Step ${index + 1}"] = stepTask.placeholder + + stepTask.add( + buildString { + appendLine("# Step ${index + 1}: ${stepOutline.title}") + appendLine() + appendLine("**Status:** Writing detailed instructions...") + appendLine() + }.renderMarkdown + ) + task.update() + + // Build context from previous steps + val previousStepsContext = if (tutorialSteps.isNotEmpty()) { + buildString { + appendLine("## Previous Steps Summary") + tutorialSteps.takeLast(2).forEach { prevStep -> + appendLine("**Step ${prevStep.step_number}:** ${prevStep.title}") + appendLine("Outcome: 
${prevStep.expected_outcome}") + appendLine() + } + } + } else { + "This is the first step." + } + + val stepAgent = ParsedAgent( + resultClass = TutorialStep::class.java, + prompt = """ +You are an expert technical writer. Write detailed instructions for this tutorial step. + +Overall Goal: $goal +Target Platform: ${executionConfig.target_platform} +Skill Level: ${executionConfig.skill_level} +Verbosity: ${executionConfig.verbosity} + +Step to Write: +Number: ${stepOutline.step_number} +Title: ${stepOutline.title} +Purpose: ${stepOutline.purpose} +Actions: ${stepOutline.actions.joinToString("; ")} +Expected Outcome: ${stepOutline.expected_outcome} + +$previousStepsContext + +Write a complete step with: +1. Clear, ${executionConfig.verbosity} explanation of what to do and why +2. ${if (executionConfig.include_code_examples && stepOutline.has_code) "Exact commands or code to execute (with language/shell specified)" else ""} +3. ${if (executionConfig.include_screenshots_placeholders && stepOutline.needs_screenshot) "Screenshot placeholders where visual confirmation is helpful" else ""} +4. Description of expected outcome (what the user should see) +5. ${if (executionConfig.include_validation_steps) "Validation steps to verify success" else ""} +6. 
${if (executionConfig.include_troubleshooting) "Common issues that might occur in this step" else ""} + +Guidelines: +- Use ${executionConfig.verbosity} level of detail +- Write for ${executionConfig.skill_level} skill level +- Be specific about ${executionConfig.target_platform} requirements +- Use clear, imperative language ("Click...", "Run...", "Open...") +- Include exact file paths, commands, and values +- Explain technical terms if needed for skill level +- Number sub-steps if there are multiple actions + """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + var tutorialStep = stepAgent.answer(listOf("Write step")).obj + tutorialSteps.add(tutorialStep) + transcript?.write("**Completed:** ${tutorialStep.title}\n".toByteArray()) + transcript?.write("- Code blocks: ${tutorialStep.code_blocks.size}\n".toByteArray()) + transcript?.write("- Validation steps: ${tutorialStep.validation_steps.size}\n\n".toByteArray()) + + + val stepContent = buildString { + appendLine("## Step ${tutorialStep.step_number}: ${tutorialStep.title}") + appendLine() + appendLine(tutorialStep.explanation) + appendLine() + + if (tutorialStep.code_blocks.isNotEmpty()) { + appendLine("### Commands/Code") + appendLine() + tutorialStep.code_blocks.forEach { codeBlock -> + if (codeBlock.description.isNotBlank()) { + appendLine(codeBlock.description) + appendLine() + } + if (codeBlock.working_directory != null) { + appendLine("*Run in directory: `${codeBlock.working_directory}`*") + appendLine() + } + appendLine("```${codeBlock.language}") + appendLine(codeBlock.code) + appendLine("```") + appendLine() + } + } + + if (tutorialStep.screenshot_placeholders.isNotEmpty()) { + appendLine("### Visual Checkpoints") + tutorialStep.screenshot_placeholders.forEach { placeholder -> + appendLine("📸 $placeholder") + appendLine() + } + } + + appendLine("### Expected Outcome") + appendLine(tutorialStep.expected_outcome) + appendLine() + + if 
(tutorialStep.validation_steps.isNotEmpty()) { + appendLine("### Verify Success") + tutorialStep.validation_steps.forEachIndexed { idx, validation -> + appendLine("${idx + 1}. $validation") + } + appendLine() + } + + if (tutorialStep.common_issues.isNotEmpty()) { + appendLine("### Common Issues") + tutorialStep.common_issues.forEach { issue -> + appendLine("⚠️ $issue") + appendLine() + } + } + + appendLine("---") + appendLine() + appendLine("**Status:** ✅ Complete") + } + stepTask.add(stepContent.renderMarkdown) + task.update() + + overviewTask.add("✅\n".renderMarkdown) + task.update() + } + + overviewTask.add("✅ Phase 2 Complete: All steps written\n".renderMarkdown) + + // Phase 3: Troubleshooting section (if enabled) + var troubleshootingSection: TroubleshootingSection? = null + if (executionConfig.include_troubleshooting) { + overviewTask.add("\n### Phase 3: Troubleshooting\n*Compiling common issues and solutions...*\n".renderMarkdown) + task.update() + transcript?.write("## Phase 3: Troubleshooting\n\n".toByteArray()) + transcript?.write("Compiling common issues and solutions...\n\n".toByteArray()) + + + log.info("Phase 3: Creating troubleshooting section") + val troubleshootingTask = task.ui.newTask(false) + tabs["Troubleshooting"] = troubleshootingTask.placeholder + + troubleshootingTask.add( + buildString { + appendLine("# Troubleshooting") + appendLine() + appendLine("**Status:** Identifying common problems...") + appendLine() + }.renderMarkdown + ) + task.update() + + val troubleshootingAgent = ParsedAgent( + resultClass = TroubleshootingSection::class.java, + prompt = """ +You are an expert technical support specialist. Create a troubleshooting section for this tutorial. 
+ +Goal: $goal +Target Platform: ${executionConfig.target_platform} +Skill Level: ${executionConfig.skill_level} + +Tutorial Steps Summary: +${tutorialSteps.joinToString("\n") { "Step ${it.step_number}: ${it.title}" }} + +Identify 5-8 common problems users might encounter, including: +- Platform-specific issues +- Configuration errors +- Permission problems +- Version compatibility issues +- Common mistakes or misunderstandings +- Environment setup problems + +For each issue, provide: +- Clear description of the problem +- Symptoms or error messages users might see +- Possible causes +- Step-by-step solutions (multiple if applicable) + +Focus on issues that: +- Are likely to occur for ${executionConfig.skill_level} users +- Are specific to ${executionConfig.target_platform} +- Have clear, actionable solutions +- Aren't already covered in step-specific troubleshooting + """.trimIndent(), + model = api, + temperature = 0.5, + parsingChatter = orchestrationConfig.parsingChatter + ) + + troubleshootingSection = troubleshootingAgent.answer(listOf("Create troubleshooting")).obj + transcript?.write("**Troubleshooting Issues Identified:** ${troubleshootingSection.issues.size}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + + + val troubleshootingContent = buildString { + appendLine("## Common Issues and Solutions") + appendLine() + if (troubleshootingSection.issues.isEmpty()) { + appendLine("No common issues identified. If you encounter problems, check:") + appendLine("- Prerequisites are correctly installed") + appendLine("- Commands are run in the correct directory") + appendLine("- Platform-specific requirements are met") + } else { + troubleshootingSection.issues.forEachIndexed { index, issue -> + appendLine("### ${index + 1}. 
${issue.problem}") + appendLine() + if (issue.symptoms.isNotEmpty()) { + appendLine("**Symptoms:**") + issue.symptoms.forEach { symptom -> + appendLine("- $symptom") + } + appendLine() + } + if (issue.causes.isNotEmpty()) { + appendLine("**Possible Causes:**") + issue.causes.forEach { cause -> + appendLine("- $cause") + } + appendLine() + } + appendLine("**Solutions:**") + issue.solutions.forEachIndexed { solIdx, solution -> + appendLine("${solIdx + 1}. $solution") + } + appendLine() + if (index < troubleshootingSection.issues.size - 1) { + appendLine("---") + appendLine() + } + } + } + appendLine() + appendLine("**Status:** ✅ Complete") + } + troubleshootingTask.add(troubleshootingContent.renderMarkdown) + task.update() + + overviewTask.add("✅ Phase 3 Complete: Troubleshooting section added\n".renderMarkdown) + } + + // Phase 4: Next Steps (if enabled) + var nextSteps: NextSteps? = null + if (executionConfig.include_next_steps) { + overviewTask.add("\n### Phase 4: Next Steps\n*Suggesting further learning paths...*\n".renderMarkdown) + task.update() + transcript?.write("## Phase 4: Next Steps\n\n".toByteArray()) + transcript?.write("Suggesting further learning paths...\n\n".toByteArray()) + + + log.info("Phase 4: Creating next steps section") + val nextStepsTask = task.ui.newTask(false) + tabs["Next Steps"] = nextStepsTask.placeholder + + nextStepsTask.add( + buildString { + appendLine("# Next Steps") + appendLine() + appendLine("**Status:** Generating recommendations...") + appendLine() + }.renderMarkdown + ) + task.update() + + val nextStepsAgent = ParsedAgent( + resultClass = NextSteps::class.java, + prompt = """ +You are an expert educator. Suggest next steps for learners who completed this tutorial. + +Goal Achieved: $goal +Skill Level: ${executionConfig.skill_level} + +Provide: +1. 3-5 suggestions for what to try next or how to extend what they learned +2. 3-5 related tutorials, documentation, or resources +3. 
3-5 advanced topics to explore + +Make suggestions: +- Progressive (build on what was learned) +- Appropriate for ${executionConfig.skill_level} moving to the next level +- Specific and actionable +- Include mix of practice, learning, and exploration + """.trimIndent(), + model = api, + temperature = 0.6, + parsingChatter = orchestrationConfig.parsingChatter + ) + + nextSteps = nextStepsAgent.answer(listOf("Generate next steps")).obj + transcript?.write("**Next Steps Generated:**\n".toByteArray()) + transcript?.write("- Suggestions: ${nextSteps.suggestions.size}\n".toByteArray()) + transcript?.write("- Resources: ${nextSteps.related_resources.size}\n\n".toByteArray()) + transcript?.write("---\n\n".toByteArray()) + + + val nextStepsContent = buildString { + appendLine("## What's Next?") + appendLine() + appendLine("Congratulations on completing this tutorial! Here are some ways to continue your learning:") + appendLine() + if (nextSteps.suggestions.isNotEmpty()) { + appendLine("### Try These Next") + nextSteps.suggestions.forEach { suggestion -> + appendLine("- $suggestion") + } + appendLine() + } + if (nextSteps.related_resources.isNotEmpty()) { + appendLine("### Related Resources") + nextSteps.related_resources.forEach { resource -> + appendLine("- $resource") + } + appendLine() + } + if (nextSteps.advanced_topics.isNotEmpty()) { + appendLine("### Advanced Topics") + nextSteps.advanced_topics.forEach { topic -> + appendLine("- $topic") + } + appendLine() + } + appendLine("**Status:** ✅ Complete") + } + nextStepsTask.add(nextStepsContent.renderMarkdown) + task.update() + + overviewTask.add("✅ Phase 4 Complete: Next steps added\n".renderMarkdown) + } + + // Phase 5: Final Assembly + overviewTask.add("\n### Phase 5: Final Assembly\n*Compiling complete tutorial...*\n".renderMarkdown) + task.update() + transcript?.write("## Phase 5: Final Assembly\n\n".toByteArray()) + transcript?.write("Compiling complete tutorial...\n\n".toByteArray()) + + + log.info("Phase 5: 
Assembling final tutorial") + val finalTask = task.ui.newTask(false) + tabs["Complete Tutorial"] = finalTask.placeholder + + val finalTutorial = buildString { + appendLine("# ${outline.title}") + appendLine() + appendLine(outline.description) + appendLine() + appendLine("**⏱️ Estimated Time:** ${outline.estimated_time} minutes") + appendLine() + appendLine("**🎯 Skill Level:** ${executionConfig.skill_level.capitalize()}") + appendLine() + appendLine("**💻 Platform:** ${executionConfig.target_platform}") + appendLine() + appendLine("---") + appendLine() + + if (outline.learning_objectives.isNotEmpty()) { + appendLine("## What You'll Learn") + appendLine() + outline.learning_objectives.forEach { objective -> + appendLine("✓ $objective") + } + appendLine() + appendLine("---") + appendLine() + } + + appendLine("## Prerequisites") + appendLine() + val requiredPrereqs = outline.prerequisites.filter { it.required } + val optionalPrereqs = outline.prerequisites.filter { !it.required } + + if (requiredPrereqs.isNotEmpty()) { + appendLine("### Required") + appendLine() + requiredPrereqs.forEach { prereq -> + appendLine("- **${prereq.name}** (${prereq.type}): ${prereq.description}") + if (prereq.link != null) { + appendLine(" - Download: ${prereq.link}") + } + } + appendLine() + } + + if (optionalPrereqs.isNotEmpty()) { + appendLine("### Optional") + appendLine() + optionalPrereqs.forEach { prereq -> + appendLine("- **${prereq.name}** (${prereq.type}): ${prereq.description}") + if (prereq.link != null) { + appendLine(" - Download: ${prereq.link}") + } + } + appendLine() + } + + appendLine("---") + appendLine() + appendLine("## Tutorial Steps") + appendLine() + + tutorialSteps.forEach { step -> + appendLine("### Step ${step.step_number}: ${step.title}") + appendLine() + appendLine(step.explanation) + appendLine() + + if (step.code_blocks.isNotEmpty()) { + step.code_blocks.forEach { codeBlock -> + if (codeBlock.description.isNotBlank()) { + appendLine(codeBlock.description) + 
appendLine() + } + if (codeBlock.working_directory != null) { + appendLine("*Run in: `${codeBlock.working_directory}`*") + appendLine() + } + appendLine("```${codeBlock.language}") + appendLine(codeBlock.code) + appendLine("```") + appendLine() + } + } + + if (step.screenshot_placeholders.isNotEmpty()) { + step.screenshot_placeholders.forEach { placeholder -> + appendLine("📸 $placeholder") + appendLine() + } + } + + appendLine("**Expected Outcome:** ${step.expected_outcome}") + appendLine() + + if (step.validation_steps.isNotEmpty()) { + appendLine("**Verify Success:**") + step.validation_steps.forEachIndexed { idx, validation -> + appendLine("${idx + 1}. $validation") + } + appendLine() + } + + if (step.common_issues.isNotEmpty()) { + appendLine("**⚠️ Common Issues:**") + step.common_issues.forEach { issue -> + appendLine("- $issue") + } + appendLine() + } + + appendLine("---") + appendLine() + } + + if (troubleshootingSection != null && troubleshootingSection.issues.isNotEmpty()) { + appendLine("## Troubleshooting") + appendLine() + troubleshootingSection.issues.forEachIndexed { index, issue -> + appendLine("### ${index + 1}. ${issue.problem}") + appendLine() + if (issue.symptoms.isNotEmpty()) { + appendLine("**Symptoms:**") + issue.symptoms.forEach { symptom -> + appendLine("- $symptom") + } + appendLine() + } + if (issue.causes.isNotEmpty()) { + appendLine("**Possible Causes:**") + issue.causes.forEach { cause -> + appendLine("- $cause") + } + appendLine() + } + appendLine("**Solutions:**") + issue.solutions.forEachIndexed { solIdx, solution -> + appendLine("${solIdx + 1}. 
$solution") + } + appendLine() + } + appendLine("---") + appendLine() + } + + if (nextSteps != null) { + appendLine("## Next Steps") + appendLine() + appendLine("🎉 Congratulations on completing this tutorial!") + appendLine() + if (nextSteps.suggestions.isNotEmpty()) { + appendLine("### Try These Next") + nextSteps.suggestions.forEach { suggestion -> + appendLine("- $suggestion") + } + appendLine() + } + if (nextSteps.related_resources.isNotEmpty()) { + appendLine("### Related Resources") + nextSteps.related_resources.forEach { resource -> + appendLine("- $resource") + } + appendLine() + } + if (nextSteps.advanced_topics.isNotEmpty()) { + appendLine("### Advanced Topics") + nextSteps.advanced_topics.forEach { topic -> + appendLine("- $topic") + } + appendLine() + } + } + } + + finalTask.add(finalTutorial.renderMarkdown) + tutorialOutputFile?.write(finalTutorial.toByteArray(Charsets.UTF_8)) + task.update() + + // Final statistics + val totalTime = System.currentTimeMillis() - startTime + val totalWords = finalTutorial.split("\\s+".toRegex()).size + transcript?.write("## Generation Complete\n\n".toByteArray()) + transcript?.write("**Statistics:**\n".toByteArray()) + transcript?.write("- Total Steps: ${tutorialSteps.size}\n".toByteArray()) + transcript?.write("- Prerequisites: ${outline.prerequisites.size}\n".toByteArray()) + transcript?.write("- Word Count: $totalWords\n".toByteArray()) + transcript?.write("- Code Blocks: ${tutorialSteps.sumOf { it.code_blocks.size }}\n".toByteArray()) + transcript?.write("- Total Time: ${totalTime / 1000.0}s\n\n".toByteArray()) + transcript?.write("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}\n".toByteArray()) + transcript?.flush() + transcript?.close() + + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ✅ Generation Complete") + appendLine() + appendLine("**Output Files:**") + appendLine("- [Complete Tutorial](tutorial.md)") + 
appendLine("- [Transcript](transcript.md)") + appendLine() + appendLine("**Statistics:**") + appendLine("- Total Steps: ${tutorialSteps.size}") + appendLine("- Prerequisites: ${outline.prerequisites.size}") + appendLine("- Estimated Duration: ${outline.estimated_time} minutes") + appendLine("- Word Count: $totalWords") + appendLine("- Code Blocks: ${tutorialSteps.sumOf { it.code_blocks.size }}") + if (troubleshootingSection != null) { + appendLine("- Troubleshooting Issues: ${troubleshootingSection.issues.size}") + } + appendLine("- Total Time: ${totalTime / 1000.0}s") + appendLine() + appendLine("**Completed:** ${LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"))}") + }.renderMarkdown + ) + task.update() + + // Write final tutorial to file + tutorialOutputFile?.flush() + tutorialOutputFile?.close() + + // Concise summary for resultFn + val finalResult = buildString { + appendLine("# ✅ Tutorial Generated: ${outline.title}") + appendLine() + appendLine("A comprehensive tutorial with **${tutorialSteps.size} steps** was successfully generated.") + appendLine() + appendLine("## Summary") + appendLine() + appendLine("**Goal:** $goal") + appendLine("**Platform:** ${executionConfig.target_platform}") + appendLine("**Skill Level:** ${executionConfig.skill_level}") + appendLine("**Estimated Duration:** ${outline.estimated_time} minutes") + appendLine("**Key Features:**") + appendLine("- ${outline.prerequisites.size} prerequisites identified") + appendLine("- ${tutorialSteps.size} detailed steps with explanations") + appendLine("- ${tutorialSteps.sumOf { it.code_blocks.size }} code examples") + appendLine("- ${tutorialSteps.sumOf { it.validation_steps.size }} validation steps") + appendLine("- Estimated completion time: ${outline.estimated_time} minutes") + if (troubleshootingSection != null) { + appendLine("- ${troubleshootingSection.issues.size} troubleshooting scenarios") + } + appendLine() + appendLine("## Output Files") + appendLine() + 
appendLine("- **Complete Tutorial:** [tutorial.md](tutorial.md)") + appendLine("- **Transcript:** [transcript.md](transcript.md)") + appendLine() + appendLine("**Generation Time:** ${totalTime / 1000.0}s") + } + + log.info("TutorialGenerationTask completed: steps=${tutorialSteps.size}, time=${totalTime}ms") + + task.safeComplete("Tutorial generation complete: ${tutorialSteps.size} steps in ${totalTime / 1000}s", log) + resultFn(finalResult) + + } catch (e: Exception) { + log.error("Error during tutorial generation", e) + task.error(e) + transcript?.write("\n## Error Occurred\n\n".toByteArray()) + transcript?.write("**Error:** ${e.message}\n\n".toByteArray()) + transcript?.flush() + transcript?.close() + + overviewTask.add( + buildString { + appendLine() + appendLine("---") + appendLine() + appendLine("## ❌ Error Occurred") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + appendLine("**Type:** ${e.javaClass.simpleName}") + }.renderMarkdown + ) + task.update() + tutorialOutputFile?.close() + + val errorOutput = buildString { + appendLine("# Error in Tutorial Generation") + appendLine() + appendLine("**Goal:** $goal") + appendLine() + appendLine("**Error:** ${e.message}") + appendLine() + if (resultBuilder.isNotEmpty()) { + appendLine("## Partial Results") + appendLine() + appendLine(resultBuilder.toString()) + } + } + resultFn(errorOutput) + } + } + + private fun createTutorialOutputFile(task: SessionTask): FileOutputStream? 
{ + return try { + val (link, file) = task.createFile("tutorial.md") + log.info("Created tutorial output file: $link") + file?.outputStream() + } catch (e: Exception) { + log.error("Failed to create tutorial output file", e) + null + } + } + + + private fun getContextFiles(): String { + val relatedFiles = executionConfig?.related_files ?: return "" + if (relatedFiles.isEmpty()) return "" + log.debug("Loading ${relatedFiles.size} related context files") + + return buildString { + appendLine("## Related Documentation Files") + appendLine() + relatedFiles.forEach { file -> + try { + val filePath = root.resolve(file) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded context file: $file") + appendLine("### $file") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(1500)) + appendLine("```") + appendLine() + } else { + log.warn("Context file not found: $file") + } + } catch (e: Exception) { + log.warn("Error reading file: $file", e) + } + } + } + } + + private fun getInputFileCode(): String { + val inputFiles = executionConfig?.input_files ?: return "" + if (inputFiles.isEmpty()) return "" + log.debug("Loading ${inputFiles.size} input files") + return buildString { + appendLine("## Input Files Context") + appendLine() + inputFiles.forEach { pattern -> + try { + val filePath = root.resolve(pattern) + if (filePath.toFile().exists()) { + log.debug("Successfully loaded input file: $pattern") + appendLine("### $pattern") + appendLine("```") + appendLine(filePath.toFile().readText().truncateForDisplay(2000)) + appendLine("```") + appendLine() + } else { + log.warn("Input file not found: $pattern") + } + } catch (e: Exception) { + log.warn("Error reading input file: $pattern", e) + } + } + } + } + + private fun transcript(task: SessionTask): FileOutputStream? 
{ + val (link, file) = task.createFile("transcript.md") + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + companion object { + private val log: Logger = LoggerFactory.getLogger(TutorialGenerationTask::class.java) + val TutorialGeneration = TaskType( + "TutorialGeneration", + TutorialGenerationTaskExecutionConfigData::class.java, + TaskTypeConfig::class.java, + "Create complete, step-by-step tutorials for processes and projects", + """ + Generates comprehensive tutorials with clear, actionable steps. +
      +
    • Creates detailed outline with prerequisites and learning objectives
    • +
    • Breaks process into logical, numbered steps
    • +
    • Generates exact commands and code examples
    • +
    • Includes expected outcomes and validation steps
    • +
    • Adds screenshot placeholders for visual guidance
    • +
    • Provides troubleshooting section for common issues
    • +
    • Suggests next steps for continued learning
    • +
    • Configurable verbosity and skill level
    • +
    • Platform-specific instructions and requirements
    • +
    • Ideal for how-to guides, educational content, and project-based learning
    • +
    + """ + ) + } +} \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/platform/model/UserSettingsInterface.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/platform/model/UserSettingsInterface.kt index edb24ea1d..61cc30a42 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/platform/model/UserSettingsInterface.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/platform/model/UserSettingsInterface.kt @@ -19,21 +19,21 @@ import com.simiacryptus.cognotik.platform.file.UserSettingsManager * Provides methods to retrieve and update settings for individual users. */ interface UserSettingsInterface { - /** - * Retrieves the settings for a specific user. - * - * @param user The user whose settings should be retrieved. Defaults to UserSettingsManager.defaultUser - * @return UserSettings object containing the user's configuration - */ - fun getUserSettings(user: User = UserSettingsManager.defaultUser): UserSettings + /** + * Retrieves the settings for a specific user. + * + * @param user The user whose settings should be retrieved. Defaults to UserSettingsManager.defaultUser + * @return UserSettings object containing the user's configuration + */ + fun getUserSettings(user: User = UserSettingsManager.defaultUser): UserSettings - /** - * Updates the settings for a specific user. - * - * @param user The user whose settings should be updated - * @param settings The new UserSettings object to save for the user - */ - fun updateUserSettings(user: User, settings: UserSettings) + /** + * Updates the settings for a specific user. + * + * @param user The user whose settings should be updated + * @param settings The new UserSettings object to save for the user + */ + fun updateUserSettings(user: User, settings: UserSettings) } /** @@ -44,9 +44,9 @@ interface UserSettingsInterface { * @property command The actual command or script to execute when the tool is invoked */ data class ToolData( - val name: String? 
= null, - val description: String? = null, - val command: String? = null, + val name: String? = null, + val description: String? = null, + val command: String? = null, ) /** @@ -60,31 +60,31 @@ data class ToolData( @JsonSerialize(using = UserSettingsSerializer::class) @JsonDeserialize(using = UserSettingsDeserializer::class) data class UserSettings( - val apis: MutableList = mutableListOf(), - val tools: MutableList = mutableListOf(), - val etc: MutableMap = mutableMapOf(), + val apis: MutableList = mutableListOf(), + val tools: MutableList = mutableListOf(), + val etc: MutableMap = mutableMapOf(), ) { - /** - * @deprecated Use the 'apis' property instead. This provides backward compatibility - * for legacy code expecting a Map of APIProvider to base URL. - * @return Map of API providers to their base URLs extracted from the apis list - */ - @get:JsonIgnore - @get:Deprecated("Use this.apis instead") - val apiBase: Map - get() = apis.associate { - it.provider!! to (it.baseUrl ?: "") - } + /** + * @deprecated Use the 'apis' property instead. This provides backward compatibility + * for legacy code expecting a Map of APIProvider to base URL. + * @return Map of API providers to their base URLs extracted from the apis list + */ + @get:JsonIgnore + @get:Deprecated("Use this.apis instead") + val apiBase: Map + get() = apis.associate { + it.provider!! to (it.baseUrl ?: "") + } - /** - * @deprecated Use the 'tools' property instead. This provides backward compatibility - * for legacy code expecting a simple list of tool names. - * @return List of tool names extracted from the tools list - */ - @get:JsonIgnore - @get:Deprecated("Use this.tools instead") - val localTools: List = tools.mapNotNull { it.name } + /** + * @deprecated Use the 'tools' property instead. This provides backward compatibility + * for legacy code expecting a simple list of tool names. 
+ * @return List of tool names extracted from the tools list + */ + @get:JsonIgnore + @get:Deprecated("Use this.tools instead") + val localTools: List = tools.mapNotNull { it.name } } @@ -93,106 +93,114 @@ data class UserSettings( * Serializes UserSettings to JSON format with apis, tools, and etc fields. */ class UserSettingsSerializer : JsonSerializer() { - /** - * Custom JSON deserializer for UserSettings. - * Handles both new format (apis/tools/etc) and legacy format (apiKeys/apiBase/localTools) - * for backward compatibility with existing user configuration files. - */ - override fun serialize(value: UserSettings, gen: JsonGenerator, serializers: SerializerProvider) { - gen.writeStartObject() - gen.writeObjectField("apis", value.apis) - gen.writeObjectField("tools", value.tools) - gen.writeObjectField("etc", value.etc) - gen.writeEndObject() - } + /** + * Custom JSON deserializer for UserSettings. + * Handles both new format (apis/tools/etc) and legacy format (apiKeys/apiBase/localTools) + * for backward compatibility with existing user configuration files. + */ + override fun serialize(value: UserSettings, gen: JsonGenerator, serializers: SerializerProvider) { + gen.writeStartObject() + gen.writeObjectField("apis", value.apis) + gen.writeObjectField("tools", value.tools) + gen.writeObjectField("etc", value.etc) + gen.writeEndObject() + } } class UserSettingsDeserializer : JsonDeserializer() { - /** - * Custom JSON deserializer for ApiChatModel. - * Handles deserialization from both string format (model name) and object format - * (containing model and provider information). 
- */ - override fun deserialize(p: JsonParser, ctxt: DeserializationContext): UserSettings { - val node = p.readValueAsTree() - // Check if this is the new format (has apis/tools fields) - if (node.has("apis") || node.has("tools")) { - val apis = if (node.has("apis")) { - p.codec.treeToValue(node.get("apis"), Array::class.java).toMutableList() - } else { - mutableListOf() - } - val tools = if (node.has("tools")) { - p.codec.treeToValue(node.get("tools"), Array::class.java).toMutableList() - } else { - mutableListOf() - } - val etc = if (node.has("etc")) { - p.codec.treeToValue(node.get("etc"), MutableMap::class.java) as MutableMap - } else { - mutableMapOf() - } - return UserSettings(apis, tools, etc) - } - // Handle legacy format (apiKeys, apiBase, localTools) - val apiKeys = if (node.has("apiKeys")) { - (p.codec.treeToValue( - node.get("apiKeys"), - Map::class.java - ) as Map).mapKeys { APIProvider.valueOf(it.key) } - } else { - emptyMap() - } - val apiBase = if (node.has("apiBase")) { - (p.codec.treeToValue( - node.get("apiBase"), - Map::class.java - ) as Map).mapKeys { APIProvider.valueOf(it.key) } - } else { - emptyMap() - } - val localTools = if (node.has("localTools")) { - p.codec.treeToValue(node.get("localTools"), Array::class.java).toList() - } else { - emptyList() - } - return UserSettings(toApiList(apiKeys, apiBase), toTools(localTools)) + /** + * Custom JSON deserializer for ApiChatModel. + * Handles deserialization from both string format (model name) and object format + * (containing model and provider information). 
+ */ + override fun deserialize(p: JsonParser, ctxt: DeserializationContext): UserSettings { + val node = p.readValueAsTree() + // Check if this is the new format (has apis/tools fields) + if (node.has("apis") || node.has("tools")) { + val apis = if (node.has("apis")) { + p.codec.treeToValue(node.get("apis"), Array::class.java).toMutableList() + } else { + mutableListOf() + } + val tools = if (node.has("tools")) { + p.codec.treeToValue(node.get("tools"), Array::class.java).toMutableList() + } else { + mutableListOf() + } + val etc = if (node.has("etc")) { + p.codec.treeToValue(node.get("etc"), MutableMap::class.java) as MutableMap + } else { + mutableMapOf() + } + return UserSettings(apis, tools, etc) } + // Handle legacy format (apiKeys, apiBase, localTools) + val apiKeys = if (node.has("apiKeys")) { + (p.codec.treeToValue( + node.get("apiKeys"), + Map::class.java + ) as Map).mapKeys { APIProvider.valueOf(it.key) } + } else { + emptyMap() + } + val apiBase = if (node.has("apiBase")) { + (p.codec.treeToValue( + node.get("apiBase"), + Map::class.java + ) as Map).mapKeys { APIProvider.valueOf(it.key) } + } else { + emptyMap() + } + val localTools = if (node.has("localTools")) { + p.codec.treeToValue(node.get("localTools"), Array::class.java).toList() + } else { + emptyList() + } + return UserSettings(toApiList(apiKeys, apiBase), toTools(localTools)) + } } class ApiChatModelDeserializer : JsonDeserializer() { - override fun deserialize(p: JsonParser, ctxt: DeserializationContext): ApiChatModel? 
{ - return when (p.currentToken) { - com.fasterxml.jackson.core.JsonToken.VALUE_STRING -> { - // Handle string format - find model by name/key - val modelName = p.readValueAs(String::class.java) - val model = ChatModel.values().entries.find { - it.key == modelName || it.value.name == modelName || it.value.modelName == modelName - }?.value ?: throw IllegalArgumentException("Unknown model: $modelName") - ApiChatModel(model, null) - } - - com.fasterxml.jackson.core.JsonToken.START_OBJECT -> { - // Handle object format - val node = p.readValueAsTree() - if (node.has("model") && node.has("provider")) { - val model = p.codec.treeToValue(node.get("model"), ChatModel::class.java) - val provider = p.codec.treeToValue(node.get("provider"), ApiData::class.java) - ApiChatModel(model, provider) - } else if (node.has("modelName")) { - val modelName = node.get("modelName").asText() - val model = ChatModel.values().values.firstOrNull { it.modelName == modelName } - ?: throw IllegalArgumentException("Unknown model: $modelName") - ApiChatModel(model, null) - } else { - //throw IllegalArgumentException("Invalid ApiChatModel object format") - null - } - } + override fun deserialize(p: JsonParser, ctxt: DeserializationContext): ApiChatModel? 
{ + return when (p.currentToken) { + com.fasterxml.jackson.core.JsonToken.VALUE_STRING -> { + try { + val modelName = p.readValueAs(String::class.java) + // Handle string format - find model by name/key + val model = ChatModel.values().entries.find { + it.key == modelName || it.value.name == modelName || it.value.modelName == modelName + }?.value ?: throw IllegalArgumentException("Unknown model: $modelName") + ApiChatModel(model, null) + } catch (e: Exception) { + throw IllegalArgumentException("Error deserializing ApiChatModel: ${e.message}", e) + } + } - else -> null // throw IllegalArgumentException("ApiChatModel must be deserialized from either a string or an object") + com.fasterxml.jackson.core.JsonToken.START_OBJECT -> { + // Handle object format + val node = p.readValueAsTree() + try { + if (node.has("model") && node.has("provider")) { + val model = p.codec.treeToValue(node.get("model"), ChatModel::class.java) + val provider = p.codec.treeToValue(node.get("provider"), ApiData::class.java) + ApiChatModel(model, provider) + } else if (node.has("modelName")) { + val modelName = node.get("modelName").asText() + val model = ChatModel.values().values.firstOrNull { it.modelName == modelName } + ?: throw IllegalArgumentException("Unknown model: $modelName") + ApiChatModel(model, null) + } else { + //throw IllegalArgumentException("Invalid ApiChatModel object format") + null + } + } catch (e: Exception) { + throw IllegalArgumentException("Error deserializing ApiChatModel: ${e.message}", e) } + } + + else -> null // throw IllegalArgumentException("ApiChatModel must be deserialized from either a string or an object") } + } } @@ -206,32 +214,32 @@ class ApiChatModelDeserializer : JsonDeserializer() { * @property provider The API provider type (OpenAI, Anthropic, Google, etc.) */ data class ApiData( - val name: String? = null, - val key: String? = null, - val baseUrl: String = "", - val provider: APIProvider? = null, + val name: String? = null, + val key: String? 
= null, + val baseUrl: String = "", + val provider: APIProvider? = null, ) { - /** - * Validates this API configuration. - * Checks that provider is set, API key is not blank, and for chat-capable providers, - * ensures at least one chat model is available. - * - * @return This ApiData instance if validation passes - * @throws IllegalStateException if validation fails - */ - fun validate(): ApiData { - if (provider == null) throw IllegalStateException("Provider not set or invalid") - if (key == null) throw IllegalStateException("API key not set") - // Only validate chat models for providers that support chat functionality - val supportsChatModels = provider.getChatModels(key, baseUrl).isNotEmpty() - if (supportsChatModels) { - val model = ChatModel.values().values.firstOrNull { it.provider == provider } - if (model == null) { - throw IllegalStateException("No chat model available for provider $provider") - } - } - return this + /** + * Validates this API configuration. + * Checks that provider is set, API key is not blank, and for chat-capable providers, + * ensures at least one chat model is available. + * + * @return This ApiData instance if validation passes + * @throws IllegalStateException if validation fails + */ + fun validate(): ApiData { + if (provider == null) throw IllegalStateException("Provider not set or invalid") + if (key == null) throw IllegalStateException("API key not set") + // Only validate chat models for providers that support chat functionality + val supportsChatModels = provider.getChatModels(key, baseUrl).isNotEmpty() + if (supportsChatModels) { + val model = ChatModel.values().values.firstOrNull { it.provider == provider } + if (model == null) { + throw IllegalStateException("No chat model available for provider $provider") + } } + return this + } } /** @@ -242,8 +250,8 @@ data class ApiData( */ @JsonDeserialize(using = ApiChatModelDeserializer::class) data class ApiChatModel( - val model: ChatModel? = null, - val provider: ApiData? 
= null, + val model: ChatModel? = null, + val provider: ApiData? = null, ) /** @@ -255,11 +263,11 @@ data class ApiChatModel( * @return MutableList of ApiData objects representing the converted configuration */ fun toApiList( - apiKeys: Map, apiBase: Map + apiKeys: Map, apiBase: Map ): MutableList = apiKeys.map { - ApiData( - key = it.value, baseUrl = apiBase[it.key] ?: it.key.base, provider = it.key - ).validate() + ApiData( + key = it.value, baseUrl = apiBase[it.key] ?: it.key.base, provider = it.key + ).validate() }.toMutableList() /** diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/util/AddApplyFileDiffLinks.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/util/AddApplyFileDiffLinks.kt index 8da3bbe02..3d7a61829 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/util/AddApplyFileDiffLinks.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/util/AddApplyFileDiffLinks.kt @@ -9,10 +9,12 @@ import com.simiacryptus.cognotik.diff.SimpleDiffApplier import com.simiacryptus.cognotik.util.FileSelectionUtils.prefilterFilename import com.simiacryptus.cognotik.util.FileSelectionUtils.resolveToRelativePath import com.simiacryptus.cognotik.webui.session.SocketManager +import com.simiacryptus.cognotik.webui.session.resolve import java.io.File import java.nio.file.Path import java.time.Duration import java.time.Instant +import java.util.* import kotlin.io.path.readText open class AddApplyFileDiffLinks(val processor: PatchProcessor) { @@ -125,7 +127,14 @@ open class AddApplyFileDiffLinks(val processor: PatchProcessor) { private fun String.reverseLines(): String = lines().reversed().joinToString("\n") - fun instrument( + private fun record(socketManager: SocketManager, data: Any): String { + val relativePath = UUID.randomUUID().toString() + ".json" + require(relativePath.isNotBlank()) { "File path cannot be blank" } + socketManager.resolve(relativePath)?.writeText(data.toJson()) + return "Patch Data" + } + + fun instrument( self: SocketManager, 
root: Path, response: String, @@ -180,6 +189,7 @@ open class AddApplyFileDiffLinks(val processor: PatchProcessor) { false } }.flatMap { it.second }.map { it.range to it }.toList() + val patchBlocks = resolvedMatches.filter { (header, block) -> try { val resolvedPath = resolver(root, header ?: return@filter false) @@ -192,8 +202,7 @@ open class AddApplyFileDiffLinks(val processor: PatchProcessor) { val withPatchLinks: String = patchBlocks.foldIndexed(response) { index, markdown, diffBlock -> val diffValue = diffBlock.second.groupValues[2].trim() - val header = - headers.lastOrNull { it.first.last < diffBlock.first.first }?.second ?: defaultFile ?: "Unknown" + val header = headers.lastOrNull { it.first.last < diffBlock.first.first }?.second ?: defaultFile ?: "Unknown" val filename = resolver(root, normalizeFilename(header)) if (filename.isNullOrBlank()) return@foldIndexed markdown val newValue = renderDiffBlock(root, filename, diffValue, handle, self, shouldAutoApply) @@ -210,10 +219,18 @@ open class AddApplyFileDiffLinks(val processor: PatchProcessor) { if (header.isNullOrBlank()) return markdown val filename = prefilterFilename(normalizeFilename(header)) if (filename.isNullOrBlank()) return markdown - val newMarkdown = renderNewFile(root, filename, codeValue, handle, self, lang, shouldAutoApply) + val newMarkdown = renderNewFile(root, filename, codeValue, handle, self, lang, shouldAutoApply) + record( + self, mapOf( + "filename" to filename, + "code" to codeValue, + "header" to header, + "language" to lang, + ) + ) markdown.replace(codeBlock.second.value, newMarkdown) } - return withSaveLinks + + return withSaveLinks } } @@ -451,7 +468,16 @@ open class AddApplyFileDiffLinks(val processor: PatchProcessor) { diffTask.placeholder + "\n" + applydiffTask.placeholder } else { diffTask.placeholder + """
    Warning: The patch is not valid: ${newCode.error?.renderMarkdown() ?: "???"}
    """ + applydiffTask.placeholder - } + } + record( + ui, mapOf( + "filename" to filename, + "originalCode" to prevCode, + "diff" to diffVal, + "newCode" to newCode.newCode, + "isValid" to newCode.isValid, + "errors" to newCode.error, + ) + ) } private val DiffApplicationResult.patchResult diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/application/ApplicationServer.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/application/ApplicationServer.kt index 3f22ee765..0336f5a09 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/application/ApplicationServer.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/application/ApplicationServer.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.application -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent import com.simiacryptus.cognotik.platform.ApplicationServices import com.simiacryptus.cognotik.platform.ApplicationServices.authenticationManager import com.simiacryptus.cognotik.platform.ApplicationServices.authorizationManager diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/chat/ChatSocketManager.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/chat/ChatSocketManager.kt index 4f8186f15..a0142dcff 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/chat/ChatSocketManager.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/chat/ChatSocketManager.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.chat -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.chat.model.ChatInterface import com.simiacryptus.cognotik.models.ModelSchema @@ -16,16 +16,17 @@ import com.simiacryptus.cognotik.webui.session.SocketManager import com.simiacryptus.cognotik.webui.session.getChildClient 
import java.io.FileOutputStream import java.io.OutputStream +import java.util.* import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.atomic.AtomicReference open class ChatSocketManager( - session: Session, - var useExpansionSyntax: Boolean = true, - var model: ChatInterface, - var parsingModel: ChatInterface, - val userInterfacePrompt: String = (if (!useExpansionSyntax) "" else """ + session: Session, + var useExpansionSyntax: Boolean = true, + var model: ChatInterface, + var parsingModel: ChatInterface, + val userInterfacePrompt: String = (if (!useExpansionSyntax) "" else """
    Query Expansion Syntax Guide @@ -54,382 +55,404 @@ open class ChatSocketManager(
    """.trimIndent()), - open val systemPrompt: String, - var temperature: Double = 0.3, - applicationClass: Class, - val storage: StorageInterface?, - open val fastTopicParsing: Boolean = true, - val retriable: Boolean = true, - val budget: Double, + open val systemPrompt: String, + var temperature: Double = 0.3, + applicationClass: Class, + val storage: StorageInterface?, + open val fastTopicParsing: Boolean = true, + val retriable: Boolean = true, + val budget: Double, ) : SocketManager(session, storage, owner = null, applicationClass = applicationClass) { - private val aggregateTopics = ConcurrentHashMap>() - private val messagesLock = Any() + private val aggregateTopics = ConcurrentHashMap>() + private val messagesLock = Any() - init { - if (userInterfacePrompt.isNotBlank()) { - newTask().complete(userInterfacePrompt) - } + init { + if (userInterfacePrompt.isNotBlank()) { + newTask().complete(userInterfacePrompt) } + } - val sysMessage: ModelSchema.ChatMessage - get() { - return ModelSchema.ChatMessage(ModelSchema.Role.system, systemPrompt.toContentList()) - } - protected val chatMessages = mutableListOf() + val sysMessage: ModelSchema.ChatMessage + get() { + return ModelSchema.ChatMessage(ModelSchema.Role.system, systemPrompt.toContentList()) + } + protected val chatMessages = mutableListOf() + + val markdownTranscript by lazy { transcript(newTask()) } - val markdownTranscript by lazy { transcript() } + override fun onRun(userMessage: String, socket: ChatSocket) { - override fun onRun(userMessage: String, socket: ChatSocket) { + val expandedUserMessage = expandTopics(userMessage) + markdownTranscript?.write("## User\n$expandedUserMessage\n\n".toByteArray()) + val task = newTask() + task.echo(renderResponse(expandedUserMessage, task)) - val expandedUserMessage = expandTopics(userMessage) - markdownTranscript?.write("## User\n$expandedUserMessage\n\n".toByteArray()) - val task = newTask() - task.echo(renderResponse(expandedUserMessage, task)) + 
synchronized(messagesLock) { + chatMessages += ModelSchema.ChatMessage(ModelSchema.Role.user, expandedUserMessage.toContentList()) + } + try { + if (!retriable) { + task.add("") + val responseString = respond(task, expandedUserMessage, chatMessages(), markdownTranscript) synchronized(messagesLock) { - chatMessages += ModelSchema.ChatMessage(ModelSchema.Role.user, expandedUserMessage.toContentList()) + if (chatMessages.lastOrNull()?.role == ModelSchema.Role.assistant) { + chatMessages.removeAt(chatMessages.size - 1) + } + chatMessages += ModelSchema.ChatMessage(ModelSchema.Role.assistant, responseString.toContentList()) } - - try { - if (!retriable) { - task.add("") - val responseString = respond(task, expandedUserMessage, chatMessages(), markdownTranscript) - synchronized(messagesLock) { - if (chatMessages.lastOrNull()?.role == ModelSchema.Role.assistant) { - chatMessages.removeAt(chatMessages.size - 1) - } - chatMessages += ModelSchema.ChatMessage(ModelSchema.Role.assistant, responseString.toContentList()) - } - task.complete() - } else { - retryable(task.ui, pool, task) { task -> - chatMessages.takeLastWhile { it.role == ModelSchema.Role.assistant } - .forEach { chatMessages.remove(it) } - val currentChatMessages = chatMessages() - innerRun(task, expandedUserMessage, currentChatMessages, markdownTranscript) - } - } - } catch (e: Exception) { - log.info("Error in chat", e) - task.error(e) + task.complete() + } else { + retryable(task.ui, pool, task) { task -> + chatMessages.takeLastWhile { it.role == ModelSchema.Role.assistant } + .forEach { chatMessages.remove(it) } + val currentChatMessages = chatMessages() + innerRun(task, expandedUserMessage, currentChatMessages, markdownTranscript) } + } + } catch (e: Exception) { + log.info("Error in chat", e) + task.error(e) } - - private fun transcript(): FileOutputStream? 
{ - val task = newTask() - val (link, file) = task.createFile("transcript.md") - val markdownTranscript = file?.outputStream() - task.complete( - "Writing transcript to $link html pdf" + } + + private fun transcript(task: SessionTask): FileOutputStream? { + val (link, file) = Pair(task.linkTo("transcript.md"), task.resolve("transcript.md")) + val markdownTranscript = file?.outputStream() + task.complete( + "Writing transcript to $link html pdf" + ) + return markdownTranscript + } + + private fun innerRun( + task: SessionTask, + expandedUserMessage: String, + currentChatMessages: List, + transcriptStream: OutputStream? + ) { + try { + task.add("") + val responseString = respond(task, expandedUserMessage, currentChatMessages, transcriptStream) + synchronized(messagesLock) { + if (chatMessages.lastOrNull()?.role == ModelSchema.Role.assistant) { + chatMessages.removeAt(chatMessages.size - 1) } + chatMessages += ModelSchema.ChatMessage( + ModelSchema.Role.assistant, + responseString.toContentList() + ) + } + task.complete() + } catch (e: Throwable) { + log.warn("Exception occurred while processing chat message", e) } - - private val idSubPattern = - """[^|\n,/\\;}\]\[><()@]+""" // Matches any valid identifier character except for special characters used in the expansion syntax - private val expansionExpressionPattern = - Regex("""@\[($idSubPattern(?:[|,]$idSubPattern)+)]""") // Matches @[option1|option2|option3] - - private val sequenceExpansionPattern = - Regex("""@\{([^}]+(?:\s*->\s*[^}]+)+)\}""") // Matches @{item1 -> item2 -> item3} - - private val rangeExpansionPattern = - Regex("""@\((-?\d+)(?:\.{2,3}| to )(-?\d+)(?:(?::| by )(\d+))?\)""") // Matches @(start..end:step) or @(start to end by step) - - protected open fun respond( - task: SessionTask, - userMessage: String, - currentChatMessages: List, - transcriptStream: OutputStream? 
= null - ): String { - val model = model.getChildClient(task) - return buildString { - runAll( - processMsgRecursive( - userMessage, - task, - currentChatMessages, - transcriptStream, - model - ), this - ) - }.let { response -> - // Write assistant response to transcript - transcriptStream?.write("## Assistant\n$response\n\n".toByteArray()) - transcriptStream?.flush() - - try { - val answer = extractTopics(response, model) - val topicsText = try { - answer.topics.let { topics -> - if (topics?.isNotEmpty() == true) { - topics.forEach { (topicType, entities) -> - val topicList = aggregateTopics.computeIfAbsent(topicType) { mutableListOf() } - synchronized(topicList) { - topicList.addAll(entities) - } - } - val joinToString = - topics.entries.joinToString("\n") { "* `{${it.key}}` - ${it.value.joinToString(", ") { "`$it`" }}" } - task.complete(joinToString.renderMarkdown(), additionalClasses = "topics") - "\n\n" + joinToString - } else { - "" - } - } - } catch (e: Exception) { - task.error(e) - log.error("Error in topic extraction", e) - "" + } + + private val idSubPattern = + """[^|\n,/\\;}\]\[><()@]+""" // Matches any valid identifier character except for special characters used in the expansion syntax + private val expansionExpressionPattern = + Regex("""@\[($idSubPattern(?:[|,]$idSubPattern)+)]""") // Matches @[option1|option2|option3] + + private val sequenceExpansionPattern = + Regex("""@\{([^}]+(?:\s*->\s*[^}]+)+)\}""") // Matches @{item1 -> item2 -> item3} + + private val rangeExpansionPattern = + Regex("""@\((-?\d+)(?:\.{2,3}| to )(-?\d+)(?:(?::| by )(\d+))?\)""") // Matches @(start..end:step) or @(start to end by step) + + protected open fun respond( + task: SessionTask, + userMessage: String, + currentChatMessages: List, + transcriptStream: OutputStream? 
= null + ): String { + val model = model.getChildClient(task) + return buildString { + runAll( + processMsgRecursive( + userMessage, + task, + currentChatMessages, + transcriptStream, + model + ), this + ) + }.let { response -> + // Write assistant response to transcript + transcriptStream?.write("## Assistant\n$response\n\n".transcriptFilter().toByteArray()) + transcriptStream?.flush() + + try { + val answer = extractTopics(response, model) + val topicsText = try { + answer.topics.let { topics -> + if (topics?.isNotEmpty() == true) { + topics.forEach { (topicType, entities) -> + val topicList = aggregateTopics.computeIfAbsent(topicType) { mutableListOf() } + synchronized(topicList) { + topicList.addAll(entities) } - response + topicsText - } catch (e: Exception) { - log.error("Error in topic extraction", e) - response - } - } - } - - /** - * Executes a list of functions, each appending to the target StringBuilder, potentially in parallel. - */ - private fun runAll(function1s: List<(StringBuilder) -> Unit>, target: StringBuilder) { - val fixedConcurrencyProcessor = FixedConcurrencyProcessor(pool, 4) - function1s.map { function1 -> - fixedConcurrencyProcessor.submit { - function1(target) + } + val joinToString = + topics.entries.joinToString("\n") { "* `{${it.key}}` - ${it.value.joinToString(", ") { "`$it`" }}" } + task.complete(joinToString.renderMarkdown(), additionalClasses = "topics") + "\n\n" + joinToString + } else { + "" } - }.forEach { it.get() } - } - - private fun extractTopics(response: String, model: ChatInterface): Topics { - val topicsParsedActor = ParsedAgent( - resultClass = Topics::class.java, - prompt = "Identify topics (i.e. 
all named entities grouped by type) in the following text:", - model = model, - temperature = temperature, - name = "Topics", - parsingChatter = parsingModel, - ) - return if (fastTopicParsing) { - topicsParsedActor.getParser().apply(response) - } else { - topicsParsedActor.answer(listOf(response)).obj - } - } - - protected open fun chatMessages(): List = synchronized(messagesLock) { - if (chatMessages.isEmpty() || chatMessages.first().role != ModelSchema.Role.system) { - listOf(sysMessage) + chatMessages - } else { - chatMessages + } + } catch (e: Exception) { + task.error(e) + log.error("Error in topic extraction", e) + "" } + response + topicsText + } catch (e: Exception) { + log.error("Error in topic extraction", e) + response + } } - - data class Topics( - val topics: Map>? = emptyMap() + } + + /** + * Executes a list of functions, each appending to the target StringBuilder, potentially in parallel. + */ + private fun runAll(function1s: List<(StringBuilder) -> Unit>, target: StringBuilder) { + val fixedConcurrencyProcessor = FixedConcurrencyProcessor(pool, 4) + function1s.map { function1 -> + fixedConcurrencyProcessor.submit { + function1(target) + } + }.forEach { it.get() } + } + + private fun extractTopics(response: String, model: ChatInterface): Topics { + val topicsParsedActor = ParsedAgent( + resultClass = Topics::class.java, + prompt = "Identify topics (i.e. 
all named entities grouped by type) in the following text:", + model = model, + temperature = temperature, + name = "Topics", + parsingChatter = parsingModel, ) - - protected open fun expandTopics(userMessage: String): String { - // Matches both @TopicType and @{Topic Type With Spaces} - val topicReferencePattern = - Regex("""@\{([A-Z][a-zA-Z0-9_ ]+)\}|@([A-Z][a-zA-Z0-9_]*)""") - return topicReferencePattern.replace(userMessage) { matchResult -> // Read access needs synchronization - // Group 1 is for delimited format @{Topic Type}, Group 2 is for simple format @TopicType - val topicType = matchResult.groupValues[1].ifEmpty { matchResult.groupValues[2] } - val topicList = aggregateTopics[topicType] - val entities = synchronized(topicList ?: Any()) { // Synchronize on the list if it exists, or a dummy object - topicList?.toList() // Create copy while holding lock - } - if (!entities.isNullOrEmpty()) { // Check if the copied list is not null or empty - "@[${entities.joinToString("|")}]" // Use the copied list - } else { - matchResult.value - } - } + return if (fastTopicParsing) { + topicsParsedActor.getParser().apply(response) + } else { + topicsParsedActor.answer(listOf(response)).obj } + } - private fun processMsgRecursive( - currentMessage: String, - task: SessionTask, - baseMessages: List, - transcriptStream: OutputStream? 
= null, - model: ChatInterface - ): List<(StringBuilder) -> Unit> { - - if (useExpansionSyntax) { - val rangeMatch = rangeExpansionPattern.find(currentMessage) - if (rangeMatch != null) { - return expandRange(currentMessage, task, baseMessages, rangeMatch, transcriptStream) - } - - val sequenceMatch = sequenceExpansionPattern.find(currentMessage) - if (sequenceMatch != null) { - return listOf { finalAggregate: StringBuilder -> - expandSequence( - task, - baseMessages, - sequenceMatch.groupValues[1].split(Regex("""\s*->\s*""")), - currentMessage, - sequenceMatch.value, - transcriptStream - ) - } - } - - val match = expansionExpressionPattern.find(currentMessage) - if (match != null && match.groupValues[1].split('|', ',').size > 1) { - return expandAlternatives( - currentMessage, - task, - baseMessages, - match, - transcriptStream - ) { msg, tsk, msgs -> - processMsgRecursive(msg, tsk, msgs, transcriptStream, this@ChatSocketManager.model) - } - } - } - - return listOf { aggregateResponse: StringBuilder -> - task.add("") - - val finalMessages = baseMessages + ModelSchema.ChatMessage(ModelSchema.Role.user, currentMessage.toContentList()) - val responseRef = AtomicReference() - try { - val chatResponse = model.chat(finalMessages) - val newValue = chatResponse.choices.firstOrNull()?.message?.content.orEmpty() - responseRef.set(newValue) - } catch (e: Exception) { - log.error("Error in API call", e) - responseRef.set("Error: ${e.message}") - } - - val response = responseRef.get() ?: "No response received" - task.complete(renderResponse(response, task)) - aggregateResponse.append(response).append("\n\n") - // Write intermediate responses to transcript if in expansion mode - if (useExpansionSyntax && transcriptStream != null) { - transcriptStream.write("### Expansion Result\n$response\n\n".toByteArray()) - transcriptStream.flush() - } - - } + protected open fun chatMessages(): List = synchronized(messagesLock) { + if (chatMessages.isEmpty() || chatMessages.first().role != 
ModelSchema.Role.system) { + listOf(sysMessage) + chatMessages + } else { + chatMessages } - - /** - * Expands range expressions in the format [start...end:step] - * Creates a sequence of numbers from start to end with the given step (default 1) - */ - private fun expandRange( - currentMessage: String, - task: SessionTask, - baseMessages: List, - rangeMatch: MatchResult, - transcriptStream: OutputStream? = null - ): List<(StringBuilder) -> Unit> = listOf { finalAggregate: StringBuilder -> - val start = rangeMatch.groupValues[1].toInt() - val end = rangeMatch.groupValues[2].toInt() - val step = rangeMatch.groupValues[3].takeIf { it.isNotEmpty() }?.toInt() ?: 1 - expandSequence( + } + + data class Topics( + val topics: Map>? = emptyMap() + ) + + protected open fun expandTopics(userMessage: String): String { + // Matches both @TopicType and @{Topic Type With Spaces} + val topicReferencePattern = + Regex("""@\{([A-Z][a-zA-Z0-9_ ]+)\}|@([A-Z][a-zA-Z0-9_]*)""") + return topicReferencePattern.replace(userMessage) { matchResult -> // Read access needs synchronization + // Group 1 is for delimited format @{Topic Type}, Group 2 is for simple format @TopicType + val topicType = matchResult.groupValues[1].ifEmpty { matchResult.groupValues[2] } + val topicList = aggregateTopics[topicType] + val entities = synchronized(topicList ?: Any()) { // Synchronize on the list if it exists, or a dummy object + topicList?.toList() // Create copy while holding lock + } + if (!entities.isNullOrEmpty()) { // Check if the copied list is not null or empty + "@[${entities.joinToString("|")}]" // Use the copied list + } else { + matchResult.value + } + } + } + + private fun processMsgRecursive( + currentMessage: String, + task: SessionTask, + baseMessages: List, + transcriptStream: OutputStream? 
= null, + model: ChatInterface + ): List<(StringBuilder) -> Unit> { + + if (useExpansionSyntax) { + val rangeMatch = rangeExpansionPattern.find(currentMessage) + if (rangeMatch != null) { + return expandRange(currentMessage, task, baseMessages, rangeMatch, transcriptStream) + } + + val sequenceMatch = sequenceExpansionPattern.find(currentMessage) + if (sequenceMatch != null) { + return listOf { finalAggregate: StringBuilder -> + expandSequence( task, baseMessages, - generateSequence(start) { it + step } - .takeWhile { if (step > 0) it <= end else it >= end } - .toList() - .map { it.toString() }, + sequenceMatch.groupValues[1].split(Regex("""\s*->\s*""")), currentMessage, - rangeMatch.value, + sequenceMatch.value, transcriptStream - ) - } - - /** - * Expands alternative expressions in the format {option1|option2|option3} - * Each option is processed in parallel - */ - private fun expandAlternatives( - currentMessage: String, - task: SessionTask, - baseMessages: List, - match: MatchResult, - transcriptStream: OutputStream? 
= null, - recursiveFn: (String, SessionTask, List) -> List<(StringBuilder) -> Unit> - ): List<(StringBuilder) -> Unit> { - val tabs = TabbedDisplay(task, closable = useExpansionSyntax) - return match.groupValues[1].split('|', ',').flatMap { option -> - recursiveFn( - currentMessage.replaceFirst(match.value, option), - this.newTask(cancelable = false, root = false).apply { tabs[option] = placeholder }, - baseMessages.filter { it.content?.any { it.text?.contains(match.value) == true } != true } - ) - }.apply { - tabs.update() + ) } + } + + val match = expansionExpressionPattern.find(currentMessage) + if (match != null && match.groupValues[1].split('|', ',').size > 1) { + return expandAlternatives( + currentMessage, + task, + baseMessages, + match, + transcriptStream + ) { msg, tsk, msgs -> + processMsgRecursive(msg, tsk, msgs, transcriptStream, this@ChatSocketManager.model) + } + } } - private fun expandSequence( - task: SessionTask, - baseMessages: List, - items: List, - currentMessage: String, - expression: String, - transcriptStream: OutputStream? 
= null - ) { - val aggregatedResponse = StringBuilder() - val tabs = TabbedDisplay(task, closable = useExpansionSyntax) - val messages = baseMessages.dropLast(1).toMutableList() - for (item in items) { - val newMessage = currentMessage.replaceFirst(expression, item) - val subTaskFunctions = processMsgRecursive( - currentMessage = newMessage, - task = this.newTask(cancelable = false, root = false).apply { tabs[item] = placeholder }, - baseMessages = messages.filter { it.content?.any { it.text?.contains(expression) == true } != true }, - transcriptStream = transcriptStream, - model = this@ChatSocketManager.model + return listOf { aggregateResponse: StringBuilder -> + task.add("") + + val finalMessages = baseMessages + ModelSchema.ChatMessage(ModelSchema.Role.user, currentMessage.toContentList()) + val responseRef = AtomicReference() + try { + val chatResponse = model.chat(finalMessages) + val choices = chatResponse.choices + var responseText = choices.firstOrNull()?.message?.content.orEmpty() + choices.forEach { choice -> + choice.message?.image_data?.let { + val imageMimeType = choice.message?.image_mime_type ?: "image/png" + val (link, file) = task.createFile( + UUID.randomUUID().toString() + when (imageMimeType) { + "image/png" -> ".png" + "image/jpeg", "image/jpg" -> ".jpg" + "image/gif" -> ".gif" + else -> ".img" + } ) - val subAggregate = StringBuilder() - runAll(subTaskFunctions, subAggregate) - aggregatedResponse.append("[").append(item).append("]\n").append(subAggregate.toString()).append("\n") - messages.add(ModelSchema.ChatMessage(ModelSchema.Role.user, newMessage.toContentList())) - messages.add(ModelSchema.ChatMessage(ModelSchema.Role.assistant, subAggregate.toString().toContentList())) + file?.writeBytes(it) + val imageLink = """Image""" + responseText += "\n\n" + imageLink + } } - tabs.update() - } + responseRef.set(responseText) + } catch (e: Exception) { + log.error("Error in API call", e) + responseRef.set("Error: ${e.message}") + } + + val response 
= responseRef.get() ?: "No response received" + task.complete(renderResponse(response, task)) + aggregateResponse.append(response).append("\n\n") + // Write intermediate responses to transcript if in expansion mode + if (useExpansionSyntax && transcriptStream != null) { + transcriptStream.write("### Expansion Result\n$response\n\n".transcriptFilter().toByteArray()) + transcriptStream.flush() + } - open fun renderResponse(response: String, task: SessionTask) = - """
    ${response.renderMarkdown()}
    """ - - companion object { - private val log = LoggerFactory.getLogger(ChatSocketManager::class.java) } -} \ No newline at end of file + } + + /** + * Expands range expressions in the format [start...end:step] + * Creates a sequence of numbers from start to end with the given step (default 1) + */ + private fun expandRange( + currentMessage: String, + task: SessionTask, + baseMessages: List, + rangeMatch: MatchResult, + transcriptStream: OutputStream? = null + ): List<(StringBuilder) -> Unit> = listOf { finalAggregate: StringBuilder -> + val start = rangeMatch.groupValues[1].toInt() + val end = rangeMatch.groupValues[2].toInt() + val step = rangeMatch.groupValues[3].takeIf { it.isNotEmpty() }?.toInt() ?: 1 + expandSequence( + task, + baseMessages, + generateSequence(start) { it + step } + .takeWhile { if (step > 0) it <= end else it >= end } + .toList() + .map { it.toString() }, + currentMessage, + rangeMatch.value, + transcriptStream + ) + } + + /** + * Expands alternative expressions in the format {option1|option2|option3} + * Each option is processed in parallel + */ + private fun expandAlternatives( + currentMessage: String, + task: SessionTask, + baseMessages: List, + match: MatchResult, + transcriptStream: OutputStream? = null, + recursiveFn: (String, SessionTask, List) -> List<(StringBuilder) -> Unit> + ): List<(StringBuilder) -> Unit> { + val tabs = TabbedDisplay(task, closable = useExpansionSyntax) + return match.groupValues[1].split('|', ',').flatMap { option -> + recursiveFn( + currentMessage.replaceFirst(match.value, option), + this.newTask(cancelable = false, root = false).apply { tabs[option] = placeholder }, + baseMessages.filter { it.content?.any { it.text?.contains(match.value) == true } != true } + ) + }.apply { + tabs.update() + } + } + + private fun expandSequence( + task: SessionTask, + baseMessages: List, + items: List, + currentMessage: String, + expression: String, + transcriptStream: OutputStream? 
= null + ) { + val aggregatedResponse = StringBuilder() + val tabs = TabbedDisplay(task, closable = useExpansionSyntax) + val messages = baseMessages.dropLast(1).toMutableList() + for (item in items) { + val newMessage = currentMessage.replaceFirst(expression, item) + val subTaskFunctions = processMsgRecursive( + currentMessage = newMessage, + task = this.newTask(cancelable = false, root = false).apply { tabs[item] = placeholder }, + baseMessages = messages.filter { it.content?.any { it.text?.contains(expression) == true } != true }, + transcriptStream = transcriptStream, + model = this@ChatSocketManager.model + ) + val subAggregate = StringBuilder() + runAll(subTaskFunctions, subAggregate) + aggregatedResponse.append("[").append(item).append("]\n").append(subAggregate.toString()).append("\n") + messages.add(ModelSchema.ChatMessage(ModelSchema.Role.user, newMessage.toContentList())) + messages.add(ModelSchema.ChatMessage(ModelSchema.Role.assistant, subAggregate.toString().toContentList())) + } + tabs.update() + } + + open fun renderResponse(response: String, task: SessionTask) = + """
    ${response.renderMarkdown()}
    """ + + companion object { + private val log = LoggerFactory.getLogger(ChatSocketManager::class.java) + } +} + +fun String.transcriptFilter() = this.let { + Regex("""(href=|src=['"])?fileIndex/[A-Za-z0-9\-_]+/""").replace(it) { matchResult -> + matchResult.groupValues[1] + } +} diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/servlet/SessionListServlet.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/servlet/SessionListServlet.kt index f2846870b..703f416cc 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/servlet/SessionListServlet.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/servlet/SessionListServlet.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.servlet -import com.simiacryptus.cognotik.actors.CodeAgent.Companion.indent +import com.simiacryptus.cognotik.agents.CodeAgent.Companion.indent import com.simiacryptus.cognotik.platform.ApplicationServices.authenticationManager import com.simiacryptus.cognotik.platform.model.StorageInterface import com.simiacryptus.cognotik.webui.application.ApplicationServer diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/session/SessionTask.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/session/SessionTask.kt index e386664f1..9b6a0fa4f 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/session/SessionTask.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/session/SessionTask.kt @@ -318,26 +318,7 @@ open class SessionTask( return "fileIndex/${ui.sessionId}/$relativePath" } - fun resolve(relativePath: String): File? 
{ - require(relativePath.isNotBlank()) { "File path cannot be blank" } - require(!relativePath.contains("..")) { "Invalid file path: path traversal not allowed" } - return ui.dataStorage?.getSessionDir( - ui.owner, - ui.sessionId - )?.let { dir -> - if (!dir.exists() && !dir.mkdirs()) { - throw RuntimeException("Failed to create session directory: ${dir.absolutePath}") - } - val resolve = dir.resolve(relativePath) - resolve.parentFile?.let { parent -> - if (!parent.exists()) { - if (!parent.mkdirs()) log.warn("Failed to create parent directory: {}", parent.absolutePath) - } - } - log.debug("Successfully created file path: {}", resolve.absolutePath) - resolve - } - } + fun resolve(relativePath: String) = this.ui.resolve(relativePath) fun update() = send() @@ -374,7 +355,8 @@ fun ChatInterface.getChildClient(task: SessionTask): ChatInterface { } fun SessionTask.newLogStream(): BufferedOutputStream { - val pair = createFile(".logs/api-${UUID.randomUUID()}.log") + val relativePath = ".logs/api-${UUID.randomUUID()}.log" + val pair = Pair(this@newLogStream.linkTo(relativePath), this@newLogStream.resolve(relativePath)) val createFile = pair.second ?: throw IllegalStateException("Failed to create log file") val buffered = createFile.outputStream().buffered() buffered.write("API Logging Started\n".toByteArray()) @@ -384,4 +366,25 @@ fun SessionTask.newLogStream(): BufferedOutputStream { } verbose("""API log:
    ${createFile.absolutePath}
    """) return buffered +} + +fun SocketManager.resolve(relativePath: String): File? { + require(relativePath.isNotBlank()) { "File path cannot be blank" } + require(!relativePath.contains("..")) { "Invalid file path: path traversal not allowed" } + return dataStorage?.getSessionDir( + owner, + sessionId + )?.let { dir -> + if (!dir.exists() && !dir.mkdirs()) { + throw RuntimeException("Failed to create session directory: ${dir.absolutePath}") + } + val resolve = dir.resolve(relativePath) + resolve.parentFile?.let { parent -> + if (!parent.exists()) { + if (!parent.mkdirs()) SessionTask.Companion.log.warn("Failed to create parent directory: {}", parent.absolutePath) + } + } + SessionTask.Companion.log.debug("Successfully created file path: {}", resolve.absolutePath) + resolve + } } \ No newline at end of file diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/CodingActorTestApp.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/CodingActorTestApp.kt index fbab90933..7735e92e6 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/CodingActorTestApp.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/CodingActorTestApp.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.test -import com.simiacryptus.cognotik.actors.CodeAgent +import com.simiacryptus.cognotik.agents.CodeAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.models.ModelSchema import com.simiacryptus.cognotik.platform.ApplicationServices diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ImageActorTestApp.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ImageActorTestApp.kt index 5ef31bb12..6ca093a97 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ImageActorTestApp.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ImageActorTestApp.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.test -import 
com.simiacryptus.cognotik.actors.ImageAgent +import com.simiacryptus.cognotik.agents.ImageGenerationAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.model.User @@ -9,15 +9,15 @@ import com.simiacryptus.cognotik.webui.application.ApplicationServer import com.simiacryptus.cognotik.webui.session.SocketManager open class ImageActorTestApp( - private val actor: ImageAgent, - applicationName: String = "ImageActorTest_" + actor.javaClass.simpleName, + private val actor: ImageGenerationAgent, + applicationName: String = "ImageActorTest_" + actor.javaClass.simpleName, ) : ApplicationServer( applicationName = applicationName, path = "/imageActorTest", ) { data class Settings( - val actor: ImageAgent? = null, + val actor: ImageGenerationAgent? = null, ) override val settingsClass: Class<*> get() = Settings::class.java @@ -39,7 +39,7 @@ open class ImageActorTestApp( listOf(userMessage) ) message.verbose(response.text) - message.image(response.image) + message.image(response.image!!) 
message.complete() } catch (e: Throwable) { log.warn("Error flushing image", e) diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ParsedActorTestApp.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ParsedActorTestApp.kt index 0da2f7703..263dd2997 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ParsedActorTestApp.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/ParsedActorTestApp.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.test -import com.simiacryptus.cognotik.actors.ParsedAgent +import com.simiacryptus.cognotik.agents.ParsedAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.model.User diff --git a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/SimpleActorTestApp.kt b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/SimpleActorTestApp.kt index b2a0bcffb..515184a25 100644 --- a/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/SimpleActorTestApp.kt +++ b/webui/src/main/kotlin/com/simiacryptus/cognotik/webui/test/SimpleActorTestApp.kt @@ -1,6 +1,6 @@ package com.simiacryptus.cognotik.webui.test -import com.simiacryptus.cognotik.actors.ChatAgent +import com.simiacryptus.cognotik.agents.ChatAgent import com.simiacryptus.cognotik.apps.general.renderMarkdown import com.simiacryptus.cognotik.platform.Session import com.simiacryptus.cognotik.platform.model.User diff --git a/webui/src/main/resources/application/asset-manifest.json b/webui/src/main/resources/application/asset-manifest.json index 04c25fc86..e41c1f293 100644 --- a/webui/src/main/resources/application/asset-manifest.json +++ b/webui/src/main/resources/application/asset-manifest.json @@ -1,7 +1,7 @@ { "files": { "main.css": "/static/css/main.ca34f186.css", - "main.js": "/static/js/main.44ef7b39.js", + "main.js": "/static/js/main.df8779f3.js", "static/js/7732.1eb5a529.chunk.js": 
"/static/js/7732.1eb5a529.chunk.js", "static/js/4467.0d8e509c.chunk.js": "/static/js/4467.0d8e509c.chunk.js", "static/js/3761.3928b16a.chunk.js": "/static/js/3761.3928b16a.chunk.js", @@ -74,7 +74,7 @@ "static/js/3355.f80f4792.chunk.js": "/static/js/3355.f80f4792.chunk.js", "index.html": "/index.html", "main.ca34f186.css.map": "/static/css/main.ca34f186.css.map", - "main.44ef7b39.js.map": "/static/js/main.44ef7b39.js.map", + "main.df8779f3.js.map": "/static/js/main.df8779f3.js.map", "7732.1eb5a529.chunk.js.map": "/static/js/7732.1eb5a529.chunk.js.map", "4467.0d8e509c.chunk.js.map": "/static/js/4467.0d8e509c.chunk.js.map", "3761.3928b16a.chunk.js.map": "/static/js/3761.3928b16a.chunk.js.map", @@ -142,6 +142,6 @@ }, "entrypoints": [ "static/css/main.ca34f186.css", - "static/js/main.44ef7b39.js" + "static/js/main.df8779f3.js" ] } \ No newline at end of file diff --git a/webui/src/main/resources/application/index.html b/webui/src/main/resources/application/index.html index 63ba2be12..347e0cd66 100644 --- a/webui/src/main/resources/application/index.html +++ b/webui/src/main/resources/application/index.html @@ -1 +1 @@ -Cognotik
    \ No newline at end of file +Cognotik
    \ No newline at end of file diff --git a/webui/src/main/resources/application/static/js/main.df8779f3.js b/webui/src/main/resources/application/static/js/main.df8779f3.js new file mode 100644 index 000000000..8cfda65e8 --- /dev/null +++ b/webui/src/main/resources/application/static/js/main.df8779f3.js @@ -0,0 +1,1261 @@ +/*! For license information please see main.df8779f3.js.LICENSE.txt */ +(()=>{var e={4:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(1954),i=n(3101);const o=function(e,t,n,o){var a=!n;n||(n={});for(var s=-1,l=t.length;++s{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),Object.defineProperty(t,"default",{enumerable:!0,get:function(){return r.createSvgIcon}});var r=n(7749)},45:()=>{Prism.languages.scala=Prism.languages.extend("java",{"triple-quoted-string":{pattern:/"""[\s\S]*?"""/,greedy:!0,alias:"string"},string:{pattern:/("|')(?:\\.|(?!\1)[^\\\r\n])*\1/,greedy:!0},keyword:/<-|=>|\b(?:abstract|case|catch|class|def|derives|do|else|enum|extends|extension|final|finally|for|forSome|given|if|implicit|import|infix|inline|lazy|match|new|null|object|opaque|open|override|package|private|protected|return|sealed|self|super|this|throw|trait|transparent|try|type|using|val|var|while|with|yield)\b/,number:/\b0x(?:[\da-f]*\.)?[\da-f]+|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e\d+)?[dfl]?/i,builtin:/\b(?:Any|AnyRef|AnyVal|Boolean|Byte|Char|Double|Float|Int|Long|Nothing|Short|String|Unit)\b/,symbol:/'[^\d\s\\]\w*/}),Prism.languages.insertBefore("scala","triple-quoted-string",{"string-interpolation":{pattern:/\b[a-z]\w*(?:"""(?:[^$]|\$(?:[^{]|\{(?:[^{}]|\{[^{}]*\})*\}))*?"""|"(?:[^$"\r\n]|\$(?:[^{]|\{(?:[^{}]|\{[^{}]*\})*\}))*")/i,greedy:!0,inside:{id:{pattern:/^\w+/,greedy:!0,alias:"function"},escape:{pattern:/\\\$"|\$[$"]/,greedy:!0,alias:"symbol"},interpolation:{pattern:/\$(?:\w+|\{(?:[^{}]|\{[^{}]*\})*\})/,greedy:!0,inside:{punctuation:/^\$\{?|\}$/,expression:{pattern:/[\s\S]+/,inside:Prism.languages.scala}}},string:/[\s\S]+/}}}),delete 
Prism.languages.scala["class-name"],delete Prism.languages.scala.function,delete Prism.languages.scala.constant},53:(e,t,n)=>{"use strict";n.d(t,{XX:()=>u,q7:()=>h,sO:()=>c});var r=n(1580),i=n(958),o=n(634),a=n(3759),s={common:a.Y2,getConfig:a.zj,insertCluster:i.U,insertEdge:r.Jo,insertEdgeLabel:r.jP,insertMarkers:r.g0,insertNode:i.on,interpolateToCurve:o.Ib,labelHelper:i.Zk,log:a.Rm,positionEdgeLabel:r.T_},l={},c=(0,a.K2)((e=>{for(const t of e)l[t.name]=t}),"registerLayoutLoaders");(0,a.K2)((()=>{c([{name:"dagre",loader:(0,a.K2)((async()=>await Promise.all([n.e(7854),n.e(62),n.e(5626)]).then(n.bind(n,5626))),"loader")}])}),"registerDefaultLayoutLoaders")();var u=(0,a.K2)((async(e,t)=>{if(!(e.layoutAlgorithm in l))throw new Error(`Unknown layout algorithm: ${e.layoutAlgorithm}`);const n=l[e.layoutAlgorithm];return(await n.loader()).render(e,t,s,{algorithm:n.algorithm})}),"render"),h=(0,a.K2)((function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"",{fallback:t="dagre"}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(e in l)return e;if(t in l)return a.Rm.warn(`Layout algorithm ${e} is not registered. 
Using ${t} as fallback.`),t;throw new Error(`Both layout algorithms ${e} and ${t} are not registered.`)}),"getRegisteredLayoutAlgorithm")},219:(e,t,n)=>{"use strict";var r=n(3763),i={childContextTypes:!0,contextType:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromError:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},a={$$typeof:!0,compare:!0,defaultProps:!0,displayName:!0,propTypes:!0,type:!0},s={};function l(e){return r.isMemo(e)?a:s[e.$$typeof]||i}s[r.ForwardRef]={$$typeof:!0,render:!0,defaultProps:!0,displayName:!0,propTypes:!0},s[r.Memo]=a;var c=Object.defineProperty,u=Object.getOwnPropertyNames,h=Object.getOwnPropertySymbols,d=Object.getOwnPropertyDescriptor,f=Object.getPrototypeOf,p=Object.prototype;e.exports=function e(t,n,r){if("string"!==typeof n){if(p){var i=f(n);i&&i!==p&&e(t,i,r)}var a=u(n);h&&(a=a.concat(h(n)));for(var s=l(t),g=l(n),m=0;m{"use strict";n.d(t,{A:()=>i});var r=Object.prototype;const i=function(e){var t=e&&e.constructor;return e===("function"==typeof t&&t.prototype||r)}},438:(e,t)=>{var n,r,i,o=function(){var e=function(e,t){var n=e,r=o[t],i=null,a=0,s=null,l=[],c={},u=function(e,t){i=function(e){for(var t=new Array(e),n=0;n=7&&g(e),null==s&&(s=A(n,r,l)),C(s,t)},h=function(e,t){for(var n=-1;n<=7;n+=1)if(!(e+n<=-1||a<=e+n))for(var r=-1;r<=7;r+=1)t+r<=-1||a<=t+r||(i[e+n][t+r]=0<=n&&n<=6&&(0==r||6==r)||0<=r&&r<=6&&(0==n||6==n)||2<=n&&n<=4&&2<=r&&r<=4)},d=function(){for(var e=8;e>r&1);i[Math.floor(r/3)][r%3+a-8-3]=o}for(r=0;r<18;r+=1){o=!e&&1==(t>>r&1);i[r%3+a-8-3][Math.floor(r/3)]=o}},S=function(e,t){for(var n=r<<3|t,o=p.getBCHTypeInfo(n),s=0;s<15;s+=1){var l=!e&&1==(o>>s&1);s<6?i[s][8]=l:s<8?i[s+1][8]=l:i[a-15+s][8]=l}for(s=0;s<15;s+=1){l=!e&&1==(o>>s&1);s<8?i[8][a-s-1]=l:s<9?i[8][15-s-1+1]=l:i[8][15-s-1]=l}i[a-8][8]=!e},C=function(e,t){for(var 
n=-1,r=a-1,o=7,s=0,l=p.getMaskFunction(t),c=a-1;c>0;c-=2)for(6==c&&(c-=1);;){for(var u=0;u<2;u+=1)if(null==i[r][c-u]){var h=!1;s>>o&1)),l(r,c-u)&&(h=!h),i[r][c-u]=h,-1==(o-=1)&&(s+=1,o=7)}if((r+=n)<0||a<=r){r-=n,n=-n;break}}},A=function(e,t,n){for(var r=y.getRSBlocks(e,t),i=b(),o=0;o8*s)throw"code length overflow. ("+i.getLengthInBits()+">"+8*s+")";for(i.getLengthInBits()+4<=8*s&&i.put(0,4);i.getLengthInBits()%8!=0;)i.putBit(!1);for(;!(i.getLengthInBits()>=8*s)&&(i.put(236,8),!(i.getLengthInBits()>=8*s));)i.put(17,8);return function(e,t){for(var n=0,r=0,i=0,o=new Array(t.length),a=new Array(t.length),s=0;s=0?d.getAt(f):0}}var g=0;for(u=0;ur)&&(e=r,t=n)}return t}())},c.createTableTag=function(e,t){e=e||2;var n="";n+='";for(var i=0;i';n+=""}return n+="",n+="
    "},c.createSvgTag=function(e,t,n,r){var i={};"object"==typeof arguments[0]&&(e=(i=arguments[0]).cellSize,t=i.margin,n=i.alt,r=i.title),e=e||2,t="undefined"==typeof t?4*e:t,(n="string"===typeof n?{text:n}:n||{}).text=n.text||null,n.id=n.text?n.id||"qrcode-description":null,(r="string"===typeof r?{text:r}:r||{}).text=r.text||null,r.id=r.text?r.id||"qrcode-title":null;var o,a,s,l,u=c.getModuleCount()*e+2*t,h="";for(l="l"+e+",0 0,"+e+" -"+e+",0 0,-"+e+"z ",h+=''+T(r.text)+"":"",h+=n.text?''+T(n.text)+"":"",h+='',h+='":t+=">";break;case"&":t+="&";break;case'"':t+=""";break;default:t+=r}}return t};return c.createASCII=function(e,t){if((e=e||1)<2)return function(e){e="undefined"==typeof e?2:e;var t,n,r,i,o,a=1*c.getModuleCount()+2*e,s=e,l=a-e,u={"\u2588\u2588":"\u2588","\u2588 ":"\u2580"," \u2588":"\u2584"," ":" "},h={"\u2588\u2588":"\u2580","\u2588 ":"\u2580"," \u2588":" "," ":" "},d="";for(t=0;t=l?h[o]:u[o];d+="\n"}return a%2&&e>0?d.substring(0,d.length-a-1)+Array(a+1).join("\u2580"):d.substring(0,d.length-1)}(t);e-=1,t="undefined"==typeof t?2*e:t;var n,r,i,o,a=c.getModuleCount()*e+2*t,s=t,l=a-t,u=Array(e+1).join("\u2588\u2588"),h=Array(e+1).join(" "),d="",f="";for(n=0;n>>8),t.push(255&a)):t.push(r)}}return t}};var t=1,n=2,r=4,i=8,o={L:1,M:0,Q:3,H:2},a=0,s=1,l=2,c=3,u=4,h=5,d=6,f=7,p=function(){var e=[[],[6,18],[6,22],[6,26],[6,30],[6,34],[6,22,38],[6,24,42],[6,26,46],[6,28,50],[6,30,54],[6,32,58],[6,34,62],[6,26,46,66],[6,26,48,70],[6,26,50,74],[6,30,54,78],[6,30,56,82],[6,30,58,86],[6,34,62,90],[6,28,50,72,94],[6,26,50,74,98],[6,30,54,78,102],[6,28,54,80,106],[6,32,58,84,110],[6,30,58,86,114],[6,34,62,90,118],[6,26,50,74,98,122],[6,30,54,78,102,126],[6,26,52,78,104,130],[6,30,56,82,108,134],[6,34,60,86,112,138],[6,30,58,86,114,142],[6,34,62,90,118,146],[6,30,54,78,102,126,150],[6,24,50,76,102,128,154],[6,28,54,80,106,132,158],[6,32,58,84,110,136,162],[6,26,54,82,110,138,166],[6,30,58,86,114,142,170]],o=1335,p=7973,y={},b=function(e){for(var 
t=0;0!=e;)t+=1,e>>>=1;return t};return y.getBCHTypeInfo=function(e){for(var t=e<<10;b(t)-b(o)>=0;)t^=o<=0;)t^=p<5&&(n+=3+o-5)}for(r=0;r=256;)t-=255;return e[t]}};return r}();function m(e,t){if("undefined"==typeof e.length)throw e.length+"/"+t;var n=function(){for(var n=0;n>>7-t%8&1)},put:function(e,t){for(var r=0;r>>t-r-1&1))},getLengthInBits:function(){return t},putBit:function(n){var r=Math.floor(t/8);e.length<=r&&e.push(0),n&&(e[r]|=128>>>t%8),t+=1}};return n},v=function(e){var n=t,r=e,i={getMode:function(){return n},getLength:function(e){return r.length},write:function(e){for(var t=r,n=0;n+2>>8&255)+(255&r),e.put(r,13),n+=2}if(n>>8)},writeBytes:function(e,n,r){n=n||0,r=r||e.length;for(var i=0;i0&&(t+=","),t+=e[n];return t+="]"}};return t},C=function(e){var t=e,n=0,r=0,i=0,o={read:function(){for(;i<8;){if(n>=t.length){if(0==i)return-1;throw"unexpected end of file./"+i}var e=t.charAt(n);if(n+=1,"="==e)return i=0,-1;e.match(/^\s$/)||(r=r<<6|a(e.charCodeAt(0)),i+=6)}var o=r>>>i-8&255;return i-=8,o}},a=function(e){if(65<=e&&e<=90)return e-65;if(97<=e&&e<=122)return e-97+26;if(48<=e&&e<=57)return e-48+52;if(43==e)return 62;if(47==e)return 63;throw"c:"+e};return o},_=function(e,t,n){for(var r=function(e,t){var n=e,r=t,i=new Array(e*t),o={setPixel:function(e,t,r){i[t*n+e]=r},write:function(e){e.writeString("GIF87a"),e.writeShort(n),e.writeShort(r),e.writeByte(128),e.writeByte(0),e.writeByte(0),e.writeByte(0),e.writeByte(0),e.writeByte(0),e.writeByte(255),e.writeByte(255),e.writeByte(255),e.writeString(","),e.writeShort(0),e.writeShort(0),e.writeShort(n),e.writeShort(r),e.writeByte(0);var t=a(2);e.writeByte(2);for(var i=0;t.length-i>255;)e.writeByte(255),e.writeBytes(t,i,255),i+=255;e.writeByte(t.length-i),e.writeBytes(t,i,t.length-i),e.writeByte(0),e.writeString(";")}},a=function(e){for(var t=1<>>i!=0)throw"length over";for(;n+i>=8;)t.writeByte(255&(e<>>=8-n,r=0,n=0;r|=e<0&&t.writeByte(r)}}}(l);c.write(t,r);var 
u=0,h=String.fromCharCode(i[u]);for(u+=1;u=6;)o(e>>>t-6),t-=6},i.flush=function(){if(t>0&&(o(e<<6-t),e=0,t=0),n%3!=0)for(var i=3-n%3,a=0;a>6,128|63&r):r<55296||r>=57344?t.push(224|r>>12,128|r>>6&63,128|63&r):(n++,r=65536+((1023&r)<<10|1023&e.charCodeAt(n)),t.push(240|r>>18,128|r>>12&63,128|r>>6&63,128|63&r))}return t}(e)},r=[],void 0===(i="function"===typeof(n=function(){return o})?n.apply(t,r):n)||(e.exports=i)},446:function(e){e.exports=function(){"use strict";var e=1e3,t=6e4,n=36e5,r="millisecond",i="second",o="minute",a="hour",s="day",l="week",c="month",u="quarter",h="year",d="date",f="Invalid Date",p=/^(\d{4})[-/]?(\d{1,2})?[-/]?(\d{0,2})[Tt\s]*(\d{1,2})?:?(\d{1,2})?:?(\d{1,2})?[.:]?(\d+)?$/,g=/\[([^\]]+)]|Y{1,4}|M{1,4}|D{1,2}|d{1,4}|H{1,2}|h{1,2}|a|A|m{1,2}|s{1,2}|Z{1,2}|SSS/g,m={name:"en",weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),ordinal:function(e){var t=["th","st","nd","rd"],n=e%100;return"["+e+(t[(n-20)%10]||t[n]||t[0])+"]"}},y=function(e,t,n){var r=String(e);return!r||r.length>=t?e:""+Array(t+1-r.length).join(n)+e},b={s:y,z:function(e){var t=-e.utcOffset(),n=Math.abs(t),r=Math.floor(n/60),i=n%60;return(t<=0?"+":"-")+y(r,2,"0")+":"+y(i,2,"0")},m:function e(t,n){if(t.date()1)return e(a[0])}else{var s=t.name;x[s]=t,i=s}return!r&&i&&(v=i),i||!r&&v},C=function(e,t){if(w(e))return e.clone();var n="object"==typeof t?t:{};return n.date=e,n.args=arguments,new A(n)},_=b;_.l=S,_.i=w,_.w=function(e,t){return C(e,{locale:t.$L,utc:t.$u,x:t.$x,$offset:t.$offset})};var A=function(){function m(e){this.$L=S(e.locale,null,!0),this.parse(e),this.$x=this.$x||e.x||{},this[k]=!0}var y=m.prototype;return y.parse=function(e){this.$d=function(e){var t=e.date,n=e.utc;if(null===t)return new Date(NaN);if(_.u(t))return new Date;if(t instanceof Date)return new Date(t);if("string"==typeof t&&!/Z$/i.test(t)){var r=t.match(p);if(r){var 
i=r[2]-1||0,o=(r[7]||"0").substring(0,3);return n?new Date(Date.UTC(r[1],i,r[3]||1,r[4]||0,r[5]||0,r[6]||0,o)):new Date(r[1],i,r[3]||1,r[4]||0,r[5]||0,r[6]||0,o)}}return new Date(t)}(e),this.init()},y.init=function(){var e=this.$d;this.$y=e.getFullYear(),this.$M=e.getMonth(),this.$D=e.getDate(),this.$W=e.getDay(),this.$H=e.getHours(),this.$m=e.getMinutes(),this.$s=e.getSeconds(),this.$ms=e.getMilliseconds()},y.$utils=function(){return _},y.isValid=function(){return!(this.$d.toString()===f)},y.isSame=function(e,t){var n=C(e);return this.startOf(t)<=n&&n<=this.endOf(t)},y.isAfter=function(e,t){return C(e){"use strict";n.r(t),n.d(t,{default:()=>r.A});var r=n(7868)},463:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M6 17h3l2-4V7H5v6h3zm8 0h3l2-4V7h-6v6h3z"}),"FormatQuote")},522:(e,t,n)=>{"use strict";n.d(t,{H:()=>nn,r:()=>tn});var r=n(3759);function i(e){return"undefined"===typeof e||null===e}function o(e){return"object"===typeof e&&null!==e}function a(e){return Array.isArray(e)?e:i(e)?[]:[e]}function s(e,t){var n,r,i,o;if(t)for(n=0,r=(o=Object.keys(t)).length;ns&&(t=r-s+(o=" ... 
").length),n-r>s&&(n=r+s-(a=" ...").length),{str:o+e.slice(t,n).replace(/\t/g,"\u2192")+a,pos:r-t+o.length}}function g(e,t){return u.repeat(" ",t-e.length)+e}function m(e,t){if(t=Object.create(t||null),!e.buffer)return null;t.maxLength||(t.maxLength=79),"number"!==typeof t.indent&&(t.indent=1),"number"!==typeof t.linesBefore&&(t.linesBefore=3),"number"!==typeof t.linesAfter&&(t.linesAfter=2);for(var n,r=/\r?\n|\r|\0/g,i=[0],o=[],a=-1;n=r.exec(e.buffer);)o.push(n.index),i.push(n.index+n[0].length),e.position<=n.index&&a<0&&(a=i.length-2);a<0&&(a=i.length-1);var s,l,c="",h=Math.min(e.line+t.linesAfter,o.length).toString().length,d=t.maxLength-(t.indent+h+3);for(s=1;s<=t.linesBefore&&!(a-s<0);s++)l=p(e.buffer,i[a-s],o[a-s],e.position-(i[a]-i[a-s]),d),c=u.repeat(" ",t.indent)+g((e.line-s+1).toString(),h)+" | "+l.str+"\n"+c;for(l=p(e.buffer,i[a],o[a],e.position,d),c+=u.repeat(" ",t.indent)+g((e.line+1).toString(),h)+" | "+l.str+"\n",c+=u.repeat("-",t.indent+h+3+l.pos)+"^\n",s=1;s<=t.linesAfter&&!(a+s>=o.length);s++)l=p(e.buffer,i[a+s],o[a+s],e.position-(i[a]-i[a+s]),d),c+=u.repeat(" ",t.indent)+g((e.line+s+1).toString(),h)+" | "+l.str+"\n";return c.replace(/\n$/,"")}(0,r.K2)(p,"getLine"),(0,r.K2)(g,"padStart"),(0,r.K2)(m,"makeSnippet");var y=m,b=["kind","multi","resolve","construct","instanceOf","predicate","represent","representName","defaultStyle","styleAliases"],v=["scalar","sequence","mapping"];function x(e){var t={};return null!==e&&Object.keys(e).forEach((function(n){e[n].forEach((function(e){t[String(e)]=n}))})),t}function k(e,t){if(t=t||{},Object.keys(t).forEach((function(t){if(-1===b.indexOf(t))throw new f('Unknown option "'+t+'" is met in definition of "'+e+'" YAML type.')})),this.options=t,this.tag=e,this.kind=t.kind||null,this.resolve=t.resolve||function(){return!0},this.construct=t.construct||function(e){return 
e},this.instanceOf=t.instanceOf||null,this.predicate=t.predicate||null,this.represent=t.represent||null,this.representName=t.representName||null,this.defaultStyle=t.defaultStyle||null,this.multi=t.multi||!1,this.styleAliases=x(t.styleAliases||null),-1===v.indexOf(this.kind))throw new f('Unknown kind "'+this.kind+'" is specified for "'+e+'" YAML type.')}(0,r.K2)(x,"compileStyleAliases"),(0,r.K2)(k,"Type$1");var w=k;function S(e,t){var n=[];return e[t].forEach((function(e){var t=n.length;n.forEach((function(n,r){n.tag===e.tag&&n.kind===e.kind&&n.multi===e.multi&&(t=r)})),n[t]=e})),n}function C(){var e,t,n={scalar:{},sequence:{},mapping:{},fallback:{},multi:{scalar:[],sequence:[],mapping:[],fallback:[]}};function i(e){e.multi?(n.multi[e.kind].push(e),n.multi.fallback.push(e)):n[e.kind][e.tag]=n.fallback[e.tag]=e}for((0,r.K2)(i,"collectType"),e=0,t=arguments.length;e=0?"0b"+e.toString(2):"-0b"+e.toString(2).slice(1)}),"binary"),octal:(0,r.K2)((function(e){return e>=0?"0o"+e.toString(8):"-0o"+e.toString(8).slice(1)}),"octal"),decimal:(0,r.K2)((function(e){return e.toString(10)}),"decimal"),hexadecimal:(0,r.K2)((function(e){return e>=0?"0x"+e.toString(16).toUpperCase():"-0x"+e.toString(16).toUpperCase().slice(1)}),"hexadecimal")},defaultStyle:"decimal",styleAliases:{binary:[2,"bin"],octal:[8,"oct"],decimal:[10,"dec"],hexadecimal:[16,"hex"]}}),q=new RegExp("^(?:[-+]?(?:[0-9][0-9_]*)(?:\\.[0-9_]*)?(?:[eE][-+]?[0-9]+)?|\\.[0-9_]+(?:[eE][-+]?[0-9]+)?|[-+]?\\.(?:inf|Inf|INF)|\\.(?:nan|NaN|NAN))$");function H(e){return null!==e&&!(!q.test(e)||"_"===e[e.length-1])}function W(e){var t,n;return n="-"===(t=e.replace(/_/g,"").toLowerCase())[0]?-1:1,"+-".indexOf(t[0])>=0&&(t=t.slice(1)),".inf"===t?1===n?Number.POSITIVE_INFINITY:Number.NEGATIVE_INFINITY:".nan"===t?NaN:n*parseFloat(t,10)}(0,r.K2)(H,"resolveYamlFloat"),(0,r.K2)(W,"constructYamlFloat");var K=/^[-+]?[0-9]+e/;function U(e,t){var 
n;if(isNaN(e))switch(t){case"lowercase":return".nan";case"uppercase":return".NAN";case"camelcase":return".NaN"}else if(Number.POSITIVE_INFINITY===e)switch(t){case"lowercase":return".inf";case"uppercase":return".INF";case"camelcase":return".Inf"}else if(Number.NEGATIVE_INFINITY===e)switch(t){case"lowercase":return"-.inf";case"uppercase":return"-.INF";case"camelcase":return"-.Inf"}else if(u.isNegativeZero(e))return"-0.0";return n=e.toString(10),K.test(n)?n.replace("e",".e"):n}function V(e){return"[object Number]"===Object.prototype.toString.call(e)&&(e%1!==0||u.isNegativeZero(e))}(0,r.K2)(U,"representYamlFloat"),(0,r.K2)(V,"isFloat");var Y=new w("tag:yaml.org,2002:float",{kind:"scalar",resolve:H,construct:W,predicate:V,represent:U,defaultStyle:"lowercase"}),G=A.extend({implicit:[M,$,j,Y]}),X=G,Q=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])$"),Z=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:[Tt]|[ \\t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \\t]*(Z|([-+])([0-9][0-9]?)(?::([0-9][0-9]))?))?$");function J(e){return null!==e&&(null!==Q.exec(e)||null!==Z.exec(e))}function ee(e){var t,n,r,i,o,a,s,l,c=0,u=null;if(null===(t=Q.exec(e))&&(t=Z.exec(e)),null===t)throw new Error("Date resolve error");if(n=+t[1],r=+t[2]-1,i=+t[3],!t[4])return new Date(Date.UTC(n,r,i));if(o=+t[4],a=+t[5],s=+t[6],t[7]){for(c=t[7].slice(0,3);c.length<3;)c+="0";c=+c}return t[9]&&(u=6e4*(60*+t[10]+ +(t[11]||0)),"-"===t[9]&&(u=-u)),l=new Date(Date.UTC(n,r,i,o,a,s,c)),u&&l.setTime(l.getTime()-u),l}function te(e){return e.toISOString()}(0,r.K2)(J,"resolveYamlTimestamp"),(0,r.K2)(ee,"constructYamlTimestamp"),(0,r.K2)(te,"representYamlTimestamp");var ne=new w("tag:yaml.org,2002:timestamp",{kind:"scalar",resolve:J,construct:ee,instanceOf:Date,represent:te});function re(e){return"<<"===e||null===e}(0,r.K2)(re,"resolveYamlMerge");var ie=new 
w("tag:yaml.org,2002:merge",{kind:"scalar",resolve:re}),oe="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\n\r";function ae(e){if(null===e)return!1;var t,n,r=0,i=e.length,o=oe;for(n=0;n64)){if(t<0)return!1;r+=6}return r%8===0}function se(e){var t,n,r=e.replace(/[\r\n=]/g,""),i=r.length,o=oe,a=0,s=[];for(t=0;t>16&255),s.push(a>>8&255),s.push(255&a)),a=a<<6|o.indexOf(r.charAt(t));return 0===(n=i%4*6)?(s.push(a>>16&255),s.push(a>>8&255),s.push(255&a)):18===n?(s.push(a>>10&255),s.push(a>>2&255)):12===n&&s.push(a>>4&255),new Uint8Array(s)}function le(e){var t,n,r="",i=0,o=e.length,a=oe;for(t=0;t>18&63],r+=a[i>>12&63],r+=a[i>>6&63],r+=a[63&i]),i=(i<<8)+e[t];return 0===(n=o%3)?(r+=a[i>>18&63],r+=a[i>>12&63],r+=a[i>>6&63],r+=a[63&i]):2===n?(r+=a[i>>10&63],r+=a[i>>4&63],r+=a[i<<2&63],r+=a[64]):1===n&&(r+=a[i>>2&63],r+=a[i<<4&63],r+=a[64],r+=a[64]),r}function ce(e){return"[object Uint8Array]"===Object.prototype.toString.call(e)}(0,r.K2)(ae,"resolveYamlBinary"),(0,r.K2)(se,"constructYamlBinary"),(0,r.K2)(le,"representYamlBinary"),(0,r.K2)(ce,"isBinary");var ue=new w("tag:yaml.org,2002:binary",{kind:"scalar",resolve:ae,construct:se,predicate:ce,represent:le}),he=Object.prototype.hasOwnProperty,de=Object.prototype.toString;function fe(e){if(null===e)return!0;var t,n,r,i,o,a=[],s=e;for(t=0,n=s.length;t>10),56320+(e-65536&1023))}(0,r.K2)(Le,"_class"),(0,r.K2)(Pe,"is_EOL"),(0,r.K2)(Oe,"is_WHITE_SPACE"),(0,r.K2)($e,"is_WS_OR_EOL"),(0,r.K2)(Be,"is_FLOW_INDICATOR"),(0,r.K2)(De,"fromHexCode"),(0,r.K2)(ze,"escapedHexLen"),(0,r.K2)(Ie,"fromDecimalCode"),(0,r.K2)(Ne,"simpleEscapeSequence"),(0,r.K2)(Re,"charFromCodepoint");var je,qe=new Array(256),He=new Array(256);for(je=0;je<256;je++)qe[je]=Ne(je)?1:0,He[je]=Ne(je);function 
We(e,t){this.input=e,this.filename=t.filename||null,this.schema=t.schema||Ce,this.onWarning=t.onWarning||null,this.legacy=t.legacy||!1,this.json=t.json||!1,this.listener=t.listener||null,this.implicitTypes=this.schema.compiledImplicit,this.typeMap=this.schema.compiledTypeMap,this.length=e.length,this.position=0,this.line=0,this.lineStart=0,this.lineIndent=0,this.firstTabInLine=-1,this.documents=[]}function Ke(e,t){var n={name:e.filename,buffer:e.input.slice(0,-1),position:e.position,line:e.line,column:e.position-e.lineStart};return n.snippet=y(n),new f(t,n)}function Ue(e,t){throw Ke(e,t)}function Ve(e,t){e.onWarning&&e.onWarning.call(null,Ke(e,t))}(0,r.K2)(We,"State$1"),(0,r.K2)(Ke,"generateError"),(0,r.K2)(Ue,"throwError"),(0,r.K2)(Ve,"throwWarning");var Ye={YAML:(0,r.K2)((function(e,t,n){var r,i,o;null!==e.version&&Ue(e,"duplication of %YAML directive"),1!==n.length&&Ue(e,"YAML directive accepts exactly one argument"),null===(r=/^([0-9]+)\.([0-9]+)$/.exec(n[0]))&&Ue(e,"ill-formed argument of the YAML directive"),i=parseInt(r[1],10),o=parseInt(r[2],10),1!==i&&Ue(e,"unacceptable YAML version of the document"),e.version=n[0],e.checkLineBreaks=o<2,1!==o&&2!==o&&Ve(e,"unsupported YAML version of the document")}),"handleYamlDirective"),TAG:(0,r.K2)((function(e,t,n){var r,i;2!==n.length&&Ue(e,"TAG directive accepts exactly two arguments"),r=n[0],i=n[1],Fe.test(r)||Ue(e,"ill-formed tag handle (first argument) of the TAG directive"),_e.call(e.tagMap,r)&&Ue(e,'there is a previously declared suffix for "'+r+'" tag handle'),Me.test(i)||Ue(e,"ill-formed tag prefix (second argument) of the TAG directive");try{i=decodeURIComponent(i)}catch(o){Ue(e,"tag prefix is malformed: "+i)}e.tagMap[r]=i}),"handleTagDirective")};function Ge(e,t,n,r){var i,o,a,s;if(t1&&(e.result+=u.repeat("\n",t-1))}function nt(e,t,n){var 
r,i,o,a,s,l,c,u,h=e.kind,d=e.result;if($e(u=e.input.charCodeAt(e.position))||Be(u)||35===u||38===u||42===u||33===u||124===u||62===u||39===u||34===u||37===u||64===u||96===u)return!1;if((63===u||45===u)&&($e(r=e.input.charCodeAt(e.position+1))||n&&Be(r)))return!1;for(e.kind="scalar",e.result="",i=o=e.position,a=!1;0!==u;){if(58===u){if($e(r=e.input.charCodeAt(e.position+1))||n&&Be(r))break}else if(35===u){if($e(e.input.charCodeAt(e.position-1)))break}else{if(e.position===e.lineStart&&et(e)||n&&Be(u))break;if(Pe(u)){if(s=e.line,l=e.lineStart,c=e.lineIndent,Je(e,!1,-1),e.lineIndent>=t){a=!0,u=e.input.charCodeAt(e.position);continue}e.position=o,e.line=s,e.lineStart=l,e.lineIndent=c;break}}a&&(Ge(e,i,o,!1),tt(e,e.line-s),i=o=e.position,a=!1),Oe(u)||(o=e.position+1),u=e.input.charCodeAt(++e.position)}return Ge(e,i,o,!1),!!e.result||(e.kind=h,e.result=d,!1)}function rt(e,t){var n,r,i;if(39!==(n=e.input.charCodeAt(e.position)))return!1;for(e.kind="scalar",e.result="",e.position++,r=i=e.position;0!==(n=e.input.charCodeAt(e.position));)if(39===n){if(Ge(e,r,e.position,!0),39!==(n=e.input.charCodeAt(++e.position)))return!0;r=e.position,e.position++,i=e.position}else Pe(n)?(Ge(e,r,i,!0),tt(e,Je(e,!1,t)),r=i=e.position):e.position===e.lineStart&&et(e)?Ue(e,"unexpected end of the document within a single quoted scalar"):(e.position++,i=e.position);Ue(e,"unexpected end of the stream within a single quoted scalar")}function it(e,t){var n,r,i,o,a,s;if(34!==(s=e.input.charCodeAt(e.position)))return!1;for(e.kind="scalar",e.result="",e.position++,n=r=e.position;0!==(s=e.input.charCodeAt(e.position));){if(34===s)return Ge(e,n,e.position,!0),e.position++,!0;if(92===s){if(Ge(e,n,e.position,!0),Pe(s=e.input.charCodeAt(++e.position)))Je(e,!1,t);else if(s<256&&qe[s])e.result+=He[s],e.position++;else if((a=ze(s))>0){for(i=a,o=0;i>0;i--)(a=De(s=e.input.charCodeAt(++e.position)))>=0?o=(o<<4)+a:Ue(e,"expected hexadecimal character");e.result+=Re(o),e.position++}else Ue(e,"unknown escape 
sequence");n=r=e.position}else Pe(s)?(Ge(e,n,r,!0),tt(e,Je(e,!1,t)),n=r=e.position):e.position===e.lineStart&&et(e)?Ue(e,"unexpected end of the document within a double quoted scalar"):(e.position++,r=e.position)}Ue(e,"unexpected end of the stream within a double quoted scalar")}function ot(e,t){var n,r,i,o,a,s,l,c,u,h,d,f,p=!0,g=e.tag,m=e.anchor,y=Object.create(null);if(91===(f=e.input.charCodeAt(e.position)))a=93,c=!1,o=[];else{if(123!==f)return!1;a=125,c=!0,o={}}for(null!==e.anchor&&(e.anchorMap[e.anchor]=o),f=e.input.charCodeAt(++e.position);0!==f;){if(Je(e,!0,t),(f=e.input.charCodeAt(e.position))===a)return e.position++,e.tag=g,e.anchor=m,e.kind=c?"mapping":"sequence",e.result=o,!0;p?44===f&&Ue(e,"expected the node content, but found ','"):Ue(e,"missed comma between flow collection entries"),d=null,s=l=!1,63===f&&$e(e.input.charCodeAt(e.position+1))&&(s=l=!0,e.position++,Je(e,!0,t)),n=e.line,r=e.lineStart,i=e.position,dt(e,t,1,!1,!0),h=e.tag,u=e.result,Je(e,!0,t),f=e.input.charCodeAt(e.position),!l&&e.line!==n||58!==f||(s=!0,f=e.input.charCodeAt(++e.position),Je(e,!0,t),dt(e,t,1,!1,!0),d=e.result),c?Qe(e,o,y,h,u,d,n,r,i):s?o.push(Qe(e,null,y,h,u,d,n,r,i)):o.push(u),Je(e,!0,t),44===(f=e.input.charCodeAt(e.position))?(p=!0,f=e.input.charCodeAt(++e.position)):p=!1}Ue(e,"unexpected end of the stream within a flow collection")}function at(e,t){var n,r,i,o,a=1,s=!1,l=!1,c=t,h=0,d=!1;if(124===(o=e.input.charCodeAt(e.position)))r=!1;else{if(62!==o)return!1;r=!0}for(e.kind="scalar",e.result="";0!==o;)if(43===(o=e.input.charCodeAt(++e.position))||45===o)1===a?a=43===o?3:2:Ue(e,"repeat of a chomping mode identifier");else{if(!((i=Ie(o))>=0))break;0===i?Ue(e,"bad explicit indentation width of a block scalar; it cannot be less than one"):l?Ue(e,"repeat of an indentation width 
identifier"):(c=t+i-1,l=!0)}if(Oe(o)){do{o=e.input.charCodeAt(++e.position)}while(Oe(o));if(35===o)do{o=e.input.charCodeAt(++e.position)}while(!Pe(o)&&0!==o)}for(;0!==o;){for(Ze(e),e.lineIndent=0,o=e.input.charCodeAt(e.position);(!l||e.lineIndentc&&(c=e.lineIndent),Pe(o))h++;else{if(e.lineIndentt)&&0!==r)Ue(e,"bad indentation of a sequence entry");else if(e.lineIndentt)&&(y&&(a=e.line,s=e.lineStart,l=e.position),dt(e,t,4,!0,i)&&(y?g=e.result:m=e.result),y||(Qe(e,d,f,p,g,m,a,s,l),p=g=m=null),Je(e,!0,-1),c=e.input.charCodeAt(e.position)),(e.line===o||e.lineIndent>t)&&0!==c)Ue(e,"bad indentation of a mapping entry");else if(e.lineIndentt?p=1:e.lineIndent===t?p=0:e.lineIndentt?p=1:e.lineIndent===t?p=0:e.lineIndent tag; it should be "scalar", not "'+e.kind+'"'),l=0,c=e.implicitTypes.length;l"),null!==e.result&&h.kind!==e.kind&&Ue(e,"unacceptable node kind for !<"+e.tag+'> tag; it should be "'+h.kind+'", not "'+e.kind+'"'),h.resolve(e.result,e.tag)?(e.result=h.construct(e.result,e.tag),null!==e.anchor&&(e.anchorMap[e.anchor]=e.result)):Ue(e,"cannot resolve a node with !<"+e.tag+"> explicit tag")}return null!==e.listener&&e.listener("close",e),null!==e.tag||null!==e.anchor||m}function ft(e){var t,n,r,i,o=e.position,a=!1;for(e.version=null,e.checkLineBreaks=e.legacy,e.tagMap=Object.create(null),e.anchorMap=Object.create(null);0!==(i=e.input.charCodeAt(e.position))&&(Je(e,!0,-1),i=e.input.charCodeAt(e.position),!(e.lineIndent>0||37!==i));){for(a=!0,i=e.input.charCodeAt(++e.position),t=e.position;0!==i&&!$e(i);)i=e.input.charCodeAt(++e.position);for(r=[],(n=e.input.slice(t,e.position)).length<1&&Ue(e,"directive name must not be less than one character in length");0!==i;){for(;Oe(i);)i=e.input.charCodeAt(++e.position);if(35===i){do{i=e.input.charCodeAt(++e.position)}while(0!==i&&!Pe(i));break}if(Pe(i))break;for(t=e.position;0!==i&&!$e(i);)i=e.input.charCodeAt(++e.position);r.push(e.input.slice(t,e.position))}0!==i&&Ze(e),_e.call(Ye,n)?Ye[n](e,n,r):Ve(e,'unknown document 
directive "'+n+'"')}Je(e,!0,-1),0===e.lineIndent&&45===e.input.charCodeAt(e.position)&&45===e.input.charCodeAt(e.position+1)&&45===e.input.charCodeAt(e.position+2)?(e.position+=3,Je(e,!0,-1)):a&&Ue(e,"directives end mark is expected"),dt(e,e.lineIndent-1,4,!1,!0),Je(e,!0,-1),e.checkLineBreaks&&Te.test(e.input.slice(o,e.position))&&Ve(e,"non-ASCII line breaks are interpreted as content"),e.documents.push(e.result),e.position===e.lineStart&&et(e)?46===e.input.charCodeAt(e.position)&&(e.position+=3,Je(e,!0,-1)):e.position=55296&&r<=56319&&t+1=56320&&n<=57343?1024*(r-55296)+n-56320+65536:r}function zt(e){return/^\n* /.test(e)}(0,r.K2)(At,"State"),(0,r.K2)(Tt,"indentString"),(0,r.K2)(Et,"generateNextLine"),(0,r.K2)(Ft,"testImplicitResolving"),(0,r.K2)(Mt,"isWhitespace"),(0,r.K2)(Lt,"isPrintable"),(0,r.K2)(Pt,"isNsCharOrWhitespace"),(0,r.K2)(Ot,"isPlainSafe"),(0,r.K2)($t,"isPlainSafeFirst"),(0,r.K2)(Bt,"isPlainSafeLast"),(0,r.K2)(Dt,"codePointAt"),(0,r.K2)(zt,"needIndentIndicator");function It(e,t,n,r,i,o,a,s){var l,c=0,u=null,h=!1,d=!1,f=-1!==r,p=-1,g=$t(Dt(e,0))&&Bt(Dt(e,e.length-1));if(t||a)for(l=0;l=65536?l+=2:l++){if(!Lt(c=Dt(e,l)))return 5;g=g&&Ot(c,u,s),u=c}else{for(l=0;l=65536?l+=2:l++){if(10===(c=Dt(e,l)))h=!0,f&&(d=d||l-p-1>r&&" "!==e[p+1],p=l);else if(!Lt(c))return 5;g=g&&Ot(c,u,s),u=c}d=d||f&&l-p-1>r&&" "!==e[p+1]}return h||d?n>9&&zt(e)?5:a?2===o?5:2:d?4:3:!g||a||i(e)?2===o?5:2:1}function Nt(e,t,n,i,o){e.dump=function(){if(0===t.length)return 2===e.quotingType?'""':"''";if(!e.noCompatMode&&(-1!==wt.indexOf(t)||St.test(t)))return 2===e.quotingType?'"'+t+'"':"'"+t+"'";var a=e.indent*Math.max(1,n),s=-1===e.lineWidth?-1:Math.max(Math.min(e.lineWidth,40),e.lineWidth-a),l=i||e.flowLevel>-1&&n>=e.flowLevel;function c(t){return Ft(e,t)}switch((0,r.K2)(c,"testAmbiguity"),It(t,l,e.indent,s,c,e.quotingType,e.forceQuotes&&!i,o)){case 1:return t;case 2:return"'"+t.replace(/'/g,"''")+"'";case 3:return"|"+Rt(t,e.indent)+jt(Tt(t,a));case 
4:return">"+Rt(t,e.indent)+jt(Tt(qt(t,s),a));case 5:return'"'+Wt(t)+'"';default:throw new f("impossible error: invalid scalar style")}}()}function Rt(e,t){var n=zt(e)?String(t):"",r="\n"===e[e.length-1];return n+(r&&("\n"===e[e.length-2]||"\n"===e)?"+":r?"":"-")+"\n"}function jt(e){return"\n"===e[e.length-1]?e.slice(0,-1):e}function qt(e,t){for(var n,r,i=/(\n+)([^\n]*)/g,o=function(){var n=e.indexOf("\n");return n=-1!==n?n:e.length,i.lastIndex=n,Ht(e.slice(0,n),t)}(),a="\n"===e[0]||" "===e[0];r=i.exec(e);){var s=r[1],l=r[2];n=" "===l[0],o+=s+(a||n||""===l?"":"\n")+Ht(l,t),a=n}return o}function Ht(e,t){if(""===e||" "===e[0])return e;for(var n,r,i=/ [^ ]/g,o=0,a=0,s=0,l="";n=i.exec(e);)(s=n.index)-o>t&&(r=a>o?a:s,l+="\n"+e.slice(o,r),o=r+1),a=s;return l+="\n",e.length-o>t&&a>o?l+=e.slice(o,a)+"\n"+e.slice(a+1):l+=e.slice(o),l.slice(1)}function Wt(e){for(var t,n="",r=0,i=0;i=65536?i+=2:i++)r=Dt(e,i),!(t=kt[r])&&Lt(r)?(n+=e[i],r>=65536&&(n+=e[i+1])):n+=t||_t(r);return n}function Kt(e,t,n){var r,i,o,a="",s=e.tag;for(r=0,i=n.length;r1024&&(s+="? "),s+=e.dump+(e.condenseFlow?'"':"")+":"+(e.condenseFlow?"":" "),Xt(e,t,a,!1,!1)&&(l+=s+=e.dump));e.tag=c,e.dump="{"+l+"}"}function Yt(e,t,n,r){var i,o,a,s,l,c,u="",h=e.tag,d=Object.keys(n);if(!0===e.sortKeys)d.sort();else if("function"===typeof e.sortKeys)d.sort(e.sortKeys);else if(e.sortKeys)throw new f("sortKeys must be a boolean or a function");for(i=0,o=d.length;i1024)&&(e.dump&&10===e.dump.charCodeAt(0)?c+="?":c+="? 
"),c+=e.dump,l&&(c+=Et(e,t)),Xt(e,t+1,s,!0,l)&&(e.dump&&10===e.dump.charCodeAt(0)?c+=":":c+=": ",u+=c+=e.dump));e.tag=h,e.dump=u||"{}"}function Gt(e,t,n){var r,i,o,a,s,l;for(o=0,a=(i=n?e.explicitTypes:e.implicitTypes).length;o tag resolver accepts not "'+l+'" style');r=s.represent[l](t,l)}e.dump=r}return!0}return!1}function Xt(e,t,n,r,i,o,a){e.tag=null,e.dump=n,Gt(e,n,!1)||Gt(e,n,!0);var s,l=bt.call(e.dump),c=r;r&&(r=e.flowLevel<0||e.flowLevel>t);var u,h,d="[object Object]"===l||"[object Array]"===l;if(d&&(h=-1!==(u=e.duplicates.indexOf(n))),(null!==e.tag&&"?"!==e.tag||h||2!==e.indent&&t>0)&&(i=!1),h&&e.usedDuplicates[u])e.dump="*ref_"+u;else{if(d&&h&&!e.usedDuplicates[u]&&(e.usedDuplicates[u]=!0),"[object Object]"===l)r&&0!==Object.keys(e.dump).length?(Yt(e,t,e.dump,i),h&&(e.dump="&ref_"+u+e.dump)):(Vt(e,t,e.dump),h&&(e.dump="&ref_"+u+" "+e.dump));else if("[object Array]"===l)r&&0!==e.dump.length?(e.noArrayIndent&&!a&&t>0?Ut(e,t-1,e.dump,i):Ut(e,t,e.dump,i),h&&(e.dump="&ref_"+u+e.dump)):(Kt(e,t,e.dump),h&&(e.dump="&ref_"+u+" "+e.dump));else{if("[object String]"!==l){if("[object Undefined]"===l)return!1;if(e.skipInvalid)return!1;throw new f("unacceptable kind of an object to dump "+l)}"?"!==e.tag&&Nt(e,e.dump,t,o,c)}null!==e.tag&&"?"!==e.tag&&(s=encodeURI("!"===e.tag[0]?e.tag.slice(1):e.tag).replace(/!/g,"%21"),s="!"===e.tag[0]?"!"+s:"tag:yaml.org,2002:"===s.slice(0,18)?"!!"+s.slice(18):"!<"+s+">",e.dump=s+" "+e.dump)}return!0}function Qt(e,t){var n,r,i=[],o=[];for(Zt(e,i,o),n=0,r=o.length;n{"use strict";var n=Symbol.for("react.transitional.element"),r=Symbol.for("react.portal"),i=Symbol.for("react.fragment"),o=Symbol.for("react.strict_mode"),a=Symbol.for("react.profiler");Symbol.for("react.provider");var 
s=Symbol.for("react.consumer"),l=Symbol.for("react.context"),c=Symbol.for("react.forward_ref"),u=Symbol.for("react.suspense"),h=Symbol.for("react.suspense_list"),d=Symbol.for("react.memo"),f=Symbol.for("react.lazy"),p=Symbol.for("react.view_transition"),g=Symbol.for("react.client.reference");function m(e){if("object"===typeof e&&null!==e){var t=e.$$typeof;switch(t){case n:switch(e=e.type){case i:case a:case o:case u:case h:case p:return e;default:switch(e=e&&e.$$typeof){case l:case c:case f:case d:case s:return e;default:return t}}case r:return t}}}t.vM=c,t.lD=d},579:(e,t,n)=>{"use strict";e.exports=n(2799)},634:(e,t,n)=>{"use strict";n.d(t,{$C:()=>T,$t:()=>H,C4:()=>K,I5:()=>q,Ib:()=>g,KL:()=>Y,Sm:()=>U,Un:()=>B,_K:()=>W,bH:()=>P,dq:()=>R,pe:()=>l,rY:()=>V,ru:()=>$,sM:()=>_,vU:()=>f,yT:()=>F});var r=n(3759),i=n(3755),o=n(3638),a=n(2863),s=n(7697),l="\u200b",c={curveBasis:o.qrM,curveBasisClosed:o.Yu4,curveBasisOpen:o.IA3,curveBumpX:o.Wi0,curveBumpY:o.PGM,curveBundle:o.OEq,curveCardinalClosed:o.olC,curveCardinalOpen:o.IrU,curveCardinal:o.y8u,curveCatmullRomClosed:o.Q7f,curveCatmullRomOpen:o.cVp,curveCatmullRom:o.oDi,curveLinear:o.lUB,curveLinearClosed:o.Lx9,curveMonotoneX:o.nVG,curveMonotoneY:o.uxU,curveNatural:o.Xf2,curveStep:o.GZz,curveStepAfter:o.UPb,curveStepBefore:o.dyv},u=/\s*(?:(\w+)(?=:):|(\w+))\s*(?:(\w+)|((?:(?!}%{2}).|\r?\n)*))?\s*(?:}%{2})?/gi,h=(0,r.K2)((function(e,t){const n=d(e,/(?:init\b)|(?:initialize\b)/);let i={};if(Array.isArray(n)){const e=n.map((e=>e.args));(0,r.$i)(e),i=(0,r.hH)(i,[...e])}else i=n.args;if(!i)return;let o=(0,r.Ch)(e,t);const a="config";return void 0!==i[a]&&("flowchart-v2"===o&&(o="flowchart"),i[o]=i[a],delete i[a]),i}),"detectInit"),d=(0,r.K2)((function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:null;try{const n=new RegExp(`[%]{2}(?![{]${u.source})(?=[}][%]{2}).*\n`,"ig");let i;e=e.trim().replace(n,"").replace(/'/gm,'"'),r.Rm.debug(`Detecting diagram directive${null!==t?" 
type:"+t:""} based on the text:${e}`);const o=[];for(;null!==(i=r.DB.exec(e));)if(i.index===r.DB.lastIndex&&r.DB.lastIndex++,i&&!t||t&&i[1]?.match(t)||t&&i[2]?.match(t)){const e=i[1]?i[1]:i[2],t=i[3]?i[3].trim():i[4]?JSON.parse(i[4].trim()):null;o.push({type:e,args:t})}return 0===o.length?{type:e,args:null}:1===o.length?o[0]:o}catch(n){return r.Rm.error(`ERROR: ${n.message} - Unable to parse directive type: '${t}' based on the text: '${e}'`),{type:void 0,args:null}}}),"detectDirective"),f=(0,r.K2)((function(e){return e.replace(r.DB,"")}),"removeDirectives"),p=(0,r.K2)((function(e,t){for(const[n,r]of t.entries())if(r.match(e))return n;return-1}),"isSubstringInArray");function g(e,t){if(!e)return t;const n=`curve${e.charAt(0).toUpperCase()+e.slice(1)}`;return c[n]??t}function m(e,t){const n=e.trim();if(n)return"loose"!==t.securityLevel?(0,i.J)(n):n}(0,r.K2)(g,"interpolateToCurve"),(0,r.K2)(m,"formatUrl");var y=(0,r.K2)((function(e){const t=e.split("."),n=t.length-1,i=t[n];let o=window;for(let c=0;c1?a-1:0),l=1;l{n+=b(e,t),t=e}));return w(e,n/2)}function x(e){return 1===e.length?e[0]:v(e)}(0,r.K2)(b,"distance"),(0,r.K2)(v,"traverseEdge"),(0,r.K2)(x,"calcLabelPosition");var k=(0,r.K2)((function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:2;const n=Math.pow(10,t);return Math.round(e*n)/n}),"roundNumber"),w=(0,r.K2)(((e,t)=>{let n,r=t;for(const i of e){if(n){const e=b(i,n);if(0===e)return n;if(e=1)return{x:i.x,y:i.y};if(t>0&&t<1)return{x:k((1-t)*n.x+t*i.x,5),y:k((1-t)*n.y+t*i.y,5)}}}n=i}throw new Error("Could not find a suitable point for the given distance")}),"calculatePoint"),S=(0,r.K2)(((e,t,n)=>{r.Rm.info(`our points ${JSON.stringify(t)}`),t[0]!==n&&(t=t.reverse());const i=w(t,25),o=e?10:5,a=Math.atan2(t[0].y-i.y,t[0].x-i.x),s={x:0,y:0};return s.x=Math.sin(a)*o+(t[0].x+i.x)/2,s.y=-Math.cos(a)*o+(t[0].y+i.y)/2,s}),"calcCardinalityPosition");function C(e,t,n){const i=structuredClone(n);r.Rm.info("our 
points",i),"start_left"!==t&&"start_right"!==t&&i.reverse();const o=w(i,25+e),a=10+.5*e,s=Math.atan2(i[0].y-o.y,i[0].x-o.x),l={x:0,y:0};return"start_left"===t?(l.x=Math.sin(s+Math.PI)*a+(i[0].x+o.x)/2,l.y=-Math.cos(s+Math.PI)*a+(i[0].y+o.y)/2):"end_right"===t?(l.x=Math.sin(s-Math.PI)*a+(i[0].x+o.x)/2-5,l.y=-Math.cos(s-Math.PI)*a+(i[0].y+o.y)/2-5):"end_left"===t?(l.x=Math.sin(s)*a+(i[0].x+o.x)/2-5,l.y=-Math.cos(s)*a+(i[0].y+o.y)/2-5):(l.x=Math.sin(s)*a+(i[0].x+o.x)/2,l.y=-Math.cos(s)*a+(i[0].y+o.y)/2),l}function _(e){let t="",n="";for(const r of e)void 0!==r&&(r.startsWith("color:")||r.startsWith("text-align:")?n=n+r+";":t=t+r+";");return{style:t,labelStyle:n}}(0,r.K2)(C,"calcTerminalLabelPosition"),(0,r.K2)(_,"getStylesFromArray");var A=0,T=(0,r.K2)((()=>(A++,"id-"+Math.random().toString(36).substr(2,12)+"-"+A)),"generateId");function E(e){let t="";const n="0123456789abcdef";for(let r=0;rE(e.length)),"random"),M=(0,r.K2)((function(){return{x:0,y:0,fill:void 0,anchor:"start",style:"#666",width:100,height:100,textMargin:0,rx:0,ry:0,valign:void 0,text:""}}),"getTextObj"),L=(0,r.K2)((function(e,t){const n=t.text.replace(r.Y2.lineBreakRegex," "),[,i]=q(t.fontSize),o=e.append("text");o.attr("x",t.x),o.attr("y",t.y),o.style("text-anchor",t.anchor),o.style("font-family",t.fontFamily),o.style("font-size",i),o.style("font-weight",t.fontWeight),o.attr("fill",t.fill),void 0!==t.class&&o.attr("class",t.class);const a=o.append("tspan");return a.attr("x",t.x+2*t.textMargin),a.attr("fill",t.fill),a.text(n),o}),"drawSimpleText"),P=(0,a.A)(((e,t,n)=>{if(!e)return e;if(n=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",joinWith:"
    "},n),r.Y2.lineBreakRegex.test(e))return e;const i=e.split(" ").filter(Boolean),o=[];let a="";return i.forEach(((e,r)=>{const s=B(`${e} `,n),l=B(a,n);if(s>t){const{hyphenatedStrings:r,remainingWord:i}=O(e,t,"-",n);o.push(a,...r),a=i}else l+s>=t?(o.push(a),a=e):a=[a,e].filter(Boolean).join(" ");r+1===i.length&&o.push(a)})),o.filter((e=>""!==e)).join(n.joinWith)}),((e,t,n)=>`${e}${t}${n.fontSize}${n.fontWeight}${n.fontFamily}${n.joinWith}`)),O=(0,a.A)((function(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;r=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:0},r);const i=[...e],o=[];let a="";return i.forEach(((e,s)=>{const l=`${a}${e}`;if(B(l,r)>=t){const e=s+1,t=i.length===e,r=`${l}${n}`;o.push(t?l:r),a=""}else a=l})),{hyphenatedStrings:o,remainingWord:a}}),(function(e,t){let n=arguments.length>3?arguments[3]:void 0;return`${e}${t}${arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-"}${n.fontSize}${n.fontWeight}${n.fontFamily}`}));function $(e,t){return z(e,t).height}function B(e,t){return z(e,t).width}(0,r.K2)($,"calculateTextHeight"),(0,r.K2)(B,"calculateTextWidth");var D,z=(0,a.A)(((e,t)=>{const{fontSize:n=12,fontFamily:i="Arial",fontWeight:a=400}=t;if(!e)return{width:0,height:0};const[,s]=q(n),c=["sans-serif",i],u=e.split(r.Y2.lineBreakRegex),h=[],d=(0,o.Ltv)("body");if(!d.remove)return{width:0,height:0,lineHeight:0};const f=d.append("svg");for(const r of c){let e=0;const t={width:0,height:0,lineHeight:0};for(const n of u){const i=M();i.text=n||l;const o=L(f,i).style("font-size",s).style("font-weight",a).style("font-family",r),c=(o._groups||o)[0][0].getBBox();if(0===c.width&&0===c.height)throw new Error("svg element not in render tree");t.width=Math.round(Math.max(t.width,c.width)),e=Math.round(c.height),t.height+=e,t.lineHeight=Math.round(Math.max(t.lineHeight,e))}h.push(t)}f.remove();return 
h[isNaN(h[1].height)||isNaN(h[1].width)||isNaN(h[1].lineHeight)||h[0].height>h[1].height&&h[0].width>h[1].width&&h[0].lineHeight>h[1].lineHeight?0:1]}),((e,t)=>`${e}${t.fontSize}${t.fontWeight}${t.fontFamily}`)),I=class{constructor(){let e=arguments.length>0&&void 0!==arguments[0]&&arguments[0],t=arguments.length>1?arguments[1]:void 0;this.count=0,this.count=t?t.length:0,this.next=e?()=>this.count++:()=>Date.now()}static#e=(()=>(0,r.K2)(this,"InitIDGenerator"))()},N=(0,r.K2)((function(e){return D=D||document.createElement("div"),e=escape(e).replace(/%26/g,"&").replace(/%23/g,"#").replace(/%3B/g,";"),D.innerHTML=e,unescape(D.textContent)}),"entityDecode");function R(e){return"str"in e}(0,r.K2)(R,"isDetailedError");var j=(0,r.K2)(((e,t,n,r)=>{if(!r)return;const i=e.node()?.getBBox();i&&e.append("text").text(r).attr("text-anchor","middle").attr("x",i.x+i.width/2).attr("y",-n).attr("class",t)}),"insertTitle"),q=(0,r.K2)((e=>{if("number"===typeof e)return[e,e+"px"];const t=parseInt(e??"",10);return Number.isNaN(t)?[void 0,void 0]:e===String(t)?[t,e+"px"]:[t,e]}),"parseFontSize");function H(e,t){return(0,s.A)({},e,t)}(0,r.K2)(H,"cleanAndMerge");var W={assignWithDepth:r.hH,wrapLabel:P,calculateTextHeight:$,calculateTextWidth:B,calculateTextDimensions:z,cleanAndMerge:H,detectInit:h,detectDirective:d,isSubstringInArray:p,interpolateToCurve:g,calcLabelPosition:x,calcCardinalityPosition:S,calcTerminalLabelPosition:C,formatUrl:m,getStylesFromArray:_,generateId:T,random:F,runFunc:y,entityDecode:N,insertTitle:j,parseFontSize:q,InitIDGenerator:I},K=(0,r.K2)((function(e){let t=e;return t=t.replace(/style.*:\S*#.*;/g,(function(e){return e.substring(0,e.length-1)})),t=t.replace(/classDef.*:\S*#.*;/g,(function(e){return e.substring(0,e.length-1)})),t=t.replace(/#\w+;/g,(function(e){const t=e.substring(1,e.length-1);return/^\+?\d+$/.test(t)?"\ufb02\xb0\xb0"+t+"\xb6\xdf":"\ufb02\xb0"+t+"\xb6\xdf"})),t}),"encodeEntities"),U=(0,r.K2)((function(e){return 
e.replace(/\ufb02\xb0\xb0/g,"&#").replace(/\ufb02\xb0/g,"&").replace(/\xb6\xdf/g,";")}),"decodeEntities"),V=(0,r.K2)(((e,t,n,r)=>{let{counter:i=0,prefix:o,suffix:a}=n;return r||`${o?`${o}_`:""}${e}_${t}_${i}${a?`_${a}`:""}`}),"getEdgeId");function Y(e){return e??null}(0,r.K2)(Y,"handleUndefinedAttr")},944:()=>{!function(e){var t=/(?:"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"|'(?:\\(?:\r\n|[\s\S])|[^'\\\r\n])*')/;e.languages.css={comment:/\/\*[\s\S]*?\*\//,atrule:{pattern:RegExp("@[\\w-](?:"+/[^;{\s"']|\s+(?!\s)/.source+"|"+t.source+")*?"+/(?:;|(?=\s*\{))/.source),inside:{rule:/^@[\w-]+/,"selector-function-argument":{pattern:/(\bselector\s*\(\s*(?![\s)]))(?:[^()\s]|\s+(?![\s)])|\((?:[^()]|\([^()]*\))*\))+(?=\s*\))/,lookbehind:!0,alias:"selector"},keyword:{pattern:/(^|[^\w-])(?:and|not|only|or)(?![\w-])/,lookbehind:!0}}},url:{pattern:RegExp("\\burl\\((?:"+t.source+"|"+/(?:[^\\\r\n()"']|\\[\s\S])*/.source+")\\)","i"),greedy:!0,inside:{function:/^url/i,punctuation:/^\(|\)$/,string:{pattern:RegExp("^"+t.source+"$"),alias:"url"}}},selector:{pattern:RegExp("(^|[{}\\s])[^{}\\s](?:[^{};\"'\\s]|\\s+(?![\\s{])|"+t.source+")*(?=\\s*\\{)"),lookbehind:!0},string:{pattern:t,greedy:!0},property:{pattern:/(^|[^-\w\xA0-\uFFFF])(?!\s)[-_a-z\xA0-\uFFFF](?:(?!\s)[-\w\xA0-\uFFFF])*(?=\s*:)/i,lookbehind:!0},important:/!important\b/i,function:{pattern:/(^|[^-a-z0-9])[-a-z0-9]+(?=\()/i,lookbehind:!0},punctuation:/[(){};:,]/},e.languages.css.atrule.inside.rest=e.languages.css;var n=e.languages.markup;n&&(n.tag.addInlined("style","css"),n.tag.addAttribute("style","css"))}(Prism)},958:(e,t,n)=>{"use strict";n.d(t,{DA:()=>x,IU:()=>B,KX:()=>_,U:()=>$,U7:()=>Tt,U_:()=>Ft,Zk:()=>u,aP:()=>Ct,gh:()=>Et,lC:()=>d,on:()=>At});var r=n(8434),i=n(9711),o=n(2596),a=n(634),s=n(3759),l=n(3638),c=n(8205),u=(0,s.K2)((async(e,t,n)=>{let r;const i=t.useHtmlLabels||(0,s._3)((0,s.D7)()?.htmlLabels);r=n||"node default";const 
c=e.insert("g").attr("class",r).attr("id",t.domId||t.id),u=c.insert("g").attr("class","label").attr("style",(0,a.KL)(t.labelStyle));let h;h=void 0===t.label?"":"string"===typeof t.label?t.label:t.label[0];const d=await(0,o.GZ)(u,(0,s.jZ)((0,a.Sm)(h),(0,s.D7)()),{useHtmlLabels:i,width:t.width||(0,s.D7)().flowchart?.wrappingWidth,cssClasses:"markdown-node-label",style:t.labelStyle,addSvgBackground:!!t.icon||!!t.img});let f=d.getBBox();const p=(t?.padding??0)/2;if(i){const e=d.children[0],t=(0,l.Ltv)(d),n=e.getElementsByTagName("img");if(n){const e=""===h.replace(/]*>/g,"").trim();await Promise.all([...n].map((t=>new Promise((n=>{function r(){if(t.style.display="flex",t.style.flexDirection="column",e){const e=(0,s.D7)().fontSize?(0,s.D7)().fontSize:window.getComputedStyle(document.body).fontSize,n=5,[r=s.UI.fontSize]=(0,a.I5)(e),i=r*n+"px";t.style.minWidth=i,t.style.maxWidth=i}else t.style.width="100%";n(t)}(0,s.K2)(r,"setupImage"),setTimeout((()=>{t.complete&&r()})),t.addEventListener("error",r),t.addEventListener("load",r)})))))}f=e.getBoundingClientRect(),t.attr("width",f.width),t.attr("height",f.height)}return i?u.attr("transform","translate("+-f.width/2+", "+-f.height/2+")"):u.attr("transform","translate(0, "+-f.height/2+")"),t.centerLabel&&u.attr("transform","translate("+-f.width/2+", "+-f.height/2+")"),u.insert("rect",":first-child"),{shapeSvg:c,bbox:f,halfPadding:p,label:u}}),"labelHelper"),h=(0,s.K2)((async(e,t,n)=>{const r=n.useHtmlLabels||(0,s._3)((0,s.D7)()?.flowchart?.htmlLabels),i=e.insert("g").attr("class","label").attr("style",n.labelStyle||""),c=await(0,o.GZ)(i,(0,s.jZ)((0,a.Sm)(t),(0,s.D7)()),{useHtmlLabels:r,width:n.width||(0,s.D7)()?.flowchart?.wrappingWidth,style:n.labelStyle,addSvgBackground:!!n.icon||!!n.img});let u=c.getBBox();const h=n.padding/2;if((0,s._3)((0,s.D7)()?.flowchart?.htmlLabels)){const e=c.children[0],t=(0,l.Ltv)(c);u=e.getBoundingClientRect(),t.attr("width",u.width),t.attr("height",u.height)}return 
r?i.attr("transform","translate("+-u.width/2+", "+-u.height/2+")"):i.attr("transform","translate(0, "+-u.height/2+")"),n.centerLabel&&i.attr("transform","translate("+-u.width/2+", "+-u.height/2+")"),i.insert("rect",":first-child"),{shapeSvg:e,bbox:u,halfPadding:h,label:i}}),"insertLabel"),d=(0,s.K2)(((e,t)=>{const n=t.node().getBBox();e.width=n.width,e.height=n.height}),"updateNodeBounds"),f=(0,s.K2)(((e,t)=>("handDrawn"===e.look?"rough-node":"node")+" "+e.cssClasses+" "+(t||"")),"getNodeClasses");function p(e){const t=e.map(((e,t)=>`${0===t?"M":"L"}${e.x},${e.y}`));return t.push("Z"),t.join(" ")}function g(e,t,n,r,i,o){const a=[],s=n-e,l=r-t,c=s/o,u=2*Math.PI/c,h=t+l/2;for(let d=0;d<=50;d++){const t=e+d/50*s,n=h+i*Math.sin(u*(t-e));a.push({x:t,y:n})}return a}function m(e,t,n,r,i,o){const a=[],s=i*Math.PI/180,l=(o*Math.PI/180-s)/(r-1);for(let c=0;c{var n,r,i=e.x,o=e.y,a=t.x-i,s=t.y-o,l=e.width/2,c=e.height/2;return Math.abs(s)*l>Math.abs(a)*c?(s<0&&(c=-c),n=0===s?0:c*a/s,r=c):(a<0&&(l=-l),n=l,r=0===a?0:l*s/a),{x:i+n,y:o+r}}),"intersectRect");function b(e,t){t&&e.attr("style",t)}async function v(e){const t=(0,l.Ltv)(document.createElementNS("http://www.w3.org/2000/svg","foreignObject")),n=t.append("xhtml:div");let r=e.label;e.label&&(0,s.Wi)(e.label)&&(r=await(0,s.VJ)(e.label.replace(s.Y2.lineBreakRegex,"\n"),(0,s.D7)()));const i=e.isNode?"nodeLabel":"edgeLabel";return n.html('"+r+""),b(n,e.labelStyle),n.style("display","inline-block"),n.style("padding-right","1px"),n.style("white-space","nowrap"),n.attr("xmlns","http://www.w3.org/1999/xhtml"),t.node()}(0,s.K2)(b,"applyStyle"),(0,s.K2)(v,"addHtmlLabel");var x=(0,s.K2)((async(e,t,n,r)=>{let i=e||"";if("object"===typeof i&&(i=i[0]),(0,s._3)((0,s.D7)().flowchart.htmlLabels)){i=i.replace(/\\n|\n/g,"
    "),s.Rm.info("vertexText"+i);const e={isNode:r,label:(0,a.Sm)(i).replace(/fa[blrs]?:fa-[\w-]+/g,(e=>``)),labelStyle:t?t.replace("fill:","color:"):t};return await v(e)}{const e=document.createElementNS("http://www.w3.org/2000/svg","text");e.setAttribute("style",t.replace("color:","fill:"));let r=[];r="string"===typeof i?i.split(/\\n|\n|/gi):Array.isArray(i)?i:[];for(const t of r){const r=document.createElementNS("http://www.w3.org/2000/svg","tspan");r.setAttributeNS("http://www.w3.org/XML/1998/namespace","xml:space","preserve"),r.setAttribute("dy","1em"),r.setAttribute("x","0"),n?r.setAttribute("class","title-row"):r.setAttribute("class","row"),r.textContent=t.trim(),e.appendChild(r)}return e}}),"createLabel"),k=(0,s.K2)(((e,t,n,r,i)=>["M",e+i,t,"H",e+n-i,"A",i,i,0,0,1,e+n,t+i,"V",t+r-i,"A",i,i,0,0,1,e+n-i,t+r,"H",e+i,"A",i,i,0,0,1,e,t+r-i,"V",t+i,"A",i,i,0,0,1,e+i,t,"Z"].join(" ")),"createRoundedRectPathD"),w=(0,s.K2)((e=>{const{handDrawnSeed:t}=(0,s.D7)();return{fill:e,hachureAngle:120,hachureGap:4,fillWeight:2,roughness:.7,stroke:e,seed:t}}),"solidStateFill"),S=(0,s.K2)((e=>{const t=C([...e.cssCompiledStyles||[],...e.cssStyles||[]]);return{stylesMap:t,stylesArray:[...t]}}),"compileStyles"),C=(0,s.K2)((e=>{const t=new Map;return e.forEach((e=>{const[n,r]=e.split(":");t.set(n.trim(),r?.trim())})),t}),"styles2Map"),_=(0,s.K2)((e=>"color"===e||"font-size"===e||"font-family"===e||"font-weight"===e||"font-style"===e||"text-decoration"===e||"text-align"===e||"text-transform"===e||"line-height"===e||"letter-spacing"===e||"word-spacing"===e||"text-shadow"===e||"text-overflow"===e||"white-space"===e||"word-wrap"===e||"word-break"===e||"overflow-wrap"===e||"hyphens"===e),"isLabelStyle"),A=(0,s.K2)((e=>{const{stylesArray:t}=S(e),n=[],r=[],i=[],o=[];return t.forEach((e=>{const t=e[0];_(t)?n.push(e.join(":")+" !important"):(r.push(e.join(":")+" !important"),t.includes("stroke")&&i.push(e.join(":")+" !important"),"fill"===t&&o.push(e.join(":")+" 
!important"))})),{labelStyles:n.join(";"),nodeStyles:r.join(";"),stylesArray:t,borderStyles:i,backgroundStyles:o}}),"styles2String"),T=(0,s.K2)(((e,t)=>{const{themeVariables:n,handDrawnSeed:r}=(0,s.D7)(),{nodeBorder:i,mainBkg:o}=n,{stylesMap:a}=S(e);return Object.assign({roughness:.7,fill:a.get("fill")||o,fillStyle:"hachure",fillWeight:4,hachureGap:5.2,stroke:a.get("stroke")||i,seed:r,strokeWidth:a.get("stroke-width")?.replace("px","")||1.3,fillLineDash:[0,0]},t)}),"userNodeOverrides"),E=(0,s.K2)((async(e,t)=>{s.Rm.info("Creating subgraph rect for ",t.id,t);const n=(0,s.D7)(),{themeVariables:i,handDrawnSeed:a}=n,{clusterBkg:u,clusterBorder:h}=i,{labelStyles:d,nodeStyles:f,borderStyles:p,backgroundStyles:g}=A(t),m=e.insert("g").attr("class","cluster "+t.cssClasses).attr("id",t.id).attr("data-look",t.look),b=(0,s._3)(n.flowchart.htmlLabels),v=m.insert("g").attr("class","cluster-label "),x=await(0,o.GZ)(v,t.label,{style:t.labelStyle,useHtmlLabels:b,isNode:!0});let w=x.getBBox();if((0,s._3)(n.flowchart.htmlLabels)){const e=x.children[0],t=(0,l.Ltv)(x);w=e.getBoundingClientRect(),t.attr("width",w.width),t.attr("height",w.height)}const S=t.width<=w.width+t.padding?w.width+t.padding:t.width;t.width<=w.width+t.padding?t.diff=(S-t.width)/2-t.padding:t.diff=-t.padding;const C=t.height,_=t.x-S/2,E=t.y-C/2;let F;if(s.Rm.trace("Data ",t,JSON.stringify(t)),"handDrawn"===t.look){const e=c.A.svg(m),n=T(t,{roughness:.7,fill:u,stroke:h,fillWeight:3,seed:a}),r=e.path(k(_,E,S,C,0),n);F=m.insert((()=>(s.Rm.debug("Rough node insert CXC",r),r)),":first-child"),F.select("path:nth-child(2)").attr("style",p.join(";")),F.select("path").attr("style",g.join(";").replace("fill","stroke"))}else F=m.insert("rect",":first-child"),F.attr("style",f).attr("rx",t.rx).attr("ry",t.ry).attr("x",_).attr("y",E).attr("width",S).attr("height",C);const{subGraphTitleTopMargin:M}=(0,r.O)(n);if(v.attr("transform",`translate(${t.x-w.width/2}, ${t.y-t.height/2+M})`),d){const 
e=v.select("span");e&&e.attr("style",d)}const L=F.node().getBBox();return t.offsetX=0,t.width=L.width,t.height=L.height,t.offsetY=w.height-t.padding/2,t.intersect=function(e){return y(t,e)},{cluster:m,labelBBox:w}}),"rect"),F=(0,s.K2)(((e,t)=>{const n=e.insert("g").attr("class","note-cluster").attr("id",t.id),r=n.insert("rect",":first-child"),i=0*t.padding,o=i/2;r.attr("rx",t.rx).attr("ry",t.ry).attr("x",t.x-t.width/2-o).attr("y",t.y-t.height/2-o).attr("width",t.width+i).attr("height",t.height+i).attr("fill","none");const a=r.node().getBBox();return t.width=a.width,t.height=a.height,t.intersect=function(e){return y(t,e)},{cluster:n,labelBBox:{width:0,height:0}}}),"noteGroup"),M=(0,s.K2)((async(e,t)=>{const n=(0,s.D7)(),{themeVariables:r,handDrawnSeed:i}=n,{altBackground:o,compositeBackground:a,compositeTitleBackground:u,nodeBorder:h}=r,d=e.insert("g").attr("class",t.cssClasses).attr("id",t.id).attr("data-id",t.id).attr("data-look",t.look),f=d.insert("g",":first-child"),p=d.insert("g").attr("class","cluster-label");let g=d.append("rect");const m=p.node().appendChild(await x(t.label,t.labelStyle,void 0,!0));let b=m.getBBox();if((0,s._3)(n.flowchart.htmlLabels)){const e=m.children[0],t=(0,l.Ltv)(m);b=e.getBoundingClientRect(),t.attr("width",b.width),t.attr("height",b.height)}const v=0*t.padding,w=v/2,S=(t.width<=b.width+t.padding?b.width+t.padding:t.width)+v;t.width<=b.width+t.padding?t.diff=(S-t.width)/2-t.padding:t.diff=-t.padding;const C=t.height+v,_=t.height+v-b.height-6,A=t.x-S/2,T=t.y-C/2;t.width=S;const E=t.y-t.height/2-w+b.height+2;let F;if("handDrawn"===t.look){const e=t.cssClasses.includes("statediagram-cluster-alt"),n=c.A.svg(d),r=t.rx||t.ry?n.path(k(A,T,S,C,10),{roughness:.7,fill:u,fillStyle:"solid",stroke:h,seed:i}):n.rectangle(A,T,S,C,{seed:i});F=d.insert((()=>r),":first-child");const 
s=n.rectangle(A,E,S,_,{fill:e?o:a,fillStyle:e?"hachure":"solid",stroke:h,seed:i});F=d.insert((()=>r),":first-child"),g=d.insert((()=>s))}else{F=f.insert("rect",":first-child");const e="outer";F.attr("class",e).attr("x",A).attr("y",T).attr("width",S).attr("height",C).attr("data-look",t.look),g.attr("class","inner").attr("x",A).attr("y",E).attr("width",S).attr("height",_)}p.attr("transform",`translate(${t.x-b.width/2}, ${T+1-((0,s._3)(n.flowchart.htmlLabels)?0:3)})`);const M=F.node().getBBox();return t.height=M.height,t.offsetX=0,t.offsetY=b.height-t.padding/2,t.labelBBox=b,t.intersect=function(e){return y(t,e)},{cluster:d,labelBBox:b}}),"roundedWithTitle"),L=(0,s.K2)((async(e,t)=>{s.Rm.info("Creating subgraph rect for ",t.id,t);const n=(0,s.D7)(),{themeVariables:i,handDrawnSeed:a}=n,{clusterBkg:u,clusterBorder:h}=i,{labelStyles:d,nodeStyles:f,borderStyles:p,backgroundStyles:g}=A(t),m=e.insert("g").attr("class","cluster "+t.cssClasses).attr("id",t.id).attr("data-look",t.look),b=(0,s._3)(n.flowchart.htmlLabels),v=m.insert("g").attr("class","cluster-label "),x=await(0,o.GZ)(v,t.label,{style:t.labelStyle,useHtmlLabels:b,isNode:!0,width:t.width});let w=x.getBBox();if((0,s._3)(n.flowchart.htmlLabels)){const e=x.children[0],t=(0,l.Ltv)(x);w=e.getBoundingClientRect(),t.attr("width",w.width),t.attr("height",w.height)}const S=t.width<=w.width+t.padding?w.width+t.padding:t.width;t.width<=w.width+t.padding?t.diff=(S-t.width)/2-t.padding:t.diff=-t.padding;const C=t.height,_=t.x-S/2,E=t.y-C/2;let F;if(s.Rm.trace("Data ",t,JSON.stringify(t)),"handDrawn"===t.look){const e=c.A.svg(m),n=T(t,{roughness:.7,fill:u,stroke:h,fillWeight:4,seed:a}),r=e.path(k(_,E,S,C,t.rx),n);F=m.insert((()=>(s.Rm.debug("Rough node insert CXC",r),r)),":first-child"),F.select("path:nth-child(2)").attr("style",p.join(";")),F.select("path").attr("style",g.join(";").replace("fill","stroke"))}else 
F=m.insert("rect",":first-child"),F.attr("style",f).attr("rx",t.rx).attr("ry",t.ry).attr("x",_).attr("y",E).attr("width",S).attr("height",C);const{subGraphTitleTopMargin:M}=(0,r.O)(n);if(v.attr("transform",`translate(${t.x-w.width/2}, ${t.y-t.height/2+M})`),d){const e=v.select("span");e&&e.attr("style",d)}const L=F.node().getBBox();return t.offsetX=0,t.width=L.width,t.height=L.height,t.offsetY=w.height-t.padding/2,t.intersect=function(e){return y(t,e)},{cluster:m,labelBBox:w}}),"kanbanSection"),P={rect:E,squareRect:E,roundedWithTitle:M,noteGroup:F,divider:(0,s.K2)(((e,t)=>{const n=(0,s.D7)(),{themeVariables:r,handDrawnSeed:i}=n,{nodeBorder:o}=r,a=e.insert("g").attr("class",t.cssClasses).attr("id",t.id).attr("data-look",t.look),l=a.insert("g",":first-child"),u=0*t.padding,h=t.width+u;t.diff=-t.padding;const d=t.height+u,f=t.x-h/2,p=t.y-d/2;let g;if(t.width=h,"handDrawn"===t.look){const e=c.A.svg(a).rectangle(f,p,h,d,{fill:"lightgrey",roughness:.5,strokeLineDash:[5],stroke:o,seed:i});g=a.insert((()=>e),":first-child")}else{g=l.insert("rect",":first-child");const e="divider";g.attr("class",e).attr("x",f).attr("y",p).attr("width",h).attr("height",d).attr("data-look",t.look)}const m=g.node().getBBox();return t.height=m.height,t.offsetX=0,t.offsetY=0,t.intersect=function(e){return y(t,e)},{cluster:a,labelBBox:{}}}),"divider"),kanbanSection:L},O=new Map,$=(0,s.K2)((async(e,t)=>{const n=t.shape||"rect",r=await P[n](e,t);return O.set(t.id,r),r}),"insertCluster"),B=(0,s.K2)((()=>{O=new Map}),"clear");function D(e,t){return e.intersect(t)}(0,s.K2)(D,"intersectNode");var z=D;function I(e,t,n,r){var i=e.x,o=e.y,a=i-r.x,s=o-r.y,l=Math.sqrt(t*t*s*s+n*n*a*a),c=Math.abs(t*n*a/l);r.x0}(0,s.K2)(q,"intersectLine"),(0,s.K2)(H,"sameSign");var W=q;function K(e,t,n){let r=e.x,i=e.y,o=[],a=Number.POSITIVE_INFINITY,s=Number.POSITIVE_INFINITY;"function"===typeof t.forEach?t.forEach((function(e){a=Math.min(a,e.x),s=Math.min(s,e.y)})):(a=Math.min(a,t.x),s=Math.min(s,t.y));let 
l=r-e.width/2-a,c=i-e.height/2-s;for(let u=0;u1&&o.sort((function(e,t){let r=e.x-n.x,i=e.y-n.y,o=Math.sqrt(r*r+i*i),a=t.x-n.x,s=t.y-n.y,l=Math.sqrt(a*a+s*s);return op),":first-child");return g.attr("class","anchor").attr("style",(0,a.KL)(l)),d(t,g),t.intersect=function(e){return s.Rm.info("Circle intersect",t,1,e),U.circle(t,1,e)},o}function Y(e,t,n,r,i,o,a){const s=(e+n)/2,l=(t+r)/2,c=Math.atan2(r-t,n-e),u=(n-e)/2/i,h=(r-t)/2/o,d=Math.sqrt(u**2+h**2);if(d>1)throw new Error("The given radii are too small to create an arc between the points.");const f=Math.sqrt(1-d**2),p=s+f*o*Math.sin(c)*(a?-1:1),g=l-f*i*Math.cos(c)*(a?-1:1),m=Math.atan2((t-g)/o,(e-p)/i);let y=Math.atan2((r-g)/o,(n-p)/i)-m;a&&y<0&&(y+=2*Math.PI),!a&&y>0&&(y-=2*Math.PI);const b=[];for(let v=0;v<20;v++){const e=m+v/19*y,t=p+i*Math.cos(e),n=g+o*Math.sin(e);b.push({x:t,y:n})}return b}async function G(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=o.width+t.padding+20,s=o.height+t.padding,l=s/2,h=l/(2.5+s/50),{cssStyles:g}=t,m=[{x:a/2,y:-s/2},{x:-a/2,y:-s/2},...Y(-a/2,-s/2,-a/2,s/2,h,l,!1),{x:a/2,y:s/2},...Y(a/2,s/2,a/2,-s/2,h,l,!0)],y=c.A.svg(i),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const v=p(m),x=y.path(v,b),k=i.insert((()=>x),":first-child");return k.attr("class","basic label-container"),g&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",g),r&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",r),k.attr("transform",`translate(${h/2}, 0)`),d(t,k),t.intersect=function(e){return U.polygon(t,m,e)},i}function X(e,t,n,r){return e.insert("polygon",":first-child").attr("points",r.map((function(e){return e.x+","+e.y})).join(" ")).attr("class","label-container").attr("transform","translate("+-t/2+","+n/2+")")}async function Q(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await 
u(e,t,f(t)),a=o.height+t.padding,s=o.width+t.padding+12,l=-a,h=[{x:12,y:l},{x:s,y:l},{x:s,y:0},{x:0,y:0},{x:0,y:l+12},{x:12,y:l}];let g;const{cssStyles:m}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=p(h),o=e.path(r,n);g=i.insert((()=>o),":first-child").attr("transform",`translate(${-s/2}, ${a/2})`),m&&g.attr("style",m)}else g=X(i,s,a,h);return r&&g.attr("style",r),d(t,g),t.intersect=function(e){return U.polygon(t,h,e)},i}function Z(e,t){const{nodeStyles:n}=A(t);t.label="";const r=e.insert("g").attr("class",f(t)).attr("id",t.domId??t.id),{cssStyles:i}=t,o=Math.max(28,t.width??0),a=[{x:0,y:o/2},{x:o/2,y:0},{x:0,y:-o/2},{x:-o/2,y:0}],s=c.A.svg(r),l=T(t,{});"handDrawn"!==t.look&&(l.roughness=0,l.fillStyle="solid");const u=p(a),h=s.path(u,l),d=r.insert((()=>h),":first-child");return i&&"handDrawn"!==t.look&&d.selectAll("path").attr("style",i),n&&"handDrawn"!==t.look&&d.selectAll("path").attr("style",n),t.width=28,t.height=28,t.intersect=function(e){return U.polygon(t,a,e)},r}async function J(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,halfPadding:l}=await u(e,t,f(t)),h=o.width/2+l;let p;const{cssStyles:g}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=e.circle(0,0,2*h,n);p=i.insert((()=>r),":first-child"),p.attr("class","basic label-container").attr("style",(0,a.KL)(g))}else p=i.insert("circle",":first-child").attr("class","basic label-container").attr("style",r).attr("r",h).attr("cx",0).attr("cy",0);return d(t,p),t.intersect=function(e){return s.Rm.info("Circle intersect",t,h,e),U.circle(t,h,e)},i}function ee(e){const t=Math.cos(Math.PI/4),n=Math.sin(Math.PI/4),r=2*e;return`M ${-r/2*t},${r/2*n} L ${r/2*t},${-r/2*n}\n M ${r/2*t},${r/2*n} L ${-r/2*t},${-r/2*n}`}function te(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n,t.label="";const 
i=e.insert("g").attr("class",f(t)).attr("id",t.domId??t.id),o=Math.max(30,t?.width??0),{cssStyles:a}=t,l=c.A.svg(i),u=T(t,{});"handDrawn"!==t.look&&(u.roughness=0,u.fillStyle="solid");const h=l.circle(0,0,2*o,u),p=ee(o),g=l.path(p,u),m=i.insert((()=>h),":first-child");return m.insert((()=>g)),a&&"handDrawn"!==t.look&&m.selectAll("path").attr("style",a),r&&"handDrawn"!==t.look&&m.selectAll("path").attr("style",r),d(t,m),t.intersect=function(e){s.Rm.info("crossedCircle intersect",t,{radius:o,point:e});return U.circle(t,o,e)},i}function ne(e,t,n){let r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:100,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:180;const o=[],a=(arguments.length>4&&void 0!==arguments[4]?arguments[4]:0)*Math.PI/180,s=(i*Math.PI/180-a)/(r-1);for(let l=0;lS),":first-child").attr("stroke-opacity",0),C.insert((()=>k),":first-child"),C.attr("class","text"),g&&"handDrawn"!==t.look&&C.selectAll("path").attr("style",g),r&&"handDrawn"!==t.look&&C.selectAll("path").attr("style",r),C.attr("transform",`translate(${h}, 0)`),a.attr("transform",`translate(${-s/2+h-(o.x-(o.left??0))},${-l/2+(t.padding??0)/2-(o.y-(o.top??0))})`),d(t,C),t.intersect=function(e){return U.polygon(t,y,e)},i}function ie(e,t,n){let r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:100,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:180;const o=[],a=(arguments.length>4&&void 0!==arguments[4]?arguments[4]:0)*Math.PI/180,s=(i*Math.PI/180-a)/(r-1);for(let l=0;lS),":first-child").attr("stroke-opacity",0),C.insert((()=>k),":first-child"),C.attr("class","text"),g&&"handDrawn"!==t.look&&C.selectAll("path").attr("style",g),r&&"handDrawn"!==t.look&&C.selectAll("path").attr("style",r),C.attr("transform",`translate(${-h}, 0)`),a.attr("transform",`translate(${-s/2+(t.padding??0)/2-(o.x-(o.left??0))},${-l/2+(t.padding??0)/2-(o.y-(o.top??0))})`),d(t,C),t.intersect=function(e){return U.polygon(t,y,e)},i}function ae(e,t,n){let r=arguments.length>3&&void 
0!==arguments[3]?arguments[3]:100,i=arguments.length>5&&void 0!==arguments[5]?arguments[5]:180;const o=[],a=(arguments.length>4&&void 0!==arguments[4]?arguments[4]:0)*Math.PI/180,s=(i*Math.PI/180-a)/(r-1);for(let l=0;lE),":first-child").attr("stroke-opacity",0),F.insert((()=>w),":first-child"),F.insert((()=>C),":first-child"),F.attr("class","text"),g&&"handDrawn"!==t.look&&F.selectAll("path").attr("style",g),r&&"handDrawn"!==t.look&&F.selectAll("path").attr("style",r),F.attr("transform",`translate(${h-h/4}, 0)`),a.attr("transform",`translate(${-s/2+(t.padding??0)/2-(o.x-(o.left??0))},${-l/2+(t.padding??0)/2-(o.y-(o.top??0))})`),d(t,F),t.intersect=function(e){return U.polygon(t,b,e)},i}async function le(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=Math.max(80,1.25*(o.width+2*(t.padding??0)),t?.width??0),s=Math.max(20,o.height+2*(t.padding??0),t?.height??0),l=s/2,{cssStyles:h}=t,g=c.A.svg(i),y=T(t,{});"handDrawn"!==t.look&&(y.roughness=0,y.fillStyle="solid");const b=a-l,v=s/4,x=[{x:b,y:0},{x:v,y:0},{x:0,y:s/2},{x:v,y:s},{x:b,y:s},...m(-b,-s/2,l,50,270,90)],k=p(x),w=g.path(k,y),S=i.insert((()=>w),":first-child");return S.attr("class","basic label-container"),h&&"handDrawn"!==t.look&&S.selectChildren("path").attr("style",h),r&&"handDrawn"!==t.look&&S.selectChildren("path").attr("style",r),S.attr("transform",`translate(${-a/2}, ${-s/2})`),d(t,S),t.intersect=function(e){return U.polygon(t,x,e)},i}(0,s.K2)(V,"anchor"),(0,s.K2)(Y,"generateArcPoints"),(0,s.K2)(G,"bowTieRect"),(0,s.K2)(X,"insertPolygonShape"),(0,s.K2)(Q,"card"),(0,s.K2)(Z,"choice"),(0,s.K2)(J,"circle"),(0,s.K2)(ee,"createLine"),(0,s.K2)(te,"crossedCircle"),(0,s.K2)(ne,"generateCirclePoints"),(0,s.K2)(re,"curlyBraceLeft"),(0,s.K2)(ie,"generateCirclePoints"),(0,s.K2)(oe,"curlyBraceRight"),(0,s.K2)(ae,"generateCirclePoints"),(0,s.K2)(se,"curlyBraces"),(0,s.K2)(le,"curvedTrapezoid");var ce=(0,s.K2)(((e,t,n,r,i,o)=>[`M${e},${t+o}`,`a${i},${o} 0,0,0 
${n},0`,`a${i},${o} 0,0,0 ${-n},0`,`l0,${r}`,`a${i},${o} 0,0,0 ${n},0`,"l0,"+-r].join(" ")),"createCylinderPathD"),ue=(0,s.K2)(((e,t,n,r,i,o)=>[`M${e},${t+o}`,`M${e+n},${t+o}`,`a${i},${o} 0,0,0 ${-n},0`,`l0,${r}`,`a${i},${o} 0,0,0 ${n},0`,"l0,"+-r].join(" ")),"createOuterCylinderPathD"),he=(0,s.K2)(((e,t,n,r,i,o)=>[`M${e-n/2},${-r/2}`,`a${i},${o} 0,0,0 ${n},0`].join(" ")),"createInnerCylinderPathD");async function de(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:s}=await u(e,t,f(t)),l=Math.max(o.width+t.padding,t.width??0),h=l/2,p=h/(2.5+l/50),g=Math.max(o.height+p+t.padding,t.height??0);let m;const{cssStyles:y}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=ue(0,0,l,g,h,p),r=he(0,p,l,g,h,p),o=e.path(n,T(t,{})),a=e.path(r,T(t,{fill:"none"}));m=i.insert((()=>a),":first-child"),m=i.insert((()=>o),":first-child"),m.attr("class","basic label-container"),y&&m.attr("style",y)}else{const e=ce(0,0,l,g,h,p);m=i.insert("path",":first-child").attr("d",e).attr("class","basic label-container").attr("style",(0,a.KL)(y)).attr("style",r)}return m.attr("label-offset-y",p),m.attr("transform",`translate(${-l/2}, ${-(g/2+p)})`),d(t,m),s.attr("transform",`translate(${-o.width/2-(o.x-(o.left??0))}, ${-o.height/2+(t.padding??0)/1.5-(o.y-(o.top??0))})`),t.intersect=function(e){const n=U.rect(t,e),r=n.x-(t.x??0);if(0!=h&&(Math.abs(r)<(t.width??0)/2||Math.abs(r)==(t.width??0)/2&&Math.abs(n.y-(t.y??0))>(t.height??0)/2-p)){let i=p*p*(1-r*r/(h*h));i>0&&(i=Math.sqrt(i)),i=p-i,e.y-(t.y??0)>0&&(i=-i),n.y+=i}return n},i}async function fe(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=o.width+t.padding,l=o.height+t.padding,h=.2*l,p=-s/2,g=-l/2-h/2,{cssStyles:m}=t,y=c.A.svg(i),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const 
v=[{x:p,y:g+h},{x:-p,y:g+h},{x:-p,y:-g},{x:p,y:-g},{x:p,y:g},{x:-p,y:g},{x:-p,y:g+h}],x=y.polygon(v.map((e=>[e.x,e.y])),b),k=i.insert((()=>x),":first-child");return k.attr("class","basic label-container"),m&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",m),r&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",r),a.attr("transform",`translate(${p+(t.padding??0)/2-(o.x-(o.left??0))}, ${g+h+(t.padding??0)/2-(o.y-(o.top??0))})`),d(t,k),t.intersect=function(e){return U.rect(t,e)},i}async function pe(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,halfPadding:l}=await u(e,t,f(t)),h=o.width/2+l+5,p=o.width/2+l;let g;const{cssStyles:m}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{roughness:.2,strokeWidth:2.5}),r=T(t,{roughness:.2,strokeWidth:1.5}),o=e.circle(0,0,2*h,n),s=e.circle(0,0,2*p,r);g=i.insert("g",":first-child"),g.attr("class",(0,a.KL)(t.cssClasses)).attr("style",(0,a.KL)(m)),g.node()?.appendChild(o),g.node()?.appendChild(s)}else{g=i.insert("g",":first-child");const e=g.insert("circle",":first-child"),t=g.insert("circle");g.attr("class","basic label-container").attr("style",r),e.attr("class","outer-circle").attr("style",r).attr("r",h).attr("cx",0).attr("cy",0),t.attr("class","inner-circle").attr("style",r).attr("r",p).attr("cx",0).attr("cy",0)}return d(t,g),t.intersect=function(e){return s.Rm.info("DoubleCircle intersect",t,h,e),U.circle(t,h,e)},i}function ge(e,t,n){let{config:{themeVariables:r}}=n;const{labelStyles:i,nodeStyles:o}=A(t);t.label="",t.labelStyle=i;const a=e.insert("g").attr("class",f(t)).attr("id",t.domId??t.id),{cssStyles:l}=t,u=c.A.svg(a),{nodeBorder:h}=r,p=T(t,{fillStyle:"solid"});"handDrawn"!==t.look&&(p.roughness=0);const g=u.circle(0,0,14,p),m=a.insert((()=>g),":first-child");return m.selectAll("path").attr("style",`fill: ${h} 
!important;`),l&&l.length>0&&"handDrawn"!==t.look&&m.selectAll("path").attr("style",l),o&&"handDrawn"!==t.look&&m.selectAll("path").attr("style",o),d(t,m),t.intersect=function(e){s.Rm.info("filledCircle intersect",t,{radius:7,point:e});return U.circle(t,7,e)},a}async function me(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),l=o.width+(t.padding??0),h=l+o.height,g=l+o.height,m=[{x:0,y:-h},{x:g,y:-h},{x:g/2,y:0}],{cssStyles:y}=t,b=c.A.svg(i),v=T(t,{});"handDrawn"!==t.look&&(v.roughness=0,v.fillStyle="solid");const x=p(m),k=b.path(x,v),w=i.insert((()=>k),":first-child").attr("transform",`translate(${-h/2}, ${h/2})`);return y&&"handDrawn"!==t.look&&w.selectChildren("path").attr("style",y),r&&"handDrawn"!==t.look&&w.selectChildren("path").attr("style",r),t.width=l,t.height=h,d(t,w),a.attr("transform",`translate(${-o.width/2-(o.x-(o.left??0))}, ${-h/2+(t.padding??0)/2+(o.y-(o.top??0))})`),t.intersect=function(e){return s.Rm.info("Triangle intersect",t,m,e),U.polygon(t,m,e)},i}function ye(e,t,n){let{dir:r,config:{state:i,themeVariables:o}}=n;const{nodeStyles:a}=A(t);t.label="";const s=e.insert("g").attr("class",f(t)).attr("id",t.domId??t.id),{cssStyles:l}=t;let u=Math.max(70,t?.width??0),h=Math.max(10,t?.height??0);"LR"===r&&(u=Math.max(10,t?.width??0),h=Math.max(70,t?.height??0));const p=-1*u/2,g=-1*h/2,m=c.A.svg(s),y=T(t,{stroke:o.lineColor,fill:o.lineColor});"handDrawn"!==t.look&&(y.roughness=0,y.fillStyle="solid");const b=m.rectangle(p,g,u,h,y),v=s.insert((()=>b),":first-child");l&&"handDrawn"!==t.look&&v.selectAll("path").attr("style",l),a&&"handDrawn"!==t.look&&v.selectAll("path").attr("style",a),d(t,v);const x=i?.padding??0;return t.width&&t.height&&(t.width+=x/2||0,t.height+=x/2||0),t.intersect=function(e){return U.rect(t,e)},s}async function be(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await 
u(e,t,f(t)),a=Math.max(80,o.width+2*(t.padding??0),t?.width??0),l=Math.max(50,o.height+2*(t.padding??0),t?.height??0),h=l/2,{cssStyles:g}=t,y=c.A.svg(i),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const v=[{x:-a/2,y:-l/2},{x:a/2-h,y:-l/2},...m(-a/2+h,0,h,50,90,270),{x:a/2-h,y:l/2},{x:-a/2,y:l/2}],x=p(v),k=y.path(x,b),w=i.insert((()=>k),":first-child");return w.attr("class","basic label-container"),g&&"handDrawn"!==t.look&&w.selectChildren("path").attr("style",g),r&&"handDrawn"!==t.look&&w.selectChildren("path").attr("style",r),d(t,w),t.intersect=function(e){s.Rm.info("Pill intersect",t,{radius:h,point:e});return U.polygon(t,v,e)},i}(0,s.K2)(de,"cylinder"),(0,s.K2)(fe,"dividedRectangle"),(0,s.K2)(pe,"doublecircle"),(0,s.K2)(ge,"filledCircle"),(0,s.K2)(me,"flippedTriangle"),(0,s.K2)(ye,"forkJoin"),(0,s.K2)(be,"halfRoundedRectangle");var ve=(0,s.K2)(((e,t,n,r,i)=>[`M${e+i},${t}`,`L${e+n-i},${t}`,`L${e+n},${t-r/2}`,`L${e+n-i},${t-r}`,`L${e+i},${t-r}`,`L${e},${t-r/2}`,"Z"].join(" ")),"createHexagonPathD");async function xe(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=o.height+t.padding,s=a/4,l=o.width+2*s+t.padding,h=[{x:s,y:0},{x:l-s,y:0},{x:l,y:-a/2},{x:l-s,y:-a},{x:s,y:-a},{x:0,y:-a/2}];let p;const{cssStyles:g}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=ve(0,0,l,a,s),o=e.path(r,n);p=i.insert((()=>o),":first-child").attr("transform",`translate(${-l/2}, ${a/2})`),g&&p.attr("style",g)}else p=X(i,l,a,h);return r&&p.attr("style",r),t.width=l,t.height=a,d(t,p),t.intersect=function(e){return U.polygon(t,h,e)},i}async function ke(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.label="",t.labelStyle=n;const{shapeSvg:i}=await u(e,t,f(t)),o=Math.max(30,t?.width??0),a=Math.max(30,t?.height??0),{cssStyles:l}=t,h=c.A.svg(i),g=T(t,{});"handDrawn"!==t.look&&(g.roughness=0,g.fillStyle="solid");const 
m=[{x:0,y:0},{x:o,y:0},{x:0,y:a},{x:o,y:a}],y=p(m),b=h.path(y,g),v=i.insert((()=>b),":first-child");return v.attr("class","basic label-container"),l&&"handDrawn"!==t.look&&v.selectChildren("path").attr("style",l),r&&"handDrawn"!==t.look&&v.selectChildren("path").attr("style",r),v.attr("transform",`translate(${-o/2}, ${-a/2})`),d(t,v),t.intersect=function(e){s.Rm.info("Pill intersect",t,{points:m});return U.polygon(t,m,e)},i}async function we(e,t,n){let{config:{themeVariables:r,flowchart:o}}=n;const{labelStyles:a}=A(t);t.labelStyle=a;const l=t.assetHeight??48,h=t.assetWidth??48,f=Math.max(l,h),p=o?.wrappingWidth;t.width=Math.max(f,p??0);const{shapeSvg:g,bbox:m,label:y}=await u(e,t,"icon-shape default"),b="t"===t.pos,v=f,x=f,{nodeBorder:k}=r,{stylesMap:w}=S(t),C=-x/2,_=-v/2,E=t.label?8:0,F=c.A.svg(g),M=T(t,{stroke:"none",fill:"none"});"handDrawn"!==t.look&&(M.roughness=0,M.fillStyle="solid");const L=F.rectangle(C,_,x,v,M),P=Math.max(x,m.width),O=v+m.height+E,$=F.rectangle(-P/2,-O/2,P,O,{...M,fill:"transparent",stroke:"none"}),B=g.insert((()=>L),":first-child"),D=g.insert((()=>$));if(t.icon){const e=g.append("g");e.html(`${await(0,i.WY)(t.icon,{height:f,width:f,fallbackPrefix:""})}`);const n=e.node().getBBox(),r=n.width,o=n.height,a=n.x,s=n.y;e.attr("transform",`translate(${-r/2-a},${b?m.height/2+E/2-o/2-s:-m.height/2-E/2-o/2-s})`),e.attr("style",`color: ${w.get("stroke")??k};`)}return y.attr("transform",`translate(${-m.width/2-(m.x-(m.left??0))},${b?-O/2:O/2-m.height})`),B.attr("transform",`translate(0,${b?m.height/2+E/2:-m.height/2-E/2})`),d(t,D),t.intersect=function(e){if(s.Rm.info("iconSquare intersect",t,e),!t.label)return U.rect(t,e);const n=t.x??0,r=t.y??0,i=t.height??0;let 
o=[];o=b?[{x:n-m.width/2,y:r-i/2},{x:n+m.width/2,y:r-i/2},{x:n+m.width/2,y:r-i/2+m.height+E},{x:n+x/2,y:r-i/2+m.height+E},{x:n+x/2,y:r+i/2},{x:n-x/2,y:r+i/2},{x:n-x/2,y:r-i/2+m.height+E},{x:n-m.width/2,y:r-i/2+m.height+E}]:[{x:n-x/2,y:r-i/2},{x:n+x/2,y:r-i/2},{x:n+x/2,y:r-i/2+v},{x:n+m.width/2,y:r-i/2+v},{x:n+m.width/2/2,y:r+i/2},{x:n-m.width/2,y:r+i/2},{x:n-m.width/2,y:r-i/2+v},{x:n-x/2,y:r-i/2+v}];return U.polygon(t,o,e)},g}async function Se(e,t,n){let{config:{themeVariables:r,flowchart:o}}=n;const{labelStyles:a}=A(t);t.labelStyle=a;const l=t.assetHeight??48,h=t.assetWidth??48,f=Math.max(l,h),p=o?.wrappingWidth;t.width=Math.max(f,p??0);const{shapeSvg:g,bbox:m,label:y}=await u(e,t,"icon-shape default"),b=t.label?8:0,v="t"===t.pos,{nodeBorder:x,mainBkg:k}=r,{stylesMap:w}=S(t),C=c.A.svg(g),_=T(t,{});"handDrawn"!==t.look&&(_.roughness=0,_.fillStyle="solid");const E=w.get("fill");_.stroke=E??k;const F=g.append("g");t.icon&&F.html(`${await(0,i.WY)(t.icon,{height:f,width:f,fallbackPrefix:""})}`);const M=F.node().getBBox(),L=M.width,P=M.height,O=M.x,$=M.y,B=Math.max(L,P)*Math.SQRT2+40,D=C.circle(0,0,B,_),z=Math.max(B,m.width),I=B+m.height+b,N=C.rectangle(-z/2,-I/2,z,I,{..._,fill:"transparent",stroke:"none"}),R=g.insert((()=>D),":first-child"),j=g.insert((()=>N));return F.attr("transform",`translate(${-L/2-O},${v?m.height/2+b/2-P/2-$:-m.height/2-b/2-P/2-$})`),F.attr("style",`color: ${w.get("stroke")??x};`),y.attr("transform",`translate(${-m.width/2-(m.x-(m.left??0))},${v?-I/2:I/2-m.height})`),R.attr("transform",`translate(0,${v?m.height/2+b/2:-m.height/2-b/2})`),d(t,j),t.intersect=function(e){s.Rm.info("iconSquare intersect",t,e);return U.rect(t,e)},g}async function Ce(e,t,n){let{config:{themeVariables:r,flowchart:o}}=n;const{labelStyles:a}=A(t);t.labelStyle=a;const l=t.assetHeight??48,h=t.assetWidth??48,f=Math.max(l,h),p=o?.wrappingWidth;t.width=Math.max(f,p??0);const{shapeSvg:g,bbox:m,halfPadding:y,label:b}=await u(e,t,"icon-shape 
default"),v="t"===t.pos,x=f+2*y,w=f+2*y,{nodeBorder:C,mainBkg:_}=r,{stylesMap:E}=S(t),F=-w/2,M=-x/2,L=t.label?8:0,P=c.A.svg(g),O=T(t,{});"handDrawn"!==t.look&&(O.roughness=0,O.fillStyle="solid");const $=E.get("fill");O.stroke=$??_;const B=P.path(k(F,M,w,x,5),O),D=Math.max(w,m.width),z=x+m.height+L,I=P.rectangle(-D/2,-z/2,D,z,{...O,fill:"transparent",stroke:"none"}),N=g.insert((()=>B),":first-child").attr("class","icon-shape2"),R=g.insert((()=>I));if(t.icon){const e=g.append("g");e.html(`${await(0,i.WY)(t.icon,{height:f,width:f,fallbackPrefix:""})}`);const n=e.node().getBBox(),r=n.width,o=n.height,a=n.x,s=n.y;e.attr("transform",`translate(${-r/2-a},${v?m.height/2+L/2-o/2-s:-m.height/2-L/2-o/2-s})`),e.attr("style",`color: ${E.get("stroke")??C};`)}return b.attr("transform",`translate(${-m.width/2-(m.x-(m.left??0))},${v?-z/2:z/2-m.height})`),N.attr("transform",`translate(0,${v?m.height/2+L/2:-m.height/2-L/2})`),d(t,R),t.intersect=function(e){if(s.Rm.info("iconSquare intersect",t,e),!t.label)return U.rect(t,e);const n=t.x??0,r=t.y??0,i=t.height??0;let o=[];o=v?[{x:n-m.width/2,y:r-i/2},{x:n+m.width/2,y:r-i/2},{x:n+m.width/2,y:r-i/2+m.height+L},{x:n+w/2,y:r-i/2+m.height+L},{x:n+w/2,y:r+i/2},{x:n-w/2,y:r+i/2},{x:n-w/2,y:r-i/2+m.height+L},{x:n-m.width/2,y:r-i/2+m.height+L}]:[{x:n-w/2,y:r-i/2},{x:n+w/2,y:r-i/2},{x:n+w/2,y:r-i/2+x},{x:n+m.width/2,y:r-i/2+x},{x:n+m.width/2/2,y:r+i/2},{x:n-m.width/2,y:r+i/2},{x:n-m.width/2,y:r-i/2+x},{x:n-w/2,y:r-i/2+x}];return U.polygon(t,o,e)},g}async function _e(e,t,n){let{config:{themeVariables:r,flowchart:o}}=n;const{labelStyles:a}=A(t);t.labelStyle=a;const l=t.assetHeight??48,h=t.assetWidth??48,f=Math.max(l,h),p=o?.wrappingWidth;t.width=Math.max(f,p??0);const{shapeSvg:g,bbox:m,halfPadding:y,label:b}=await u(e,t,"icon-shape default"),v="t"===t.pos,x=f+2*y,w=f+2*y,{nodeBorder:C,mainBkg:_}=r,{stylesMap:E}=S(t),F=-w/2,M=-x/2,L=t.label?8:0,P=c.A.svg(g),O=T(t,{});"handDrawn"!==t.look&&(O.roughness=0,O.fillStyle="solid");const 
$=E.get("fill");O.stroke=$??_;const B=P.path(k(F,M,w,x,.1),O),D=Math.max(w,m.width),z=x+m.height+L,I=P.rectangle(-D/2,-z/2,D,z,{...O,fill:"transparent",stroke:"none"}),N=g.insert((()=>B),":first-child"),R=g.insert((()=>I));if(t.icon){const e=g.append("g");e.html(`${await(0,i.WY)(t.icon,{height:f,width:f,fallbackPrefix:""})}`);const n=e.node().getBBox(),r=n.width,o=n.height,a=n.x,s=n.y;e.attr("transform",`translate(${-r/2-a},${v?m.height/2+L/2-o/2-s:-m.height/2-L/2-o/2-s})`),e.attr("style",`color: ${E.get("stroke")??C};`)}return b.attr("transform",`translate(${-m.width/2-(m.x-(m.left??0))},${v?-z/2:z/2-m.height})`),N.attr("transform",`translate(0,${v?m.height/2+L/2:-m.height/2-L/2})`),d(t,R),t.intersect=function(e){if(s.Rm.info("iconSquare intersect",t,e),!t.label)return U.rect(t,e);const n=t.x??0,r=t.y??0,i=t.height??0;let o=[];o=v?[{x:n-m.width/2,y:r-i/2},{x:n+m.width/2,y:r-i/2},{x:n+m.width/2,y:r-i/2+m.height+L},{x:n+w/2,y:r-i/2+m.height+L},{x:n+w/2,y:r+i/2},{x:n-w/2,y:r+i/2},{x:n-w/2,y:r-i/2+m.height+L},{x:n-m.width/2,y:r-i/2+m.height+L}]:[{x:n-w/2,y:r-i/2},{x:n+w/2,y:r-i/2},{x:n+w/2,y:r-i/2+x},{x:n+m.width/2,y:r-i/2+x},{x:n+m.width/2/2,y:r+i/2},{x:n-m.width/2,y:r+i/2},{x:n-m.width/2,y:r-i/2+x},{x:n-w/2,y:r-i/2+x}];return U.polygon(t,o,e)},g}async function Ae(e,t,n){let{config:{flowchart:r}}=n;const i=new Image;i.src=t?.img??"",await i.decode();const o=Number(i.naturalWidth.toString().replace("px","")),a=Number(i.naturalHeight.toString().replace("px",""));t.imageAspectRatio=o/a;const{labelStyles:l}=A(t);t.labelStyle=l;const h=r?.wrappingWidth;t.defaultWidth=r?.wrappingWidth;const f=Math.max(t.label?h??0:0,t?.assetWidth??o),p="on"===t.constraint&&t?.assetHeight?t.assetHeight*t.imageAspectRatio:f,g="on"===t.constraint?p/t.imageAspectRatio:t?.assetHeight??a;t.width=Math.max(p,h??0);const{shapeSvg:m,bbox:y,label:b}=await u(e,t,"image-shape 
default"),v="t"===t.pos,x=-p/2,k=-g/2,w=t.label?8:0,S=c.A.svg(m),C=T(t,{});"handDrawn"!==t.look&&(C.roughness=0,C.fillStyle="solid");const _=S.rectangle(x,k,p,g,C),E=Math.max(p,y.width),F=g+y.height+w,M=S.rectangle(-E/2,-F/2,E,F,{...C,fill:"none",stroke:"none"}),L=m.insert((()=>_),":first-child"),P=m.insert((()=>M));if(t.img){const e=m.append("image");e.attr("href",t.img),e.attr("width",p),e.attr("height",g),e.attr("preserveAspectRatio","none"),e.attr("transform",`translate(${-p/2},${v?F/2-g:-F/2})`)}return b.attr("transform",`translate(${-y.width/2-(y.x-(y.left??0))},${v?-g/2-y.height/2-w/2:g/2-y.height/2+w/2})`),L.attr("transform",`translate(0,${v?y.height/2+w/2:-y.height/2-w/2})`),d(t,P),t.intersect=function(e){if(s.Rm.info("iconSquare intersect",t,e),!t.label)return U.rect(t,e);const n=t.x??0,r=t.y??0,i=t.height??0;let o=[];o=v?[{x:n-y.width/2,y:r-i/2},{x:n+y.width/2,y:r-i/2},{x:n+y.width/2,y:r-i/2+y.height+w},{x:n+p/2,y:r-i/2+y.height+w},{x:n+p/2,y:r+i/2},{x:n-p/2,y:r+i/2},{x:n-p/2,y:r-i/2+y.height+w},{x:n-y.width/2,y:r-i/2+y.height+w}]:[{x:n-p/2,y:r-i/2},{x:n+p/2,y:r-i/2},{x:n+p/2,y:r-i/2+g},{x:n+y.width/2,y:r-i/2+g},{x:n+y.width/2/2,y:r+i/2},{x:n-y.width/2,y:r+i/2},{x:n-y.width/2,y:r-i/2+g},{x:n-p/2,y:r-i/2+g}];return U.polygon(t,o,e)},m}async function Te(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=Math.max(o.width+2*(t.padding??0),t?.width??0),s=Math.max(o.height+2*(t.padding??0),t?.height??0),l=[{x:0,y:0},{x:a,y:0},{x:a+3*s/6,y:-s},{x:-3*s/6,y:-s}];let h;const{cssStyles:g}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=p(l),o=e.path(r,n);h=i.insert((()=>o),":first-child").attr("transform",`translate(${-a/2}, ${s/2})`),g&&h.attr("style",g)}else h=X(i,a,s,l);return r&&h.attr("style",r),t.width=a,t.height=s,d(t,h),t.intersect=function(e){return U.polygon(t,l,e)},i}async function Ee(e,t,n){const{labelStyles:r,nodeStyles:i}=A(t);t.labelStyle=r;const{shapeSvg:o,bbox:s}=await 
u(e,t,f(t)),l=Math.max(s.width+2*n.labelPaddingX,t?.width||0),h=Math.max(s.height+2*n.labelPaddingY,t?.height||0),p=-l/2,g=-h/2;let m,{rx:y,ry:b}=t;const{cssStyles:v}=t;if(n?.rx&&n.ry&&(y=n.rx,b=n.ry),"handDrawn"===t.look){const e=c.A.svg(o),n=T(t,{}),r=y||b?e.path(k(p,g,l,h,y||0),n):e.rectangle(p,g,l,h,n);m=o.insert((()=>r),":first-child"),m.attr("class","basic label-container").attr("style",(0,a.KL)(v))}else m=o.insert("rect",":first-child"),m.attr("class","basic label-container").attr("style",i).attr("rx",(0,a.KL)(y)).attr("ry",(0,a.KL)(b)).attr("x",p).attr("y",g).attr("width",l).attr("height",h);return d(t,m),t.intersect=function(e){return U.rect(t,e)},o}async function Fe(e,t){const{shapeSvg:n,bbox:r,label:i}=await u(e,t,"label"),o=n.insert("rect",":first-child");return o.attr("width",.1).attr("height",.1),n.attr("class","label edgeLabel"),i.attr("transform",`translate(${-r.width/2-(r.x-(r.left??0))}, ${-r.height/2-(r.y-(r.top??0))})`),d(t,o),t.intersect=function(e){return U.rect(t,e)},n}async function Me(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=Math.max(o.width+(t.padding??0),t?.width??0),s=Math.max(o.height+(t.padding??0),t?.height??0),l=[{x:0,y:0},{x:a+3*s/6,y:0},{x:a,y:-s},{x:-3*s/6,y:-s}];let h;const{cssStyles:g}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=p(l),o=e.path(r,n);h=i.insert((()=>o),":first-child").attr("transform",`translate(${-a/2}, ${s/2})`),g&&h.attr("style",g)}else h=X(i,a,s,l);return r&&h.attr("style",r),t.width=a,t.height=s,d(t,h),t.intersect=function(e){return U.polygon(t,l,e)},i}async function Le(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=Math.max(o.width+(t.padding??0),t?.width??0),s=Math.max(o.height+(t.padding??0),t?.height??0),l=[{x:-3*s/6,y:0},{x:a,y:0},{x:a+3*s/6,y:-s},{x:0,y:-s}];let h;const{cssStyles:g}=t;if("handDrawn"===t.look){const 
e=c.A.svg(i),n=T(t,{}),r=p(l),o=e.path(r,n);h=i.insert((()=>o),":first-child").attr("transform",`translate(${-a/2}, ${s/2})`),g&&h.attr("style",g)}else h=X(i,a,s,l);return r&&h.attr("style",r),t.width=a,t.height=s,d(t,h),t.intersect=function(e){return U.polygon(t,l,e)},i}function Pe(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.label="",t.labelStyle=n;const i=e.insert("g").attr("class",f(t)).attr("id",t.domId??t.id),{cssStyles:o}=t,a=Math.max(35,t?.width??0),l=Math.max(35,t?.height??0),u=[{x:a,y:0},{x:0,y:l+3.5},{x:a-14,y:l+3.5},{x:0,y:2*l},{x:a,y:l-3.5},{x:14,y:l-3.5}],h=c.A.svg(i),g=T(t,{});"handDrawn"!==t.look&&(g.roughness=0,g.fillStyle="solid");const m=p(u),y=h.path(m,g),b=i.insert((()=>y),":first-child");return o&&"handDrawn"!==t.look&&b.selectAll("path").attr("style",o),r&&"handDrawn"!==t.look&&b.selectAll("path").attr("style",r),b.attr("transform",`translate(-${a/2},${-l})`),d(t,b),t.intersect=function(e){s.Rm.info("lightningBolt intersect",t,e);return U.polygon(t,u,e)},i}(0,s.K2)(xe,"hexagon"),(0,s.K2)(ke,"hourglass"),(0,s.K2)(we,"icon"),(0,s.K2)(Se,"iconCircle"),(0,s.K2)(Ce,"iconRounded"),(0,s.K2)(_e,"iconSquare"),(0,s.K2)(Ae,"imageSquare"),(0,s.K2)(Te,"inv_trapezoid"),(0,s.K2)(Ee,"drawRect"),(0,s.K2)(Fe,"labelRect"),(0,s.K2)(Me,"lean_left"),(0,s.K2)(Le,"lean_right"),(0,s.K2)(Pe,"lightningBolt");var Oe=(0,s.K2)(((e,t,n,r,i,o,a)=>[`M${e},${t+o}`,`a${i},${o} 0,0,0 ${n},0`,`a${i},${o} 0,0,0 ${-n},0`,`l0,${r}`,`a${i},${o} 0,0,0 ${n},0`,"l0,"+-r,`M${e},${t+o+a}`,`a${i},${o} 0,0,0 ${n},0`].join(" ")),"createCylinderPathD"),$e=(0,s.K2)(((e,t,n,r,i,o,a)=>[`M${e},${t+o}`,`M${e+n},${t+o}`,`a${i},${o} 0,0,0 ${-n},0`,`l0,${r}`,`a${i},${o} 0,0,0 ${n},0`,"l0,"+-r,`M${e},${t+o+a}`,`a${i},${o} 0,0,0 ${n},0`].join(" ")),"createOuterCylinderPathD"),Be=(0,s.K2)(((e,t,n,r,i,o)=>[`M${e-n/2},${-r/2}`,`a${i},${o} 0,0,0 ${n},0`].join(" ")),"createInnerCylinderPathD");async function 
De(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:s}=await u(e,t,f(t)),l=Math.max(o.width+(t.padding??0),t.width??0),h=l/2,p=h/(2.5+l/50),g=Math.max(o.height+p+(t.padding??0),t.height??0),m=.1*g;let y;const{cssStyles:b}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=$e(0,0,l,g,h,p,m),r=Be(0,p,l,g,h,p),o=T(t,{}),a=e.path(n,o),s=e.path(r,o);i.insert((()=>s),":first-child").attr("class","line"),y=i.insert((()=>a),":first-child"),y.attr("class","basic label-container"),b&&y.attr("style",b)}else{const e=Oe(0,0,l,g,h,p,m);y=i.insert("path",":first-child").attr("d",e).attr("class","basic label-container").attr("style",(0,a.KL)(b)).attr("style",r)}return y.attr("label-offset-y",p),y.attr("transform",`translate(${-l/2}, ${-(g/2+p)})`),d(t,y),s.attr("transform",`translate(${-o.width/2-(o.x-(o.left??0))}, ${-o.height/2+p-(o.y-(o.top??0))})`),t.intersect=function(e){const n=U.rect(t,e),r=n.x-(t.x??0);if(0!=h&&(Math.abs(r)<(t.width??0)/2||Math.abs(r)==(t.width??0)/2&&Math.abs(n.y-(t.y??0))>(t.height??0)/2-p)){let i=p*p*(1-r*r/(h*h));i>0&&(i=Math.sqrt(i)),i=p-i,e.y-(t.y??0)>0&&(i=-i),n.y+=i}return n},i}async function ze(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=l/4,p=l+h,{cssStyles:m}=t,y=c.A.svg(i),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const v=[{x:-s/2-s/2*.1,y:-p/2},{x:-s/2-s/2*.1,y:p/2},...g(-s/2-s/2*.1,p/2,s/2+s/2*.1,p/2,h,.8),{x:s/2+s/2*.1,y:-p/2},{x:-s/2-s/2*.1,y:-p/2},{x:-s/2,y:-p/2},{x:-s/2,y:p/2*1.1},{x:-s/2,y:-p/2}],x=y.polygon(v.map((e=>[e.x,e.y])),b),k=i.insert((()=>x),":first-child");return k.attr("class","basic 
label-container"),m&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",m),r&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",r),k.attr("transform",`translate(0,${-h/2})`),a.attr("transform",`translate(${-s/2+(t.padding??0)+s/2*.1/2-(o.x-(o.left??0))},${-l/2+(t.padding??0)-h/2-(o.y-(o.top??0))})`),d(t,k),t.intersect=function(e){return U.polygon(t,v,e)},i}async function Ie(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=-s/2,g=-l/2,{cssStyles:m}=t,y=c.A.svg(i),b=T(t,{}),v=[{x:h-5,y:g+5},{x:h-5,y:g+l+5},{x:h+s-5,y:g+l+5},{x:h+s-5,y:g+l},{x:h+s,y:g+l},{x:h+s,y:g+l-5},{x:h+s+5,y:g+l-5},{x:h+s+5,y:g-5},{x:h+5,y:g-5},{x:h+5,y:g},{x:h,y:g},{x:h,y:g+5}],x=[{x:h,y:g+5},{x:h+s-5,y:g+5},{x:h+s-5,y:g+l},{x:h+s,y:g+l},{x:h+s,y:g},{x:h,y:g}];"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const k=p(v),w=y.path(k,b),S=p(x),C=y.path(S,{...b,fill:"none"}),_=i.insert((()=>C),":first-child");return _.insert((()=>w),":first-child"),_.attr("class","basic label-container"),m&&"handDrawn"!==t.look&&_.selectAll("path").attr("style",m),r&&"handDrawn"!==t.look&&_.selectAll("path").attr("style",r),a.attr("transform",`translate(${-o.width/2-5-(o.x-(o.left??0))}, ${-o.height/2+5-(o.y-(o.top??0))})`),d(t,_),t.intersect=function(e){return U.polygon(t,v,e)},i}async function Ne(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await 
u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=l/4,m=l+h,y=-s/2,b=-m/2,{cssStyles:v}=t,x=g(y-5,b+m+5,y+s-5,b+m+5,h,.8),k=x?.[x.length-1],w=[{x:y-5,y:b+5},{x:y-5,y:b+m+5},...x,{x:y+s-5,y:k.y-5},{x:y+s,y:k.y-5},{x:y+s,y:k.y-10},{x:y+s+5,y:k.y-10},{x:y+s+5,y:b-5},{x:y+5,y:b-5},{x:y+5,y:b},{x:y,y:b},{x:y,y:b+5}],S=[{x:y,y:b+5},{x:y+s-5,y:b+5},{x:y+s-5,y:k.y-5},{x:y+s,y:k.y-5},{x:y+s,y:b},{x:y,y:b}],C=c.A.svg(i),_=T(t,{});"handDrawn"!==t.look&&(_.roughness=0,_.fillStyle="solid");const E=p(w),F=C.path(E,_),M=p(S),L=C.path(M,_),P=i.insert((()=>F),":first-child");return P.insert((()=>L)),P.attr("class","basic label-container"),v&&"handDrawn"!==t.look&&P.selectAll("path").attr("style",v),r&&"handDrawn"!==t.look&&P.selectAll("path").attr("style",r),P.attr("transform",`translate(0,${-h/2})`),a.attr("transform",`translate(${-o.width/2-5-(o.x-(o.left??0))}, ${-o.height/2+5-h/2-(o.y-(o.top??0))})`),d(t,P),t.intersect=function(e){return U.polygon(t,w,e)},i}async function Re(e,t,n){let{config:{themeVariables:r}}=n;const{labelStyles:i,nodeStyles:o}=A(t);t.labelStyle=i;t.useHtmlLabels||!1!==(0,s.zj)().flowchart?.htmlLabels||(t.centerLabel=!0);const{shapeSvg:a,bbox:l}=await u(e,t,f(t)),h=Math.max(l.width+2*(t.padding??0),t?.width??0),p=Math.max(l.height+2*(t.padding??0),t?.height??0),g=-h/2,m=-p/2,{cssStyles:y}=t,b=c.A.svg(a),v=T(t,{fill:r.noteBkgColor,stroke:r.noteBorderColor});"handDrawn"!==t.look&&(v.roughness=0,v.fillStyle="solid");const x=b.rectangle(g,m,h,p,v),k=a.insert((()=>x),":first-child");return k.attr("class","basic label-container"),y&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",y),o&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",o),d(t,k),t.intersect=function(e){return U.rect(t,e)},a}(0,s.K2)(De,"linedCylinder"),(0,s.K2)(ze,"linedWaveEdgedRect"),(0,s.K2)(Ie,"multiRect"),(0,s.K2)(Ne,"multiWaveEdgedRectangle"),(0,s.K2)(Re,"note");var 
je=(0,s.K2)(((e,t,n)=>[`M${e+n/2},${t}`,`L${e+n},${t-n/2}`,`L${e+n/2},${t-n}`,`L${e},${t-n/2}`,"Z"].join(" ")),"createDecisionBoxPathD");async function qe(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=o.width+t.padding+(o.height+t.padding),l=[{x:a/2,y:0},{x:a,y:-a/2},{x:a/2,y:-a},{x:0,y:-a/2}];let h;const{cssStyles:p}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=je(0,0,a),o=e.path(r,n);h=i.insert((()=>o),":first-child").attr("transform",`translate(${-a/2}, ${a/2})`),p&&h.attr("style",p)}else h=X(i,a,a,l);return r&&h.attr("style",r),d(t,h),t.intersect=function(e){return s.Rm.debug("APA12 Intersect called SPLIT\npoint:",e,"\nnode:\n",t,"\nres:",U.polygon(t,l,e)),U.polygon(t,l,e)},i}async function He(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=-Math.max(o.width+(t.padding??0),t?.width??0)/2,l=-Math.max(o.height+(t.padding??0),t?.height??0)/2,h=l/2,g=[{x:s+h,y:l},{x:s,y:0},{x:s+h,y:-l},{x:-s,y:-l},{x:-s,y:l}],{cssStyles:m}=t,y=c.A.svg(i),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const v=p(g),x=y.path(v,b),k=i.insert((()=>x),":first-child");return k.attr("class","basic label-container"),m&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",m),r&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",r),k.attr("transform",`translate(${-h/2},0)`),a.attr("transform",`translate(${-h/2-o.width/2-(o.x-(o.left??0))}, ${-o.height/2-(o.y-(o.top??0))})`),d(t,k),t.intersect=function(e){return U.polygon(t,g,e)},i}async function We(e,t){const{labelStyles:n,nodeStyles:r}=A(t);let i;t.labelStyle=n,i=t.cssClasses?"node "+t.cssClasses:"node default";const o=e.insert("g").attr("class",i).attr("id",t.domId||t.id),a=o.insert("g"),u=o.insert("g").attr("class","label").attr("style",r),h=t.description,f=t.label,p=u.node().appendChild(await x(f,t.labelStyle,!0,!0));let 
g={width:0,height:0};if((0,s._3)((0,s.D7)()?.flowchart?.htmlLabels)){const e=p.children[0],t=(0,l.Ltv)(p);g=e.getBoundingClientRect(),t.attr("width",g.width),t.attr("height",g.height)}s.Rm.info("Text 2",h);const m=h||[],y=p.getBBox(),b=u.node().appendChild(await x(m.join?m.join("
    "):m,t.labelStyle,!0,!0)),v=b.children[0],w=(0,l.Ltv)(b);g=v.getBoundingClientRect(),w.attr("width",g.width),w.attr("height",g.height);const S=(t.padding||0)/2;(0,l.Ltv)(b).attr("transform","translate( "+(g.width>y.width?0:(y.width-g.width)/2)+", "+(y.height+S+5)+")"),(0,l.Ltv)(p).attr("transform","translate( "+(g.width(s.Rm.debug("Rough node insert CXC",r),i)),":first-child"),M=o.insert((()=>(s.Rm.debug("Rough node insert CXC",r),r)),":first-child")}else M=a.insert("rect",":first-child"),L=a.insert("line"),M.attr("class","outer title-state").attr("style",r).attr("x",-g.width/2-S).attr("y",-g.height/2-S).attr("width",g.width+(t.padding||0)).attr("height",g.height+(t.padding||0)),L.attr("class","divider").attr("x1",-g.width/2-S).attr("x2",g.width/2+S).attr("y1",-g.height/2-S+y.height+S).attr("y2",-g.height/2-S+y.height+S);return d(t,M),t.intersect=function(e){return U.rect(t,e)},o}async function Ke(e,t){return Ee(e,t,{rx:5,ry:5,classes:"",labelPaddingX:1*(t?.padding||0),labelPaddingY:1*(t?.padding||0)})}async function Ue(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:s}=await u(e,t,f(t)),l=t?.padding??0,h=Math.max(o.width+2*(t.padding??0),t?.width??0),p=Math.max(o.height+2*(t.padding??0),t?.height??0),g=-o.width/2-l,m=-o.height/2-l,{cssStyles:y}=t,b=c.A.svg(i),v=T(t,{});"handDrawn"!==t.look&&(v.roughness=0,v.fillStyle="solid");const x=[{x:g,y:m},{x:g+h+8,y:m},{x:g+h+8,y:m+p},{x:g-8,y:m+p},{x:g-8,y:m},{x:g,y:m},{x:g,y:m+p}],k=b.polygon(x.map((e=>[e.x,e.y])),v),w=i.insert((()=>k),":first-child");return w.attr("class","basic label-container").attr("style",(0,a.KL)(y)),r&&"handDrawn"!==t.look&&w.selectAll("path").attr("style",r),y&&"handDrawn"!==t.look&&w.selectAll("path").attr("style",r),s.attr("transform",`translate(${-h/2+4+(t.padding??0)-(o.x-(o.left??0))},${-p/2+(t.padding??0)-(o.y-(o.top??0))})`),d(t,w),t.intersect=function(e){return U.rect(t,e)},i}async function 
Ve(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=-s/2,g=-l/2,{cssStyles:m}=t,y=c.A.svg(i),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const v=[{x:h,y:g},{x:h,y:g+l},{x:h+s,y:g+l},{x:h+s,y:g-l/2}],x=p(v),k=y.path(x,b),w=i.insert((()=>k),":first-child");return w.attr("class","basic label-container"),m&&"handDrawn"!==t.look&&w.selectChildren("path").attr("style",m),r&&"handDrawn"!==t.look&&w.selectChildren("path").attr("style",r),w.attr("transform",`translate(0, ${l/4})`),a.attr("transform",`translate(${-s/2+(t.padding??0)-(o.x-(o.left??0))}, ${-l/4+(t.padding??0)-(o.y-(o.top??0))})`),d(t,w),t.intersect=function(e){return U.polygon(t,v,e)},i}async function Ye(e,t){return Ee(e,t,{rx:0,ry:0,classes:"",labelPaddingX:2*(t?.padding||0),labelPaddingY:1*(t?.padding||0)})}async function Ge(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),s=o.height+t.padding,l=o.width+s/4+t.padding;let h;const{cssStyles:p}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=k(-l/2,-s/2,l,s,s/2),o=e.path(r,n);h=i.insert((()=>o),":first-child"),h.attr("class","basic label-container").attr("style",(0,a.KL)(p))}else h=i.insert("rect",":first-child"),h.attr("class","basic label-container").attr("style",r).attr("rx",s/2).attr("ry",s/2).attr("x",-l/2).attr("y",-s/2).attr("width",l).attr("height",s);return d(t,h),t.intersect=function(e){return U.rect(t,e)},i}async function Xe(e,t){return Ee(e,t,{rx:5,ry:5,classes:"flowchart-node"})}function Qe(e,t,n){let{config:{themeVariables:r}}=n;const{labelStyles:i,nodeStyles:o}=A(t);t.labelStyle=i;const{cssStyles:a}=t,{lineColor:s,stateBorder:l,nodeBorder:u}=r,h=e.insert("g").attr("class","node default").attr("id",t.domId||t.id),f=c.A.svg(h),p=T(t,{});"handDrawn"!==t.look&&(p.roughness=0,p.fillStyle="solid");const 
g=f.circle(0,0,14,{...p,stroke:s,strokeWidth:2}),m=l??u,y=f.circle(0,0,5,{...p,fill:m,stroke:m,strokeWidth:2,fillStyle:"solid"}),b=h.insert((()=>g),":first-child");return b.insert((()=>y)),a&&b.selectAll("path").attr("style",a),o&&b.selectAll("path").attr("style",o),d(t,b),t.intersect=function(e){return U.circle(t,7,e)},h}function Ze(e,t,n){let{config:{themeVariables:r}}=n;const{lineColor:i}=r,o=e.insert("g").attr("class","node default").attr("id",t.domId||t.id);let a;if("handDrawn"===t.look){const e=c.A.svg(o).circle(0,0,14,w(i));a=o.insert((()=>e)),a.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14)}else a=o.insert("circle",":first-child"),a.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14);return d(t,a),t.intersect=function(e){return U.circle(t,7,e)},o}async function Je(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),s=(t?.padding||0)/2,l=o.width+t.padding,h=o.height+t.padding,p=-o.width/2-s,g=-o.height/2-s,m=[{x:0,y:0},{x:l,y:0},{x:l,y:-h},{x:0,y:-h},{x:0,y:0},{x:-8,y:0},{x:l+8,y:0},{x:l+8,y:-h},{x:-8,y:-h},{x:-8,y:0}];if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=e.rectangle(p-8,g,l+16,h,n),o=e.line(p,g,p,g+h,n),s=e.line(p+l,g,p+l,g+h,n);i.insert((()=>o),":first-child"),i.insert((()=>s),":first-child");const u=i.insert((()=>r),":first-child"),{cssStyles:f}=t;u.attr("class","basic label-container").attr("style",(0,a.KL)(f)),d(t,u)}else{const e=X(i,l,h,m);r&&e.attr("style",r),d(t,e)}return t.intersect=function(e){return U.polygon(t,m,e)},i}async function et(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await 
u(e,t,f(t)),a=Math.max(o.width+2*(t.padding??0),t?.width??0),s=Math.max(o.height+2*(t.padding??0),t?.height??0),l=-a/2,h=-s/2,g=.2*s,m=.2*s,{cssStyles:y}=t,b=c.A.svg(i),v=T(t,{}),x=[{x:l-g/2,y:h},{x:l+a+g/2,y:h},{x:l+a+g/2,y:h+s},{x:l-g/2,y:h+s}],k=[{x:l+a-g/2,y:h+s},{x:l+a+g/2,y:h+s},{x:l+a+g/2,y:h+s-m}];"handDrawn"!==t.look&&(v.roughness=0,v.fillStyle="solid");const w=p(x),S=b.path(w,v),C=p(k),_=b.path(C,{...v,fillStyle:"solid"}),E=i.insert((()=>_),":first-child");return E.insert((()=>S),":first-child"),E.attr("class","basic label-container"),y&&"handDrawn"!==t.look&&E.selectAll("path").attr("style",y),r&&"handDrawn"!==t.look&&E.selectAll("path").attr("style",r),d(t,E),t.intersect=function(e){return U.polygon(t,x,e)},i}async function tt(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=l/4,m=.2*s,y=.2*l,b=l+h,{cssStyles:v}=t,x=c.A.svg(i),k=T(t,{});"handDrawn"!==t.look&&(k.roughness=0,k.fillStyle="solid");const w=[{x:-s/2-s/2*.1,y:b/2},...g(-s/2-s/2*.1,b/2,s/2+s/2*.1,b/2,h,.8),{x:s/2+s/2*.1,y:-b/2},{x:-s/2-s/2*.1,y:-b/2}],S=-s/2+s/2*.1,C=-b/2-.4*y,_=[{x:S+s-m,y:1.4*(C+l)},{x:S+s,y:C+l-y},{x:S+s,y:.9*(C+l)},...g(S+s,1.3*(C+l),S+s-m,1.5*(C+l),.03*-l,.5)],E=p(w),F=x.path(E,k),M=p(_),L=x.path(M,{...k,fillStyle:"solid"}),P=i.insert((()=>L),":first-child");return P.insert((()=>F),":first-child"),P.attr("class","basic label-container"),v&&"handDrawn"!==t.look&&P.selectAll("path").attr("style",v),r&&"handDrawn"!==t.look&&P.selectAll("path").attr("style",r),P.attr("transform",`translate(0,${-h/2})`),a.attr("transform",`translate(${-s/2+(t.padding??0)-(o.x-(o.left??0))},${-l/2+(t.padding??0)-h/2-(o.y-(o.top??0))})`),d(t,P),t.intersect=function(e){return U.polygon(t,w,e)},i}async function nt(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await 
u(e,t,f(t)),a=Math.max(o.width+t.padding,t?.width||0),s=Math.max(o.height+t.padding,t?.height||0),l=-a/2,c=-s/2,h=i.insert("rect",":first-child");return h.attr("class","text").attr("style",r).attr("rx",0).attr("ry",0).attr("x",l).attr("y",c).attr("width",a).attr("height",s),d(t,h),t.intersect=function(e){return U.rect(t,e)},i}(0,s.K2)(qe,"question"),(0,s.K2)(He,"rect_left_inv_arrow"),(0,s.K2)(We,"rectWithTitle"),(0,s.K2)(Ke,"roundedRect"),(0,s.K2)(Ue,"shadedProcess"),(0,s.K2)(Ve,"slopedRect"),(0,s.K2)(Ye,"squareRect"),(0,s.K2)(Ge,"stadium"),(0,s.K2)(Xe,"state"),(0,s.K2)(Qe,"stateEnd"),(0,s.K2)(Ze,"stateStart"),(0,s.K2)(Je,"subroutine"),(0,s.K2)(et,"taggedRect"),(0,s.K2)(tt,"taggedWaveEdgedRectangle"),(0,s.K2)(nt,"text");var rt=(0,s.K2)(((e,t,n,r,i,o)=>`M${e},${t}\n a${i},${o} 0,0,1 0,${-r}\n l${n},0\n a${i},${o} 0,0,1 0,${r}\n M${n},${-r}\n a${i},${o} 0,0,0 0,${r}\n l${-n},0`),"createCylinderPathD"),it=(0,s.K2)(((e,t,n,r,i,o)=>[`M${e},${t}`,`M${e+n},${t}`,`a${i},${o} 0,0,0 0,${-r}`,`l${-n},0`,`a${i},${o} 0,0,0 0,${r}`,`l${n},0`].join(" ")),"createOuterCylinderPathD"),ot=(0,s.K2)(((e,t,n,r,i,o)=>[`M${e+n/2},${-r/2}`,`a${i},${o} 0,0,0 0,${r}`].join(" ")),"createInnerCylinderPathD");async function at(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:s,halfPadding:l}=await u(e,t,f(t)),h="neo"===t.look?2*l:l,p=o.height+h,g=p/2,m=g/(2.5+p/50),y=o.width+m+h,{cssStyles:b}=t;let v;if("handDrawn"===t.look){const e=c.A.svg(i),n=it(0,0,y,p,m,g),r=ot(0,0,y,p,m,g),o=e.path(n,T(t,{})),a=e.path(r,T(t,{fill:"none"}));v=i.insert((()=>a),":first-child"),v=i.insert((()=>o),":first-child"),v.attr("class","basic label-container"),b&&v.attr("style",b)}else{const e=rt(0,0,y,p,m,g);v=i.insert("path",":first-child").attr("d",e).attr("class","basic label-container").attr("style",(0,a.KL)(b)).attr("style",r),v.attr("class","basic label-container"),b&&v.selectAll("path").attr("style",b),r&&v.selectAll("path").attr("style",r)}return 
v.attr("label-offset-x",m),v.attr("transform",`translate(${-y/2}, ${p/2} )`),s.attr("transform",`translate(${-o.width/2-m-(o.x-(o.left??0))}, ${-o.height/2-(o.y-(o.top??0))})`),d(t,v),t.intersect=function(e){const n=U.rect(t,e),r=n.y-(t.y??0);if(0!=g&&(Math.abs(r)<(t.height??0)/2||Math.abs(r)==(t.height??0)/2&&Math.abs(n.x-(t.x??0))>(t.width??0)/2-m)){let i=m*m*(1-r*r/(g*g));0!=i&&(i=Math.sqrt(Math.abs(i))),i=m-i,e.x-(t.x??0)>0&&(i=-i),n.x+=i}return n},i}async function st(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=o.width+t.padding,s=o.height+t.padding,l=[{x:-3*s/6,y:0},{x:a+3*s/6,y:0},{x:a,y:-s},{x:0,y:-s}];let h;const{cssStyles:g}=t;if("handDrawn"===t.look){const e=c.A.svg(i),n=T(t,{}),r=p(l),o=e.path(r,n);h=i.insert((()=>o),":first-child").attr("transform",`translate(${-a/2}, ${s/2})`),g&&h.attr("style",g)}else h=X(i,a,s,l);return r&&h.attr("style",r),t.width=a,t.height=s,d(t,h),t.intersect=function(e){return U.polygon(t,l,e)},i}async function lt(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=Math.max(60,o.width+2*(t.padding??0),t?.width??0),s=Math.max(20,o.height+2*(t.padding??0),t?.height??0),{cssStyles:l}=t,h=c.A.svg(i),g=T(t,{});"handDrawn"!==t.look&&(g.roughness=0,g.fillStyle="solid");const m=[{x:-a/2*.8,y:-s/2},{x:a/2*.8,y:-s/2},{x:a/2,y:-s/2*.6},{x:a/2,y:s/2},{x:-a/2,y:s/2},{x:-a/2,y:-s/2*.6}],y=p(m),b=h.path(y,g),v=i.insert((()=>b),":first-child");return v.attr("class","basic label-container"),l&&"handDrawn"!==t.look&&v.selectChildren("path").attr("style",l),r&&"handDrawn"!==t.look&&v.selectChildren("path").attr("style",r),d(t,v),t.intersect=function(e){return U.polygon(t,m,e)},i}async function ct(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await 
u(e,t,f(t)),l=(0,s._3)((0,s.D7)().flowchart?.htmlLabels),h=o.width+(t.padding??0),g=h+o.height,m=h+o.height,y=[{x:0,y:0},{x:m,y:0},{x:m/2,y:-g}],{cssStyles:b}=t,v=c.A.svg(i),x=T(t,{});"handDrawn"!==t.look&&(x.roughness=0,x.fillStyle="solid");const k=p(y),w=v.path(k,x),S=i.insert((()=>w),":first-child").attr("transform",`translate(${-g/2}, ${g/2})`);return b&&"handDrawn"!==t.look&&S.selectChildren("path").attr("style",b),r&&"handDrawn"!==t.look&&S.selectChildren("path").attr("style",r),t.width=h,t.height=g,d(t,S),a.attr("transform",`translate(${-o.width/2-(o.x-(o.left??0))}, ${g/2-(o.height+(t.padding??0)/(l?2:1)-(o.y-(o.top??0)))})`),t.intersect=function(e){return s.Rm.info("Triangle intersect",t,y,e),U.polygon(t,y,e)},i}async function ut(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=l/8,m=l+h,{cssStyles:y}=t,b=70-s,v=b>0?b/2:0,x=c.A.svg(i),k=T(t,{});"handDrawn"!==t.look&&(k.roughness=0,k.fillStyle="solid");const w=[{x:-s/2-v,y:m/2},...g(-s/2-v,m/2,s/2+v,m/2,h,.8),{x:s/2+v,y:-m/2},{x:-s/2-v,y:-m/2}],S=p(w),C=x.path(S,k),_=i.insert((()=>C),":first-child");return _.attr("class","basic label-container"),y&&"handDrawn"!==t.look&&_.selectAll("path").attr("style",y),r&&"handDrawn"!==t.look&&_.selectAll("path").attr("style",r),_.attr("transform",`translate(0,${-h/2})`),a.attr("transform",`translate(${-s/2+(t.padding??0)-(o.x-(o.left??0))},${-l/2+(t.padding??0)-h-(o.y-(o.top??0))})`),d(t,_),t.intersect=function(e){return U.polygon(t,w,e)},i}async function ht(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o}=await u(e,t,f(t)),a=Math.max(o.width+2*(t.padding??0),t?.width??0),s=Math.max(o.height+2*(t.padding??0),t?.height??0),l=a/s;let h=a,m=s;h>m*l?m=h/l:h=m*l,h=Math.max(h,100),m=Math.max(m,50);const 
y=Math.min(.2*m,m/4),b=m+2*y,{cssStyles:v}=t,x=c.A.svg(i),k=T(t,{});"handDrawn"!==t.look&&(k.roughness=0,k.fillStyle="solid");const w=[{x:-h/2,y:b/2},...g(-h/2,b/2,h/2,b/2,y,1),{x:h/2,y:-b/2},...g(h/2,-b/2,-h/2,-b/2,y,-1)],S=p(w),C=x.path(S,k),_=i.insert((()=>C),":first-child");return _.attr("class","basic label-container"),v&&"handDrawn"!==t.look&&_.selectAll("path").attr("style",v),r&&"handDrawn"!==t.look&&_.selectAll("path").attr("style",r),d(t,_),t.intersect=function(e){return U.polygon(t,w,e)},i}async function dt(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const{shapeSvg:i,bbox:o,label:a}=await u(e,t,f(t)),s=Math.max(o.width+2*(t.padding??0),t?.width??0),l=Math.max(o.height+2*(t.padding??0),t?.height??0),h=-s/2,p=-l/2,{cssStyles:g}=t,m=c.A.svg(i),y=T(t,{}),b=[{x:h-5,y:p-5},{x:h-5,y:p+l},{x:h+s,y:p+l},{x:h+s,y:p-5}],v=`M${h-5},${p-5} L${h+s},${p-5} L${h+s},${p+l} L${h-5},${p+l} L${h-5},${p-5}\n M${h-5},${p} L${h+s},${p}\n M${h},${p-5} L${h},${p+l}`;"handDrawn"!==t.look&&(y.roughness=0,y.fillStyle="solid");const x=m.path(v,y),k=i.insert((()=>x),":first-child");return k.attr("transform","translate(2.5, 2.5)"),k.attr("class","basic label-container"),g&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",g),r&&"handDrawn"!==t.look&&k.selectAll("path").attr("style",r),a.attr("transform",`translate(${-o.width/2+2.5-(o.x-(o.left??0))}, ${-o.height/2+2.5-(o.y-(o.top??0))})`),d(t,k),t.intersect=function(e){return U.polygon(t,b,e)},i}async function ft(e,t){const n=t;if(n.alias&&(t.label=n.alias),"handDrawn"===t.look){const{themeVariables:n}=(0,s.zj)(),{background:r}=n,i={...t,id:t.id+"-background",look:"default",cssStyles:["stroke: none",`fill: ${r}`]};await ft(e,i)}const r=(0,s.zj)();t.useHtmlLabels=r.htmlLabels;let i=r.er?.diagramPadding??10,o=r.er?.entityPadding??6;const{cssStyles:u}=t,{labelStyles:h}=A(t);if(0===n.attributes.length&&t.label){const 
n={rx:0,ry:0,labelPaddingX:i,labelPaddingY:1.5*i,classes:""};(0,a.Un)(t.label,r)+2*n.labelPaddingX0){const e=m.width+2*i-(v+x+k+w);v+=e/_,x+=e/_,k>0&&(k+=e/_),w>0&&(w+=e/_)}const F=v+x+k+w,M=c.A.svg(g),L=T(t,{});"handDrawn"!==t.look&&(L.roughness=0,L.fillStyle="solid");const P=Math.max(E.width+2*i,t?.width||0,F),O=Math.max(E.height+(b[0]||y)+o,t?.height||0),$=-P/2,B=-O/2;g.selectAll("g:not(:first-child)").each(((e,t,n)=>{const r=(0,l.Ltv)(n[t]),a=r.attr("transform");let s=0,c=0;if(a){const e=RegExp(/translate\(([^,]+),([^)]+)\)/).exec(a);e&&(s=parseFloat(e[1]),c=parseFloat(e[2]),r.attr("class").includes("attribute-name")?s+=v:r.attr("class").includes("attribute-keys")?s+=v+x:r.attr("class").includes("attribute-comment")&&(s+=v+x+k))}r.attr("transform",`translate(${$+i/2+s}, ${c+B+m.height+o/2})`)})),g.select(".name").attr("transform","translate("+-m.width/2+", "+(B+o/2)+")");const D=M.rectangle($,B,P,O,L),z=g.insert((()=>D),":first-child").attr("style",u.join("")),{themeVariables:I}=(0,s.zj)(),{rowEven:N,rowOdd:R,nodeBorder:j}=I;b.push(0);for(const[a,s]of b.entries()){if(0===a&&b.length>1)continue;const e=a%2===0&&0!==s,t=M.rectangle($,m.height+B+s,P,m.height,{...L,fill:e?N:R,stroke:j});g.insert((()=>t),"g.label").attr("style",u.join("")).attr("class","row-rect-"+(a%2===0?"even":"odd"))}let q=M.line($,m.height+B,P+$,m.height+B,L);g.insert((()=>q)).attr("class","divider"),q=M.line(v+$,m.height+B,v+$,O+B,L),g.insert((()=>q)).attr("class","divider"),S&&(q=M.line(v+x+$,m.height+B,v+x+$,O+B,L),g.insert((()=>q)).attr("class","divider")),C&&(q=M.line(v+x+k+$,m.height+B,v+x+k+$,O+B,L),g.insert((()=>q)).attr("class","divider"));for(const a of b)q=M.line($,m.height+B+a,P+$,m.height+B+a,L),g.insert((()=>q)).attr("class","divider");return d(t,z),t.intersect=function(e){return U.rect(t,e)},g}async function pt(e,t,n){let r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:0,i=arguments.length>4&&void 0!==arguments[4]?arguments[4]:0,c=arguments.length>5&&void 
0!==arguments[5]?arguments[5]:[],u=arguments.length>6&&void 0!==arguments[6]?arguments[6]:"";const h=e.insert("g").attr("class",`label ${c.join(" ")}`).attr("transform",`translate(${r}, ${i})`).attr("style",u);t!==(0,s.QO)(t)&&(t=(t=(0,s.QO)(t)).replaceAll("<","<").replaceAll(">",">"));const d=h.node().appendChild(await(0,o.GZ)(h,t,{width:(0,a.Un)(t,n)+100,style:u,useHtmlLabels:n.htmlLabels},n));if(t.includes("<")||t.includes(">")){let e=d.children[0];for(e.textContent=e.textContent.replaceAll("<","<").replaceAll(">",">");e.childNodes[0];)e=e.childNodes[0],e.textContent=e.textContent.replaceAll("<","<").replaceAll(">",">")}let f=d.getBBox();if((0,s._3)(n.htmlLabels)){const e=d.children[0];e.style.textAlign="start";const t=(0,l.Ltv)(d);f=e.getBoundingClientRect(),t.attr("width",f.width),t.attr("height",f.height)}return f}async function gt(e,t,n,r){let i=arguments.length>4&&void 0!==arguments[4]?arguments[4]:n.class.padding??12;const o=r?0:3,a=e.insert("g").attr("class",f(t)).attr("id",t.domId||t.id);let s=null,l=null,c=null,u=null,h=0,d=0,p=0;if(s=a.insert("g").attr("class","annotation-group text"),t.annotations.length>0){const e=t.annotations[0];await mt(s,{text:`\xab${e}\xbb`},0);h=s.node().getBBox().height}l=a.insert("g").attr("class","label-group text"),await mt(l,t,0,["font-weight: bolder"]);const g=l.node().getBBox();d=g.height,c=a.insert("g").attr("class","members-group text");let m=0;for(const f of t.members){m+=await mt(c,f,m,[f.parseClassifier()])+o}p=c.node().getBBox().height,p<=0&&(p=i/2),u=a.insert("g").attr("class","methods-group text");let y=0;for(const f of t.methods){y+=await mt(u,f,y,[f.parseClassifier()])+o}let b=a.node().getBBox();if(null!==s){const e=s.node().getBBox();s.attr("transform",`translate(${-e.width/2})`)}return l.attr("transform",`translate(${-g.width/2}, ${h})`),b=a.node().getBBox(),c.attr("transform",`translate(0, ${h+d+2*i})`),b=a.node().getBBox(),u.attr("transform",`translate(0, 
${h+d+(p?p+4*i:2*i)})`),b=a.node().getBBox(),{shapeSvg:a,bbox:b}}async function mt(e,t,n){let r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:[];const i=e.insert("g").attr("class","label").attr("style",r.join("; ")),c=(0,s.zj)();let u="useHtmlLabels"in t?t.useHtmlLabels:(0,s._3)(c.htmlLabels)??!0,h="";h="text"in t?t.text:t.label,!u&&h.startsWith("\\")&&(h=h.substring(1)),(0,s.Wi)(h)&&(u=!0);const d=await(0,o.GZ)(i,(0,s.oB)((0,a.Sm)(h)),{width:(0,a.Un)(h,c)+50,classes:"markdown-node-label",useHtmlLabels:u},c);let f,p=1;if(u){const e=d.children[0],t=(0,l.Ltv)(d);p=e.innerHTML.split("
    ").length,e.innerHTML.includes("")&&(p+=e.innerHTML.split("").length-1);const n=e.getElementsByTagName("img");if(n){const e=""===h.replace(/]*>/g,"").trim();await Promise.all([...n].map((t=>new Promise((n=>{function r(){if(t.style.display="flex",t.style.flexDirection="column",e){const e=c.fontSize?.toString()??window.getComputedStyle(document.body).fontSize,n=5,r=parseInt(e,10)*n+"px";t.style.minWidth=r,t.style.maxWidth=r}else t.style.width="100%";n(t)}(0,s.K2)(r,"setupImage"),setTimeout((()=>{t.complete&&r()})),t.addEventListener("error",r),t.addEventListener("load",r)})))))}f=e.getBoundingClientRect(),t.attr("width",f.width),t.attr("height",f.height)}else{r.includes("font-weight: bolder")&&(0,l.Ltv)(d).selectAll("tspan").attr("font-weight",""),p=d.children.length;const e=d.children[0];if(""===d.textContent||d.textContent.includes(">")){e.textContent=h[0]+h.substring(1).replaceAll(">",">").replaceAll("<","<").trim();" "===h[1]&&(e.textContent=e.textContent[0]+" "+e.textContent.substring(1))}"undefined"===e.textContent&&(e.textContent=""),f=d.getBBox()}return i.attr("transform","translate(0,"+(-f.height/(2*p)+n)+")"),f.height}async function yt(e,t){const n=(0,s.D7)(),r=n.class.padding??12,i=r,o=t.useHtmlLabels??(0,s._3)(n.htmlLabels)??!0,a=t;a.annotations=a.annotations??[],a.members=a.members??[],a.methods=a.methods??[];const{shapeSvg:u,bbox:h}=await gt(e,t,n,o,i),{labelStyles:f,nodeStyles:p}=A(t);t.labelStyle=f,t.cssStyles=a.styles||"";const g=a.styles?.join(";")||p||"";t.cssStyles||(t.cssStyles=g.replaceAll("!important","").split(";"));const m=0===a.members.length&&0===a.methods.length&&!n.class?.hideEmptyMembersBox,y=c.A.svg(u),b=T(t,{});"handDrawn"!==t.look&&(b.roughness=0,b.fillStyle="solid");const v=h.width;let x=h.height;0===a.members.length&&0===a.methods.length?x+=i:a.members.length>0&&0===a.methods.length&&(x+=2*i);const 
k=-v/2,w=-x/2,S=y.rectangle(k-r,w-r-(m?r:0===a.members.length&&0===a.methods.length?-r/2:0),v+2*r,x+2*r+(m?2*r:0===a.members.length&&0===a.methods.length?-r:0),b),C=u.insert((()=>S),":first-child");C.attr("class","basic label-container");const _=C.node().getBBox();u.selectAll(".text").each(((e,t,n)=>{const i=(0,l.Ltv)(n[t]),s=i.attr("transform");let c=0;if(s){const e=RegExp(/translate\(([^,]+),([^)]+)\)/).exec(s);e&&(c=parseFloat(e[2]))}let h=c+w+r-(m?r:0===a.members.length&&0===a.methods.length?-r/2:0);o||(h-=4);let d=k;(i.attr("class").includes("label-group")||i.attr("class").includes("annotation-group"))&&(d=-i.node()?.getBBox().width/2||0,u.selectAll("text").each((function(e,t,n){"middle"===window.getComputedStyle(n[t]).textAnchor&&(d=0)}))),i.attr("transform",`translate(${d}, ${h})`)}));const E=u.select(".annotation-group").node().getBBox().height-(m?r/2:0)||0,F=u.select(".label-group").node().getBBox().height-(m?r/2:0)||0,M=u.select(".members-group").node().getBBox().height-(m?r/2:0)||0;if(a.members.length>0||a.methods.length>0||m){const e=y.line(_.x,E+F+w+r,_.x+_.width,E+F+w+r,b);u.insert((()=>e)).attr("class","divider").attr("style",g)}if(m||a.members.length>0||a.methods.length>0){const e=y.line(_.x,E+F+M+w+2*i+r,_.x+_.width,E+F+M+w+r+2*i,b);u.insert((()=>e)).attr("class","divider").attr("style",g)}if("handDrawn"!==a.look&&u.selectAll("path").attr("style",g),C.select(":nth-child(2)").attr("style",g),u.selectAll(".divider").select("path").attr("style",g),t.labelStyle?u.selectAll("span").attr("style",t.labelStyle):u.selectAll("span").attr("style",g),!o){const e=RegExp(/color\s*:\s*([^;]*)/),t=e.exec(g);if(t){const e=t[0].replace("color","fill");u.selectAll("tspan").attr("style",e)}else if(f){const t=e.exec(f);if(t){const e=t[0].replace("color","fill");u.selectAll("tspan").attr("style",e)}}}return d(t,C),t.intersect=function(e){return U.rect(t,e)},u}async function bt(e,t){const{labelStyles:n,nodeStyles:r}=A(t);t.labelStyle=n;const i=t,o=t,a="verifyMethod"in 
t,s=f(t),u=e.insert("g").attr("class",s).attr("id",t.domId??t.id);let h;h=a?await vt(u,`<<${i.type}>>`,0,t.labelStyle):await vt(u,"<<Element>>",0,t.labelStyle);let p=h;const g=await vt(u,i.name,p,t.labelStyle+"; font-weight: bold;");if(p+=g+20,a){p+=await vt(u,""+(i.requirementId?`Id: ${i.requirementId}`:""),p,t.labelStyle);p+=await vt(u,""+(i.text?`Text: ${i.text}`:""),p,t.labelStyle);p+=await vt(u,""+(i.risk?`Risk: ${i.risk}`:""),p,t.labelStyle),await vt(u,""+(i.verifyMethod?`Verification: ${i.verifyMethod}`:""),p,t.labelStyle)}else{p+=await vt(u,""+(o.type?`Type: ${o.type}`:""),p,t.labelStyle),await vt(u,""+(o.docRef?`Doc Ref: ${o.docRef}`:""),p,t.labelStyle)}const m=(u.node()?.getBBox().width??200)+20,y=(u.node()?.getBBox().height??200)+20,b=-m/2,v=-y/2,x=c.A.svg(u),k=T(t,{});"handDrawn"!==t.look&&(k.roughness=0,k.fillStyle="solid");const w=x.rectangle(b,v,m,y,k),S=u.insert((()=>w),":first-child");if(S.attr("class","basic label-container").attr("style",r),u.selectAll(".label").each(((e,t,n)=>{const r=(0,l.Ltv)(n[t]),i=r.attr("transform");let o=0,a=0;if(i){const e=RegExp(/translate\(([^,]+),([^)]+)\)/).exec(i);e&&(o=parseFloat(e[1]),a=parseFloat(e[2]))}const s=a-y/2;let c=b+10;0!==t&&1!==t||(c=o),r.attr("transform",`translate(${c}, ${s+20})`)})),p>h+g+20){const e=x.line(b,v+h+g+20,b+m,v+h+g+20,k);u.insert((()=>e)).attr("style",r)}return d(t,S),t.intersect=function(e){return U.rect(t,e)},u}async function vt(e,t,n){let r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:"";if(""===t)return 0;const i=e.insert("g").attr("class","label").attr("style",r),c=(0,s.D7)(),u=c.htmlLabels??!0,h=await(0,o.GZ)(i,(0,s.oB)((0,a.Sm)(t)),{width:(0,a.Un)(t,c)+50,classes:"markdown-node-label",useHtmlLabels:u,style:r},c);let d;if(u){const e=h.children[0],t=(0,l.Ltv)(h);d=e.getBoundingClientRect(),t.attr("width",d.width),t.attr("height",d.height)}else{const e=h.children[0];for(const t of 
e.children)t.textContent=t.textContent.replaceAll(">",">").replaceAll("<","<"),r&&t.setAttribute("style",r);d=h.getBBox(),d.height+=6}return i.attr("transform",`translate(${-d.width/2},${-d.height/2+n})`),d.height}(0,s.K2)(at,"tiltedCylinder"),(0,s.K2)(st,"trapezoid"),(0,s.K2)(lt,"trapezoidalPentagon"),(0,s.K2)(ct,"triangle"),(0,s.K2)(ut,"waveEdgedRectangle"),(0,s.K2)(ht,"waveRectangle"),(0,s.K2)(dt,"windowPane"),(0,s.K2)(ft,"erBox"),(0,s.K2)(pt,"addText"),(0,s.K2)(gt,"textHelper"),(0,s.K2)(mt,"addText"),(0,s.K2)(yt,"classBox"),(0,s.K2)(bt,"requirementBox"),(0,s.K2)(vt,"addText");var xt=(0,s.K2)((e=>{switch(e){case"Very High":return"red";case"High":return"orange";case"Medium":return null;case"Low":return"blue";case"Very Low":return"lightblue"}}),"colorFromPriority");async function kt(e,t,n){let{config:r}=n;const{labelStyles:i,nodeStyles:o}=A(t);t.labelStyle=i||"";const a=t.width;t.width=(t.width??200)-10;const{shapeSvg:s,bbox:l,label:p}=await u(e,t,f(t)),g=t.padding||10;let m,y="";"ticket"in t&&t.ticket&&r?.kanban?.ticketBaseUrl&&(y=r?.kanban?.ticketBaseUrl.replace("#TICKET#",t.ticket),m=s.insert("svg:a",":first-child").attr("class","kanban-ticket-link").attr("xlink:href",y).attr("target","_blank"));const b={useHtmlLabels:t.useHtmlLabels,labelStyle:t.labelStyle||"",width:t.width,img:t.img,padding:t.padding||8,centerLabel:!1};let v,x;({label:v,bbox:x}=m?await h(m,"ticket"in t&&t.ticket||"",b):await h(s,"ticket"in t&&t.ticket||"",b));const{label:w,bbox:S}=await h(s,"assigned"in t&&t.assigned||"",b);t.width=a;const C=t?.width||0,_=Math.max(x.height,S.height)/2,E=Math.max(l.height+20,t?.height||0)+_,F=-C/2,M=-E/2;let L;p.attr("transform","translate("+(g-C/2)+", "+(-_-l.height/2)+")"),v.attr("transform","translate("+(g-C/2)+", "+(-_+l.height/2)+")"),w.attr("transform","translate("+(g+C/2-S.width-20)+", "+(-_+l.height/2)+")");const{rx:P,ry:O}=t,{cssStyles:$}=t;if("handDrawn"===t.look){const 
e=c.A.svg(s),n=T(t,{}),r=P||O?e.path(k(F,M,C,E,P||0),n):e.rectangle(F,M,C,E,n);L=s.insert((()=>r),":first-child"),L.attr("class","basic label-container").attr("style",$||null)}else{L=s.insert("rect",":first-child"),L.attr("class","basic label-container __APA__").attr("style",o).attr("rx",P??5).attr("ry",O??5).attr("x",F).attr("y",M).attr("width",C).attr("height",E);const e="priority"in t&&t.priority;if(e){const t=s.append("line"),n=F+2,r=M+Math.floor((P??0)/2),i=M+E-Math.floor((P??0)/2);t.attr("x1",n).attr("y1",r).attr("x2",n).attr("y2",i).attr("stroke-width","4").attr("stroke",xt(e))}}return d(t,L),t.height=E,t.intersect=function(e){return U.rect(t,e)},s}(0,s.K2)(kt,"kanbanItem");var wt=[{semanticName:"Process",name:"Rectangle",shortName:"rect",description:"Standard process shape",aliases:["proc","process","rectangle"],internalAliases:["squareRect"],handler:Ye},{semanticName:"Event",name:"Rounded Rectangle",shortName:"rounded",description:"Represents an event",aliases:["event"],internalAliases:["roundedRect"],handler:Ke},{semanticName:"Terminal Point",name:"Stadium",shortName:"stadium",description:"Terminal point",aliases:["terminal","pill"],handler:Ge},{semanticName:"Subprocess",name:"Framed Rectangle",shortName:"fr-rect",description:"Subprocess",aliases:["subprocess","subproc","framed-rectangle","subroutine"],handler:Je},{semanticName:"Database",name:"Cylinder",shortName:"cyl",description:"Database storage",aliases:["db","database","cylinder"],handler:de},{semanticName:"Start",name:"Circle",shortName:"circle",description:"Starting point",aliases:["circ"],handler:J},{semanticName:"Decision",name:"Diamond",shortName:"diam",description:"Decision-making step",aliases:["decision","diamond","question"],handler:qe},{semanticName:"Prepare Conditional",name:"Hexagon",shortName:"hex",description:"Preparation or condition step",aliases:["hexagon","prepare"],handler:xe},{semanticName:"Data Input/Output",name:"Lean Right",shortName:"lean-r",description:"Represents input or 
output",aliases:["lean-right","in-out"],internalAliases:["lean_right"],handler:Le},{semanticName:"Data Input/Output",name:"Lean Left",shortName:"lean-l",description:"Represents output or input",aliases:["lean-left","out-in"],internalAliases:["lean_left"],handler:Me},{semanticName:"Priority Action",name:"Trapezoid Base Bottom",shortName:"trap-b",description:"Priority action",aliases:["priority","trapezoid-bottom","trapezoid"],handler:st},{semanticName:"Manual Operation",name:"Trapezoid Base Top",shortName:"trap-t",description:"Represents a manual task",aliases:["manual","trapezoid-top","inv-trapezoid"],internalAliases:["inv_trapezoid"],handler:Te},{semanticName:"Stop",name:"Double Circle",shortName:"dbl-circ",description:"Represents a stop point",aliases:["double-circle"],internalAliases:["doublecircle"],handler:pe},{semanticName:"Text Block",name:"Text Block",shortName:"text",description:"Text block",handler:nt},{semanticName:"Card",name:"Notched Rectangle",shortName:"notch-rect",description:"Represents a card",aliases:["card","notched-rectangle"],handler:Q},{semanticName:"Lined/Shaded Process",name:"Lined Rectangle",shortName:"lin-rect",description:"Lined process shape",aliases:["lined-rectangle","lined-process","lin-proc","shaded-process"],handler:Ue},{semanticName:"Start",name:"Small Circle",shortName:"sm-circ",description:"Small starting point",aliases:["start","small-circle"],internalAliases:["stateStart"],handler:Ze},{semanticName:"Stop",name:"Framed Circle",shortName:"fr-circ",description:"Stop point",aliases:["stop","framed-circle"],internalAliases:["stateEnd"],handler:Qe},{semanticName:"Fork/Join",name:"Filled Rectangle",shortName:"fork",description:"Fork or join in process flow",aliases:["join"],internalAliases:["forkJoin"],handler:ye},{semanticName:"Collate",name:"Hourglass",shortName:"hourglass",description:"Represents a collate operation",aliases:["hourglass","collate"],handler:ke},{semanticName:"Comment",name:"Curly 
Brace",shortName:"brace",description:"Adds a comment",aliases:["comment","brace-l"],handler:re},{semanticName:"Comment Right",name:"Curly Brace",shortName:"brace-r",description:"Adds a comment",handler:oe},{semanticName:"Comment with braces on both sides",name:"Curly Braces",shortName:"braces",description:"Adds a comment",handler:se},{semanticName:"Com Link",name:"Lightning Bolt",shortName:"bolt",description:"Communication link",aliases:["com-link","lightning-bolt"],handler:Pe},{semanticName:"Document",name:"Document",shortName:"doc",description:"Represents a document",aliases:["doc","document"],handler:ut},{semanticName:"Delay",name:"Half-Rounded Rectangle",shortName:"delay",description:"Represents a delay",aliases:["half-rounded-rectangle"],handler:be},{semanticName:"Direct Access Storage",name:"Horizontal Cylinder",shortName:"h-cyl",description:"Direct access storage",aliases:["das","horizontal-cylinder"],handler:at},{semanticName:"Disk Storage",name:"Lined Cylinder",shortName:"lin-cyl",description:"Disk storage",aliases:["disk","lined-cylinder"],handler:De},{semanticName:"Display",name:"Curved Trapezoid",shortName:"curv-trap",description:"Represents a display",aliases:["curved-trapezoid","display"],handler:le},{semanticName:"Divided Process",name:"Divided Rectangle",shortName:"div-rect",description:"Divided process shape",aliases:["div-proc","divided-rectangle","divided-process"],handler:fe},{semanticName:"Extract",name:"Triangle",shortName:"tri",description:"Extraction process",aliases:["extract","triangle"],handler:ct},{semanticName:"Internal Storage",name:"Window Pane",shortName:"win-pane",description:"Internal storage",aliases:["internal-storage","window-pane"],handler:dt},{semanticName:"Junction",name:"Filled Circle",shortName:"f-circ",description:"Junction point",aliases:["junction","filled-circle"],handler:ge},{semanticName:"Loop Limit",name:"Trapezoidal Pentagon",shortName:"notch-pent",description:"Loop limit 
step",aliases:["loop-limit","notched-pentagon"],handler:lt},{semanticName:"Manual File",name:"Flipped Triangle",shortName:"flip-tri",description:"Manual file operation",aliases:["manual-file","flipped-triangle"],handler:me},{semanticName:"Manual Input",name:"Sloped Rectangle",shortName:"sl-rect",description:"Manual input step",aliases:["manual-input","sloped-rectangle"],handler:Ve},{semanticName:"Multi-Document",name:"Stacked Document",shortName:"docs",description:"Multiple documents",aliases:["documents","st-doc","stacked-document"],handler:Ne},{semanticName:"Multi-Process",name:"Stacked Rectangle",shortName:"st-rect",description:"Multiple processes",aliases:["procs","processes","stacked-rectangle"],handler:Ie},{semanticName:"Stored Data",name:"Bow Tie Rectangle",shortName:"bow-rect",description:"Stored data",aliases:["stored-data","bow-tie-rectangle"],handler:G},{semanticName:"Summary",name:"Crossed Circle",shortName:"cross-circ",description:"Summary",aliases:["summary","crossed-circle"],handler:te},{semanticName:"Tagged Document",name:"Tagged Document",shortName:"tag-doc",description:"Tagged document",aliases:["tag-doc","tagged-document"],handler:tt},{semanticName:"Tagged Process",name:"Tagged Rectangle",shortName:"tag-rect",description:"Tagged process",aliases:["tagged-rectangle","tag-proc","tagged-process"],handler:et},{semanticName:"Paper Tape",name:"Flag",shortName:"flag",description:"Paper tape",aliases:["paper-tape"],handler:ht},{semanticName:"Odd",name:"Odd",shortName:"odd",description:"Odd shape",internalAliases:["rect_left_inv_arrow"],handler:He},{semanticName:"Lined Document",name:"Lined Document",shortName:"lin-doc",description:"Lined document",aliases:["lined-document"],handler:ze}],St=(0,s.K2)((()=>{const 
e={state:Xe,choice:Z,note:Re,rectWithTitle:We,labelRect:Fe,iconSquare:_e,iconCircle:Se,icon:we,iconRounded:Ce,imageSquare:Ae,anchor:V,kanbanItem:kt,classBox:yt,erBox:ft,requirementBox:bt},t=[...Object.entries(e),...wt.flatMap((e=>[e.shortName,..."aliases"in e?e.aliases:[],..."internalAliases"in e?e.internalAliases:[]].map((t=>[t,e.handler]))))];return Object.fromEntries(t)}),"generateShapeMap")();function Ct(e){return e in St}(0,s.K2)(Ct,"isValidShape");var _t=new Map;async function At(e,t,n){let r,i;"rect"===t.shape&&(t.rx&&t.ry?t.shape="roundedRect":t.shape="squareRect");const o=t.shape?St[t.shape]:void 0;if(!o)throw new Error(`No such shape: ${t.shape}. Please check your syntax.`);if(t.link){let a;"sandbox"===n.config.securityLevel?a="_top":t.linkTarget&&(a=t.linkTarget||"_blank"),r=e.insert("svg:a").attr("xlink:href",t.link).attr("target",a??null),i=await o(r,t,n)}else i=await o(e,t,n),r=i;return t.tooltip&&i.attr("title",t.tooltip),_t.set(t.id,r),t.haveCallback&&r.attr("class",r.attr("class")+" clickable"),r}(0,s.K2)(At,"insertNode");var Tt=(0,s.K2)(((e,t)=>{_t.set(t.id,e)}),"setNodeElem"),Et=(0,s.K2)((()=>{_t.clear()}),"clear"),Ft=(0,s.K2)((e=>{const t=_t.get(e.id);s.Rm.trace("Transforming node",e.diff,e,"translate("+(e.x-e.width/2-5)+", "+e.width/2+")");const n=e.diff||0;return e.clusterNode?t.attr("transform","translate("+(e.x+n-e.width/2)+", "+(e.y-e.height/2-8)+")"):t.attr("transform","translate("+e.x+", "+e.y+")"),n}),"positionNode")},1230:()=>{!function(){if("undefined"!==typeof Prism&&"undefined"!==typeof document){var e="line-numbers",t=/\n(?!$)/g,n=Prism.plugins.lineNumbers={getLine:function(t,n){if("PRE"===t.tagName&&t.classList.contains(e)){var r=t.querySelector(".line-numbers-rows");if(r){var i=parseInt(t.getAttribute("data-start"),10)||1,o=i+(r.children.length-1);no&&(n=o);var a=n-i;return r.children[a]}}},resize:function(e){i([e])},assumeViewportIndependence:!0},r=void 
0;window.addEventListener("resize",(function(){n.assumeViewportIndependence&&r===window.innerWidth||(r=window.innerWidth,i(Array.prototype.slice.call(document.querySelectorAll("pre."+e))))})),Prism.hooks.add("complete",(function(n){if(n.code){var r=n.element,o=r.parentNode;if(o&&/pre/i.test(o.nodeName)&&!r.querySelector(".line-numbers-rows")&&Prism.util.isActive(r,e)){r.classList.remove(e),o.classList.add(e);var a,s=n.code.match(t),l=s?s.length+1:1,c=new Array(l+1).join("");(a=document.createElement("span")).setAttribute("aria-hidden","true"),a.className="line-numbers-rows",a.innerHTML=c,o.hasAttribute("data-start")&&(o.style.counterReset="linenumber "+(parseInt(o.getAttribute("data-start"),10)-1)),n.element.appendChild(a),i([o]),Prism.hooks.run("line-numbers",n)}}})),Prism.hooks.add("line-numbers",(function(e){e.plugins=e.plugins||{},e.plugins.lineNumbers=!0}))}function i(e){if(0!=(e=e.filter((function(e){var t=function(e){if(!e)return null;return window.getComputedStyle?getComputedStyle(e):e.currentStyle||null}(e)["white-space"];return"pre-wrap"===t||"pre-line"===t}))).length){var n=e.map((function(e){var n=e.querySelector("code"),r=e.querySelector(".line-numbers-rows");if(n&&r){var i=e.querySelector(".line-numbers-sizer"),o=n.textContent.split(t);i||((i=document.createElement("span")).className="line-numbers-sizer",n.appendChild(i)),i.innerHTML="0",i.style.display="block";var a=i.getBoundingClientRect().height;return i.innerHTML="",{element:e,lines:o,lineHeights:[],oneLinerHeight:a,sizer:i}}})).filter(Boolean);n.forEach((function(e){var t=e.sizer,n=e.lines,r=e.lineHeights,i=e.oneLinerHeight;r[n.length-1]=void 0,n.forEach((function(e,n){if(e&&e.length>1){var o=t.appendChild(document.createElement("span"));o.style.display="block",o.textContent=e}else r[n]=i}))})),n.forEach((function(e){for(var t=e.sizer,n=e.lineHeights,r=0,i=0;i{"use strict";n.d(t,{A:()=>l});var r=n(4306);const i=function(){return!1};var o="object"==typeof 
exports&&exports&&!exports.nodeType&&exports,a=o&&"object"==typeof module&&module&&!module.nodeType&&module,s=a&&a.exports===o?r.A.Buffer:void 0;const l=(s?s.isBuffer:void 0)||i},1458:(e,t,n)=>{"use strict";n.d(t,{A:()=>g});var r=n(4186),i=n(5481);const o={re:/^#((?:[a-f0-9]{2}){2,4}|[a-f0-9]{3})$/i,parse:e=>{if(35!==e.charCodeAt(0))return;const t=e.match(o.re);if(!t)return;const n=t[1],i=parseInt(n,16),a=n.length,s=a%4===0,l=a>4,c=l?1:17,u=l?8:4,h=s?0:-1,d=l?255:15;return r.A.set({r:(i>>u*(h+3)&d)*c,g:(i>>u*(h+2)&d)*c,b:(i>>u*(h+1)&d)*c,a:s?(i&d)*c/255:1},e)},stringify:e=>{const{r:t,g:n,b:r,a:o}=e;return o<1?`#${i.Y[Math.round(t)]}${i.Y[Math.round(n)]}${i.Y[Math.round(r)]}${i.Y[Math.round(255*o)]}`:`#${i.Y[Math.round(t)]}${i.Y[Math.round(n)]}${i.Y[Math.round(r)]}`}},a=o;var s=n(7170);const l={re:/^hsla?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(?:deg|grad|rad|turn)?)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(%)?))?\s*?\)$/i,hueRe:/^(.+?)(deg|grad|rad|turn)$/i,_hue2deg:e=>{const t=e.match(l.hueRe);if(t){const[,e,n]=t;switch(n){case"grad":return s.A.channel.clamp.h(.9*parseFloat(e));case"rad":return s.A.channel.clamp.h(180*parseFloat(e)/Math.PI);case"turn":return s.A.channel.clamp.h(360*parseFloat(e))}}return s.A.channel.clamp.h(parseFloat(e))},parse:e=>{const t=e.charCodeAt(0);if(104!==t&&72!==t)return;const n=e.match(l.re);if(!n)return;const[,i,o,a,c,u]=n;return r.A.set({h:l._hue2deg(i),s:s.A.channel.clamp.s(parseFloat(o)),l:s.A.channel.clamp.l(parseFloat(a)),a:c?s.A.channel.clamp.a(u?parseFloat(c)/100:parseFloat(c)):1},e)},stringify:e=>{const{h:t,s:n,l:r,a:i}=e;return i<1?`hsla(${s.A.lang.round(t)}, ${s.A.lang.round(n)}%, ${s.A.lang.round(r)}%, ${i})`:`hsl(${s.A.lang.round(t)}, ${s.A.lang.round(n)}%, 
${s.A.lang.round(r)}%)`}},c=l,u={colors:{aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyanaqua:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumviolet
red:"#c71585",midnightblue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",transparent:"#00000000",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"},parse:e=>{e=e.toLowerCase();const t=u.colors[e];if(t)return a.parse(t)},stringify:e=>{const t=a.stringify(e);for(const n in u.colors)if(u.colors[n]===t)return n}},h=u,d={re:/^rgba?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?)))?\s*?\)$/i,parse:e=>{const t=e.charCodeAt(0);if(114!==t&&82!==t)return;const n=e.match(d.re);if(!n)return;const[,i,o,a,l,c,u,h,f]=n;return r.A.set({r:s.A.channel.clamp.r(o?2.55*parseFloat(i):parseFloat(i)),g:s.A.channel.clamp.g(l?2.55*parseFloat(a):parseFloat(a)),b:s.A.channel.clamp.b(u?2.55*parseFloat(c):parseFloat(c)),a:h?s.A.channel.clamp.a(f?parseFloat(h)/100:parseFloat(h)):1},e)},stringify:e=>{const{r:t,g:n,b:r,a:i}=e;return i<1?`rgba(${s.A.lang.round(t)}, ${s.A.lang.round(n)}, ${s.A.lang.round(r)}, ${s.A.lang.round(i)})`:`rgb(${s.A.lang.round(t)}, ${s.A.lang.round(n)}, 
${s.A.lang.round(r)})`}},f=d,p={format:{keyword:u,hex:a,rgb:d,rgba:d,hsl:l,hsla:l},parse:e=>{if("string"!==typeof e)return e;const t=a.parse(e)||f.parse(e)||c.parse(e)||h.parse(e);if(t)return t;throw new Error(`Unsupported color format: "${e}"`)},stringify:e=>!e.changed&&e.color?e.color:e.type.is(i.Z.HSL)||void 0===e.data.r?c.stringify(e):e.a<1||!Number.isInteger(e.r)||!Number.isInteger(e.g)||!Number.isInteger(e.b)?f.stringify(e):a.stringify(e)},g=p},1497:(e,t,n)=>{"use strict";var r=n(3218);function i(){}function o(){}o.resetWarningCache=i,e.exports=function(){function e(e,t,n,i,o,a){if(a!==r){var s=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types");throw s.name="Invariant Violation",s}}function t(){return e}e.isRequired=e;var n={array:e,bigint:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:o,resetWarningCache:i};return n.PropTypes=n,n}},1499:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=n(4306).A.Uint8Array},1580:(e,t,n)=>{"use strict";n.d(t,{IU:()=>m,Jo:()=>T,T_:()=>x,g0:()=>M,jP:()=>b});var 
r=n(2598),i=n(958),o=n(8434),a=n(2596),s=n(634),l=n(3759),c=n(3638),u=n(8205),h=(0,l.K2)(((e,t,n,r,i,o)=>{t.arrowTypeStart&&f(e,"start",t.arrowTypeStart,n,r,i,o),t.arrowTypeEnd&&f(e,"end",t.arrowTypeEnd,n,r,i,o)}),"addEdgeMarkers"),d={arrow_cross:{type:"cross",fill:!1},arrow_point:{type:"point",fill:!0},arrow_barb:{type:"barb",fill:!0},arrow_circle:{type:"circle",fill:!1},aggregation:{type:"aggregation",fill:!1},extension:{type:"extension",fill:!1},composition:{type:"composition",fill:!0},dependency:{type:"dependency",fill:!0},lollipop:{type:"lollipop",fill:!1},only_one:{type:"onlyOne",fill:!1},zero_or_one:{type:"zeroOrOne",fill:!1},one_or_more:{type:"oneOrMore",fill:!1},zero_or_more:{type:"zeroOrMore",fill:!1},requirement_arrow:{type:"requirement_arrow",fill:!1},requirement_contains:{type:"requirement_contains",fill:!1}},f=(0,l.K2)(((e,t,n,r,i,o,a)=>{const s=d[n];if(!s)return void l.Rm.warn(`Unknown arrow type: ${n}`);const c=`${i}_${o}-${s.type}${"start"===t?"Start":"End"}`;if(a&&""!==a.trim()){const n=`${c}_${a.replace(/[^\dA-Za-z]/g,"_")}`;if(!document.getElementById(n)){const e=document.getElementById(c);if(e){const t=e.cloneNode(!0);t.id=n;t.querySelectorAll("path, circle, line").forEach((e=>{e.setAttribute("stroke",a),s.fill&&e.setAttribute("fill",a)})),e.parentNode?.appendChild(t)}}e.attr(`marker-${t}`,`url(${r}#${n})`)}else e.attr(`marker-${t}`,`url(${r}#${c})`)}),"addEdgeMarker"),p=new Map,g=new Map,m=(0,l.K2)((()=>{p.clear(),g.clear()}),"clear"),y=(0,l.K2)((e=>e?e.reduce(((e,t)=>e+";"+t),""):""),"getLabelStyles"),b=(0,l.K2)((async(e,t)=>{let n=(0,l._3)((0,l.D7)().flowchart.htmlLabels);const r=await(0,a.GZ)(e,t.label,{style:y(t.labelStyle),useHtmlLabels:n,addSvgBackground:!0,isNode:!1});l.Rm.info("abc82",t,t.labelType);const o=e.insert("g").attr("class","edgeLabel"),s=o.insert("g").attr("class","label");s.node().appendChild(r);let u,h=r.getBBox();if(n){const 
e=r.children[0],t=(0,c.Ltv)(r);h=e.getBoundingClientRect(),t.attr("width",h.width),t.attr("height",h.height)}if(s.attr("transform","translate("+-h.width/2+", "+-h.height/2+")"),p.set(t.id,o),t.width=h.width,t.height=h.height,t.startLabelLeft){const n=await(0,i.DA)(t.startLabelLeft,y(t.labelStyle)),r=e.insert("g").attr("class","edgeTerminals"),o=r.insert("g").attr("class","inner");u=o.node().appendChild(n);const a=n.getBBox();o.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),g.get(t.id)||g.set(t.id,{}),g.get(t.id).startLeft=r,v(u,t.startLabelLeft)}if(t.startLabelRight){const n=await(0,i.DA)(t.startLabelRight,y(t.labelStyle)),r=e.insert("g").attr("class","edgeTerminals"),o=r.insert("g").attr("class","inner");u=r.node().appendChild(n),o.node().appendChild(n);const a=n.getBBox();o.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),g.get(t.id)||g.set(t.id,{}),g.get(t.id).startRight=r,v(u,t.startLabelRight)}if(t.endLabelLeft){const n=await(0,i.DA)(t.endLabelLeft,y(t.labelStyle)),r=e.insert("g").attr("class","edgeTerminals"),o=r.insert("g").attr("class","inner");u=o.node().appendChild(n);const a=n.getBBox();o.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),r.node().appendChild(n),g.get(t.id)||g.set(t.id,{}),g.get(t.id).endLeft=r,v(u,t.endLabelLeft)}if(t.endLabelRight){const n=await(0,i.DA)(t.endLabelRight,y(t.labelStyle)),r=e.insert("g").attr("class","edgeTerminals"),o=r.insert("g").attr("class","inner");u=o.node().appendChild(n);const a=n.getBBox();o.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),r.node().appendChild(n),g.get(t.id)||g.set(t.id,{}),g.get(t.id).endRight=r,v(u,t.endLabelRight)}return r}),"insertEdgeLabel");function v(e,t){(0,l.D7)().flowchart.htmlLabels&&e&&(e.style.width=9*t.length+"px",e.style.height="12px")}(0,l.K2)(v,"setTerminalWidth");var x=(0,l.K2)(((e,t)=>{l.Rm.debug("Moving label abc88 ",e.id,e.label,p.get(e.id),t);let n=t.updatedPath?t.updatedPath:t.originalPath;const 
r=(0,l.D7)(),{subGraphTitleTotalMargin:i}=(0,o.O)(r);if(e.label){const r=p.get(e.id);let o=e.x,a=e.y;if(n){const r=s._K.calcLabelPosition(n);l.Rm.debug("Moving label "+e.label+" from (",o,",",a,") to (",r.x,",",r.y,") abc88"),t.updatedPath&&(o=r.x,a=r.y)}r.attr("transform",`translate(${o}, ${a+i/2})`)}if(e.startLabelLeft){const t=g.get(e.id).startLeft;let r=e.x,i=e.y;if(n){const t=s._K.calcTerminalLabelPosition(e.arrowTypeStart?10:0,"start_left",n);r=t.x,i=t.y}t.attr("transform",`translate(${r}, ${i})`)}if(e.startLabelRight){const t=g.get(e.id).startRight;let r=e.x,i=e.y;if(n){const t=s._K.calcTerminalLabelPosition(e.arrowTypeStart?10:0,"start_right",n);r=t.x,i=t.y}t.attr("transform",`translate(${r}, ${i})`)}if(e.endLabelLeft){const t=g.get(e.id).endLeft;let r=e.x,i=e.y;if(n){const t=s._K.calcTerminalLabelPosition(e.arrowTypeEnd?10:0,"end_left",n);r=t.x,i=t.y}t.attr("transform",`translate(${r}, ${i})`)}if(e.endLabelRight){const t=g.get(e.id).endRight;let r=e.x,i=e.y;if(n){const t=s._K.calcTerminalLabelPosition(e.arrowTypeEnd?10:0,"end_right",n);r=t.x,i=t.y}t.attr("transform",`translate(${r}, ${i})`)}}),"positionEdgeLabel"),k=(0,l.K2)(((e,t)=>{const n=e.x,r=e.y,i=Math.abs(t.x-n),o=Math.abs(t.y-r),a=e.width/2,s=e.height/2;return i>=a||o>=s}),"outsideNode"),w=(0,l.K2)(((e,t,n)=>{l.Rm.debug(`intersection calc abc89:\n outsidePoint: ${JSON.stringify(t)}\n insidePoint : ${JSON.stringify(n)}\n node : x:${e.x} y:${e.y} w:${e.width} h:${e.height}`);const r=e.x,i=e.y,o=Math.abs(r-n.x),a=e.width/2;let s=n.xMath.abs(r-t.x)*c){let e=n.y{l.Rm.warn("abc88 cutPathAtIntersect",e,t);let n=[],r=e[0],i=!1;return e.forEach((e=>{if(l.Rm.info("abc88 checking point",e,t),k(t,e)||i)l.Rm.warn("abc88 outside",e,r),r=e,i||n.push(e);else{const o=w(t,r,e);l.Rm.debug("abc88 inside",e,r,o),l.Rm.debug("abc88 intersection",o,t);let a=!1;n.forEach((e=>{a=a||e.x===o.x&&e.y===o.y})),n.some((e=>e.x===o.x&&e.y===o.y))?l.Rm.warn("abc88 no intersect",o,n):n.push(o),i=!0}})),l.Rm.debug("returning 
points",n),n}),"cutPathAtIntersect");function C(e){const t=[],n=[];for(let r=1;r5&&Math.abs(o.y-i.y)>5||i.y===o.y&&o.x===a.x&&Math.abs(o.x-i.x)>5&&Math.abs(o.y-a.y)>5)&&(t.push(o),n.push(r))}return{cornerPoints:t,cornerPointPositions:n}}(0,l.K2)(C,"extractCornerPoints");var _=(0,l.K2)((function(e,t,n){const r=t.x-e.x,i=t.y-e.y,o=n/Math.sqrt(r*r+i*i);return{x:t.x-o*r,y:t.y-o*i}}),"findAdjacentPoint"),A=(0,l.K2)((function(e){const{cornerPointPositions:t}=C(e),n=[];for(let r=0;r10&&Math.abs(i.y-t.y)>=10){l.Rm.debug("Corner point fixing",Math.abs(i.x-t.x),Math.abs(i.y-t.y));const e=5;d=o.x===a.x?{x:c<0?a.x-e+h:a.x+e-h,y:u<0?a.y-h:a.y+h}:{x:c<0?a.x-h:a.x+h,y:u<0?a.y-e+h:a.y+e-h}}else l.Rm.debug("Corner point skipping fixing",Math.abs(i.x-t.x),Math.abs(i.y-t.y));n.push(d,s)}else n.push(e[r]);return n}),"fixCorners"),T=(0,l.K2)((function(e,t,n,o,a,s,d){const{handDrawnSeed:f}=(0,l.D7)();let p=t.points,g=!1;const m=a;var y=s;const b=[];for(const r in t.cssCompiledStyles)(0,i.KX)(r)||b.push(t.cssCompiledStyles[r]);y.intersect&&m.intersect&&(p=p.slice(1,t.points.length-1),p.unshift(m.intersect(p[0])),l.Rm.debug("Last point APA12",t.start,"--\x3e",t.end,p[p.length-1],y,y.intersect(p[p.length-1])),p.push(y.intersect(p[p.length-1]))),t.toCluster&&(l.Rm.info("to cluster abc88",n.get(t.toCluster)),p=S(t.points,n.get(t.toCluster).node),g=!0),t.fromCluster&&(l.Rm.debug("from cluster abc88",n.get(t.fromCluster),JSON.stringify(p,null,2)),p=S(p.reverse(),n.get(t.fromCluster).node).reverse(),g=!0);let v=p.filter((e=>!Number.isNaN(e.y)));v=A(v);let x=c.qrM;switch(x=c.lUB,t.curve){case"linear":x=c.lUB;break;case"basis":default:x=c.qrM;break;case"cardinal":x=c.y8u;break;case"bumpX":x=c.Wi0;break;case"bumpY":x=c.PGM;break;case"catmullRom":x=c.oDi;break;case"monotoneX":x=c.nVG;break;case"monotoneY":x=c.uxU;break;case"natural":x=c.Xf2;break;case"step":x=c.GZz;break;case"stepAfter":x=c.UPb;break;case"stepBefore":x=c.dyv}const{x:k,y:w}=(0,r.R)(t),C=(0,c.n8j)().x(k).y(w).curve(x);let 
_,T;switch(t.thickness){case"normal":default:_="edge-thickness-normal";break;case"thick":_="edge-thickness-thick";break;case"invisible":_="edge-thickness-invisible"}switch(t.pattern){case"solid":default:_+=" edge-pattern-solid";break;case"dotted":_+=" edge-pattern-dotted";break;case"dashed":_+=" edge-pattern-dashed"}let E=C(v);const F=Array.isArray(t.style)?t.style:[t.style];let M=F.find((e=>e?.startsWith("stroke:")));if("handDrawn"===t.look){const n=u.A.svg(e);Object.assign([],v);const r=n.path(E,{roughness:.3,seed:f});_+=" transition",T=(0,c.Ltv)(r).select("path").attr("id",t.id).attr("class"," "+_+(t.classes?" "+t.classes:"")).attr("style",F?F.reduce(((e,t)=>e+";"+t),""):"");let i=T.attr("d");T.attr("d",i),e.node().appendChild(T.node())}else{const n=b.join(";"),r=F?F.reduce(((e,t)=>e+t+";"),""):"";let i="";t.animate&&(i=" edge-animation-fast"),t.animation&&(i=" edge-animation-"+t.animation);const o=n?n+";"+r+";":r;T=e.append("path").attr("d",E).attr("id",t.id).attr("class"," "+_+(t.classes?" 
"+t.classes:"")+(i??"")).attr("style",o),M=o.match(/stroke:([^;]+)/)?.[1]}let L="";((0,l.D7)().flowchart.arrowMarkerAbsolute||(0,l.D7)().state.arrowMarkerAbsolute)&&(L=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search,L=L.replace(/\(/g,"\\(").replace(/\)/g,"\\)")),l.Rm.info("arrowTypeStart",t.arrowTypeStart),l.Rm.info("arrowTypeEnd",t.arrowTypeEnd),h(T,t,L,d,o,M);let P={};return g&&(P.updatedPath=p),P.originalPath=t.points,P}),"insertEdge"),E=(0,l.K2)(((e,t,n,r)=>{t.forEach((t=>{F[t](e,n,r)}))}),"insertMarkers"),F={extension:(0,l.K2)(((e,t,n)=>{l.Rm.trace("Making markers for ",n),e.append("defs").append("marker").attr("id",n+"_"+t+"-extensionStart").attr("class","marker extension "+t).attr("refX",18).attr("refY",7).attr("markerWidth",190).attr("markerHeight",240).attr("orient","auto").append("path").attr("d","M 1,7 L18,13 V 1 Z"),e.append("defs").append("marker").attr("id",n+"_"+t+"-extensionEnd").attr("class","marker extension "+t).attr("refX",1).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 1,1 V 13 L18,7 Z")}),"extension"),composition:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-compositionStart").attr("class","marker composition "+t).attr("refX",18).attr("refY",7).attr("markerWidth",190).attr("markerHeight",240).attr("orient","auto").append("path").attr("d","M 18,7 L9,13 L1,7 L9,1 Z"),e.append("defs").append("marker").attr("id",n+"_"+t+"-compositionEnd").attr("class","marker composition "+t).attr("refX",1).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 18,7 L9,13 L1,7 L9,1 Z")}),"composition"),aggregation:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-aggregationStart").attr("class","marker aggregation 
"+t).attr("refX",18).attr("refY",7).attr("markerWidth",190).attr("markerHeight",240).attr("orient","auto").append("path").attr("d","M 18,7 L9,13 L1,7 L9,1 Z"),e.append("defs").append("marker").attr("id",n+"_"+t+"-aggregationEnd").attr("class","marker aggregation "+t).attr("refX",1).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 18,7 L9,13 L1,7 L9,1 Z")}),"aggregation"),dependency:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-dependencyStart").attr("class","marker dependency "+t).attr("refX",6).attr("refY",7).attr("markerWidth",190).attr("markerHeight",240).attr("orient","auto").append("path").attr("d","M 5,7 L9,13 L1,7 L9,1 Z"),e.append("defs").append("marker").attr("id",n+"_"+t+"-dependencyEnd").attr("class","marker dependency "+t).attr("refX",13).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 18,7 L9,13 L14,7 L9,1 Z")}),"dependency"),lollipop:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-lollipopStart").attr("class","marker lollipop "+t).attr("refX",13).attr("refY",7).attr("markerWidth",190).attr("markerHeight",240).attr("orient","auto").append("circle").attr("stroke","black").attr("fill","transparent").attr("cx",7).attr("cy",7).attr("r",6),e.append("defs").append("marker").attr("id",n+"_"+t+"-lollipopEnd").attr("class","marker lollipop "+t).attr("refX",1).attr("refY",7).attr("markerWidth",190).attr("markerHeight",240).attr("orient","auto").append("circle").attr("stroke","black").attr("fill","transparent").attr("cx",7).attr("cy",7).attr("r",6)}),"lollipop"),point:(0,l.K2)(((e,t,n)=>{e.append("marker").attr("id",n+"_"+t+"-pointEnd").attr("class","marker "+t).attr("viewBox","0 0 10 10").attr("refX",5).attr("refY",5).attr("markerUnits","userSpaceOnUse").attr("markerWidth",8).attr("markerHeight",8).attr("orient","auto").append("path").attr("d","M 0 0 L 10 5 L 0 10 
z").attr("class","arrowMarkerPath").style("stroke-width",1).style("stroke-dasharray","1,0"),e.append("marker").attr("id",n+"_"+t+"-pointStart").attr("class","marker "+t).attr("viewBox","0 0 10 10").attr("refX",4.5).attr("refY",5).attr("markerUnits","userSpaceOnUse").attr("markerWidth",8).attr("markerHeight",8).attr("orient","auto").append("path").attr("d","M 0 5 L 10 10 L 10 0 z").attr("class","arrowMarkerPath").style("stroke-width",1).style("stroke-dasharray","1,0")}),"point"),circle:(0,l.K2)(((e,t,n)=>{e.append("marker").attr("id",n+"_"+t+"-circleEnd").attr("class","marker "+t).attr("viewBox","0 0 10 10").attr("refX",11).attr("refY",5).attr("markerUnits","userSpaceOnUse").attr("markerWidth",11).attr("markerHeight",11).attr("orient","auto").append("circle").attr("cx","5").attr("cy","5").attr("r","5").attr("class","arrowMarkerPath").style("stroke-width",1).style("stroke-dasharray","1,0"),e.append("marker").attr("id",n+"_"+t+"-circleStart").attr("class","marker "+t).attr("viewBox","0 0 10 10").attr("refX",-1).attr("refY",5).attr("markerUnits","userSpaceOnUse").attr("markerWidth",11).attr("markerHeight",11).attr("orient","auto").append("circle").attr("cx","5").attr("cy","5").attr("r","5").attr("class","arrowMarkerPath").style("stroke-width",1).style("stroke-dasharray","1,0")}),"circle"),cross:(0,l.K2)(((e,t,n)=>{e.append("marker").attr("id",n+"_"+t+"-crossEnd").attr("class","marker cross "+t).attr("viewBox","0 0 11 11").attr("refX",12).attr("refY",5.2).attr("markerUnits","userSpaceOnUse").attr("markerWidth",11).attr("markerHeight",11).attr("orient","auto").append("path").attr("d","M 1,1 l 9,9 M 10,1 l -9,9").attr("class","arrowMarkerPath").style("stroke-width",2).style("stroke-dasharray","1,0"),e.append("marker").attr("id",n+"_"+t+"-crossStart").attr("class","marker cross "+t).attr("viewBox","0 0 11 11").attr("refX",-1).attr("refY",5.2).attr("markerUnits","userSpaceOnUse").attr("markerWidth",11).attr("markerHeight",11).attr("orient","auto").append("path").attr("d","M 
1,1 l 9,9 M 10,1 l -9,9").attr("class","arrowMarkerPath").style("stroke-width",2).style("stroke-dasharray","1,0")}),"cross"),barb:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-barbEnd").attr("refX",19).attr("refY",7).attr("markerWidth",20).attr("markerHeight",14).attr("markerUnits","userSpaceOnUse").attr("orient","auto").append("path").attr("d","M 19,7 L9,13 L14,7 L9,1 Z")}),"barb"),only_one:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-onlyOneStart").attr("class","marker onlyOne "+t).attr("refX",0).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("d","M9,0 L9,18 M15,0 L15,18"),e.append("defs").append("marker").attr("id",n+"_"+t+"-onlyOneEnd").attr("class","marker onlyOne "+t).attr("refX",18).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("d","M3,0 L3,18 M9,0 L9,18")}),"only_one"),zero_or_one:(0,l.K2)(((e,t,n)=>{const r=e.append("defs").append("marker").attr("id",n+"_"+t+"-zeroOrOneStart").attr("class","marker zeroOrOne "+t).attr("refX",0).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto");r.append("circle").attr("fill","white").attr("cx",21).attr("cy",9).attr("r",6),r.append("path").attr("d","M9,0 L9,18");const i=e.append("defs").append("marker").attr("id",n+"_"+t+"-zeroOrOneEnd").attr("class","marker zeroOrOne "+t).attr("refX",30).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto");i.append("circle").attr("fill","white").attr("cx",9).attr("cy",9).attr("r",6),i.append("path").attr("d","M21,0 L21,18")}),"zero_or_one"),one_or_more:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-oneOrMoreStart").attr("class","marker oneOrMore "+t).attr("refX",18).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("d","M0,18 Q 18,0 36,18 Q 18,36 0,18 M42,9 
L42,27"),e.append("defs").append("marker").attr("id",n+"_"+t+"-oneOrMoreEnd").attr("class","marker oneOrMore "+t).attr("refX",27).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("d","M3,9 L3,27 M9,18 Q27,0 45,18 Q27,36 9,18")}),"one_or_more"),zero_or_more:(0,l.K2)(((e,t,n)=>{const r=e.append("defs").append("marker").attr("id",n+"_"+t+"-zeroOrMoreStart").attr("class","marker zeroOrMore "+t).attr("refX",18).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto");r.append("circle").attr("fill","white").attr("cx",48).attr("cy",18).attr("r",6),r.append("path").attr("d","M0,18 Q18,0 36,18 Q18,36 0,18");const i=e.append("defs").append("marker").attr("id",n+"_"+t+"-zeroOrMoreEnd").attr("class","marker zeroOrMore "+t).attr("refX",39).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto");i.append("circle").attr("fill","white").attr("cx",9).attr("cy",18).attr("r",6),i.append("path").attr("d","M21,18 Q39,0 57,18 Q39,36 21,18")}),"zero_or_more"),requirement_arrow:(0,l.K2)(((e,t,n)=>{e.append("defs").append("marker").attr("id",n+"_"+t+"-requirement_arrowEnd").attr("refX",20).attr("refY",10).attr("markerWidth",20).attr("markerHeight",20).attr("orient","auto").append("path").attr("d","M0,0\n L20,10\n M20,10\n L0,20")}),"requirement_arrow"),requirement_contains:(0,l.K2)(((e,t,n)=>{const r=e.append("defs").append("marker").attr("id",n+"_"+t+"-requirement_containsStart").attr("refX",0).attr("refY",10).attr("markerWidth",20).attr("markerHeight",20).attr("orient","auto").append("g");r.append("circle").attr("cx",10).attr("cy",10).attr("r",9).attr("fill","none"),r.append("line").attr("x1",1).attr("x2",19).attr("y1",10).attr("y2",10),r.append("line").attr("y1",1).attr("y2",19).attr("x1",10).attr("x2",10)}),"requirement_contains")},M=E},1602:(e,t,n)=>{"use 
strict";n.r(t),n.d(t,{GlobalStyles:()=>Qe,StyledEngineProvider:()=>Xe,ThemeContext:()=>Le,css:()=>Ue,default:()=>Ze,internal_processStyles:()=>Je,keyframes:()=>Ve});var r=n(8168),i=n(5043),o=n.t(i,2);var a=function(){function e(e){var t=this;this._insertTag=function(e){var n;n=0===t.tags.length?t.insertionPoint?t.insertionPoint.nextSibling:t.prepend?t.container.firstChild:t.before:t.tags[t.tags.length-1].nextSibling,t.container.insertBefore(e,n),t.tags.push(e)},this.isSpeedy=void 0===e.speedy||e.speedy,this.tags=[],this.ctr=0,this.nonce=e.nonce,this.key=e.key,this.container=e.container,this.prepend=e.prepend,this.insertionPoint=e.insertionPoint,this.before=null}var t=e.prototype;return t.hydrate=function(e){e.forEach(this._insertTag)},t.insert=function(e){this.ctr%(this.isSpeedy?65e3:1)===0&&this._insertTag(function(e){var t=document.createElement("style");return t.setAttribute("data-emotion",e.key),void 0!==e.nonce&&t.setAttribute("nonce",e.nonce),t.appendChild(document.createTextNode("")),t.setAttribute("data-s",""),t}(this));var t=this.tags[this.tags.length-1];if(this.isSpeedy){var n=function(e){if(e.sheet)return e.sheet;for(var t=0;t0?f(S,--k):0,v--,10===w&&(v=1,b--),w}function T(){return w=k2||L(w)>3?"":" "}function D(e,t){for(;--t&&T()&&!(w<48||w>102||w>57&&w<65||w>70&&w<97););return M(e,F()+(t<6&&32==E()&&32==T()))}function z(e){for(;T();)switch(w){case e:return k;case 34:case 39:34!==e&&39!==e&&z(w);break;case 40:41===e&&z(e);break;case 92:T()}return k}function I(e,t){for(;T()&&e+w!==57&&(e+w!==84||47!==E()););return"/*"+M(t,k-1)+"*"+l(47===e?e:T())}function N(e){for(;!L(E());)T();return M(e,k)}var R="-ms-",j="-moz-",q="-webkit-",H="comm",W="rule",K="decl",U="@keyframes";function V(e,t){for(var n="",r=m(e),i=0;i0&&g(O)-m&&y(v>32?J(O+";",r,n,m-1):J(h(O," ","")+";",r,n,m-2),c);break;case 59:O+=";";default:if(y(P=Q(O,t,n,u,p,i,s,_,M=[],L=[],m),o),123===C)if(0===p)X(O,t,P,P,M,o,m,s,L);else switch(99===b&&110===f(O,3)?100:b){case 100:case 108:case 109:case 
115:X(e,P,P,r&&y(Q(e,P,P,0,0,i,s,_,i,M=[],m),L),i,L,m,s,r?M:L);break;default:X(O,P,P,P,[""],L,0,s,L)}}u=p=v=0,k=S=1,_=O="",m=a;break;case 58:m=1+g(O),v=x;default:if(k<1)if(123==C)--k;else if(125==C&&0==k++&&125==A())continue;switch(O+=l(C),C*k){case 38:S=p>0?1:(O+="\f",-1);break;case 44:s[u++]=(g(O)-1)*S,S=1;break;case 64:45===E()&&(O+=$(T())),b=E(),p=m=g(_=O+=N(F())),C++;break;case 45:45===x&&2==g(O)&&(k=0)}}return o}function Q(e,t,n,r,i,o,a,l,c,d,f){for(var g=i-1,y=0===i?o:[""],b=m(y),v=0,x=0,k=0;v0?y[w]+" "+S:h(S,/&\f/g,y[w])))&&(c[k++]=_);return C(e,t,n,0===i?W:l,c,d,f)}function Z(e,t,n){return C(e,t,n,H,l(w),p(e,2,-2),0)}function J(e,t,n,r){return C(e,t,n,K,p(e,0,r),p(e,r+1,-1),r)}var ee=function(e,t,n){for(var r=0,i=0;r=i,i=E(),38===r&&12===i&&(t[n]=1),!L(i);)T();return M(e,k)},te=function(e,t){return O(function(e,t){var n=-1,r=44;do{switch(L(r)){case 0:38===r&&12===E()&&(t[n]=1),e[n]+=ee(k-1,t,n);break;case 2:e[n]+=$(r);break;case 4:if(44===r){e[++n]=58===E()?"&\f":"",t[n]=e[n].length;break}default:e[n]+=l(r)}}while(r=T());return e}(P(e),t))},ne=new WeakMap,re=function(e){if("rule"===e.type&&e.parent&&!(e.length<1)){for(var t=e.value,n=e.parent,r=e.column===n.column&&e.line===n.line;"rule"!==n.type;)if(!(n=n.parent))return;if((1!==e.props.length||58===t.charCodeAt(0)||ne.get(n))&&!r){ne.set(e,!0);for(var i=[],o=te(t,i),a=n.props,s=0,l=0;s6)switch(f(e,t+1)){case 109:if(45!==f(e,t+4))break;case 102:return h(e,/(.+:)(.+)-([^]+)/,"$1"+q+"$2-$3$1"+j+(108==f(e,t+3)?"$3":"$2-$3"))+e;case 115:return~d(e,"stretch")?oe(h(e,"stretch","fill-available"),t)+e:e}break;case 4949:if(115!==f(e,t+1))break;case 6444:switch(f(e,g(e)-3-(~d(e,"!important")&&10))){case 107:return h(e,":",":"+q)+e;case 101:return h(e,/(.+:)([^;!]+)(;|!.+)?/,"$1"+q+(45===f(e,14)?"inline-":"")+"box$3$1"+q+"$2$3$1"+R+"$2box$3")+e}break;case 5936:switch(f(e,t+11)){case 114:return q+e+R+h(e,/[svh]\w+-[tblr]{2}/,"tb")+e;case 108:return q+e+R+h(e,/[svh]\w+-[tblr]{2}/,"tb-rl")+e;case 45:return 
q+e+R+h(e,/[svh]\w+-[tblr]{2}/,"lr")+e}return q+e+R+e+e}return e}var ae=[function(e,t,n,r){if(e.length>-1&&!e.return)switch(e.type){case K:e.return=oe(e.value,e.length);break;case U:return V([_(e,{value:h(e.value,"@","@"+q)})],r);case W:if(e.length)return function(e,t){return e.map(t).join("")}(e.props,(function(t){switch(function(e,t){return(e=t.exec(e))?e[0]:e}(t,/(::plac\w+|:read-\w+)/)){case":read-only":case":read-write":return V([_(e,{props:[h(t,/:(read-\w+)/,":-moz-$1")]})],r);case"::placeholder":return V([_(e,{props:[h(t,/:(plac\w+)/,":"+q+"input-$1")]}),_(e,{props:[h(t,/:(plac\w+)/,":-moz-$1")]}),_(e,{props:[h(t,/:(plac\w+)/,R+"input-$1")]})],r)}return""}))}}],se=function(e){var t=e.key;if("css"===t){var n=document.querySelectorAll("style[data-emotion]:not([data-s])");Array.prototype.forEach.call(n,(function(e){-1!==e.getAttribute("data-emotion").indexOf(" ")&&(document.head.appendChild(e),e.setAttribute("data-s",""))}))}var r,i,o=e.stylisPlugins||ae,s={},l=[];r=e.container||document.head,Array.prototype.forEach.call(document.querySelectorAll('style[data-emotion^="'+t+' "]'),(function(e){for(var t=e.getAttribute("data-emotion").split(" "),n=1;n=4;++r,i-=4)t=1540483477*(65535&(t=255&e.charCodeAt(r)|(255&e.charCodeAt(++r))<<8|(255&e.charCodeAt(++r))<<16|(255&e.charCodeAt(++r))<<24))+(59797*(t>>>16)<<16),n=1540483477*(65535&(t^=t>>>24))+(59797*(t>>>16)<<16)^1540483477*(65535&n)+(59797*(n>>>16)<<16);switch(i){case 3:n^=(255&e.charCodeAt(r+2))<<16;case 2:n^=(255&e.charCodeAt(r+1))<<8;case 1:n=1540483477*(65535&(n^=255&e.charCodeAt(r)))+(59797*(n>>>16)<<16)}return(((n=1540483477*(65535&(n^=n>>>13))+(59797*(n>>>16)<<16))^n>>>15)>>>0).toString(36)}(i)+l;return{name:c,styles:i,next:we}}var _e=!!o.useInsertionEffect&&o.useInsertionEffect,Ae=_e||function(e){return e()},Te=_e||i.useLayoutEffect,Ee=i.createContext("undefined"!==typeof HTMLElement?se({key:"css"}):null),Fe=Ee.Provider,Me=function(e){return(0,i.forwardRef)((function(t,n){var r=(0,i.useContext)(Ee);return 
e(t,r,n)}))},Le=i.createContext({});var Pe={}.hasOwnProperty,Oe="__EMOTION_TYPE_PLEASE_DO_NOT_USE__",$e=function(e){var t=e.cache,n=e.serialized,r=e.isStringTag;return ce(t,n,r),Ae((function(){return ue(t,n,r)})),null},Be=Me((function(e,t,n){var r=e.css;"string"===typeof r&&void 0!==t.registered[r]&&(r=t.registered[r]);var o=e[Oe],a=[r],s="";"string"===typeof e.className?s=le(t.registered,a,e.className):null!=e.className&&(s=e.className+" ");var l=Ce(a,void 0,i.useContext(Le));s+=t.key+"-"+l.name;var c={};for(var u in e)Pe.call(e,u)&&"css"!==u&&u!==Oe&&(c[u]=e[u]);return c.className=s,n&&(c.ref=n),i.createElement(i.Fragment,null,i.createElement($e,{cache:t,serialized:l,isStringTag:"string"===typeof o}),i.createElement(o,c))})),De=Be,ze=/^((children|dangerouslySetInnerHTML|key|ref|autoFocus|defaultValue|defaultChecked|innerHTML|suppressContentEditableWarning|suppressHydrationWarning|valueLink|abbr|accept|acceptCharset|accessKey|action|allow|allowUserMedia|allowPaymentRequest|allowFullScreen|allowTransparency|alt|async|autoComplete|autoPlay|capture|cellPadding|cellSpacing|challenge|charSet|checked|cite|classID|className|cols|colSpan|content|contentEditable|contextMenu|controls|controlsList|coords|crossOrigin|data|dateTime|decoding|default|defer|dir|disabled|disablePictureInPicture|disableRemotePlayback|download|draggable|encType|enterKeyHint|fetchpriority|fetchPriority|form|formAction|formEncType|formMethod|formNoValidate|formTarget|frameBorder|headers|height|hidden|high|href|hrefLang|htmlFor|httpEquiv|id|inputMode|integrity|is|keyParams|keyType|kind|label|lang|list|loading|loop|low|marginHeight|marginWidth|max|maxLength|media|mediaGroup|method|min|minLength|multiple|muted|name|nonce|noValidate|open|optimum|pattern|placeholder|playsInline|poster|preload|profile|radioGroup|readOnly|referrerPolicy|rel|required|reversed|role|rows|rowSpan|sandbox|scope|scoped|scrolling|seamless|selected|shape|size|sizes|slot|span|spellCheck|src|srcDoc|srcLang|srcSet|start|step|style|summa
ry|tabIndex|target|title|translate|type|useMap|value|width|wmode|wrap|about|datatype|inlist|prefix|property|resource|typeof|vocab|autoCapitalize|autoCorrect|autoSave|color|incremental|fallback|inert|itemProp|itemScope|itemType|itemID|itemRef|on|option|results|security|unselectable|accentHeight|accumulate|additive|alignmentBaseline|allowReorder|alphabetic|amplitude|arabicForm|ascent|attributeName|attributeType|autoReverse|azimuth|baseFrequency|baselineShift|baseProfile|bbox|begin|bias|by|calcMode|capHeight|clip|clipPathUnits|clipPath|clipRule|colorInterpolation|colorInterpolationFilters|colorProfile|colorRendering|contentScriptType|contentStyleType|cursor|cx|cy|d|decelerate|descent|diffuseConstant|direction|display|divisor|dominantBaseline|dur|dx|dy|edgeMode|elevation|enableBackground|end|exponent|externalResourcesRequired|fill|fillOpacity|fillRule|filter|filterRes|filterUnits|floodColor|floodOpacity|focusable|fontFamily|fontSize|fontSizeAdjust|fontStretch|fontStyle|fontVariant|fontWeight|format|from|fr|fx|fy|g1|g2|glyphName|glyphOrientationHorizontal|glyphOrientationVertical|glyphRef|gradientTransform|gradientUnits|hanging|horizAdvX|horizOriginX|ideographic|imageRendering|in|in2|intercept|k|k1|k2|k3|k4|kernelMatrix|kernelUnitLength|kerning|keyPoints|keySplines|keyTimes|lengthAdjust|letterSpacing|lightingColor|limitingConeAngle|local|markerEnd|markerMid|markerStart|markerHeight|markerUnits|markerWidth|mask|maskContentUnits|maskUnits|mathematical|mode|numOctaves|offset|opacity|operator|order|orient|orientation|origin|overflow|overlinePosition|overlineThickness|panose1|paintOrder|pathLength|patternContentUnits|patternTransform|patternUnits|pointerEvents|points|pointsAtX|pointsAtY|pointsAtZ|preserveAlpha|preserveAspectRatio|primitiveUnits|r|radius|refX|refY|renderingIntent|repeatCount|repeatDur|requiredExtensions|requiredFeatures|restart|result|rotate|rx|ry|scale|seed|shapeRendering|slope|spacing|specularConstant|specularExponent|speed|spreadMethod|startOffset|stdDeviat
ion|stemh|stemv|stitchTiles|stopColor|stopOpacity|strikethroughPosition|strikethroughThickness|string|stroke|strokeDasharray|strokeDashoffset|strokeLinecap|strokeLinejoin|strokeMiterlimit|strokeOpacity|strokeWidth|surfaceScale|systemLanguage|tableValues|targetX|targetY|textAnchor|textDecoration|textRendering|textLength|to|transform|u1|u2|underlinePosition|underlineThickness|unicode|unicodeBidi|unicodeRange|unitsPerEm|vAlphabetic|vHanging|vIdeographic|vMathematical|values|vectorEffect|version|vertAdvY|vertOriginX|vertOriginY|viewBox|viewTarget|visibility|widths|wordSpacing|writingMode|x|xHeight|x1|x2|xChannelSelector|xlinkActuate|xlinkArcrole|xlinkHref|xlinkRole|xlinkShow|xlinkTitle|xlinkType|xmlBase|xmlns|xmlnsXlink|xmlLang|xmlSpace|y|y1|y2|yChannelSelector|z|zoomAndPan|for|class|autofocus)|(([Dd][Aa][Tt][Aa]|[Aa][Rr][Ii][Aa]|x)-.*))$/,Ie=de((function(e){return ze.test(e)||111===e.charCodeAt(0)&&110===e.charCodeAt(1)&&e.charCodeAt(2)<91})),Ne=function(e){return"theme"!==e},Re=function(e){return"string"===typeof e&&e.charCodeAt(0)>96?Ie:Ne},je=function(e,t,n){var r;if(t){var i=t.shouldForwardProp;r=e.__emotion_forwardProp&&i?function(t){return e.__emotion_forwardProp(t)&&i(t)}:i}return"function"!==typeof r&&n&&(r=e.__emotion_forwardProp),r},qe=function(e){var t=e.cache,n=e.serialized,r=e.isStringTag;return ce(t,n,r),Ae((function(){return ue(t,n,r)})),null},He=function e(t,n){var o,a,s=t.__emotion_real===t,l=s&&t.__emotion_base||t;void 0!==n&&(o=n.label,a=n.target);var c=je(t,n,s),u=c||Re(l),h=!u("as");return function(){var d=arguments,f=s&&void 0!==t.__emotion_styles?t.__emotion_styles.slice(0):[];if(void 0!==o&&f.push("label:"+o+";"),null==d[0]||void 0===d[0].raw)f.push.apply(f,d);else{var p=d[0];f.push(p[0]);for(var g=d.length,m=1;m{return t(void 0===(r=e)||null===r||0===Object.keys(r).length?n:e);var r}:t;return(0,Ye.jsx)(Ke,{styles:r})}function Ze(e,t){return He(e,t)}"object"===typeof document&&(Ge=se({key:"css",prepend:!0}));const 
Je=(e,t)=>{Array.isArray(e.__emotion_styles)&&(e.__emotion_styles=t(e.__emotion_styles))}},1632:()=>{!function(e){e.languages.diff={coord:[/^(?:\*{3}|-{3}|\+{3}).*$/m,/^@@.*@@$/m,/^\d.*$/m]};var t={"deleted-sign":"-","deleted-arrow":"<","inserted-sign":"+","inserted-arrow":">",unchanged:" ",diff:"!"};Object.keys(t).forEach((function(n){var r=t[n],i=[];/^\w+$/.test(n)||i.push(/\w+/.exec(n)[0]),"diff"===n&&i.push("bold"),e.languages.diff[n]={pattern:RegExp("^(?:["+r+"].*(?:\r\n?|\n|(?![\\s\\S])))+","m"),alias:i,inside:{line:{pattern:/(.)(?=[\s\S]).*(?:\r\n?|\n)?/,lookbehind:!0},prefix:{pattern:/[\s\S]/,alias:/\w+/.exec(n)[0]}}}})),Object.defineProperty(e.languages.diff,"PREFIXES",{value:t})}(Prism)},1758:()=>{!function(){if("undefined"!==typeof Prism&&"undefined"!==typeof document&&document.querySelector){var e="line-numbers",t="linkable-line-numbers",n=/\n(?!$)/g,r=function(){var e;return function(){if("undefined"===typeof e){var t=document.createElement("div");t.style.fontSize="13px",t.style.lineHeight="1.5",t.style.padding="0",t.style.border="0",t.innerHTML=" 
     ",document.body.appendChild(t),e=38===t.offsetHeight,document.body.removeChild(t)}return e}}(),i=!0;Prism.plugins.lineHighlight={highlightLines:function(o,c,u){var h=(c="string"===typeof c?c:o.getAttribute("data-line")||"").replace(/\s+/g,"").split(",").filter(Boolean),d=+o.getAttribute("data-line-offset")||0,f=(r()?parseInt:parseFloat)(getComputedStyle(o).lineHeight),p=Prism.util.isActive(o,e),g=o.querySelector("code"),m=p?o:g||o,y=[],b=g.textContent.match(n),v=b?b.length+1:1,x=g&&m!=g?function(e,t){var n=getComputedStyle(e),r=getComputedStyle(t);function i(e){return+e.substr(0,e.length-2)}return t.offsetTop+i(r.borderTopWidth)+i(r.paddingTop)-i(n.paddingTop)}(o,g):0;h.forEach((function(e){var t=e.split("-"),n=+t[0],r=+t[1]||n;if(!((r=Math.min(v+d,r))n&&i.setAttribute("data-end",String(r)),i.style.top=(n-d-1)*f+x+"px",i.textContent=new Array(r-n+2).join(" \n")}));y.push((function(){i.style.width=o.scrollWidth+"px"})),y.push((function(){m.appendChild(i)}))}}));var k=o.id;if(p&&Prism.util.isActive(o,t)&&k){s(o,t)||y.push((function(){o.classList.add(t)}));var w=parseInt(o.getAttribute("data-start")||"1");a(".line-numbers-rows > span",o).forEach((function(e,t){var n=t+w;e.onclick=function(){var e=k+"."+n;i=!1,location.hash=e,setTimeout((function(){i=!0}),1)}}))}return function(){y.forEach(l)}}};var o=0;Prism.hooks.add("before-sanity-check",(function(e){var t=e.element.parentElement;if(c(t)){var n=0;a(".line-highlight",t).forEach((function(e){n+=e.textContent.length,e.parentNode.removeChild(e)})),n&&/^(?: \n)+$/.test(e.code.slice(-n))&&(e.code=e.code.slice(0,-n))}})),Prism.hooks.add("complete",(function t(n){var r=n.element.parentElement;if(c(r)){clearTimeout(o);var i=Prism.plugins.lineNumbers,a=n.plugins&&n.plugins.lineNumbers;if(s(r,e)&&i&&!a)Prism.hooks.add("line-numbers",t);else 
Prism.plugins.lineHighlight.highlightLines(r)(),o=setTimeout(u,1)}})),window.addEventListener("hashchange",u),window.addEventListener("resize",(function(){a("pre").filter(c).map((function(e){return Prism.plugins.lineHighlight.highlightLines(e)})).forEach(l)}))}function a(e,t){return Array.prototype.slice.call((t||document).querySelectorAll(e))}function s(e,t){return e.classList.contains(t)}function l(e){e()}function c(e){return!(!e||!/pre/i.test(e.nodeName))&&(!!e.hasAttribute("data-line")||!(!e.id||!Prism.util.isActive(e,t)))}function u(){var e=location.hash.slice(1);a(".temporary.line-highlight").forEach((function(e){e.parentNode.removeChild(e)}));var t=(e.match(/\.([\d,-]+)$/)||[,""])[1];if(t&&!document.getElementById(e)){var n=e.slice(0,e.lastIndexOf(".")),r=document.getElementById(n);if(r)r.hasAttribute("data-line")||r.setAttribute("data-line",""),Prism.plugins.lineHighlight.highlightLines(r,t,"temporary ")(),i&&document.querySelector(".temporary.line-highlight").scrollIntoView()}}}()},1869:(e,t,n)=>{"use strict";n.d(t,{A:()=>a});var r=n(2041),i=n(7858),o=n(7515);const a=function(e,t){return(0,o.A)((0,i.A)(e,t,r.A),e+"")}},1954:(e,t,n)=>{"use strict";n.d(t,{A:()=>a});var r=n(3101),i=n(5009),o=Object.prototype.hasOwnProperty;const a=function(e,t,n){var a=e[t];o.call(e,t)&&(0,i.A)(a,n)&&(void 0!==n||t in e)||(0,r.A)(e,t,n)}},2041:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=function(e){return e}},2476:(e,t,n)=>{"use strict";n.d(t,{A:()=>d});const r=function(){this.__data__=[],this.size=0};var i=n(5009);const o=function(e,t){for(var n=e.length;n--;)if((0,i.A)(e[n][0],t))return n;return-1};var a=Array.prototype.splice;const s=function(e){var t=this.__data__,n=o(t,e);return!(n<0)&&(n==t.length-1?t.pop():a.call(t,n,1),--this.size,!0)};const l=function(e){var t=this.__data__,n=o(t,e);return n<0?void 0:t[n][1]};const c=function(e){return o(this.__data__,e)>-1};const u=function(e,t){var n=this.__data__,r=o(n,e);return 
r<0?(++this.size,n.push([e,t])):n[r][1]=t,this};function h(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t{"use strict";n.d(t,{W6:()=>Me,GZ:()=>$e,hE:()=>Oe});var r=n(634),i=n(3759),o=n(3638);function a(){return{async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null}}let s={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};function l(e){s=e}const c={exec:()=>null};function u(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n="string"===typeof e?e:e.source;const r={replace:(e,t)=>{let i="string"===typeof t?t:t.source;return i=i.replace(h.caret,"$1"),n=n.replace(e,i),r},getRegex:()=>new RegExp(n,t)};return r}const h={codeRemoveIndent:/^(?: {1,4}| {0,3}\t)/gm,outputLinkReplace:/\\([\[\]])/g,indentCodeCompensation:/^(\s+)(?:```)/,beginningSpace:/^\s+/,endingHash:/#$/,startingSpaceChar:/^ /,endingSpaceChar:/ $/,nonSpaceChar:/[^ ]/,newLineCharGlobal:/\n/g,tabCharGlobal:/\t/g,multipleSpaceGlobal:/\s+/g,blankLine:/^[ \t]*$/,doubleBlankLine:/\n[ \t]*\n[ \t]*$/,blockquoteStart:/^ {0,3}>/,blockquoteSetextReplace:/\n {0,3}((?:=+|-+) *)(?=\n|$)/g,blockquoteSetextReplace2:/^ {0,3}>[ \t]?/gm,listReplaceTabs:/^\t+/,listReplaceNesting:/^ {1,4}(?=( {4})*[^ ])/g,listIsTask:/^\[[ xX]\] /,listReplaceTask:/^\[[ xX]\] +/,anyLine:/\n.*\n/,hrefBrackets:/^<(.*)>$/,tableDelimiter:/[:|]/,tableAlignChars:/^\||\| *$/g,tableRowBlankLine:/\n[ \t]*$/,tableAlignRight:/^ *-+: *$/,tableAlignCenter:/^ *:-+: *$/,tableAlignLeft:/^ *:-+ 
*$/,startATag:/^/i,startPreScriptTag:/^<(pre|code|kbd|script)(\s|>)/i,endPreScriptTag:/^<\/(pre|code|kbd|script)(\s|>)/i,startAngleBracket:/^$/,pedanticHrefTitle:/^([^'"]*[^\s])\s+(['"])(.*)\2/,unicodeAlphaNumeric:/[\p{L}\p{N}]/u,escapeTest:/[&<>"']/,escapeReplace:/[&<>"']/g,escapeTestNoEncode:/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/,escapeReplaceNoEncode:/[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/g,unescapeTest:/&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/gi,caret:/(^|[^\[])\^/g,percentDecode:/%25/g,findPipe:/\|/g,splitPipe:/ \|/,slashPipe:/\\\|/g,carriageReturn:/\r\n|\r/g,spaceLine:/^ +$/gm,notSpaceStart:/^\S*/,endingNewline:/\n$/,listItemRegex:e=>new RegExp(`^( {0,3}${e})((?:[\t ][^\\n]*)?(?:\\n|$))`),nextBulletRegex:e=>new RegExp(`^ {0,${Math.min(3,e-1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`),hrRegex:e=>new RegExp(`^ {0,${Math.min(3,e-1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`),fencesBeginRegex:e=>new RegExp(`^ {0,${Math.min(3,e-1)}}(?:\`\`\`|~~~)`),headingBeginRegex:e=>new RegExp(`^ {0,${Math.min(3,e-1)}}#`),htmlBeginRegex:e=>new RegExp(`^ {0,${Math.min(3,e-1)}}<(?:[a-z].*>|!--)`,"i")},d=/^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,f=/(?:[*+-]|\d{1,9}[.)])/,p=/^(?!bull |blockCode|fences|blockquote|heading|html|table)((?:.|\n(?!\s*?\n|bull |blockCode|fences|blockquote|heading|html|table))+?)\n {0,3}(=+|-+) *(?:\n+|$)/,g=u(p).replace(/bull/g,f).replace(/blockCode/g,/(?: {4}| {0,3}\t)/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).replace(/\|table/g,"").getRegex(),m=u(p).replace(/bull/g,f).replace(/blockCode/g,/(?: {4}| {0,3}\t)/).replace(/fences/g,/ {0,3}(?:`{3,}|~{3,})/).replace(/blockquote/g,/ {0,3}>/).replace(/heading/g,/ {0,3}#{1,6}/).replace(/html/g,/ {0,3}<[^\n>]+>\n/).replace(/table/g,/ {0,3}\|?(?:[:\- ]*\|)+[\:\- 
]*\n/).getRegex(),y=/^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,b=/(?!\s*\])(?:\\.|[^\[\]\\])+/,v=u(/^ {0,3}\[(label)\]: *(?:\n[ \t]*)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n[ \t]*)?| *\n[ \t]*)(title))? *(?:\n+|$)/).replace("label",b).replace("title",/(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/).getRegex(),x=u(/^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/).replace(/bull/g,f).getRegex(),k="address|article|aside|base|basefont|blockquote|body|caption|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option|p|param|search|section|summary|table|tbody|td|tfoot|th|thead|title|tr|track|ul",w=/|$))/,S=u("^ {0,3}(?:<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:[^\\n]*\\n+|$)|comment[^\\n]*(\\n+|$)|<\\?[\\s\\S]*?(?:\\?>\\n*|$)|\\n*|$)|\\n*|$)|)[\\s\\S]*?(?:(?:\\n[ \t]*)+\\n|$)|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? 
*/?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n[ \t]*)+\\n|$)|(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n[ \t]*)+\\n|$))","i").replace("comment",w).replace("tag",k).replace("attribute",/ +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/).getRegex(),C=u(y).replace("hr",d).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("|table","").replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",k).getRegex(),_={blockquote:u(/^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/).replace("paragraph",C).getRegex(),code:/^((?: {4}| {0,3}\t)[^\n]+(?:\n(?:[ \t]*(?:\n|$))*)?)+/,def:v,fences:/^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,heading:/^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,hr:d,html:S,lheading:g,list:x,newline:/^(?:[ \t]*(?:\n|$))+/,paragraph:C,table:c,text:/^[^\n]+/},A=u("^ *([^\\n ].*)\\n {0,3}((?:\\| *)?:?-+:? *(?:\\| *:?-+:? *)*(?:\\| *)?)(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)").replace("hr",d).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("blockquote"," {0,3}>").replace("code","(?: {4}| {0,3}\t)[^\\n]").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",k).getRegex(),T={..._,lheading:m,table:A,paragraph:u(y).replace("hr",d).replace("heading"," {0,3}#{1,6}(?:\\s|$)").replace("|lheading","").replace("table",A).replace("blockquote"," {0,3}>").replace("fences"," {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n").replace("list"," {0,3}(?:[*+-]|1[.)]) ").replace("html",")|<(?:script|pre|style|textarea|!--)").replace("tag",k).getRegex()},E={..._,html:u("^ *(?:comment *(?:\\n|\\s*$)|<(tag)[\\s\\S]+? 
*(?:\\n{2,}|\\s*$)|\\s]*)*?/?> *(?:\\n{2,}|\\s*$))").replace("comment",w).replace(/tag/g,"(?!(?:a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)\\b)\\w+(?!:|[^\\w\\s@]*@)\\b").getRegex(),def:/^ *\[([^\]]+)\]: *]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,heading:/^(#{1,6})(.*)(?:\n+|$)/,fences:c,lheading:/^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,paragraph:u(y).replace("hr",d).replace("heading"," *#{1,6} *[^\n]").replace("lheading",g).replace("|table","").replace("blockquote"," {0,3}>").replace("|fences","").replace("|list","").replace("|html","").replace("|tag","").getRegex()},F=/^( {2,}|\\)\n(?!\s*$)/,M=/[\p{P}\p{S}]/u,L=/[\s\p{P}\p{S}]/u,P=/[^\s\p{P}\p{S}]/u,O=u(/^((?![*_])punctSpace)/,"u").replace(/punctSpace/g,L).getRegex(),$=/(?!~)[\p{P}\p{S}]/u,B=/^(?:\*+(?:((?!\*)punct)|[^\s*]))|^_+(?:((?!_)punct)|([^\s_]))/,D=u(B,"u").replace(/punct/g,M).getRegex(),z=u(B,"u").replace(/punct/g,$).getRegex(),I="^[^_*]*?__[^_*]*?\\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\\*)punct(\\*+)(?=[\\s]|$)|notPunctSpace(\\*+)(?!\\*)(?=punctSpace|$)|(?!\\*)punctSpace(\\*+)(?=notPunctSpace)|[\\s](\\*+)(?!\\*)(?=punct)|(?!\\*)punct(\\*+)(?!\\*)(?=punct)|notPunctSpace(\\*+)(?=notPunctSpace)",N=u(I,"gu").replace(/notPunctSpace/g,P).replace(/punctSpace/g,L).replace(/punct/g,M).getRegex(),R=u(I,"gu").replace(/notPunctSpace/g,/(?:[^\s\p{P}\p{S}]|~)/u).replace(/punctSpace/g,/(?!~)[\s\p{P}\p{S}]/u).replace(/punct/g,$).getRegex(),j=u("^[^_*]*?\\*\\*[^_*]*?_[^_*]*?(?=\\*\\*)|[^_]+(?=[^_])|(?!_)punct(_+)(?=[\\s]|$)|notPunctSpace(_+)(?!_)(?=punctSpace|$)|(?!_)punctSpace(_+)(?=notPunctSpace)|[\\s](_+)(?!_)(?=punct)|(?!_)punct(_+)(?!_)(?=punct)","gu").replace(/notPunctSpace/g,P).replace(/punctSpace/g,L).replace(/punct/g,M).getRegex(),q=u(/\\(punct)/,"gu").replace(/punct/g,M).getRegex(),H=u(/^<(scheme:[^\s\x00-\x1f<>]*|email)>/).replace("scheme",/[a-zA-Z][a-zA-Z0-9+.-]{1,31}/).replace("email",/[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-z
A-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/).getRegex(),W=u(w).replace("(?:--\x3e|$)","--\x3e").getRegex(),K=u("^comment|^|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>|^<\\?[\\s\\S]*?\\?>|^|^").replace("comment",W).replace("attribute",/\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/).getRegex(),U=/(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/,V=u(/^!?\[(label)\]\(\s*(href)(?:(?:[ \t]*(?:\n[ \t]*)?)(title))?\s*\)/).replace("label",U).replace("href",/<(?:\\.|[^\n<>\\])+>|[^ \t\n\x00-\x1f]*/).replace("title",/"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/).getRegex(),Y=u(/^!?\[(label)\]\[(ref)\]/).replace("label",U).replace("ref",b).getRegex(),G=u(/^!?\[(ref)\](?:\[\])?/).replace("ref",b).getRegex(),X={_backpedal:c,anyPunctuation:q,autolink:H,blockSkip:/\[[^[\]]*?\]\((?:\\.|[^\\\(\)]|\((?:\\.|[^\\\(\)])*\))*\)|`[^`]*?`|<[^<>]*?>/g,br:F,code:/^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,del:c,emStrongLDelim:D,emStrongRDelimAst:N,emStrongRDelimUnd:j,escape:/^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,link:V,nolink:G,punctuation:O,reflink:Y,reflinkSearch:u("reflink|nolink(?!\\()","g").replace("reflink",Y).replace("nolink",G).getRegex(),tag:K,text:/^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\":">",'"':""","'":"'"},re=e=>ne[e];function ie(e,t){if(t){if(h.escapeTest.test(e))return e.replace(h.escapeReplace,re)}else if(h.escapeTestNoEncode.test(e))return e.replace(h.escapeReplaceNoEncode,re);return e}function oe(e){try{e=encodeURI(e).replace(h.percentDecode,"%")}catch{return null}return e}function ae(e,t){const n=e.replace(h.findPipe,((e,t,n)=>{let r=!1,i=t;for(;--i>=0&&"\\"===n[i];)r=!r;return r?"|":" |"})).split(h.splitPipe);let r=0;if(n[0].trim()||n.shift(),n.length>0&&!n.at(-1)?.trim()&&n.pop(),t)if(n.length>t)n.splice(t);else for(;n.length0)return{type:"space",raw:t[0]}}code(e){const t=this.rules.block.code.exec(e);if(t){const 
e=t[0].replace(this.rules.other.codeRemoveIndent,"");return{type:"code",raw:t[0],codeBlockStyle:"indented",text:this.options.pedantic?e:se(e,"\n")}}}fences(e){const t=this.rules.block.fences.exec(e);if(t){const e=t[0],n=function(e,t,n){const r=e.match(n.other.indentCodeCompensation);if(null===r)return t;const i=r[1];return t.split("\n").map((e=>{const t=e.match(n.other.beginningSpace);if(null===t)return e;const[r]=t;return r.length>=i.length?e.slice(i.length):e})).join("\n")}(e,t[3]||"",this.rules);return{type:"code",raw:e,lang:t[2]?t[2].trim().replace(this.rules.inline.anyPunctuation,"$1"):t[2],text:n}}}heading(e){const t=this.rules.block.heading.exec(e);if(t){let e=t[2].trim();if(this.rules.other.endingHash.test(e)){const t=se(e,"#");this.options.pedantic?e=t.trim():t&&!this.rules.other.endingSpaceChar.test(t)||(e=t.trim())}return{type:"heading",raw:t[0],depth:t[1].length,text:e,tokens:this.lexer.inline(e)}}}hr(e){const t=this.rules.block.hr.exec(e);if(t)return{type:"hr",raw:se(t[0],"\n")}}blockquote(e){const t=this.rules.block.blockquote.exec(e);if(t){let e=se(t[0],"\n").split("\n"),n="",r="";const i=[];for(;e.length>0;){let t=!1;const o=[];let a;for(a=0;a1,i={type:"list",raw:"",ordered:r,start:r?+n.slice(0,-1):"",loose:!1,items:[]};n=r?`\\d{1,9}\\${n.slice(-1)}`:`\\${n}`,this.options.pedantic&&(n=r?n:"[*+-]");const o=this.rules.other.listItemRegex(n);let a=!1;for(;e;){let n=!1,r="",s="";if(!(t=o.exec(e)))break;if(this.rules.block.hr.test(e))break;r=t[0],e=e.substring(r.length);let l=t[2].split("\n",1)[0].replace(this.rules.other.listReplaceTabs,(e=>" ".repeat(3*e.length))),c=e.split("\n",1)[0],u=!l.trim(),h=0;if(this.options.pedantic?(h=2,s=l.trimStart()):u?h=t[1].length+1:(h=t[2].search(this.rules.other.nonSpaceChar),h=h>4?1:h,s=l.slice(h),h+=t[1].length),u&&this.rules.other.blankLine.test(c)&&(r+=c+"\n",e=e.substring(c.length+1),n=!0),!n){const 
t=this.rules.other.nextBulletRegex(h),n=this.rules.other.hrRegex(h),i=this.rules.other.fencesBeginRegex(h),o=this.rules.other.headingBeginRegex(h),a=this.rules.other.htmlBeginRegex(h);for(;e;){const d=e.split("\n",1)[0];let f;if(c=d,this.options.pedantic?(c=c.replace(this.rules.other.listReplaceNesting," "),f=c):f=c.replace(this.rules.other.tabCharGlobal," "),i.test(c))break;if(o.test(c))break;if(a.test(c))break;if(t.test(c))break;if(n.test(c))break;if(f.search(this.rules.other.nonSpaceChar)>=h||!c.trim())s+="\n"+f.slice(h);else{if(u)break;if(l.replace(this.rules.other.tabCharGlobal," ").search(this.rules.other.nonSpaceChar)>=4)break;if(i.test(l))break;if(o.test(l))break;if(n.test(l))break;s+="\n"+c}u||c.trim()||(u=!0),r+=d+"\n",e=e.substring(d.length+1),l=f.slice(h)}}i.loose||(a?i.loose=!0:this.rules.other.doubleBlankLine.test(r)&&(a=!0));let d,f=null;this.options.gfm&&(f=this.rules.other.listIsTask.exec(s),f&&(d="[ ] "!==f[0],s=s.replace(this.rules.other.listReplaceTask,""))),i.items.push({type:"list_item",raw:r,task:!!f,checked:d,loose:!1,text:s,tokens:[]}),i.raw+=r}const s=i.items.at(-1);if(!s)return;s.raw=s.raw.trimEnd(),s.text=s.text.trimEnd(),i.raw=i.raw.trimEnd();for(let e=0;e"space"===e.type)),n=t.length>0&&t.some((e=>this.rules.other.anyLine.test(e.raw)));i.loose=n}if(i.loose)for(let e=0;e({text:e,tokens:this.lexer.inline(e),header:!1,align:o.align[t]}))));return o}}lheading(e){const t=this.rules.block.lheading.exec(e);if(t)return{type:"heading",raw:t[0],depth:"="===t[2].charAt(0)?1:2,text:t[1],tokens:this.lexer.inline(t[1])}}paragraph(e){const t=this.rules.block.paragraph.exec(e);if(t){const e="\n"===t[1].charAt(t[1].length-1)?t[1].slice(0,-1):t[1];return{type:"paragraph",raw:t[0],text:e,tokens:this.lexer.inline(e)}}}text(e){const t=this.rules.block.text.exec(e);if(t)return{type:"text",raw:t[0],text:t[0],tokens:this.lexer.inline(t[0])}}escape(e){const t=this.rules.inline.escape.exec(e);if(t)return{type:"escape",raw:t[0],text:t[1]}}tag(e){const 
t=this.rules.inline.tag.exec(e);if(t)return!this.lexer.state.inLink&&this.rules.other.startATag.test(t[0])?this.lexer.state.inLink=!0:this.lexer.state.inLink&&this.rules.other.endATag.test(t[0])&&(this.lexer.state.inLink=!1),!this.lexer.state.inRawBlock&&this.rules.other.startPreScriptTag.test(t[0])?this.lexer.state.inRawBlock=!0:this.lexer.state.inRawBlock&&this.rules.other.endPreScriptTag.test(t[0])&&(this.lexer.state.inRawBlock=!1),{type:"html",raw:t[0],inLink:this.lexer.state.inLink,inRawBlock:this.lexer.state.inRawBlock,block:!1,text:t[0]}}link(e){const t=this.rules.inline.link.exec(e);if(t){const e=t[2].trim();if(!this.options.pedantic&&this.rules.other.startAngleBracket.test(e)){if(!this.rules.other.endAngleBracket.test(e))return;const t=se(e.slice(0,-1),"\\");if((e.length-t.length)%2===0)return}else{const e=function(e,t){if(-1===e.indexOf(t[1]))return-1;let n=0;for(let r=0;r0?-2:-1}(t[2],"()");if(-2===e)return;if(e>-1){const n=(0===t[0].indexOf("!")?5:4)+t[1].length+e;t[2]=t[2].substring(0,e),t[0]=t[0].substring(0,n).trim(),t[3]=""}}let n=t[2],r="";if(this.options.pedantic){const e=this.rules.other.pedanticHrefTitle.exec(n);e&&(n=e[1],r=e[3])}else r=t[3]?t[3].slice(1,-1):"";return n=n.trim(),this.rules.other.startAngleBracket.test(n)&&(n=this.options.pedantic&&!this.rules.other.endAngleBracket.test(e)?n.slice(1):n.slice(1,-1)),le(t,{href:n?n.replace(this.rules.inline.anyPunctuation,"$1"):n,title:r?r.replace(this.rules.inline.anyPunctuation,"$1"):r},t[0],this.lexer,this.rules)}}reflink(e,t){let n;if((n=this.rules.inline.reflink.exec(e))||(n=this.rules.inline.nolink.exec(e))){const e=t[(n[2]||n[1]).replace(this.rules.other.multipleSpaceGlobal," ").toLowerCase()];if(!e){const e=n[0].charAt(0);return{type:"text",raw:e,text:e}}return le(n,e,n[0],this.lexer,this.rules)}}emStrong(e,t){let n=arguments.length>2&&void 
0!==arguments[2]?arguments[2]:"",r=this.rules.inline.emStrongLDelim.exec(e);if(!r)return;if(r[3]&&n.match(this.rules.other.unicodeAlphaNumeric))return;if(!(r[1]||r[2]||"")||!n||this.rules.inline.punctuation.exec(n)){const n=[...r[0]].length-1;let i,o,a=n,s=0;const l="*"===r[0][0]?this.rules.inline.emStrongRDelimAst:this.rules.inline.emStrongRDelimUnd;for(l.lastIndex=0,t=t.slice(-1*e.length+n);null!=(r=l.exec(t));){if(i=r[1]||r[2]||r[3]||r[4]||r[5]||r[6],!i)continue;if(o=[...i].length,r[3]||r[4]){a+=o;continue}if((r[5]||r[6])&&n%3&&!((n+o)%3)){s+=o;continue}if(a-=o,a>0)continue;o=Math.min(o,o+a+s);const t=[...r[0]][0].length,l=e.slice(0,n+r.index+t+o);if(Math.min(n,o)%2){const e=l.slice(1,-1);return{type:"em",raw:l,text:e,tokens:this.lexer.inlineTokens(e)}}const c=l.slice(2,-2);return{type:"strong",raw:l,text:c,tokens:this.lexer.inlineTokens(c)}}}}codespan(e){const t=this.rules.inline.code.exec(e);if(t){let e=t[2].replace(this.rules.other.newLineCharGlobal," ");const n=this.rules.other.nonSpaceChar.test(e),r=this.rules.other.startingSpaceChar.test(e)&&this.rules.other.endingSpaceChar.test(e);return n&&r&&(e=e.substring(1,e.length-1)),{type:"codespan",raw:t[0],text:e}}}br(e){const t=this.rules.inline.br.exec(e);if(t)return{type:"br",raw:t[0]}}del(e){const t=this.rules.inline.del.exec(e);if(t)return{type:"del",raw:t[0],text:t[2],tokens:this.lexer.inlineTokens(t[2])}}autolink(e){const t=this.rules.inline.autolink.exec(e);if(t){let e,n;return"@"===t[2]?(e=t[1],n="mailto:"+e):(e=t[1],n=e),{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}url(e){let t;if(t=this.rules.inline.url.exec(e)){let e,n;if("@"===t[2])e=t[0],n="mailto:"+e;else{let r;do{r=t[0],t[0]=this.rules.inline._backpedal.exec(t[0])?.[0]??""}while(r!==t[0]);e=t[0],n="www."===t[1]?"http://"+t[0]:t[0]}return{type:"link",raw:t[0],text:e,href:n,tokens:[{type:"text",raw:e,text:e}]}}}inlineText(e){const t=this.rules.inline.text.exec(e);if(t){const 
e=this.lexer.state.inRawBlock;return{type:"text",raw:t[0],text:t[0],escaped:e}}}}class ue{tokens;options;state;tokenizer;inlineQueue;constructor(e){this.tokens=[],this.tokens.links=Object.create(null),this.options=e||s,this.options.tokenizer=this.options.tokenizer||new ce,this.tokenizer=this.options.tokenizer,this.tokenizer.options=this.options,this.tokenizer.lexer=this,this.inlineQueue=[],this.state={inLink:!1,inRawBlock:!1,top:!0};const t={other:h,block:ee.normal,inline:te.normal};this.options.pedantic?(t.block=ee.pedantic,t.inline=te.pedantic):this.options.gfm&&(t.block=ee.gfm,this.options.breaks?t.inline=te.breaks:t.inline=te.gfm),this.tokenizer.rules=t}static get rules(){return{block:ee,inline:te}}static lex(e,t){return new ue(t).lex(e)}static lexInline(e,t){return new ue(t).inlineTokens(e)}lex(e){e=e.replace(h.carriageReturn,"\n"),this.blockTokens(e,this.tokens);for(let t=0;t1&&void 0!==arguments[1]?arguments[1]:[],n=arguments.length>2&&void 0!==arguments[2]&&arguments[2];for(this.options.pedantic&&(e=e.replace(h.tabCharGlobal," ").replace(h.spaceLine,""));e;){let r;if(this.options.extensions?.block?.some((n=>!!(r=n.call({lexer:this},e,t))&&(e=e.substring(r.raw.length),t.push(r),!0))))continue;if(r=this.tokenizer.space(e)){e=e.substring(r.raw.length);const n=t.at(-1);1===r.raw.length&&void 0!==n?n.raw+="\n":t.push(r);continue}if(r=this.tokenizer.code(e)){e=e.substring(r.raw.length);const 
n=t.at(-1);"paragraph"===n?.type||"text"===n?.type?(n.raw+="\n"+r.raw,n.text+="\n"+r.text,this.inlineQueue.at(-1).src=n.text):t.push(r);continue}if(r=this.tokenizer.fences(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.heading(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.hr(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.blockquote(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.list(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.html(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.def(e)){e=e.substring(r.raw.length);const n=t.at(-1);"paragraph"===n?.type||"text"===n?.type?(n.raw+="\n"+r.raw,n.text+="\n"+r.raw,this.inlineQueue.at(-1).src=n.text):this.tokens.links[r.tag]||(this.tokens.links[r.tag]={href:r.href,title:r.title});continue}if(r=this.tokenizer.table(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.lheading(e)){e=e.substring(r.raw.length),t.push(r);continue}let i=e;if(this.options.extensions?.startBlock){let t=1/0;const n=e.slice(1);let r;this.options.extensions.startBlock.forEach((e=>{r=e.call({lexer:this},n),"number"===typeof r&&r>=0&&(t=Math.min(t,r))})),t<1/0&&t>=0&&(i=e.substring(0,t+1))}if(this.state.top&&(r=this.tokenizer.paragraph(i))){const o=t.at(-1);n&&"paragraph"===o?.type?(o.raw+="\n"+r.raw,o.text+="\n"+r.text,this.inlineQueue.pop(),this.inlineQueue.at(-1).src=o.text):t.push(r),n=i.length!==e.length,e=e.substring(r.raw.length)}else if(r=this.tokenizer.text(e)){e=e.substring(r.raw.length);const n=t.at(-1);"text"===n?.type?(n.raw+="\n"+r.raw,n.text+="\n"+r.text,this.inlineQueue.pop(),this.inlineQueue.at(-1).src=n.text):t.push(r)}else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return this.state.top=!0,t}inline(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[];return 
this.inlineQueue.push({src:e,tokens:t}),t}inlineTokens(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=e,r=null;if(this.tokens.links){const e=Object.keys(this.tokens.links);if(e.length>0)for(;null!=(r=this.tokenizer.rules.inline.reflinkSearch.exec(n));)e.includes(r[0].slice(r[0].lastIndexOf("[")+1,-1))&&(n=n.slice(0,r.index)+"["+"a".repeat(r[0].length-2)+"]"+n.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex))}for(;null!=(r=this.tokenizer.rules.inline.anyPunctuation.exec(n));)n=n.slice(0,r.index)+"++"+n.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);for(;null!=(r=this.tokenizer.rules.inline.blockSkip.exec(n));)n=n.slice(0,r.index)+"["+"a".repeat(r[0].length-2)+"]"+n.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);let i=!1,o="";for(;e;){let r;if(i||(o=""),i=!1,this.options.extensions?.inline?.some((n=>!!(r=n.call({lexer:this},e,t))&&(e=e.substring(r.raw.length),t.push(r),!0))))continue;if(r=this.tokenizer.escape(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.tag(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.link(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.reflink(e,this.tokens.links)){e=e.substring(r.raw.length);const n=t.at(-1);"text"===r.type&&"text"===n?.type?(n.raw+=r.raw,n.text+=r.text):t.push(r);continue}if(r=this.tokenizer.emStrong(e,n,o)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.codespan(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.br(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.del(e)){e=e.substring(r.raw.length),t.push(r);continue}if(r=this.tokenizer.autolink(e)){e=e.substring(r.raw.length),t.push(r);continue}if(!this.state.inLink&&(r=this.tokenizer.url(e))){e=e.substring(r.raw.length),t.push(r);continue}let a=e;if(this.options.extensions?.startInline){let t=1/0;const n=e.slice(1);let 
r;this.options.extensions.startInline.forEach((e=>{r=e.call({lexer:this},n),"number"===typeof r&&r>=0&&(t=Math.min(t,r))})),t<1/0&&t>=0&&(a=e.substring(0,t+1))}if(r=this.tokenizer.inlineText(a)){e=e.substring(r.raw.length),"_"!==r.raw.slice(-1)&&(o=r.raw.slice(-1)),i=!0;const n=t.at(-1);"text"===n?.type?(n.raw+=r.raw,n.text+=r.text):t.push(r)}else if(e){const t="Infinite loop on byte: "+e.charCodeAt(0);if(this.options.silent){console.error(t);break}throw new Error(t)}}return t}}class he{options;parser;constructor(e){this.options=e||s}space(e){return""}code(e){let{text:t,lang:n,escaped:r}=e;const i=(n||"").match(h.notSpaceStart)?.[0],o=t.replace(h.endingNewline,"")+"\n";return i?'
    '+(r?o:ie(o,!0))+"
    \n":"
    "+(r?o:ie(o,!0))+"
    \n"}blockquote(e){let{tokens:t}=e;return`
    \n${this.parser.parse(t)}
    \n`}html(e){let{text:t}=e;return t}heading(e){let{tokens:t,depth:n}=e;return`${this.parser.parseInline(t)}\n`}hr(e){return"
    \n"}list(e){const t=e.ordered,n=e.start;let r="";for(let o=0;o\n"+r+"\n"}listitem(e){let t="";if(e.task){const n=this.checkbox({checked:!!e.checked});e.loose?"paragraph"===e.tokens[0]?.type?(e.tokens[0].text=n+" "+e.tokens[0].text,e.tokens[0].tokens&&e.tokens[0].tokens.length>0&&"text"===e.tokens[0].tokens[0].type&&(e.tokens[0].tokens[0].text=n+" "+ie(e.tokens[0].tokens[0].text),e.tokens[0].tokens[0].escaped=!0)):e.tokens.unshift({type:"text",raw:n+" ",text:n+" ",escaped:!0}):t+=n+" "}return t+=this.parser.parse(e.tokens,!!e.loose),`
  • ${t}
  • \n`}checkbox(e){let{checked:t}=e;return"'}paragraph(e){let{tokens:t}=e;return`

    ${this.parser.parseInline(t)}

    \n`}table(e){let t="",n="";for(let i=0;i${r}`),"\n\n"+t+"\n"+r+"
    \n"}tablerow(e){let{text:t}=e;return`\n${t}\n`}tablecell(e){const t=this.parser.parseInline(e.tokens),n=e.header?"th":"td";return(e.align?`<${n} align="${e.align}">`:`<${n}>`)+t+`\n`}strong(e){let{tokens:t}=e;return`${this.parser.parseInline(t)}`}em(e){let{tokens:t}=e;return`${this.parser.parseInline(t)}`}codespan(e){let{text:t}=e;return`${ie(t,!0)}`}br(e){return"
    "}del(e){let{tokens:t}=e;return`${this.parser.parseInline(t)}`}link(e){let{href:t,title:n,tokens:r}=e;const i=this.parser.parseInline(r),o=oe(t);if(null===o)return i;t=o;let a='
    ",a}image(e){let{href:t,title:n,text:r,tokens:i}=e;i&&(r=this.parser.parseInline(i,this.parser.textRenderer));const o=oe(t);if(null===o)return ie(r);t=o;let a=`${r}1&&void 0!==arguments[1])||arguments[1],n="";for(let r=0;r1&&void 0!==arguments[1]?arguments[1]:this.renderer,n="";for(let r=0;rnew Set(["preprocess","postprocess","processAllTokens"]))();preprocess(e){return e}postprocess(e){return e}processAllTokens(e){return e}provideLexer(){return this.block?ue.lex:ue.lexInline}provideParser(){return this.block?fe.parse:fe.parseInline}}const ge=new class{defaults={async:!1,breaks:!1,extensions:null,gfm:!0,hooks:null,pedantic:!1,renderer:null,silent:!1,tokenizer:null,walkTokens:null};options=this.setOptions;parse=this.parseMarkdown(!0);parseInline=this.parseMarkdown(!1);Parser=(()=>fe)();Renderer=(()=>he)();TextRenderer=(()=>de)();Lexer=(()=>ue)();Tokenizer=(()=>ce)();Hooks=(()=>pe)();constructor(){this.use(...arguments)}walkTokens(e,t){let n=[];for(const r of e)switch(n=n.concat(t.call(this,r)),r.type){case"table":{const e=r;for(const r of e.header)n=n.concat(this.walkTokens(r.tokens,t));for(const r of e.rows)for(const e of r)n=n.concat(this.walkTokens(e.tokens,t));break}case"list":{const e=r;n=n.concat(this.walkTokens(e.items,t));break}default:{const e=r;this.defaults.extensions?.childTokens?.[e.type]?this.defaults.extensions.childTokens[e.type].forEach((r=>{const i=e[r].flat(1/0);n=n.concat(this.walkTokens(i,t))})):e.tokens&&(n=n.concat(this.walkTokens(e.tokens,t)))}}return n}use(){const e=this.defaults.extensions||{renderers:{},childTokens:{}};for(var t=arguments.length,n=new Array(t),r=0;r{const n={...t};if(n.async=this.defaults.async||n.async||!1,t.extensions&&(t.extensions.forEach((t=>{if(!t.name)throw new Error("extension name required");if("renderer"in t){const n=e.renderers[t.name];e.renderers[t.name]=n?function(){for(var e=arguments.length,r=new Array(e),i=0;i{if(this.defaults.async)return Promise.resolve(i.call(e,t)).then((t=>o.call(e,t)));const 
n=i.call(e,t);return o.call(e,n)}:e[r]=function(){for(var t=arguments.length,n=new Array(t),r=0;r{const r={...n},i={...this.defaults,...r},o=this.onError(!!i.silent,!!i.async);if(!0===this.defaults.async&&!1===r.async)return o(new Error("marked(): The async option was set to true by an extension. Remove async: false from the parse options object to return a Promise."));if("undefined"===typeof t||null===t)return o(new Error("marked(): input parameter is undefined or null"));if("string"!==typeof t)return o(new Error("marked(): input parameter is of type "+Object.prototype.toString.call(t)+", string expected"));i.hooks&&(i.hooks.options=i,i.hooks.block=e);const a=i.hooks?i.hooks.provideLexer():e?ue.lex:ue.lexInline,s=i.hooks?i.hooks.provideParser():e?fe.parse:fe.parseInline;if(i.async)return Promise.resolve(i.hooks?i.hooks.preprocess(t):t).then((e=>a(e,i))).then((e=>i.hooks?i.hooks.processAllTokens(e):e)).then((e=>i.walkTokens?Promise.all(this.walkTokens(e,i.walkTokens)).then((()=>e)):e)).then((e=>s(e,i))).then((e=>i.hooks?i.hooks.postprocess(e):e)).catch(o);try{i.hooks&&(t=i.hooks.preprocess(t));let e=a(t,i);i.hooks&&(e=i.hooks.processAllTokens(e)),i.walkTokens&&this.walkTokens(e,i.walkTokens);let n=s(e,i);return i.hooks&&(n=i.hooks.postprocess(n)),n}catch(l){return o(l)}}}onError(e,t){return n=>{if(n.message+="\nPlease report this to https://github.com/markedjs/marked.",e){const e="

    An error occurred:

    "+ie(n.message+"",!0)+"
    ";return t?Promise.resolve(e):e}if(t)return Promise.reject(n);throw n}}};function me(e,t){return ge.parse(e,t)}me.options=me.setOptions=function(e){return ge.setOptions(e),me.defaults=ge.defaults,l(me.defaults),me},me.getDefaults=a,me.defaults=s,me.use=function(){return ge.use(...arguments),me.defaults=ge.defaults,l(me.defaults),me},me.walkTokens=function(e,t){return ge.walkTokens(e,t)},me.parseInline=ge.parseInline,me.Parser=fe,me.parser=fe.parse,me.Renderer=he,me.TextRenderer=de,me.Lexer=ue,me.lexer=ue.lex,me.Tokenizer=ce,me.Hooks=pe,me.parse=me;me.options,me.setOptions,me.use,me.walkTokens,me.parseInline,fe.parse,ue.lex;var ye=n(7330);function be(e,t){let{markdownAutoWrap:n}=t;const r=e.replace(//g,"\n").replace(/\n{2,}/g,"\n"),i=(0,ye.T)(r);return!1===n?i.replace(/ /g," "):i}function ve(e){const t=be(e,arguments.length>1&&void 0!==arguments[1]?arguments[1]:{}),n=me.lexer(t),r=[[]];let o=0;function a(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"normal";if("text"===e.type){e.text.split("\n").forEach(((e,n)=>{0!==n&&(o++,r.push([])),e.split(" ").forEach((e=>{(e=e.replace(/'/g,"'"))&&r[o].push({content:e,type:t})}))}))}else"strong"===e.type||"em"===e.type?e.tokens.forEach((t=>{a(t,e.type)})):"html"===e.type&&r[o].push({content:e.text,type:"normal"})}return(0,i.K2)(a,"processNode"),n.forEach((e=>{"paragraph"===e.type?e.tokens?.forEach((e=>{a(e)})):"html"===e.type&&r[o].push({content:e.text,type:"normal"})})),r}function xe(e){let{markdownAutoWrap:t}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const n=me.lexer(e);function r(e){return"text"===e.type?!1===t?e.text.replace(/\n */g,"
    ").replace(/ /g," "):e.text.replace(/\n */g,"
    "):"strong"===e.type?`${e.tokens?.map(r).join("")}`:"em"===e.type?`${e.tokens?.map(r).join("")}`:"paragraph"===e.type?`

    ${e.tokens?.map(r).join("")}

    `:"space"===e.type?"":"html"===e.type?`${e.text}`:"escape"===e.type?e.text:`Unsupported markdown: ${e.type}`}return(0,i.K2)(r,"output"),n.map(r).join("")}function ke(e){return Intl.Segmenter?[...(new Intl.Segmenter).segment(e)].map((e=>e.segment)):[...e]}function we(e,t){return Se(e,[],ke(t.content),t.type)}function Se(e,t,n,r){if(0===n.length)return[{content:t.join(""),type:r},{content:"",type:r}];const[i,...o]=n,a=[...t,i];return e([{content:a.join(""),type:r}])?Se(e,a,o,r):(0===t.length&&i&&(t.push(i),n.shift()),[{content:t.join(""),type:r},{content:n.join(""),type:r}])}function Ce(e,t){if(e.some((e=>{let{content:t}=e;return t.includes("\n")})))throw new Error("splitLineToFitWidth does not support newlines in the line");return _e(e,t)}function _e(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:[];if(0===e.length)return r.length>0&&n.push(r),n.length>0?n:[];let i="";" "===e[0].content&&(i=" ",e.shift());const o=e.shift()??{content:" ",type:"normal"},a=[...r];if(""!==i&&a.push({content:i,type:"normal"}),a.push(o),t(a))return _e(e,t,n,a);if(r.length>0)n.push(r),e.unshift(o);else if(o.content){const[r,i]=we(t,o);n.push([r]),i.content&&e.unshift(i)}return _e(e,t,n)}function Ae(e,t){t&&e.attr("style",t)}async function Te(e,t,n,r){let o=arguments.length>4&&void 0!==arguments[4]&&arguments[4];const a=e.append("foreignObject");a.attr("width",10*n+"px"),a.attr("height",10*n+"px");const s=a.append("xhtml:div");let l=t.label;t.label&&(0,i.Wi)(t.label)&&(l=await(0,i.VJ)(t.label.replace(i.Y2.lineBreakRegex,"\n"),(0,i.D7)()));const c=t.isNode?"nodeLabel":"edgeLabel",u=s.append("span");u.html(l),Ae(u,t.labelStyle),u.attr("class",`${c} ${r}`),Ae(s,t.labelStyle),s.style("display","table-cell"),s.style("white-space","nowrap"),s.style("line-height","1.5"),s.style("max-width",n+"px"),s.style("text-align","center"),s.attr("xmlns","http://www.w3.org/1999/xhtml"),o&&s.attr("class","labelBkg");let 
h=s.node().getBoundingClientRect();return h.width===n&&(s.style("display","table"),s.style("white-space","break-spaces"),s.style("width",n+"px"),h=s.node().getBoundingClientRect()),a.node()}function Ee(e,t,n){return e.append("tspan").attr("class","text-outer-tspan").attr("x",0).attr("y",t*n-.1+"em").attr("dy",n+"em")}function Fe(e,t,n){const r=e.append("text"),i=Ee(r,1,t);Pe(i,n);const o=i.node().getComputedTextLength();return r.remove(),o}function Me(e,t,n){const r=e.append("text"),i=Ee(r,1,t);Pe(i,[{content:n,type:"normal"}]);const o=i.node()?.getBoundingClientRect();return o&&r.remove(),o}function Le(e,t,n){let r=arguments.length>3&&void 0!==arguments[3]&&arguments[3];const o=t.append("g"),a=o.insert("rect").attr("class","background").attr("style","stroke: none"),s=o.append("text").attr("y","-10.1");let l=0;for(const c of n){const t=(0,i.K2)((t=>Fe(o,1.1,t)<=e),"checkWidth"),n=t(c)?[c]:Ce(c,t);for(const e of n){Pe(Ee(s,l,1.1),e),l++}}if(r){const e=s.node().getBBox(),t=2;return a.attr("x",e.x-t).attr("y",e.y-t).attr("width",e.width+2*t).attr("height",e.height+2*t),o.node()}return s.node()}function Pe(e,t){e.text(""),t.forEach(((t,n)=>{const r=e.append("tspan").attr("font-style","em"===t.type?"italic":"normal").attr("class","text-inner-tspan").attr("font-weight","strong"===t.type?"bold":"normal");0===n?r.text(t.content):r.text(" "+t.content)}))}function Oe(e){return e.replace(/fa[bklrs]?:fa-[\w-]+/g,(e=>``))}(0,i.K2)(be,"preprocessMarkdown"),(0,i.K2)(ve,"markdownToLines"),(0,i.K2)(xe,"markdownToHTML"),(0,i.K2)(ke,"splitTextToChars"),(0,i.K2)(we,"splitWordToFitWidth"),(0,i.K2)(Se,"splitWordToFitWidthRecursion"),(0,i.K2)(Ce,"splitLineToFitWidth"),(0,i.K2)(_e,"splitLineToFitWidthRecursion"),(0,i.K2)(Ae,"applyStyle"),(0,i.K2)(Te,"addHtmlSpan"),(0,i.K2)(Ee,"createTspan"),(0,i.K2)(Fe,"computeWidthOfText"),(0,i.K2)(Me,"computeDimensionOfText"),(0,i.K2)(Le,"createFormattedText"),(0,i.K2)(Pe,"updateTextContentAndStyles"),(0,i.K2)(Oe,"replaceIconSubstring");var 
$e=(0,i.K2)((async function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",{style:n="",isTitle:a=!1,classes:s="",useHtmlLabels:l=!0,isNode:c=!0,width:u=200,addSvgBackground:h=!1}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},d=arguments.length>3?arguments[3]:void 0;if(i.Rm.debug("XYZ createText",t,n,a,s,l,c,"addSvgBackground: ",h),l){const o=xe(t,d),a=Oe((0,r.Sm)(o)),l=t.replace(/\\\\/g,"\\"),f={isNode:c,label:(0,i.Wi)(t)?l:a,labelStyle:n.replace("fill:","color:")};return await Te(e,f,u,s,h)}{const r=Le(u,e,ve(t.replace(//g,"
    ").replace("
    ","
    "),d),!!t&&h);if(c){/stroke:/.exec(n)&&(n=n.replace("stroke:","lineColor:"));const e=n.replace(/stroke:[^;]+;?/g,"").replace(/stroke-width:[^;]+;?/g,"").replace(/fill:[^;]+;?/g,"").replace(/color:/g,"fill:");(0,o.Ltv)(r).attr("style",e)}else{const e=n.replace(/stroke:[^;]+;?/g,"").replace(/stroke-width:[^;]+;?/g,"").replace(/fill:[^;]+;?/g,"").replace(/background:/g,"fill:");(0,o.Ltv)(r).select("rect").attr("style",e.replace(/background:/g,"fill:"));const t=n.replace(/stroke:[^;]+;?/g,"").replace(/stroke-width:[^;]+;?/g,"").replace(/fill:[^;]+;?/g,"").replace(/color:/g,"fill:");(0,o.Ltv)(r).select("text").attr("style",t)}return r}}),"createText")},2598:(e,t,n)=>{"use strict";n.d(t,{R:()=>s});var r=n(3759),i={aggregation:18,extension:18,composition:18,dependency:6,lollipop:13.5,arrow_point:4};function o(e,t){if(void 0===e||void 0===t)return{angle:0,deltaX:0,deltaY:0};e=a(e),t=a(t);const[n,r]=[e.x,e.y],[i,o]=[t.x,t.y],s=i-n,l=o-r;return{angle:Math.atan(l/s),deltaX:s,deltaY:l}}(0,r.K2)(o,"calculateDeltaAndAngle");var a=(0,r.K2)((e=>Array.isArray(e)?{x:e[0],y:e[1]}:e),"pointTransformer"),s=(0,r.K2)((e=>({x:(0,r.K2)((function(t,n,r){let s=0;const l=a(r[0]).x=0?1:-1)}else if(n===r.length-1&&Object.hasOwn(i,e.arrowTypeEnd)){const{angle:t,deltaX:n}=o(r[r.length-1],r[r.length-2]);s=i[e.arrowTypeEnd]*Math.cos(t)*(n>=0?1:-1)}const c=Math.abs(a(t).x-a(r[r.length-1]).x),u=Math.abs(a(t).y-a(r[r.length-1]).y),h=Math.abs(a(t).x-a(r[0]).x),d=Math.abs(a(t).y-a(r[0]).y),f=i[e.arrowTypeStart],p=i[e.arrowTypeEnd];if(c0&&u0&&d=0?1:-1)}else if(n===r.length-1&&Object.hasOwn(i,e.arrowTypeEnd)){const{angle:t,deltaY:n}=o(r[r.length-1],r[r.length-2]);s=i[e.arrowTypeEnd]*Math.abs(Math.sin(t))*(n>=0?1:-1)}const c=Math.abs(a(t).y-a(r[r.length-1]).y),u=Math.abs(a(t).x-a(r[r.length-1]).x),h=Math.abs(a(t).y-a(r[0]).y),d=Math.abs(a(t).x-a(r[0]).x),f=i[e.arrowTypeStart],p=i[e.arrowTypeEnd];if(c0&&u0&&d{"use strict";var 
n=Symbol.for("react.transitional.element"),r=Symbol.for("react.fragment");function i(e,t,r){var i=null;if(void 0!==r&&(i=""+r),void 0!==t.key&&(i=""+t.key),"key"in t)for(var o in r={},t)"key"!==o&&(r[o]=t[o]);else r=t;return t=r.ref,{$$typeof:n,type:e,key:i,ref:void 0!==t?t:null,props:r}}t.Fragment=r,t.jsx=i,t.jsxs=i},2854:()=>{!function(){if("undefined"!==typeof Prism&&"undefined"!==typeof document){var e=[],t={},n=function(){};Prism.plugins.toolbar={};var r=Prism.plugins.toolbar.registerButton=function(n,r){var i;i="function"===typeof r?r:function(e){var t;return"function"===typeof r.onClick?((t=document.createElement("button")).type="button",t.addEventListener("click",(function(){r.onClick.call(this,e)}))):"string"===typeof r.url?(t=document.createElement("a")).href=r.url:t=document.createElement("span"),r.className&&t.classList.add(r.className),t.textContent=r.text,t},n in t?console.warn('There is a button with the key "'+n+'" registered already.'):e.push(t[n]=i)},i=Prism.plugins.toolbar.hook=function(r){var i=r.element.parentNode;if(i&&/pre/i.test(i.nodeName)&&!i.parentNode.classList.contains("code-toolbar")){var o=document.createElement("div");o.classList.add("code-toolbar"),i.parentNode.insertBefore(o,i),o.appendChild(i);var a=document.createElement("div");a.classList.add("toolbar");var s=e,l=function(e){for(;e;){var t=e.getAttribute("data-toolbar-order");if(null!=t)return(t=t.trim()).length?t.split(/\s*,\s*/g):[];e=e.parentElement}}(r.element);l&&(s=l.map((function(e){return t[e]||n}))),s.forEach((function(e){var t=e(r);if(t){var n=document.createElement("div");n.classList.add("toolbar-item"),n.appendChild(t),a.appendChild(n)}})),o.appendChild(a)}};r("label",(function(e){var t=e.element.parentNode;if(t&&/pre/i.test(t.nodeName)&&t.hasAttribute("data-label")){var n,r,i=t.getAttribute("data-label");try{r=document.querySelector("template#"+i)}catch(o){}return 
r?n=r.content:(t.hasAttribute("data-url")?(n=document.createElement("a")).href=t.getAttribute("data-url"):n=document.createElement("span"),n.textContent=i),n}})),Prism.hooks.add("complete",i)}}()},2863:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(7840);function i(e,t){if("function"!=typeof e||null!=t&&"function"!=typeof t)throw new TypeError("Expected a function");var n=function(){var r=arguments,i=t?t.apply(this,r):r[0],o=n.cache;if(o.has(i))return o.get(i);var a=e.apply(this,r);return n.cache=o.set(i,a)||o,a};return n.cache=new(i.Cache||r.A),n}i.Cache=r.A;const o=i},2999:(e,t,n)=>{var r=function(e){var t=/(?:^|\s)lang(?:uage)?-([\w-]+)(?=\s|$)/i,n=0,r={},i={manual:e.Prism&&e.Prism.manual,disableWorkerMessageHandler:e.Prism&&e.Prism.disableWorkerMessageHandler,util:{encode:function e(t){return t instanceof o?new o(t.type,e(t.content),t.alias):Array.isArray(t)?t.map(e):t.replace(/&/g,"&").replace(/=h.reach);S+=w.value.length,w=w.next){var C=w.value;if(t.length>e.length)return;if(!(C instanceof o)){var _,A=1;if(b){if(!(_=a(k,S,e,y))||_.index>=e.length)break;var T=_.index,E=_.index+_[0].length,F=S;for(F+=w.value.length;T>=F;)F+=(w=w.next).value.length;if(S=F-=w.value.length,w.value instanceof o)continue;for(var M=w;M!==t.tail&&(Fh.reach&&(h.reach=$);var B=w.prev;if(P&&(B=c(t,B,P),S+=P.length),u(t,B,A),w=c(t,B,new o(d,m?i.tokenize(L,m):L,v,L)),O&&c(t,w,O),A>1){var D={cause:d+","+p,reach:$};s(e,t,n,w.prev,S,D),h&&D.reach>h.reach&&(h.reach=D.reach)}}}}}}function l(){var e={value:null,prev:null,next:null},t={value:null,prev:e,next:null};e.next=t,this.head=e,this.tail=t,this.length=0}function c(e,t,n){var r=t.next,i={value:n,prev:t,next:r};return t.next=i,r.prev=i,e.length++,i}function u(e,t,n){for(var r=t.next,i=0;i"+o.content+""},!e.document)return e.addEventListener?(i.disableWorkerMessageHandler||e.addEventListener("message",(function(t){var 
n=JSON.parse(t.data),r=n.language,o=n.code,a=n.immediateClose;e.postMessage(i.highlight(o,i.languages[r],r)),a&&e.close()}),!1),i):i;var h=i.util.currentScript();function d(){i.manual||i.highlightAll()}if(h&&(i.filename=h.src,h.hasAttribute("data-manual")&&(i.manual=!0)),!i.manual){var f=document.readyState;"loading"===f||"interactive"===f&&h&&h.defer?document.addEventListener("DOMContentLoaded",d):window.requestAnimationFrame?window.requestAnimationFrame(d):window.setTimeout(d,16)}return i}("undefined"!==typeof window?window:"undefined"!==typeof WorkerGlobalScope&&self instanceof WorkerGlobalScope?self:{});e.exports&&(e.exports=r),"undefined"!==typeof n.g&&(n.g.Prism=r),r.languages.markup={comment:{pattern://,greedy:!0},prolog:{pattern:/<\?[\s\S]+?\?>/,greedy:!0},doctype:{pattern:/"'[\]]|"[^"]*"|'[^']*')+(?:\[(?:[^<"'\]]|"[^"]*"|'[^']*'|<(?!!--)|)*\]\s*)?>/i,greedy:!0,inside:{"internal-subset":{pattern:/(^[^\[]*\[)[\s\S]+(?=\]>$)/,lookbehind:!0,greedy:!0,inside:null},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},punctuation:/^$|[[\]]/,"doctype-tag":/^DOCTYPE/i,name:/[^\s<>'"]+/}},cdata:{pattern://i,greedy:!0},tag:{pattern:/<\/?(?!\d)[^\s>\/=$<%]+(?:\s(?:\s*[^\s>\/=]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))|(?=[\s/>])))+)?\s*\/?>/,greedy:!0,inside:{tag:{pattern:/^<\/?[^\s>\/]+/,inside:{punctuation:/^<\/?/,namespace:/^[^\s>\/:]+:/}},"special-attr":[],"attr-value":{pattern:/=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+)/,inside:{punctuation:[{pattern:/^=/,alias:"attr-equals"},{pattern:/^(\s*)["']|["']$/,lookbehind:!0}]}},punctuation:/\/?>/,"attr-name":{pattern:/[^\s>\/]+/,inside:{namespace:/^[^\s>\/:]+:/}}}},entity:[{pattern:/&[\da-z]{1,8};/i,alias:"named-entity"},/&#x?[\da-f]{1,8};/i]},r.languages.markup.tag.inside["attr-value"].inside.entity=r.languages.markup.entity,r.languages.markup.doctype.inside["internal-subset"].inside=r.languages.markup,r.hooks.add("wrap",(function(e){"entity"===e.type&&(e.attributes.title=e.content.replace(/&/,"&"))})),Object.defineProperty(r.l
anguages.markup.tag,"addInlined",{value:function(e,t){var n={};n["language-"+t]={pattern:/(^$)/i,lookbehind:!0,inside:r.languages[t]},n.cdata=/^$/i;var i={"included-cdata":{pattern://i,inside:n}};i["language-"+t]={pattern:/[\s\S]+/,inside:r.languages[t]};var o={};o[e]={pattern:RegExp(/(<__[^>]*>)(?:))*\]\]>|(?!)/.source.replace(/__/g,(function(){return e})),"i"),lookbehind:!0,greedy:!0,inside:i},r.languages.insertBefore("markup","cdata",o)}}),Object.defineProperty(r.languages.markup.tag,"addAttribute",{value:function(e,t){r.languages.markup.tag.inside["special-attr"].push({pattern:RegExp(/(^|["'\s])/.source+"(?:"+e+")"+/\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))/.source,"i"),lookbehind:!0,inside:{"attr-name":/^[^\s=]+/,"attr-value":{pattern:/=[\s\S]+/,inside:{value:{pattern:/(^=\s*(["']|(?!["'])))\S[\s\S]*(?=\2$)/,lookbehind:!0,alias:[t,"language-"+t],inside:r.languages[t]},punctuation:[{pattern:/^=/,alias:"attr-equals"},/"|'/]}}}})}}),r.languages.html=r.languages.markup,r.languages.mathml=r.languages.markup,r.languages.svg=r.languages.markup,r.languages.xml=r.languages.extend("markup",{}),r.languages.ssml=r.languages.xml,r.languages.atom=r.languages.xml,r.languages.rss=r.languages.xml,function(e){var 
t=/(?:"(?:\\(?:\r\n|[\s\S])|[^"\\\r\n])*"|'(?:\\(?:\r\n|[\s\S])|[^'\\\r\n])*')/;e.languages.css={comment:/\/\*[\s\S]*?\*\//,atrule:{pattern:RegExp("@[\\w-](?:"+/[^;{\s"']|\s+(?!\s)/.source+"|"+t.source+")*?"+/(?:;|(?=\s*\{))/.source),inside:{rule:/^@[\w-]+/,"selector-function-argument":{pattern:/(\bselector\s*\(\s*(?![\s)]))(?:[^()\s]|\s+(?![\s)])|\((?:[^()]|\([^()]*\))*\))+(?=\s*\))/,lookbehind:!0,alias:"selector"},keyword:{pattern:/(^|[^\w-])(?:and|not|only|or)(?![\w-])/,lookbehind:!0}}},url:{pattern:RegExp("\\burl\\((?:"+t.source+"|"+/(?:[^\\\r\n()"']|\\[\s\S])*/.source+")\\)","i"),greedy:!0,inside:{function:/^url/i,punctuation:/^\(|\)$/,string:{pattern:RegExp("^"+t.source+"$"),alias:"url"}}},selector:{pattern:RegExp("(^|[{}\\s])[^{}\\s](?:[^{};\"'\\s]|\\s+(?![\\s{])|"+t.source+")*(?=\\s*\\{)"),lookbehind:!0},string:{pattern:t,greedy:!0},property:{pattern:/(^|[^-\w\xA0-\uFFFF])(?!\s)[-_a-z\xA0-\uFFFF](?:(?!\s)[-\w\xA0-\uFFFF])*(?=\s*:)/i,lookbehind:!0},important:/!important\b/i,function:{pattern:/(^|[^-a-z0-9])[-a-z0-9]+(?=\()/i,lookbehind:!0},punctuation:/[(){};:,]/},e.languages.css.atrule.inside.rest=e.languages.css;var 
n=e.languages.markup;n&&(n.tag.addInlined("style","css"),n.tag.addAttribute("style","css"))}(r),r.languages.clike={comment:[{pattern:/(^|[^\\])\/\*[\s\S]*?(?:\*\/|$)/,lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/(["'])(?:\\(?:\r\n|[\s\S])|(?!\1)[^\\\r\n])*\1/,greedy:!0},"class-name":{pattern:/(\b(?:class|extends|implements|instanceof|interface|new|trait)\s+|\bcatch\s+\()[\w.\\]+/i,lookbehind:!0,inside:{punctuation:/[.\\]/}},keyword:/\b(?:break|catch|continue|do|else|finally|for|function|if|in|instanceof|new|null|return|throw|try|while)\b/,boolean:/\b(?:false|true)\b/,function:/\b\w+(?=\()/,number:/\b0x[\da-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:e[+-]?\d+)?/i,operator:/[<>]=?|[!=]=?=?|--?|\+\+?|&&?|\|\|?|[?*/~^%]/,punctuation:/[{}[\];(),.:]/},r.languages.javascript=r.languages.extend("clike",{"class-name":[r.languages.clike["class-name"],{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$A-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\.(?:constructor|prototype))/,lookbehind:!0}],keyword:[{pattern:/((?:^|\})\s*)catch\b/,lookbehind:!0},{pattern:/(^|[^.]|\.\.\.\s*)\b(?:as|assert(?=\s*\{)|async(?=\s*(?:function\b|\(|[$\w\xA0-\uFFFF]|$))|await|break|case|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally(?=\s*(?:\{|$))|for|from(?=\s*(?:['"]|$))|function|(?:get|set)(?=\s*(?:[#\[$\w\xA0-\uFFFF]|$))|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)\b/,lookbehind:!0}],function:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*(?:\.\s*(?:apply|bind|call)\s*)?\()/,number:{pattern:RegExp(/(^|[^\w$])/.source+"(?:"+/NaN|Infinity/.source+"|"+/0[bB][01]+(?:_[01]+)*n?/.source+"|"+/0[oO][0-7]+(?:_[0-7]+)*n?/.source+"|"+/0[xX][\dA-Fa-f]+(?:_[\dA-Fa-f]+)*n?/.source+"|"+/\d+(?:_\d+)*n/.source+"|"+/(?:\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\.\d+(?:_\d+)*)(?:[Ee][+-]?\d+(?:_\d+)*)?/.sour
ce+")"+/(?![\w$])/.source),lookbehind:!0},operator:/--|\+\+|\*\*=?|=>|&&=?|\|\|=?|[!=]==|<<=?|>>>?=?|[-+*/%&|^!=<>]=?|\.{3}|\?\?=?|\?\.?|[~:]/}),r.languages.javascript["class-name"][0].pattern=/(\b(?:class|extends|implements|instanceof|interface|new)\s+)[\w.\\]+/,r.languages.insertBefore("javascript","keyword",{regex:{pattern:RegExp(/((?:^|[^$\w\xA0-\uFFFF."'\])\s]|\b(?:return|yield))\s*)/.source+/\//.source+"(?:"+/(?:\[(?:[^\]\\\r\n]|\\.)*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}/.source+"|"+/(?:\[(?:[^[\]\\\r\n]|\\.|\[(?:[^[\]\\\r\n]|\\.|\[(?:[^[\]\\\r\n]|\\.)*\])*\])*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}v[dgimyus]{0,7}/.source+")"+/(?=(?:\s|\/\*(?:[^*]|\*(?!\/))*\*\/)*(?:$|[\r\n,.;:})\]]|\/\/))/.source),lookbehind:!0,greedy:!0,inside:{"regex-source":{pattern:/^(\/)[\s\S]+(?=\/[a-z]*$)/,lookbehind:!0,alias:"language-regex",inside:r.languages.regex},"regex-delimiter":/^\/|\/$/,"regex-flags":/^[a-z]+$/}},"function-variable":{pattern:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*[=:]\s*(?:async\s*)?(?:\bfunction\b|(?:\((?:[^()]|\([^()]*\))*\)|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)\s*=>))/,alias:"function"},parameter:[{pattern:/(function(?:\s+(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)?\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\))/,lookbehind:!0,inside:r.languages.javascript},{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$a-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*=>)/i,lookbehind:!0,inside:r.languages.javascript},{pattern:/(\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*=>)/,lookbehind:!0,inside:r.languages.javascript},{pattern:/((?:\b|\s|^)(?!(?:as|async|await|break|case|catch|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally|for|from|function|get|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|set|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)(?![$\w\xA0-\uFFFF]))(?:(?!\s)[_$a-zA-Z\xA0
-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*)\(\s*|\]\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*\{)/,lookbehind:!0,inside:r.languages.javascript}],constant:/\b[A-Z](?:[A-Z_]|\dx?)*\b/}),r.languages.insertBefore("javascript","string",{hashbang:{pattern:/^#!.*/,greedy:!0,alias:"comment"},"template-string":{pattern:/`(?:\\[\s\S]|\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}|(?!\$\{)[^\\`])*`/,greedy:!0,inside:{"template-punctuation":{pattern:/^`|`$/,alias:"string"},interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}/,lookbehind:!0,inside:{"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},rest:r.languages.javascript}},string:/[\s\S]+/}},"string-property":{pattern:/((?:^|[,{])[ \t]*)(["'])(?:\\(?:\r\n|[\s\S])|(?!\2)[^\\\r\n])*\2(?=\s*:)/m,lookbehind:!0,greedy:!0,alias:"property"}}),r.languages.insertBefore("javascript","operator",{"literal-property":{pattern:/((?:^|[,{])[ \t]*)(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*:)/m,lookbehind:!0,alias:"property"}}),r.languages.markup&&(r.languages.markup.tag.addInlined("script","javascript"),r.languages.markup.tag.addAttribute(/on(?:abort|blur|change|click|composition(?:end|start|update)|dblclick|error|focus(?:in|out)?|key(?:down|up)|load|mouse(?:down|enter|leave|move|out|over|up)|reset|resize|scroll|select|slotchange|submit|unload|wheel)/.source,"javascript")),r.languages.js=r.languages.javascript,function(){if("undefined"!==typeof r&&"undefined"!==typeof document){Element.prototype.matches||(Element.prototype.matches=Element.prototype.msMatchesSelector||Element.prototype.webkitMatchesSelector);var e={js:"javascript",py:"python",rb:"ruby",ps1:"powershell",psm1:"powershell",sh:"bash",bat:"batch",h:"c",tex:"latex"},t="data-src-status",n="loading",i="loaded",o="pre[data-src]:not(["+t+'="'+i+'"]):not(['+t+'="'+n+'"])';r.hooks.add("before-highlightall",(function(e){e.selector+=", "+o})),r.hooks.add("before-sanity-check",(function(a){var 
s=a.element;if(s.matches(o)){a.code="",s.setAttribute(t,n);var l=s.appendChild(document.createElement("CODE"));l.textContent="Loading\u2026";var c=s.getAttribute("data-src"),u=a.language;if("none"===u){var h=(/\.(\w+)$/.exec(c)||[,"none"])[1];u=e[h]||h}r.util.setLanguage(l,u),r.util.setLanguage(s,u);var d=r.plugins.autoloader;d&&d.loadLanguages(u),function(e,t,n){var r=new XMLHttpRequest;r.open("GET",e,!0),r.onreadystatechange=function(){4==r.readyState&&(r.status<400&&r.responseText?t(r.responseText):r.status>=400?n("\u2716 Error "+r.status+" while fetching file: "+r.statusText):n("\u2716 Error: File does not exist or is empty"))},r.send(null)}(c,(function(e){s.setAttribute(t,i);var n=function(e){var t=/^\s*(\d+)\s*(?:(,)\s*(?:(\d+)\s*)?)?$/.exec(e||"");if(t){var n=Number(t[1]),r=t[2],i=t[3];return r?i?[n,Number(i)]:[n,void 0]:[n,n]}}(s.getAttribute("data-range"));if(n){var o=e.split(/\r\n?|\n/g),a=n[0],c=null==n[1]?o.length:n[1];a<0&&(a+=o.length),a=Math.max(0,Math.min(a-1,o.length)),c<0&&(c+=o.length),c=Math.max(0,Math.min(c,o.length)),e=o.slice(a,c).join("\n"),s.hasAttribute("data-start")||s.setAttribute("data-start",String(a+1))}l.textContent=e,r.highlightElement(l)}),(function(e){s.setAttribute(t,"failed"),l.textContent=e}))}})),r.plugins.fileHighlight={highlight:function(e){for(var t,n=(e||document).querySelectorAll(o),i=0;t=n[i++];)r.highlightElement(t)}};var a=!1;r.fileHighlight=function(){a||(console.warn("Prism.fileHighlight is deprecated. 
Use `Prism.plugins.fileHighlight.highlight` instead."),a=!0),r.plugins.fileHighlight.highlight.apply(this,arguments)}}}()},3084:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=(0,n(5674).A)(Object.getPrototypeOf,Object)},3101:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(5920);const i=function(e,t,n){"__proto__"==t&&r.A?(0,r.A)(e,t,{configurable:!0,enumerable:!0,value:n,writable:!0}):e[t]=n}},3188:(e,t,n)=>{var r={"./prism-coy.css":[7684,7684],"./prism-coy.min.css":[1662,1662],"./prism-dark.css":[7955,7955],"./prism-dark.min.css":[7509,7509],"./prism-funky.css":[8584,8584],"./prism-funky.min.css":[8970,8970],"./prism-okaidia.css":[9941,9941],"./prism-okaidia.min.css":[8479,8479],"./prism-solarizedlight.css":[4436,4436],"./prism-solarizedlight.min.css":[1550,1550],"./prism-tomorrow.css":[2198,2198],"./prism-tomorrow.min.css":[6080,6080],"./prism-twilight.css":[6707,6707],"./prism-twilight.min.css":[8101,8101],"./prism.css":[5696,5696],"./prism.min.css":[7970,7970]};function i(e){if(!n.o(r,e))return Promise.resolve().then((()=>{var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}));var t=r[e],i=t[0];return n.e(t[1]).then((()=>n(i)))}i.keys=()=>Object.keys(r),i.id=3188,e.exports=i},3218:e=>{"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},3239:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=function(e){return null!=e&&"object"==typeof e}},3240:e=>{"use strict";var t=Object.prototype.hasOwnProperty,n=Object.prototype.toString,r=Object.defineProperty,i=Object.getOwnPropertyDescriptor,o=function(e){return"function"===typeof Array.isArray?Array.isArray(e):"[object Array]"===n.call(e)},a=function(e){if(!e||"[object Object]"!==n.call(e))return!1;var r,i=t.call(e,"constructor"),o=e.constructor&&e.constructor.prototype&&t.call(e.constructor.prototype,"isPrototypeOf");if(e.constructor&&!i&&!o)return!1;for(r in e);return"undefined"===typeof 
r||t.call(e,r)},s=function(e,t){r&&"__proto__"===t.name?r(e,t.name,{enumerable:!0,configurable:!0,value:t.newValue,writable:!0}):e[t.name]=t.newValue},l=function(e,n){if("__proto__"===n){if(!t.call(e,n))return;if(i)return i(e,n).value}return e[n]};e.exports=function e(){var t,n,r,i,c,u,h=arguments[0],d=1,f=arguments.length,p=!1;for("boolean"===typeof h&&(p=h,h=arguments[1]||{},d=2),(null==h||"object"!==typeof h&&"function"!==typeof h)&&(h={});d{"use strict";n.r(t),n.d(t,{default:()=>l,getFunctionName:()=>o});var r=n(528);const i=/^\s*function(?:\s|\s*\/\*.*\*\/\s*)+([^(\s/]*)\s*/;function o(e){const t=`${e}`.match(i);return t&&t[1]||""}function a(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return e.displayName||e.name||o(e)||t}function s(e,t,n){const r=a(t);return e.displayName||(""!==r?`${n}(${r})`:n)}function l(e){if(null!=e){if("string"===typeof e)return e;if("function"===typeof e)return a(e,"Component");if("object"===typeof e)switch(e.$$typeof){case r.vM:return s(e,e.render,"ForwardRef");case r.lD:return s(e,e.type,"memo");default:return}}}},3460:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(7790);const i=function(e,t){var n=t?(0,r.A)(e.buffer):e.buffer;return new e.constructor(n,e.byteOffset,e.length)}},3493:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(4067),i=n(3903);const o=function(e){return null!=e&&(0,i.A)(e.length)&&!(0,r.A)(e)}},3518:()=>{!function(){if("undefined"!==typeof Prism&&"undefined"!==typeof document)if(Prism.plugins.toolbar){var e={none:"Plain text",plain:"Plain text",plaintext:"Plain text",text:"Plain text",txt:"Plain text",html:"HTML",xml:"XML",svg:"SVG",mathml:"MathML",ssml:"SSML",rss:"RSS",css:"CSS",clike:"C-like",js:"JavaScript",abap:"ABAP",abnf:"ABNF",al:"AL",antlr4:"ANTLR4",g4:"ANTLR4",apacheconf:"Apache Configuration",apl:"APL",aql:"AQL",ino:"Arduino",arff:"ARFF",armasm:"ARM Assembly","arm-asm":"ARM Assembly",art:"Arturo",asciidoc:"AsciiDoc",adoc:"AsciiDoc",aspnet:"ASP.NET (C#)",asm6502:"6502 
Assembly",asmatmel:"Atmel AVR Assembly",autohotkey:"AutoHotkey",autoit:"AutoIt",avisynth:"AviSynth",avs:"AviSynth","avro-idl":"Avro IDL",avdl:"Avro IDL",awk:"AWK",gawk:"GAWK",sh:"Shell",basic:"BASIC",bbcode:"BBcode",bbj:"BBj",bnf:"BNF",rbnf:"RBNF",bqn:"BQN",bsl:"BSL (1C:Enterprise)",oscript:"OneScript",csharp:"C#",cs:"C#",dotnet:"C#",cpp:"C++",cfscript:"CFScript",cfc:"CFScript",cil:"CIL",cilkc:"Cilk/C","cilk-c":"Cilk/C",cilkcpp:"Cilk/C++","cilk-cpp":"Cilk/C++",cilk:"Cilk/C++",cmake:"CMake",cobol:"COBOL",coffee:"CoffeeScript",conc:"Concurnas",csp:"Content-Security-Policy","css-extras":"CSS Extras",csv:"CSV",cue:"CUE",dataweave:"DataWeave",dax:"DAX",django:"Django/Jinja2",jinja2:"Django/Jinja2","dns-zone-file":"DNS zone file","dns-zone":"DNS zone file",dockerfile:"Docker",dot:"DOT (Graphviz)",gv:"DOT (Graphviz)",ebnf:"EBNF",editorconfig:"EditorConfig",ejs:"EJS",etlua:"Embedded Lua templating",erb:"ERB","excel-formula":"Excel Formula",xlsx:"Excel Formula",xls:"Excel Formula",fsharp:"F#","firestore-security-rules":"Firestore security rules",ftl:"FreeMarker Template Language",gml:"GameMaker Language",gamemakerlanguage:"GameMaker Language",gap:"GAP (CAS)",gcode:"G-code",gdscript:"GDScript",gedcom:"GEDCOM",gettext:"gettext",po:"gettext",glsl:"GLSL",gn:"GN",gni:"GN","linker-script":"GNU Linker Script",ld:"GNU Linker Script","go-module":"Go module","go-mod":"Go module",graphql:"GraphQL",hbs:"Handlebars",hs:"Haskell",hcl:"HCL",hlsl:"HLSL",http:"HTTP",hpkp:"HTTP Public-Key-Pins",hsts:"HTTP Strict-Transport-Security",ichigojam:"IchigoJam","icu-message-format":"ICU Message Format",idr:"Idris",ignore:".ignore",gitignore:".gitignore",hgignore:".hgignore",npmignore:".npmignore",inform7:"Inform 7",javadoc:"JavaDoc",javadoclike:"JavaDoc-like",javastacktrace:"Java stack trace",jq:"JQ",jsdoc:"JSDoc","js-extras":"JS Extras",json:"JSON",webmanifest:"Web App Manifest",json5:"JSON5",jsonp:"JSONP",jsstacktrace:"JS stack trace","js-templates":"JS Templates",keepalived:"Keepalived 
Configure",kts:"Kotlin Script",kt:"Kotlin",kumir:"KuMir (\u041a\u0443\u041c\u0438\u0440)",kum:"KuMir (\u041a\u0443\u041c\u0438\u0440)",latex:"LaTeX",tex:"TeX",context:"ConTeXt",lilypond:"LilyPond",ly:"LilyPond",emacs:"Lisp",elisp:"Lisp","emacs-lisp":"Lisp",llvm:"LLVM IR",log:"Log file",lolcode:"LOLCODE",magma:"Magma (CAS)",md:"Markdown","markup-templating":"Markup templating",matlab:"MATLAB",maxscript:"MAXScript",mel:"MEL",metafont:"METAFONT",mongodb:"MongoDB",moon:"MoonScript",n1ql:"N1QL",n4js:"N4JS",n4jsd:"N4JS","nand2tetris-hdl":"Nand To Tetris HDL",naniscript:"Naninovel Script",nani:"Naninovel Script",nasm:"NASM",neon:"NEON",nginx:"nginx",nsis:"NSIS",objectivec:"Objective-C",objc:"Objective-C",ocaml:"OCaml",opencl:"OpenCL",openqasm:"OpenQasm",qasm:"OpenQasm",parigp:"PARI/GP",objectpascal:"Object Pascal",psl:"PATROL Scripting Language",pcaxis:"PC-Axis",px:"PC-Axis",peoplecode:"PeopleCode",pcode:"PeopleCode",php:"PHP",phpdoc:"PHPDoc","php-extras":"PHP Extras","plant-uml":"PlantUML",plantuml:"PlantUML",plsql:"PL/SQL",powerquery:"PowerQuery",pq:"PowerQuery",mscript:"PowerQuery",powershell:"PowerShell",promql:"PromQL",properties:".properties",protobuf:"Protocol Buffers",purebasic:"PureBasic",pbfasm:"PureBasic",purs:"PureScript",py:"Python",qsharp:"Q#",qs:"Q#",q:"Q (kdb+ database)",qml:"QML",rkt:"Racket",cshtml:"Razor C#",razor:"Razor C#",jsx:"React JSX",tsx:"React TSX",renpy:"Ren'py",rpy:"Ren'py",res:"ReScript",rest:"reST (reStructuredText)",robotframework:"Robot Framework",robot:"Robot Framework",rb:"Ruby",sas:"SAS",sass:"Sass (Sass)",scss:"Sass (SCSS)","shell-session":"Shell session","sh-session":"Shell session",shellsession:"Shell session",sml:"SML",smlnj:"SML/NJ",solidity:"Solidity (Ethereum)",sol:"Solidity (Ethereum)","solution-file":"Solution file",sln:"Solution file",soy:"Soy (Closure Template)",sparql:"SPARQL",rq:"SPARQL","splunk-spl":"Splunk SPL",sqf:"SQF: Status Quo Function (Arma 3)",sql:"SQL",stata:"Stata Ado",iecst:"Structured Text (IEC 
61131-3)",supercollider:"SuperCollider",sclang:"SuperCollider",systemd:"Systemd configuration file","t4-templating":"T4 templating","t4-cs":"T4 Text Templates (C#)",t4:"T4 Text Templates (C#)","t4-vb":"T4 Text Templates (VB)",tap:"TAP",tt2:"Template Toolkit 2",toml:"TOML",trickle:"trickle",troy:"troy",trig:"TriG",ts:"TypeScript",tsconfig:"TSConfig",uscript:"UnrealScript",uc:"UnrealScript",uorazor:"UO Razor Script",uri:"URI",url:"URL",vbnet:"VB.Net",vhdl:"VHDL",vim:"vim","visual-basic":"Visual Basic",vba:"VBA",vb:"Visual Basic",wasm:"WebAssembly","web-idl":"Web IDL",webidl:"Web IDL",wgsl:"WGSL",wiki:"Wiki markup",wolfram:"Wolfram language",nb:"Mathematica Notebook",wl:"Wolfram language",xeoracube:"XeoraCube","xml-doc":"XML doc (.net)",xojo:"Xojo (REALbasic)",xquery:"XQuery",yaml:"YAML",yml:"YAML",yang:"YANG"};Prism.plugins.toolbar.registerButton("show-language",(function(t){var n=t.element.parentNode;if(n&&/pre/i.test(n.nodeName)){var r,i=n.getAttribute("data-language")||e[t.language]||((r=t.language)?(r.substring(0,1).toUpperCase()+r.substring(1)).replace(/s(?=cript)/,"S"):r);if(i){var o=document.createElement("span");return o.textContent=i,o}}}))}else console.warn("Show Languages plugin loaded before Toolbar 
plugin.")}()},3636:()=>{Prism.languages.javascript=Prism.languages.extend("clike",{"class-name":[Prism.languages.clike["class-name"],{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$A-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\.(?:constructor|prototype))/,lookbehind:!0}],keyword:[{pattern:/((?:^|\})\s*)catch\b/,lookbehind:!0},{pattern:/(^|[^.]|\.\.\.\s*)\b(?:as|assert(?=\s*\{)|async(?=\s*(?:function\b|\(|[$\w\xA0-\uFFFF]|$))|await|break|case|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally(?=\s*(?:\{|$))|for|from(?=\s*(?:['"]|$))|function|(?:get|set)(?=\s*(?:[#\[$\w\xA0-\uFFFF]|$))|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)\b/,lookbehind:!0}],function:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*(?:\.\s*(?:apply|bind|call)\s*)?\()/,number:{pattern:RegExp(/(^|[^\w$])/.source+"(?:"+/NaN|Infinity/.source+"|"+/0[bB][01]+(?:_[01]+)*n?/.source+"|"+/0[oO][0-7]+(?:_[0-7]+)*n?/.source+"|"+/0[xX][\dA-Fa-f]+(?:_[\dA-Fa-f]+)*n?/.source+"|"+/\d+(?:_\d+)*n/.source+"|"+/(?:\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\.\d+(?:_\d+)*)(?:[Ee][+-]?\d+(?:_\d+)*)?/.source+")"+/(?![\w$])/.source),lookbehind:!0},operator:/--|\+\+|\*\*=?|=>|&&=?|\|\|=?|[!=]==|<<=?|>>>?=?|[-+*/%&|^!=<>]=?|\.{3}|\?\?=?|\?\.?|[~:]/}),Prism.languages.javascript["class-name"][0].pattern=/(\b(?:class|extends|implements|instanceof|interface|new)\s+)[\w.\\]+/,Prism.languages.insertBefore("javascript","keyword",{regex:{pattern:RegExp(/((?:^|[^$\w\xA0-\uFFFF."'\])\s]|\b(?:return|yield))\s*)/.source+/\//.source+"(?:"+/(?:\[(?:[^\]\\\r\n]|\\.)*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}/.source+"|"+/(?:\[(?:[^[\]\\\r\n]|\\.|\[(?:[^[\]\\\r\n]|\\.|\[(?:[^[\]\\\r\n]|\\.)*\])*\])*\]|\\.|[^/\\\[\r\n])+\/[dgimyus]{0,7}v[dgimyus]{0,7}/.source+")"+/(?=(?:\s|\/\*(?:[^*]|\*(?!\/))*\*\/)*(?:$|[\r\n,.;:})\]]|\/\/))/.source),lookbehind:!0,greedy:!0,inside:{"regex-sourc
e":{pattern:/^(\/)[\s\S]+(?=\/[a-z]*$)/,lookbehind:!0,alias:"language-regex",inside:Prism.languages.regex},"regex-delimiter":/^\/|\/$/,"regex-flags":/^[a-z]+$/}},"function-variable":{pattern:/#?(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*[=:]\s*(?:async\s*)?(?:\bfunction\b|(?:\((?:[^()]|\([^()]*\))*\)|(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)\s*=>))/,alias:"function"},parameter:[{pattern:/(function(?:\s+(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*)?\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\))/,lookbehind:!0,inside:Prism.languages.javascript},{pattern:/(^|[^$\w\xA0-\uFFFF])(?!\s)[_$a-z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*=>)/i,lookbehind:!0,inside:Prism.languages.javascript},{pattern:/(\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*=>)/,lookbehind:!0,inside:Prism.languages.javascript},{pattern:/((?:\b|\s|^)(?!(?:as|async|await|break|case|catch|class|const|continue|debugger|default|delete|do|else|enum|export|extends|finally|for|from|function|get|if|implements|import|in|instanceof|interface|let|new|null|of|package|private|protected|public|return|set|static|super|switch|this|throw|try|typeof|undefined|var|void|while|with|yield)(?![$\w\xA0-\uFFFF]))(?:(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*\s*)\(\s*|\]\s*\(\s*)(?!\s)(?:[^()\s]|\s+(?![\s)])|\([^()]*\))+(?=\s*\)\s*\{)/,lookbehind:!0,inside:Prism.languages.javascript}],constant:/\b[A-Z](?:[A-Z_]|\dx?)*\b/}),Prism.languages.insertBefore("javascript","string",{hashbang:{pattern:/^#!.*/,greedy:!0,alias:"comment"},"template-string":{pattern:/`(?:\\[\s\S]|\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}|(?!\$\{)[^\\`])*`/,greedy:!0,inside:{"template-punctuation":{pattern:/^`|`$/,alias:"string"},interpolation:{pattern:/((?:^|[^\\])(?:\\{2})*)\$\{(?:[^{}]|\{(?:[^{}]|\{[^}]*\})*\})+\}/,lookbehind:!0,inside:{"interpolation-punctuation":{pattern:/^\$\{|\}$/,alias:"punctuation"},rest:Prism.languages.javascript}},string:/[\s\S]+/}},"string-property":{pa
ttern:/((?:^|[,{])[ \t]*)(["'])(?:\\(?:\r\n|[\s\S])|(?!\2)[^\\\r\n])*\2(?=\s*:)/m,lookbehind:!0,greedy:!0,alias:"property"}}),Prism.languages.insertBefore("javascript","operator",{"literal-property":{pattern:/((?:^|[,{])[ \t]*)(?!\s)[_$a-zA-Z\xA0-\uFFFF](?:(?!\s)[$\w\xA0-\uFFFF])*(?=\s*:)/m,lookbehind:!0,alias:"property"}}),Prism.languages.markup&&(Prism.languages.markup.tag.addInlined("script","javascript"),Prism.languages.markup.tag.addAttribute(/on(?:abort|blur|change|click|composition(?:end|start|update)|dblclick|error|focus(?:in|out)?|key(?:down|up)|load|mouse(?:down|enter|leave|move|out|over|up)|reset|resize|scroll|select|slotchange|submit|unload|wheel)/.source,"javascript")),Prism.languages.js=Prism.languages.javascript},3638:(e,t,n)=>{"use strict";function r(e,t){let n;if(void 0===t)for(const r of e)null!=r&&(n=r)&&(n=r);else{let r=-1;for(let i of e)null!=(i=t(i,++r,e))&&(n=i)&&(n=i)}return n}function i(e,t){let n;if(void 0===t)for(const r of e)null!=r&&(n>r||void 0===n&&r>=r)&&(n=r);else{let r=-1;for(let i of e)null!=(i=t(i,++r,e))&&(n>i||void 0===n&&i>=i)&&(n=i)}return n}function o(e){return e}n.d(t,{JLW:()=>Xa,l78:()=>b,tlR:()=>y,qrM:()=>cs,Yu4:()=>hs,IA3:()=>fs,Wi0:()=>gs,PGM:()=>ms,OEq:()=>bs,y8u:()=>ks,olC:()=>Ss,IrU:()=>_s,oDi:()=>Es,Q7f:()=>Ms,cVp:()=>Ps,lUB:()=>Ja,Lx9:()=>$s,nVG:()=>qs,uxU:()=>Hs,Xf2:()=>Us,GZz:()=>Ys,UPb:()=>Xs,dyv:()=>Gs,bEH:()=>rr,n8j:()=>ns,T9B:()=>r,jkA:()=>i,rLf:()=>os,WH:()=>dr,m4Y:()=>ii,UMr:()=>hr,w7C:()=>ka,zt:()=>wa,Ltv:()=>Sa,UAC:()=>ki,DCK:()=>Ji,TUC:()=>Mi,Agd:()=>vi,t6C:()=>gi,wXd:()=>yi,ABi:()=>Ai,Ui6:()=>Ri,rGn:()=>Li,ucG:()=>mi,YPH:()=>_i,Mol:()=>Fi,PGu:()=>Ti,GuW:()=>Ei});var a=1,s=2,l=3,c=4,u=1e-6;function h(e){return"translate("+e+",0)"}function d(e){return"translate(0,"+e+")"}function f(e){return t=>+e(t)}function p(e,t){return t=Math.max(0,e.bandwidth()-2*t)/2,e.round()&&(t=Math.round(t)),n=>+e(n)+t}function g(){return!this.__axis}function m(e,t){var n=[],r=null,i=null,m=6,y=6,b=3,v="undefined"!==typeof 
window&&window.devicePixelRatio>1?0:.5,x=e===a||e===c?-1:1,k=e===c||e===s?"x":"y",w=e===a||e===l?h:d;function S(h){var d=null==r?t.ticks?t.ticks.apply(t,n):t.domain():r,S=null==i?t.tickFormat?t.tickFormat.apply(t,n):o:i,C=Math.max(m,0)+b,_=t.range(),A=+_[0]+v,T=+_[_.length-1]+v,E=(t.bandwidth?p:f)(t.copy(),v),F=h.selection?h.selection():h,M=F.selectAll(".domain").data([null]),L=F.selectAll(".tick").data(d,t).order(),P=L.exit(),O=L.enter().append("g").attr("class","tick"),$=L.select("line"),B=L.select("text");M=M.merge(M.enter().insert("path",".tick").attr("class","domain").attr("stroke","currentColor")),L=L.merge(O),$=$.merge(O.append("line").attr("stroke","currentColor").attr(k+"2",x*m)),B=B.merge(O.append("text").attr("fill","currentColor").attr(k,x*C).attr("dy",e===a?"0em":e===l?"0.71em":"0.32em")),h!==F&&(M=M.transition(h),L=L.transition(h),$=$.transition(h),B=B.transition(h),P=P.transition(h).attr("opacity",u).attr("transform",(function(e){return isFinite(e=E(e))?w(e+v):this.getAttribute("transform")})),O.attr("opacity",u).attr("transform",(function(e){var t=this.parentNode.__axis;return w((t&&isFinite(t=t(e))?t:E(e))+v)}))),P.remove(),M.attr("d",e===c||e===s?y?"M"+x*y+","+A+"H"+v+"V"+T+"H"+x*y:"M"+v+","+A+"V"+T:y?"M"+A+","+x*y+"V"+v+"H"+T+"V"+x*y:"M"+A+","+v+"H"+T),L.attr("opacity",1).attr("transform",(function(e){return w(E(e)+v)})),$.attr(k+"2",x*m),B.attr(k,x*C).text(S),F.filter(g).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",e===s?"start":e===c?"end":"middle"),F.each((function(){this.__axis=E}))}return S.scale=function(e){return arguments.length?(t=e,S):t},S.ticks=function(){return n=Array.from(arguments),S},S.tickArguments=function(e){return arguments.length?(n=null==e?[]:Array.from(e),S):n.slice()},S.tickValues=function(e){return arguments.length?(r=null==e?null:Array.from(e),S):r&&r.slice()},S.tickFormat=function(e){return arguments.length?(i=e,S):i},S.tickSize=function(e){return 
arguments.length?(m=y=+e,S):m},S.tickSizeInner=function(e){return arguments.length?(m=+e,S):m},S.tickSizeOuter=function(e){return arguments.length?(y=+e,S):y},S.tickPadding=function(e){return arguments.length?(b=+e,S):b},S.offset=function(e){return arguments.length?(v=+e,S):v},S}function y(e){return m(a,e)}function b(e){return m(l,e)}function v(){}function x(e){return null==e?v:function(){return this.querySelector(e)}}function k(){return[]}function w(e){return null==e?k:function(){return this.querySelectorAll(e)}}function S(e){return function(){return null==(t=e.apply(this,arguments))?[]:Array.isArray(t)?t:Array.from(t);var t}}function C(e){return function(){return this.matches(e)}}function _(e){return function(t){return t.matches(e)}}var A=Array.prototype.find;function T(){return this.firstElementChild}var E=Array.prototype.filter;function F(){return Array.from(this.children)}function M(e){return new Array(e.length)}function L(e,t){this.ownerDocument=e.ownerDocument,this.namespaceURI=e.namespaceURI,this._next=null,this._parent=e,this.__data__=t}function P(e,t,n,r,i,o){for(var a,s=0,l=t.length,c=o.length;st?1:e>=t?0:NaN}L.prototype={constructor:L,appendChild:function(e){return this._parent.insertBefore(e,this._next)},insertBefore:function(e,t){return this._parent.insertBefore(e,t)},querySelector:function(e){return this._parent.querySelector(e)},querySelectorAll:function(e){return this._parent.querySelectorAll(e)}};var z="http://www.w3.org/1999/xhtml";const I={svg:"http://www.w3.org/2000/svg",xhtml:z,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function N(e){var t=e+="",n=t.indexOf(":");return n>=0&&"xmlns"!==(t=e.slice(0,n))&&(e=e.slice(n+1)),I.hasOwnProperty(t)?{space:I[t],local:e}:e}function R(e){return function(){this.removeAttribute(e)}}function j(e){return function(){this.removeAttributeNS(e.space,e.local)}}function q(e,t){return function(){this.setAttribute(e,t)}}function H(e,t){return 
function(){this.setAttributeNS(e.space,e.local,t)}}function W(e,t){return function(){var n=t.apply(this,arguments);null==n?this.removeAttribute(e):this.setAttribute(e,n)}}function K(e,t){return function(){var n=t.apply(this,arguments);null==n?this.removeAttributeNS(e.space,e.local):this.setAttributeNS(e.space,e.local,n)}}function U(e){return e.ownerDocument&&e.ownerDocument.defaultView||e.document&&e||e.defaultView}function V(e){return function(){this.style.removeProperty(e)}}function Y(e,t,n){return function(){this.style.setProperty(e,t,n)}}function G(e,t,n){return function(){var r=t.apply(this,arguments);null==r?this.style.removeProperty(e):this.style.setProperty(e,r,n)}}function X(e,t){return e.style.getPropertyValue(t)||U(e).getComputedStyle(e,null).getPropertyValue(t)}function Q(e){return function(){delete this[e]}}function Z(e,t){return function(){this[e]=t}}function J(e,t){return function(){var n=t.apply(this,arguments);null==n?delete this[e]:this[e]=n}}function ee(e){return e.trim().split(/^|\s+/)}function te(e){return e.classList||new ne(e)}function ne(e){this._node=e,this._names=ee(e.getAttribute("class")||"")}function re(e,t){for(var n=te(e),r=-1,i=t.length;++r=0&&(this._names.splice(t,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(e){return this._names.indexOf(e)>=0}};var Ee=[null];function Fe(e,t){this._groups=e,this._parents=t}function Me(){return new Fe([[document.documentElement]],Ee)}Fe.prototype=Me.prototype={constructor:Fe,select:function(e){"function"!==typeof e&&(e=x(e));for(var t=this._groups,n=t.length,r=new Array(n),i=0;i=k&&(k=x+1);!(v=y[k])&&++k=0;)(r=i[o])&&(a&&4^r.compareDocumentPosition(a)&&a.parentNode.insertBefore(r,a),a=r);return this},sort:function(e){function t(t,n){return t&&n?e(t.__data__,n.__data__):!t-!n}e||(e=D);for(var n=this._groups,r=n.length,i=new Array(r),o=0;o1?this.each((null==t?V:"function"===typeof t?G:Y)(e,t,null==n?"":n)):X(this.node(),e)},property:function(e,t){return 
arguments.length>1?this.each((null==t?Q:"function"===typeof t?J:Z)(e,t)):this.node()[e]},classed:function(e,t){var n=ee(e+"");if(arguments.length<2){for(var r=te(this.node()),i=-1,o=n.length;++i=0&&(t=e.slice(n+1),e=e.slice(0,n)),{type:e,name:t}}))}(e+""),a=o.length;if(!(arguments.length<2)){for(s=t?Ce:Se,r=0;r{}};function Oe(){for(var e,t=0,n=arguments.length,r={};t=0&&(t=e.slice(n+1),e=e.slice(0,n)),e&&!r.hasOwnProperty(e))throw new Error("unknown type: "+e);return{type:e,name:t}}))),a=-1,s=o.length;if(!(arguments.length<2)){if(null!=t&&"function"!==typeof t)throw new Error("invalid callback: "+t);for(;++a0)for(var n,r,i=new Array(n),o=0;o=0&&t._call.call(void 0,e),t=t._next;--Re}()}finally{Re=0,function(){var e,t,n=Ie,r=1/0;for(;n;)n._call?(r>n._time&&(r=n._time),e=n,n=n._next):(t=n._next,n._next=null,n=e?e._next=t:Ie=t);Ne=e,et(r)}(),We=0}}function Je(){var e=Ue.now(),t=e-He;t>1e3&&(Ke-=t,He=e)}function et(e){Re||(je&&(je=clearTimeout(je)),e-We>24?(e<1/0&&(je=setTimeout(Ze,e-Ue.now()-Ke)),qe&&(qe=clearInterval(qe))):(qe||(He=Ue.now(),qe=setInterval(Je,1e3)),Re=1,Ve(Ze)))}function tt(e,t,n){var r=new Xe;return t=null==t?0:+t,r.restart((n=>{r.stop(),e(n+t)}),t,n),r}Xe.prototype=Qe.prototype={constructor:Xe,restart:function(e,t,n){if("function"!==typeof e)throw new TypeError("callback is not a function");n=(null==n?Ye():+n)+(null==t?0:+t),this._next||Ne===this||(Ne?Ne._next=this:Ie=this,Ne=this),this._call=e,this._time=n,et()},stop:function(){this._call&&(this._call=null,this._time=1/0,et())}};var nt=ze("start","end","cancel","interrupt"),rt=[];function it(e,t,n,r,i,o){var a=e.__transition;if(a){if(n in a)return}else e.__transition={};!function(e,t,n){var r,i=e.__transition;function o(e){n.state=1,n.timer.restart(a,n.delay,n.time),n.delay<=e&&a(e-n.delay)}function a(o){var c,u,h,d;if(1!==n.state)return l();for(c in i)if((d=i[c]).name===n.name){if(3===d.state)return 
tt(a);4===d.state?(d.state=6,d.timer.stop(),d.on.call("interrupt",e,e.__data__,d.index,d.group),delete i[c]):+c0)throw new Error("too late; already scheduled");return n}function at(e,t){var n=st(e,t);if(n.state>3)throw new Error("too late; already running");return n}function st(e,t){var n=e.__transition;if(!n||!(n=n[t]))throw new Error("transition not found");return n}function lt(e,t){return e=+e,t=+t,function(n){return e*(1-n)+t*n}}var ct,ut=180/Math.PI,ht={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function dt(e,t,n,r,i,o){var a,s,l;return(a=Math.sqrt(e*e+t*t))&&(e/=a,t/=a),(l=e*n+t*r)&&(n-=e*l,r-=t*l),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,l/=s),e*r180?t+=360:t-e>180&&(e+=360),o.push({i:n.push(i(n)+"rotate(",null,r)-2,x:lt(e,t)})):t&&n.push(i(n)+"rotate("+t+r)}(o.rotate,a.rotate,s,l),function(e,t,n,o){e!==t?o.push({i:n.push(i(n)+"skewX(",null,r)-2,x:lt(e,t)}):t&&n.push(i(n)+"skewX("+t+r)}(o.skewX,a.skewX,s,l),function(e,t,n,r,o,a){if(e!==n||t!==r){var s=o.push(i(o)+"scale(",null,",",null,")");a.push({i:s-4,x:lt(e,n)},{i:s-2,x:lt(t,r)})}else 1===n&&1===r||o.push(i(o)+"scale("+n+","+r+")")}(o.scaleX,o.scaleY,a.scaleX,a.scaleY,s,l),o=a=null,function(e){for(var t,n=-1,r=l.length;++n>8&15|t>>4&240,t>>4&15|240&t,(15&t)<<4|15&t,1):8===n?Nt(t>>24&255,t>>16&255,t>>8&255,(255&t)/255):4===n?Nt(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|240&t,((15&t)<<4|15&t)/255):null):(t=Et.exec(e))?new qt(t[1],t[2],t[3],1):(t=Ft.exec(e))?new qt(255*t[1]/100,255*t[2]/100,255*t[3]/100,1):(t=Mt.exec(e))?Nt(t[1],t[2],t[3],t[4]):(t=Lt.exec(e))?Nt(255*t[1]/100,255*t[2]/100,255*t[3]/100,t[4]):(t=Pt.exec(e))?Yt(t[1],t[2]/100,t[3]/100,1):(t=Ot.exec(e))?Yt(t[1],t[2]/100,t[3]/100,t[4]):$t.hasOwnProperty(e)?It($t[e]):"transparent"===e?new qt(NaN,NaN,NaN,0):null}function It(e){return new qt(e>>16&255,e>>8&255,255&e,1)}function Nt(e,t,n,r){return r<=0&&(e=t=n=NaN),new qt(e,t,n,r)}function Rt(e){return e instanceof kt||(e=zt(e)),e?new qt((e=e.rgb()).r,e.g,e.b,e.opacity):new qt}function 
jt(e,t,n,r){return 1===arguments.length?Rt(e):new qt(e,t,n,null==r?1:r)}function qt(e,t,n,r){this.r=+e,this.g=+t,this.b=+n,this.opacity=+r}function Ht(){return`#${Vt(this.r)}${Vt(this.g)}${Vt(this.b)}`}function Wt(){const e=Kt(this.opacity);return`${1===e?"rgb(":"rgba("}${Ut(this.r)}, ${Ut(this.g)}, ${Ut(this.b)}${1===e?")":`, ${e})`}`}function Kt(e){return isNaN(e)?1:Math.max(0,Math.min(1,e))}function Ut(e){return Math.max(0,Math.min(255,Math.round(e)||0))}function Vt(e){return((e=Ut(e))<16?"0":"")+e.toString(16)}function Yt(e,t,n,r){return r<=0?e=t=n=NaN:n<=0||n>=1?e=t=NaN:t<=0&&(e=NaN),new Xt(e,t,n,r)}function Gt(e){if(e instanceof Xt)return new Xt(e.h,e.s,e.l,e.opacity);if(e instanceof kt||(e=zt(e)),!e)return new Xt;if(e instanceof Xt)return e;var t=(e=e.rgb()).r/255,n=e.g/255,r=e.b/255,i=Math.min(t,n,r),o=Math.max(t,n,r),a=NaN,s=o-i,l=(o+i)/2;return s?(a=t===o?(n-r)/s+6*(n0&&l<1?0:a,new Xt(a,s,l,e.opacity)}function Xt(e,t,n,r){this.h=+e,this.s=+t,this.l=+n,this.opacity=+r}function Qt(e){return(e=(e||0)%360)<0?e+360:e}function Zt(e){return Math.max(0,Math.min(1,e||0))}function Jt(e,t,n){return 255*(e<60?t+(n-t)*e/60:e<180?n:e<240?t+(n-t)*(240-e)/60:t)}function en(e,t,n,r,i){var o=e*e,a=o*e;return((1-3*e+3*o-a)*t+(4-6*o+3*a)*n+(1+3*e+3*o-3*a)*r+a*i)/6}vt(kt,zt,{copy(e){return Object.assign(new this.constructor,this,e)},displayable(){return this.rgb().displayable()},hex:Bt,formatHex:Bt,formatHex8:function(){return this.rgb().formatHex8()},formatHsl:function(){return Gt(this).formatHsl()},formatRgb:Dt,toString:Dt}),vt(qt,jt,xt(kt,{brighter(e){return e=null==e?St:Math.pow(St,e),new qt(this.r*e,this.g*e,this.b*e,this.opacity)},darker(e){return e=null==e?wt:Math.pow(wt,e),new qt(this.r*e,this.g*e,this.b*e,this.opacity)},rgb(){return this},clamp(){return new 
qt(Ut(this.r),Ut(this.g),Ut(this.b),Kt(this.opacity))},displayable(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Ht,formatHex:Ht,formatHex8:function(){return`#${Vt(this.r)}${Vt(this.g)}${Vt(this.b)}${Vt(255*(isNaN(this.opacity)?1:this.opacity))}`},formatRgb:Wt,toString:Wt})),vt(Xt,(function(e,t,n,r){return 1===arguments.length?Gt(e):new Xt(e,t,n,null==r?1:r)}),xt(kt,{brighter(e){return e=null==e?St:Math.pow(St,e),new Xt(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=null==e?wt:Math.pow(wt,e),new Xt(this.h,this.s,this.l*e,this.opacity)},rgb(){var e=this.h%360+360*(this.h<0),t=isNaN(e)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*t,i=2*n-r;return new qt(Jt(e>=240?e-240:e+120,i,r),Jt(e,i,r),Jt(e<120?e+240:e-120,i,r),this.opacity)},clamp(){return new Xt(Qt(this.h),Zt(this.s),Zt(this.l),Kt(this.opacity))},displayable(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl(){const e=Kt(this.opacity);return`${1===e?"hsl(":"hsla("}${Qt(this.h)}, ${100*Zt(this.s)}%, ${100*Zt(this.l)}%${1===e?")":`, ${e})`}`}}));const tn=e=>()=>e;function nn(e,t){return function(n){return e+n*t}}function rn(e){return 1===(e=+e)?on:function(t,n){return n-t?function(e,t,n){return e=Math.pow(e,n),t=Math.pow(t,n)-e,n=1/n,function(r){return Math.pow(e+r*t,n)}}(t,n,e):tn(isNaN(t)?n:t)}}function on(e,t){var n=t-e;return n?nn(e,n):tn(isNaN(e)?t:e)}const an=function e(t){var n=rn(t);function r(e,t){var r=n((e=jt(e)).r,(t=jt(t)).r),i=n(e.g,t.g),o=n(e.b,t.b),a=on(e.opacity,t.opacity);return function(t){return e.r=r(t),e.g=i(t),e.b=o(t),e.opacity=a(t),e+""}}return r.gamma=e,r}(1);function sn(e){return function(t){var n,r,i=t.length,o=new Array(i),a=new Array(i),s=new 
Array(i);for(n=0;n=1?(n=1,t-1):Math.floor(n*t),i=e[r],o=e[r+1],a=r>0?e[r-1]:2*i-o,s=ro&&(i=t.slice(o,i),s[a]?s[a]+=i:s[++a]=i),(n=n[0])===(r=r[0])?s[a]?s[a]+=r:s[++a]=r:(s[++a]=null,l.push({i:a,x:lt(n,r)})),o=cn.lastIndex;return o=0&&(e=e.slice(0,t)),!e||"start"===e}))}(t)?ot:at;return function(){var a=o(this,e),s=a.on;s!==r&&(i=(r=s).copy()).on(t,n),a.on=i}}(n,e,t))},attr:function(e,t){var n=N(e),r="transform"===n?gt:hn;return this.attrTween(e,"function"===typeof t?(n.local?yn:mn)(n,r,bt(this,"attr."+e,t)):null==t?(n.local?fn:dn)(n):(n.local?gn:pn)(n,r,t))},attrTween:function(e,t){var n="attr."+e;if(arguments.length<2)return(n=this.tween(n))&&n._value;if(null==t)return this.tween(n,null);if("function"!==typeof t)throw new Error;var r=N(e);return this.tween(n,(r.local?bn:vn)(r,t))},style:function(e,t,n){var r="transform"===(e+="")?pt:hn;return null==t?this.styleTween(e,function(e,t){var n,r,i;return function(){var o=X(this,e),a=(this.style.removeProperty(e),X(this,e));return o===a?null:o===n&&a===r?i:i=t(n=o,r=a)}}(e,r)).on("end.style."+e,_n(e)):"function"===typeof t?this.styleTween(e,function(e,t,n){var r,i,o;return function(){var a=X(this,e),s=n(this),l=s+"";return null==s&&(this.style.removeProperty(e),l=s=X(this,e)),a===l?null:a===r&&l===i?o:(i=l,o=t(r=a,s))}}(e,r,bt(this,"style."+e,t))).each(function(e,t){var n,r,i,o,a="style."+t,s="end."+a;return function(){var l=at(this,e),c=l.on,u=null==l.value[a]?o||(o=_n(t)):void 0;c===n&&i===u||(r=(n=c).copy()).on(s,i=u),l.on=r}}(this._id,e)):this.styleTween(e,function(e,t,n){var r,i,o=n+"";return function(){var a=X(this,e);return a===o?null:a===r?i:i=t(r=a,n)}}(e,r,t),n).on("end.style."+e,null)},styleTween:function(e,t,n){var r="style."+(e+="");if(arguments.length<2)return(r=this.tween(r))&&r._value;if(null==t)return this.tween(r,null);if("function"!==typeof t)throw new Error;return this.tween(r,function(e,t,n){var r,i;function o(){var o=t.apply(this,arguments);return o!==i&&(r=(i=o)&&function(e,t,n){return 
function(r){this.style.setProperty(e,t.call(this,r),n)}}(e,o,n)),r}return o._value=t,o}(e,t,null==n?"":n))},text:function(e){return this.tween("text","function"===typeof e?function(e){return function(){var t=e(this);this.textContent=null==t?"":t}}(bt(this,"text",e)):function(e){return function(){this.textContent=e}}(null==e?"":e+""))},textTween:function(e){var t="text";if(arguments.length<1)return(t=this.tween(t))&&t._value;if(null==e)return this.tween(t,null);if("function"!==typeof e)throw new Error;return this.tween(t,function(e){var t,n;function r(){var r=e.apply(this,arguments);return r!==n&&(t=(n=r)&&function(e){return function(t){this.textContent=e.call(this,t)}}(r)),t}return r._value=e,r}(e))},remove:function(){return this.on("end.remove",function(e){return function(){var t=this.parentNode;for(var n in this.__transition)if(+n!==e)return;t&&t.removeChild(this)}}(this._id))},tween:function(e,t){var n=this._id;if(e+="",arguments.length<2){for(var r,i=st(this.node(),n).tween,o=0,a=i.length;o2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",e,e.__data__,n.index,n.group),delete o[i]):a=!1;a&&delete e.__transition}}(this,e)}))},Le.prototype.transition=function(e){var t,n;e instanceof Tn?(t=e._id,e=e._name):(t=En(),(n=Mn).time=Ye(),e=null==e?null:e+"");for(var r=this._groups,i=r.length,o=0;oKn?Math.pow(e,1/3):e/Wn+qn}function Gn(e){return e>Hn?e*e*e:Wn*(e-qn)}function Xn(e){return 255*(e<=.0031308?12.92*e:1.055*Math.pow(e,1/2.4)-.055)}function Qn(e){return(e/=255)<=.04045?e/12.92:Math.pow((e+.055)/1.055,2.4)}function Zn(e){if(e instanceof er)return new er(e.h,e.c,e.l,e.opacity);if(e instanceof Vn||(e=Un(e)),0===e.a&&0===e.b)return new er(NaN,0180||n<-180?n-360*Math.round(n/360):n):tn(isNaN(e)?t:e)}));nr(on);function ir(e,t){switch(arguments.length){case 0:break;case 1:this.range(e);break;default:this.range(t).domain(e)}return this}class or extends Map{constructor(e){let t=arguments.length>1&&void 
0!==arguments[1]?arguments[1]:cr;if(super(),Object.defineProperties(this,{_intern:{value:new Map},_key:{value:t}}),null!=e)for(const[n,r]of e)this.set(n,r)}get(e){return super.get(ar(this,e))}has(e){return super.has(ar(this,e))}set(e,t){return super.set(sr(this,e),t)}delete(e){return super.delete(lr(this,e))}}Set;function ar(e,t){let{_intern:n,_key:r}=e;const i=r(t);return n.has(i)?n.get(i):t}function sr(e,t){let{_intern:n,_key:r}=e;const i=r(t);return n.has(i)?n.get(i):(n.set(i,t),t)}function lr(e,t){let{_intern:n,_key:r}=e;const i=r(t);return n.has(i)&&(t=n.get(i),n.delete(i)),t}function cr(e){return null!==e&&"object"===typeof e?e.valueOf():e}const ur=Symbol("implicit");function hr(){var e=new or,t=[],n=[],r=ur;function i(i){let o=e.get(i);if(void 0===o){if(r!==ur)return r;e.set(i,o=t.push(i)-1)}return n[o%n.length]}return i.domain=function(n){if(!arguments.length)return t.slice();t=[],e=new or;for(const r of n)e.has(r)||e.set(r,t.push(r)-1);return i},i.range=function(e){return arguments.length?(n=Array.from(e),i):n.slice()},i.unknown=function(e){return arguments.length?(r=e,i):r},i.copy=function(){return hr(t,n).unknown(r)},ir.apply(i,arguments),i}function dr(){var e,t,n=hr().unknown(void 0),r=n.domain,i=n.range,o=0,a=1,s=!1,l=0,c=0,u=.5;function h(){var n=r().length,h=a=fr?10:o>=pr?5:o>=gr?2:1;let s,l,c;return i<0?(c=Math.pow(10,-i)/a,s=Math.round(e*c),l=Math.round(t*c),s/ct&&--l,c=-c):(c=Math.pow(10,i)*a,s=Math.round(e/c),l=Math.round(t/c),s*ct&&--l),lt?1:e>=t?0:NaN}function xr(e,t){return null==e||null==t?NaN:te?1:t>=e?0:NaN}function kr(e){let t,n,r;function i(e,r){let i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:e.length;if(i>>1;n(e[t],r)<0?i=t+1:o=t}while(ivr(e(t),n),r=(t,n)=>e(t)-n):(t=e===vr||e===xr?e:wr,n=e,r=e),{left:i,center:function(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0;const o=i(e,t,n,(arguments.length>3&&void 
0!==arguments[3]?arguments[3]:e.length)-1);return o>n&&r(e[o-1],t)>-r(e[o],t)?o-1:o},right:function(e,r){let i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:e.length;if(i>>1;n(e[t],r)<=0?i=t+1:o=t}while(it&&(n=e,e=t,t=n),function(n){return Math.max(e,Math.min(t,n))}}(a[0],a[e-1])),r=e>2?zr:Dr,i=o=null,h}function h(t){return null==t||isNaN(t=+t)?n:(i||(i=r(a.map(e),s,l)))(e(c(t)))}return h.invert=function(n){return c(t((o||(o=r(s,a.map(e),lt)))(n)))},h.domain=function(e){return arguments.length?(a=Array.from(e,Pr),u()):a.slice()},h.range=function(e){return arguments.length?(s=Array.from(e),u()):s.slice()},h.rangeRound=function(e){return s=Array.from(e),l=Lr,u()},h.clamp=function(e){return arguments.length?(c=!!e||$r,u()):c!==$r},h.interpolate=function(e){return arguments.length?(l=e,u()):l},h.unknown=function(e){return arguments.length?(n=e,h):n},function(n,r){return e=n,t=r,u()}}function Rr(){return Nr()($r,$r)}var jr,qr=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Hr(e){if(!(t=qr.exec(e)))throw new Error("invalid format: "+e);var t;return new Wr({fill:t[1],align:t[2],sign:t[3],symbol:t[4],zero:t[5],width:t[6],comma:t[7],precision:t[8]&&t[8].slice(1),trim:t[9],type:t[10]})}function Wr(e){this.fill=void 0===e.fill?" 
":e.fill+"",this.align=void 0===e.align?">":e.align+"",this.sign=void 0===e.sign?"-":e.sign+"",this.symbol=void 0===e.symbol?"":e.symbol+"",this.zero=!!e.zero,this.width=void 0===e.width?void 0:+e.width,this.comma=!!e.comma,this.precision=void 0===e.precision?void 0:+e.precision,this.trim=!!e.trim,this.type=void 0===e.type?"":e.type+""}function Kr(e,t){if((n=(e=t?e.toExponential(t-1):e.toExponential()).indexOf("e"))<0)return null;var n,r=e.slice(0,n);return[r.length>1?r[0]+r.slice(2):r,+e.slice(n+1)]}function Ur(e){return(e=Kr(Math.abs(e)))?e[1]:NaN}function Vr(e,t){var n=Kr(e,t);if(!n)return e+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Hr.prototype=Wr.prototype,Wr.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};const Yr={"%":(e,t)=>(100*e).toFixed(t),b:e=>Math.round(e).toString(2),c:e=>e+"",d:function(e){return Math.abs(e=Math.round(e))>=1e21?e.toLocaleString("en").replace(/,/g,""):e.toString(10)},e:(e,t)=>e.toExponential(t),f:(e,t)=>e.toFixed(t),g:(e,t)=>e.toPrecision(t),o:e=>Math.round(e).toString(8),p:(e,t)=>Vr(100*e,t),r:Vr,s:function(e,t){var n=Kr(e,t);if(!n)return e+"";var r=n[0],i=n[1],o=i-(jr=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,a=r.length;return o===a?r:o>a?r+new Array(o-a+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+Kr(e,Math.max(0,t+o-1))[0]},X:e=>Math.round(e).toString(16).toUpperCase(),x:e=>Math.round(e).toString(16)};function Gr(e){return e}var Xr,Qr,Zr,Jr=Array.prototype.map,ei=["y","z","a","f","p","n","\xb5","m","","k","M","G","T","P","E","Z","Y"];function ti(e){var t,n,r=void 0===e.grouping||void 0===e.thousands?Gr:(t=Jr.call(e.grouping,Number),n=e.thousands+"",function(e,r){for(var 
i=e.length,o=[],a=0,s=t[0],l=0;i>0&&s>0&&(l+s+1>r&&(s=Math.max(1,r-l)),o.push(e.substring(i-=s,i+s)),!((l+=s+1)>r));)s=t[a=(a+1)%t.length];return o.reverse().join(n)}),i=void 0===e.currency?"":e.currency[0]+"",o=void 0===e.currency?"":e.currency[1]+"",a=void 0===e.decimal?".":e.decimal+"",s=void 0===e.numerals?Gr:function(e){return function(t){return t.replace(/[0-9]/g,(function(t){return e[+t]}))}}(Jr.call(e.numerals,String)),l=void 0===e.percent?"%":e.percent+"",c=void 0===e.minus?"\u2212":e.minus+"",u=void 0===e.nan?"NaN":e.nan+"";function h(e){var t=(e=Hr(e)).fill,n=e.align,h=e.sign,d=e.symbol,f=e.zero,p=e.width,g=e.comma,m=e.precision,y=e.trim,b=e.type;"n"===b?(g=!0,b="g"):Yr[b]||(void 0===m&&(m=12),y=!0,b="g"),(f||"0"===t&&"="===n)&&(f=!0,t="0",n="=");var v="$"===d?i:"#"===d&&/[boxX]/.test(b)?"0"+b.toLowerCase():"",x="$"===d?o:/[%p]/.test(b)?l:"",k=Yr[b],w=/[defgprs%]/.test(b);function S(e){var i,o,l,d=v,S=x;if("c"===b)S=k(e)+S,e="";else{var C=(e=+e)<0||1/e<0;if(e=isNaN(e)?u:k(Math.abs(e),m),y&&(e=function(e){e:for(var t,n=e.length,r=1,i=-1;r0&&(i=0)}return i>0?e.slice(0,i)+e.slice(t+1):e}(e)),C&&0===+e&&"+"!==h&&(C=!1),d=(C?"("===h?h:c:"-"===h||"("===h?"":h)+d,S=("s"===b?ei[8+jr/3]:"")+S+(C&&"("===h?")":""),w)for(i=-1,o=e.length;++i(l=e.charCodeAt(i))||l>57){S=(46===l?a+e.slice(i+1):e.slice(i))+S,e=e.slice(0,i);break}}g&&!f&&(e=r(e,1/0));var _=d.length+e.length+S.length,A=_>1)+d+e+S+A.slice(_);break;default:e=A+d+e+S}return s(e)}return m=void 0===m?6:/[gprs]/.test(b)?Math.max(1,Math.min(21,m)):Math.max(0,Math.min(20,m)),S.toString=function(){return e+""},S}return{format:h,formatPrefix:function(e,t){var n=h(((e=Hr(e)).type="f",e)),r=3*Math.max(-8,Math.min(8,Math.floor(Ur(t)/3))),i=Math.pow(10,-r),o=ei[8+r/3];return function(e){return n(i*e)+o}}}}function ni(e,t,n,r){var i,o=br(e,t,n);switch((r=Hr(null==r?",f":r)).type){case"s":var a=Math.max(Math.abs(e),Math.abs(t));return null!=r.precision||isNaN(i=function(e,t){return 
Math.max(0,3*Math.max(-8,Math.min(8,Math.floor(Ur(t)/3)))-Ur(Math.abs(e)))}(o,a))||(r.precision=i),Zr(r,a);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(i=function(e,t){return e=Math.abs(e),t=Math.abs(t)-e,Math.max(0,Ur(t)-Ur(e))+1}(o,Math.max(Math.abs(e),Math.abs(t))))||(r.precision=i-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(i=function(e){return Math.max(0,-Ur(Math.abs(e)))}(o))||(r.precision=i-2*("%"===r.type))}return Qr(r)}function ri(e){var t=e.domain;return e.ticks=function(e){var n=t();return function(e,t,n){if(!((n=+n)>0))return[];if((e=+e)===(t=+t))return[e];const r=t=i))return[];const s=o-i+1,l=new Array(s);if(r)if(a<0)for(let c=0;c0;){if((i=yr(l,c,n))===r)return o[a]=l,o[s]=c,t(o);if(i>0)l=Math.floor(l/i)*i,c=Math.ceil(c/i)*i;else{if(!(i<0))break;l=Math.ceil(l*i)/i,c=Math.floor(c*i)/i}r=i}return e},e}function ii(){var e=Rr();return e.copy=function(){return Ir(e,ii())},ir.apply(e,arguments),ri(e)}Xr=ti({thousands:",",grouping:[3],currency:["$",""]}),Qr=Xr.format,Zr=Xr.formatPrefix;const oi=1e3,ai=6e4,si=36e5,li=864e5,ci=6048e5,ui=2592e6,hi=31536e6,di=new Date,fi=new Date;function pi(e,t,n,r){function i(t){return e(t=0===arguments.length?new Date:new Date(+t)),t}return i.floor=t=>(e(t=new Date(+t)),t),i.ceil=n=>(e(n=new Date(n-1)),t(n,1),e(n),n),i.round=e=>{const t=i(e),n=i.ceil(e);return e-t(t(e=new Date(+e),null==n?1:Math.floor(n)),e),i.range=(n,r,o)=>{const a=[];if(n=i.ceil(n),o=null==o?1:Math.floor(o),!(n0))return a;let s;do{a.push(s=new Date(+n)),t(n,o),e(n)}while(spi((t=>{if(t>=t)for(;e(t),!n(t);)t.setTime(t-1)}),((e,r)=>{if(e>=e)if(r<0)for(;++r<=0;)for(;t(e,-1),!n(e););else for(;--r>=0;)for(;t(e,1),!n(e););})),n&&(i.count=(t,r)=>(di.setTime(+t),fi.setTime(+r),e(di),e(fi),Math.floor(n(di,fi))),i.every=e=>(e=Math.floor(e),isFinite(e)&&e>0?e>1?i.filter(r?t=>r(t)%e===0:t=>i.count(0,t)%e===0):i:null)),i}const 
gi=pi((()=>{}),((e,t)=>{e.setTime(+e+t)}),((e,t)=>t-e));gi.every=e=>(e=Math.floor(e),isFinite(e)&&e>0?e>1?pi((t=>{t.setTime(Math.floor(t/e)*e)}),((t,n)=>{t.setTime(+t+n*e)}),((t,n)=>(n-t)/e)):gi:null);gi.range;const mi=pi((e=>{e.setTime(e-e.getMilliseconds())}),((e,t)=>{e.setTime(+e+t*oi)}),((e,t)=>(t-e)/oi),(e=>e.getUTCSeconds())),yi=(mi.range,pi((e=>{e.setTime(e-e.getMilliseconds()-e.getSeconds()*oi)}),((e,t)=>{e.setTime(+e+t*ai)}),((e,t)=>(t-e)/ai),(e=>e.getMinutes()))),bi=(yi.range,pi((e=>{e.setUTCSeconds(0,0)}),((e,t)=>{e.setTime(+e+t*ai)}),((e,t)=>(t-e)/ai),(e=>e.getUTCMinutes()))),vi=(bi.range,pi((e=>{e.setTime(e-e.getMilliseconds()-e.getSeconds()*oi-e.getMinutes()*ai)}),((e,t)=>{e.setTime(+e+t*si)}),((e,t)=>(t-e)/si),(e=>e.getHours()))),xi=(vi.range,pi((e=>{e.setUTCMinutes(0,0,0)}),((e,t)=>{e.setTime(+e+t*si)}),((e,t)=>(t-e)/si),(e=>e.getUTCHours()))),ki=(xi.range,pi((e=>e.setHours(0,0,0,0)),((e,t)=>e.setDate(e.getDate()+t)),((e,t)=>(t-e-(t.getTimezoneOffset()-e.getTimezoneOffset())*ai)/li),(e=>e.getDate()-1))),wi=(ki.range,pi((e=>{e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCDate(e.getUTCDate()+t)}),((e,t)=>(t-e)/li),(e=>e.getUTCDate()-1))),Si=(wi.range,pi((e=>{e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCDate(e.getUTCDate()+t)}),((e,t)=>(t-e)/li),(e=>Math.floor(e/li))));Si.range;function Ci(e){return pi((t=>{t.setDate(t.getDate()-(t.getDay()+7-e)%7),t.setHours(0,0,0,0)}),((e,t)=>{e.setDate(e.getDate()+7*t)}),((e,t)=>(t-e-(t.getTimezoneOffset()-e.getTimezoneOffset())*ai)/ci))}const _i=Ci(0),Ai=Ci(1),Ti=Ci(2),Ei=Ci(3),Fi=Ci(4),Mi=Ci(5),Li=Ci(6);_i.range,Ai.range,Ti.range,Ei.range,Fi.range,Mi.range,Li.range;function Pi(e){return pi((t=>{t.setUTCDate(t.getUTCDate()-(t.getUTCDay()+7-e)%7),t.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCDate(e.getUTCDate()+7*t)}),((e,t)=>(t-e)/ci))}const 
Oi=Pi(0),$i=Pi(1),Bi=Pi(2),Di=Pi(3),zi=Pi(4),Ii=Pi(5),Ni=Pi(6),Ri=(Oi.range,$i.range,Bi.range,Di.range,zi.range,Ii.range,Ni.range,pi((e=>{e.setDate(1),e.setHours(0,0,0,0)}),((e,t)=>{e.setMonth(e.getMonth()+t)}),((e,t)=>t.getMonth()-e.getMonth()+12*(t.getFullYear()-e.getFullYear())),(e=>e.getMonth()))),ji=(Ri.range,pi((e=>{e.setUTCDate(1),e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCMonth(e.getUTCMonth()+t)}),((e,t)=>t.getUTCMonth()-e.getUTCMonth()+12*(t.getUTCFullYear()-e.getUTCFullYear())),(e=>e.getUTCMonth()))),qi=(ji.range,pi((e=>{e.setMonth(0,1),e.setHours(0,0,0,0)}),((e,t)=>{e.setFullYear(e.getFullYear()+t)}),((e,t)=>t.getFullYear()-e.getFullYear()),(e=>e.getFullYear())));qi.every=e=>isFinite(e=Math.floor(e))&&e>0?pi((t=>{t.setFullYear(Math.floor(t.getFullYear()/e)*e),t.setMonth(0,1),t.setHours(0,0,0,0)}),((t,n)=>{t.setFullYear(t.getFullYear()+n*e)})):null;qi.range;const Hi=pi((e=>{e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),((e,t)=>{e.setUTCFullYear(e.getUTCFullYear()+t)}),((e,t)=>t.getUTCFullYear()-e.getUTCFullYear()),(e=>e.getUTCFullYear()));Hi.every=e=>isFinite(e=Math.floor(e))&&e>0?pi((t=>{t.setUTCFullYear(Math.floor(t.getUTCFullYear()/e)*e),t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),((t,n)=>{t.setUTCFullYear(t.getUTCFullYear()+n*e)})):null;Hi.range;function Wi(e,t,n,r,i,o){const a=[[mi,1,oi],[mi,5,5e3],[mi,15,15e3],[mi,30,3e4],[o,1,ai],[o,5,3e5],[o,15,9e5],[o,30,18e5],[i,1,si],[i,3,108e5],[i,6,216e5],[i,12,432e5],[r,1,li],[r,2,1728e5],[n,1,ci],[t,1,ui],[t,3,7776e6],[e,1,hi]];function s(t,n,r){const i=Math.abs(n-t)/r,o=kr((e=>{let[,,t]=e;return t})).right(a,i);if(o===a.length)return e.every(br(t/hi,n/hi,r));if(0===o)return gi.every(Math.max(br(t,n,r),1));const[s,l]=a[i/a[o-1][2][e.toLowerCase(),t])))}function lo(e,t,n){var r=to.exec(t.slice(n,n+1));return r?(e.w=+r[0],n+r[0].length):-1}function co(e,t,n){var r=to.exec(t.slice(n,n+1));return r?(e.u=+r[0],n+r[0].length):-1}function uo(e,t,n){var r=to.exec(t.slice(n,n+2));return 
r?(e.U=+r[0],n+r[0].length):-1}function ho(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.V=+r[0],n+r[0].length):-1}function fo(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.W=+r[0],n+r[0].length):-1}function po(e,t,n){var r=to.exec(t.slice(n,n+4));return r?(e.y=+r[0],n+r[0].length):-1}function go(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.y=+r[0]+(+r[0]>68?1900:2e3),n+r[0].length):-1}function mo(e,t,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(t.slice(n,n+6));return r?(e.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function yo(e,t,n){var r=to.exec(t.slice(n,n+1));return r?(e.q=3*r[0]-3,n+r[0].length):-1}function bo(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.m=r[0]-1,n+r[0].length):-1}function vo(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.d=+r[0],n+r[0].length):-1}function xo(e,t,n){var r=to.exec(t.slice(n,n+3));return r?(e.m=0,e.d=+r[0],n+r[0].length):-1}function ko(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.H=+r[0],n+r[0].length):-1}function wo(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.M=+r[0],n+r[0].length):-1}function So(e,t,n){var r=to.exec(t.slice(n,n+2));return r?(e.S=+r[0],n+r[0].length):-1}function Co(e,t,n){var r=to.exec(t.slice(n,n+3));return r?(e.L=+r[0],n+r[0].length):-1}function _o(e,t,n){var r=to.exec(t.slice(n,n+6));return r?(e.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function Ao(e,t,n){var r=no.exec(t.slice(n,n+1));return r?n+r[0].length:-1}function To(e,t,n){var r=to.exec(t.slice(n));return r?(e.Q=+r[0],n+r[0].length):-1}function Eo(e,t,n){var r=to.exec(t.slice(n));return r?(e.s=+r[0],n+r[0].length):-1}function Fo(e,t){return io(e.getDate(),t,2)}function Mo(e,t){return io(e.getHours(),t,2)}function Lo(e,t){return io(e.getHours()%12||12,t,2)}function Po(e,t){return io(1+ki.count(qi(e),e),t,3)}function Oo(e,t){return io(e.getMilliseconds(),t,3)}function $o(e,t){return Oo(e,t)+"000"}function Bo(e,t){return io(e.getMonth()+1,t,2)}function Do(e,t){return io(e.getMinutes(),t,2)}function zo(e,t){return 
io(e.getSeconds(),t,2)}function Io(e){var t=e.getDay();return 0===t?7:t}function No(e,t){return io(_i.count(qi(e)-1,e),t,2)}function Ro(e){var t=e.getDay();return t>=4||0===t?Fi(e):Fi.ceil(e)}function jo(e,t){return e=Ro(e),io(Fi.count(qi(e),e)+(4===qi(e).getDay()),t,2)}function qo(e){return e.getDay()}function Ho(e,t){return io(Ai.count(qi(e)-1,e),t,2)}function Wo(e,t){return io(e.getFullYear()%100,t,2)}function Ko(e,t){return io((e=Ro(e)).getFullYear()%100,t,2)}function Uo(e,t){return io(e.getFullYear()%1e4,t,4)}function Vo(e,t){var n=e.getDay();return io((e=n>=4||0===n?Fi(e):Fi.ceil(e)).getFullYear()%1e4,t,4)}function Yo(e){var t=e.getTimezoneOffset();return(t>0?"-":(t*=-1,"+"))+io(t/60|0,"0",2)+io(t%60,"0",2)}function Go(e,t){return io(e.getUTCDate(),t,2)}function Xo(e,t){return io(e.getUTCHours(),t,2)}function Qo(e,t){return io(e.getUTCHours()%12||12,t,2)}function Zo(e,t){return io(1+wi.count(Hi(e),e),t,3)}function Jo(e,t){return io(e.getUTCMilliseconds(),t,3)}function ea(e,t){return Jo(e,t)+"000"}function ta(e,t){return io(e.getUTCMonth()+1,t,2)}function na(e,t){return io(e.getUTCMinutes(),t,2)}function ra(e,t){return io(e.getUTCSeconds(),t,2)}function ia(e){var t=e.getUTCDay();return 0===t?7:t}function oa(e,t){return io(Oi.count(Hi(e)-1,e),t,2)}function aa(e){var t=e.getUTCDay();return t>=4||0===t?zi(e):zi.ceil(e)}function sa(e,t){return e=aa(e),io(zi.count(Hi(e),e)+(4===Hi(e).getUTCDay()),t,2)}function la(e){return e.getUTCDay()}function ca(e,t){return io($i.count(Hi(e)-1,e),t,2)}function ua(e,t){return io(e.getUTCFullYear()%100,t,2)}function ha(e,t){return io((e=aa(e)).getUTCFullYear()%100,t,2)}function da(e,t){return io(e.getUTCFullYear()%1e4,t,4)}function fa(e,t){var n=e.getUTCDay();return io((e=n>=4||0===n?zi(e):zi.ceil(e)).getUTCFullYear()%1e4,t,4)}function pa(){return"+0000"}function ga(){return"%"}function ma(e){return+e}function ya(e){return Math.floor(+e/1e3)}function ba(e){return new Date(e)}function va(e){return e instanceof Date?+e:+new 
Date(+e)}function xa(e,t,n,r,i,o,a,s,l,c){var u=Rr(),h=u.invert,d=u.domain,f=c(".%L"),p=c(":%S"),g=c("%I:%M"),m=c("%I %p"),y=c("%a %d"),b=c("%b %d"),v=c("%B"),x=c("%Y");function k(e){return(l(e)=12)]},q:function(e){return 1+~~(e.getMonth()/3)},Q:ma,s:ya,S:zo,u:Io,U:No,V:jo,w:qo,W:Ho,x:null,X:null,y:Wo,Y:Uo,Z:Yo,"%":ga},x={a:function(e){return a[e.getUTCDay()]},A:function(e){return o[e.getUTCDay()]},b:function(e){return l[e.getUTCMonth()]},B:function(e){return s[e.getUTCMonth()]},c:null,d:Go,e:Go,f:ea,g:ha,G:fa,H:Xo,I:Qo,j:Zo,L:Jo,m:ta,M:na,p:function(e){return i[+(e.getUTCHours()>=12)]},q:function(e){return 1+~~(e.getUTCMonth()/3)},Q:ma,s:ya,S:ra,u:ia,U:oa,V:sa,w:la,W:ca,x:null,X:null,y:ua,Y:da,Z:pa,"%":ga},k={a:function(e,t,n){var r=f.exec(t.slice(n));return r?(e.w=p.get(r[0].toLowerCase()),n+r[0].length):-1},A:function(e,t,n){var r=h.exec(t.slice(n));return r?(e.w=d.get(r[0].toLowerCase()),n+r[0].length):-1},b:function(e,t,n){var r=y.exec(t.slice(n));return r?(e.m=b.get(r[0].toLowerCase()),n+r[0].length):-1},B:function(e,t,n){var r=g.exec(t.slice(n));return r?(e.m=m.get(r[0].toLowerCase()),n+r[0].length):-1},c:function(e,n,r){return C(e,t,n,r)},d:vo,e:vo,f:_o,g:go,G:po,H:ko,I:ko,j:xo,L:Co,m:bo,M:wo,p:function(e,t,n){var r=c.exec(t.slice(n));return r?(e.p=u.get(r[0].toLowerCase()),n+r[0].length):-1},q:yo,Q:To,s:Eo,S:So,u:co,U:uo,V:ho,w:lo,W:fo,x:function(e,t,r){return C(e,n,t,r)},X:function(e,t,n){return C(e,r,t,n)},y:go,Y:po,Z:mo,"%":Ao};function w(e,t){return function(n){var r,i,o,a=[],s=-1,l=0,c=e.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in o||(o.w=1),"Z"in o?(i=(r=Xi(Qi(o.y,0,1))).getUTCDay(),r=i>4||0===i?$i.ceil(r):$i(r),r=wi.offset(r,7*(o.V-1)),o.y=r.getUTCFullYear(),o.m=r.getUTCMonth(),o.d=r.getUTCDate()+(o.w+6)%7):(i=(r=Gi(Qi(o.y,0,1))).getDay(),r=i>4||0===i?Ai.ceil(r):Ai(r),r=ki.offset(r,7*(o.V-1)),o.y=r.getFullYear(),o.m=r.getMonth(),o.d=r.getDate()+(o.w+6)%7)}else("W"in o||"U"in o)&&("w"in o||(o.w="u"in o?o.u%7:"W"in 
o?1:0),i="Z"in o?Xi(Qi(o.y,0,1)).getUTCDay():Gi(Qi(o.y,0,1)).getDay(),o.m=0,o.d="W"in o?(o.w+6)%7+7*o.W-(i+5)%7:o.w+7*o.U-(i+6)%7);return"Z"in o?(o.H+=o.Z/100|0,o.M+=o.Z%100,Xi(o)):Gi(o)}}function C(e,t,n,r){for(var i,o,a=0,s=t.length,l=n.length;a=l)return-1;if(37===(i=t.charCodeAt(a++))){if(i=t.charAt(a++),!(o=k[i in eo?t.charAt(a++):i])||(r=o(e,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return v.x=w(n,v),v.X=w(r,v),v.c=w(t,v),x.x=w(n,x),x.X=w(r,x),x.c=w(t,x),{format:function(e){var t=w(e+="",v);return t.toString=function(){return e},t},parse:function(e){var t=S(e+="",!1);return t.toString=function(){return e},t},utcFormat:function(e){var t=w(e+="",x);return t.toString=function(){return e},t},utcParse:function(e){var t=S(e+="",!0);return t.toString=function(){return e},t}}}(e),Ji=Zi.format,Zi.parse,Zi.utcFormat,Zi.utcParse}({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});const wa=function(e){for(var t=e.length/6|0,n=new Array(t),r=0;r=1?$a:e<=-1?-$a:Math.asin(e)}const za=Math.PI,Ia=2*za,Na=1e-6,Ra=Ia-Na;function ja(e){this._+=e[0];for(let t=1,n=e.length;t=0))throw new Error(`invalid digits: ${e}`);if(t>15)return ja;const n=10**t;return function(e){this._+=e[0];for(let t=1,r=e.length;tNa)if(Math.abs(u*s-l*c)>Na&&i){let d=n-o,f=r-a,p=s*s+l*l,g=d*d+f*f,m=Math.sqrt(p),y=Math.sqrt(h),b=i*Math.tan((za-Math.acos((p+h-g)/(2*m*y)))/2),v=b/y,x=b/m;Math.abs(v-1)>Na&&this._append`L${e+v*c},${t+v*u}`,this._append`A${i},${i},0,0,${+(u*d>c*f)},${this._x1=e+x*s},${this._y1=t+x*l}`}else this._append`L${this._x1=e},${this._y1=t}`;else;}arc(e,t,n,r,i,o){if(e=+e,t=+t,o=!!o,(n=+n)<0)throw new 
Error(`negative radius: ${n}`);let a=n*Math.cos(r),s=n*Math.sin(r),l=e+a,c=t+s,u=1^o,h=o?r-i:i-r;null===this._x1?this._append`M${l},${c}`:(Math.abs(this._x1-l)>Na||Math.abs(this._y1-c)>Na)&&this._append`L${l},${c}`,n&&(h<0&&(h=h%Ia+Ia),h>Ra?this._append`A${n},${n},0,1,${u},${e-a},${t-s}A${n},${n},0,1,${u},${this._x1=l},${this._y1=c}`:h>Na&&this._append`A${n},${n},0,${+(h>=za)},${u},${this._x1=e+n*Math.cos(i)},${this._y1=t+n*Math.sin(i)}`)}rect(e,t,n,r){this._append`M${this._x0=this._x1=+e},${this._y0=this._y1=+t}h${n=+n}v${+r}h${-n}Z`}toString(){return this._}}function Ha(e){let t=3;return e.digits=function(n){if(!arguments.length)return t;if(null==n)t=null;else{const e=Math.floor(n);if(!(e>=0))throw new RangeError(`invalid digits: ${n}`);t=e}return e},()=>new qa(t)}function Wa(e){return e.innerRadius}function Ka(e){return e.outerRadius}function Ua(e){return e.startAngle}function Va(e){return e.endAngle}function Ya(e){return e&&e.padAngle}function Ga(e,t,n,r,i,o,a){var s=e-n,l=t-r,c=(a?o:-o)/La(s*s+l*l),u=c*l,h=-c*s,d=e+u,f=t+h,p=n+u,g=r+h,m=(d+p)/2,y=(f+g)/2,b=p-d,v=g-f,x=b*b+v*v,k=i-o,w=d*g-p*f,S=(v<0?-1:1)*La(Ea(0,k*k*x-w*w)),C=(w*v-b*S)/x,_=(-w*b-v*S)/x,A=(w*v+b*S)/x,T=(-w*b+v*S)/x,E=C-m,F=_-y,M=A-m,L=T-y;return E*E+F*F>M*M+L*L&&(C=A,_=T),{cx:C,cy:_,x01:-u,y01:-h,x11:C*(i/k-1),y11:_*(i/k-1)}}function Xa(){var e=Wa,t=Ka,n=Ca(0),r=null,i=Ua,o=Va,a=Ya,s=null,l=Ha(c);function c(){var c,u,h,d=+e.apply(this,arguments),f=+t.apply(this,arguments),p=i.apply(this,arguments)-$a,g=o.apply(this,arguments)-$a,m=_a(g-p),y=g>p;if(s||(s=c=l()),fPa)if(m>Ba-Pa)s.moveTo(f*Ta(p),f*Ma(p)),s.arc(0,0,f,p,g,!y),d>Pa&&(s.moveTo(d*Ta(g),d*Ma(g)),s.arc(0,0,d,g,p,y));else{var b,v,x=p,k=g,w=p,S=g,C=m,_=m,A=a.apply(this,arguments)/2,T=A>Pa&&(r?+r.apply(this,arguments):La(d*d+f*f)),E=Fa(_a(f-d)/2,+n.apply(this,arguments)),F=E,M=E;if(T>Pa){var L=Da(T/d*Ma(A)),P=Da(T/f*Ma(A));(C-=2*L)>Pa?(w+=L*=y?1:-1,S-=L):(C=0,w=S=(p+g)/2),(_-=2*P)>Pa?(x+=P*=y?1:-1,k-=P):(_=0,x=k=(p+g)/2)}var 
O=f*Ta(x),$=f*Ma(x),B=d*Ta(S),D=d*Ma(S);if(E>Pa){var z,I=f*Ta(k),N=f*Ma(k),R=d*Ta(w),j=d*Ma(w);if(m1?0:h<-1?Oa:Math.acos(h))/2),V=La(z[0]*z[0]+z[1]*z[1]);F=Fa(E,(d-V)/(U-1)),M=Fa(E,(f-V)/(U+1))}else F=M=0}_>Pa?M>Pa?(b=Ga(R,j,O,$,f,M,y),v=Ga(I,N,B,D,f,M,y),s.moveTo(b.cx+b.x01,b.cy+b.y01),MPa&&C>Pa?F>Pa?(b=Ga(B,D,I,N,d,-F,y),v=Ga(O,$,R,j,d,-F,y),s.lineTo(b.cx+b.x01,b.cy+b.y01),Fe?1:t>=e?0:NaN}function is(e){return e}function os(){var e=is,t=rs,n=null,r=Ca(0),i=Ca(Ba),o=Ca(0);function a(a){var s,l,c,u,h,d=(a=Qa(a)).length,f=0,p=new Array(d),g=new Array(d),m=+r.apply(this,arguments),y=Math.min(Ba,Math.max(-Ba,i.apply(this,arguments)-m)),b=Math.min(Math.abs(y)/d,o.apply(this,arguments)),v=b*(y<0?-1:1);for(s=0;s0&&(f+=h);for(null!=t?p.sort((function(e,n){return t(g[e],g[n])})):null!=n&&p.sort((function(e,t){return n(a[e],a[t])})),s=0,c=f?(y-d*v)/f:0;s0?h*c:0)+v,g[l]={data:a[l],index:s,value:h,startAngle:m,endAngle:u,padAngle:b};return g}return a.value=function(t){return arguments.length?(e="function"===typeof t?t:Ca(+t),a):e},a.sortValues=function(e){return arguments.length?(t=e,n=null,a):t},a.sort=function(e){return arguments.length?(n=e,t=null,a):n},a.startAngle=function(e){return arguments.length?(r="function"===typeof e?e:Ca(+e),a):r},a.endAngle=function(e){return arguments.length?(i="function"===typeof e?e:Ca(+e),a):i},a.padAngle=function(e){return arguments.length?(o="function"===typeof e?e:Ca(+e),a):o},a}function as(){}function ss(e,t,n){e._context.bezierCurveTo((2*e._x0+e._x1)/3,(2*e._y0+e._y1)/3,(e._x0+2*e._x1)/3,(e._y0+2*e._y1)/3,(e._x0+4*e._x1+t)/6,(e._y0+4*e._y1+n)/6)}function ls(e){this._context=e}function cs(e){return new ls(e)}function us(e){this._context=e}function hs(e){return new us(e)}function ds(e){this._context=e}function fs(e){return new 
ds(e)}Za.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;default:this._context.lineTo(e,t)}}},ls.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:ss(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;break;case 2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:ss(this,e,t)}this._x0=this._x1,this._x1=e,this._y0=this._y1,this._y1=t}},us.prototype={areaStart:as,areaEnd:as,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._x2=e,this._y2=t;break;case 1:this._point=2,this._x3=e,this._y3=t;break;case 
2:this._point=3,this._x4=e,this._y4=t,this._context.moveTo((this._x0+4*this._x1+e)/6,(this._y0+4*this._y1+t)/6);break;default:ss(this,e,t)}this._x0=this._x1,this._x1=e,this._y0=this._y1,this._y1=t}},ds.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var n=(this._x0+4*this._x1+e)/6,r=(this._y0+4*this._y1+t)/6;this._line?this._context.lineTo(n,r):this._context.moveTo(n,r);break;case 3:this._point=4;default:ss(this,e,t)}this._x0=this._x1,this._x1=e,this._y0=this._y1,this._y1=t}};class ps{constructor(e,t){this._context=e,this._x=t}areaStart(){this._line=0}areaEnd(){this._line=NaN}lineStart(){this._point=0}lineEnd(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line}point(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;default:this._x?this._context.bezierCurveTo(this._x0=(this._x0+e)/2,this._y0,this._x0,t,e,t):this._context.bezierCurveTo(this._x0,this._y0=(this._y0+t)/2,e,this._y0,e,t)}this._x0=e,this._y0=t}}function gs(e){return new ps(e,!0)}function ms(e){return new ps(e,!1)}function ys(e,t){this._basis=new ls(e),this._beta=t}ys.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var e=this._x,t=this._y,n=e.length-1;if(n>0)for(var r,i=e[0],o=t[0],a=e[n]-i,s=t[n]-o,l=-1;++l<=n;)r=l/n,this._basis.point(this._beta*e[l]+(1-this._beta)*(i+r*a),this._beta*t[l]+(1-this._beta)*(o+r*s));this._x=this._y=null,this._basis.lineEnd()},point:function(e,t){this._x.push(+e),this._y.push(+t)}};const bs=function e(t){function n(e){return 1===t?new 
ls(e):new ys(e,t)}return n.beta=function(t){return e(+t)},n}(.85);function vs(e,t,n){e._context.bezierCurveTo(e._x1+e._k*(e._x2-e._x0),e._y1+e._k*(e._y2-e._y0),e._x2+e._k*(e._x1-t),e._y2+e._k*(e._y1-n),e._x2,e._y2)}function xs(e,t){this._context=e,this._k=(1-t)/6}xs.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:vs(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2,this._x1=e,this._y1=t;break;case 2:this._point=3;default:vs(this,e,t)}this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};const ks=function e(t){function n(e){return new xs(e,t)}return n.tension=function(t){return e(+t)},n}(0);function ws(e,t){this._context=e,this._k=(1-t)/6}ws.prototype={areaStart:as,areaEnd:as,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._x3=e,this._y3=t;break;case 1:this._point=2,this._context.moveTo(this._x4=e,this._y4=t);break;case 2:this._point=3,this._x5=e,this._y5=t;break;default:vs(this,e,t)}this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};const Ss=function e(t){function 
n(e){return new ws(e,t)}return n.tension=function(t){return e(+t)},n}(0);function Cs(e,t){this._context=e,this._k=(1-t)/6}Cs.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:vs(this,e,t)}this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};const _s=function e(t){function n(e){return new Cs(e,t)}return n.tension=function(t){return e(+t)},n}(0);function As(e,t,n){var r=e._x1,i=e._y1,o=e._x2,a=e._y2;if(e._l01_a>Pa){var s=2*e._l01_2a+3*e._l01_a*e._l12_a+e._l12_2a,l=3*e._l01_a*(e._l01_a+e._l12_a);r=(r*s-e._x0*e._l12_2a+e._x2*e._l01_2a)/l,i=(i*s-e._y0*e._l12_2a+e._y2*e._l01_2a)/l}if(e._l23_a>Pa){var c=2*e._l23_2a+3*e._l23_a*e._l12_a+e._l12_2a,u=3*e._l23_a*(e._l23_a+e._l12_a);o=(o*c+e._x1*e._l23_2a-t*e._l12_2a)/u,a=(a*c+e._y1*e._l23_2a-n*e._l12_2a)/u}e._context.bezierCurveTo(r,i,o,a,e._x2,e._y2)}function Ts(e,t){this._context=e,this._alpha=t}Ts.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){if(e=+e,t=+t,this._point){var 
n=this._x2-e,r=this._y2-t;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;break;case 2:this._point=3;default:As(this,e,t)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};const Es=function e(t){function n(e){return t?new Ts(e,t):new xs(e,0)}return n.alpha=function(t){return e(+t)},n}(.5);function Fs(e,t){this._context=e,this._alpha=t}Fs.prototype={areaStart:as,areaEnd:as,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(e,t){if(e=+e,t=+t,this._point){var n=this._x2-e,r=this._y2-t;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=e,this._y3=t;break;case 1:this._point=2,this._context.moveTo(this._x4=e,this._y4=t);break;case 2:this._point=3,this._x5=e,this._y5=t;break;default:As(this,e,t)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};const Ms=function e(t){function n(e){return t?new Fs(e,t):new ws(e,0)}return n.alpha=function(t){return e(+t)},n}(.5);function 
Ls(e,t){this._context=e,this._alpha=t}Ls.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(e,t){if(e=+e,t=+t,this._point){var n=this._x2-e,r=this._y2-t;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:As(this,e,t)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=e,this._y0=this._y1,this._y1=this._y2,this._y2=t}};const Ps=function e(t){function n(e){return t?new Ls(e,t):new Cs(e,0)}return n.alpha=function(t){return e(+t)},n}(.5);function Os(e){this._context=e}function $s(e){return new Os(e)}function Bs(e){return e<0?-1:1}function Ds(e,t,n){var r=e._x1-e._x0,i=t-e._x1,o=(e._y1-e._y0)/(r||i<0&&-0),a=(n-e._y1)/(i||r<0&&-0),s=(o*i+a*r)/(r+i);return(Bs(o)+Bs(a))*Math.min(Math.abs(o),Math.abs(a),.5*Math.abs(s))||0}function zs(e,t){var n=e._x1-e._x0;return n?(3*(e._y1-e._y0)/n-t)/2:t}function Is(e,t,n){var r=e._x0,i=e._y0,o=e._x1,a=e._y1,s=(o-r)/3;e._context.bezierCurveTo(r+s,i+s*t,o-s,a-s*n,o,a)}function Ns(e){this._context=e}function Rs(e){this._context=new js(e)}function js(e){this._context=e}function qs(e){return new Ns(e)}function Hs(e){return new Rs(e)}function Ws(e){this._context=e}function Ks(e){var t,n,r=e.length-1,i=new Array(r),o=new Array(r),a=new 
Array(r);for(i[0]=0,o[0]=2,a[0]=e[0]+2*e[1],t=1;t=0;--t)i[t]=(a[t]-i[t+1])/o[t];for(o[r-1]=(e[r]+i[r-1])/2,t=0;t=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(e,t){switch(e=+e,t=+t,this._point){case 0:this._point=1,this._line?this._context.lineTo(e,t):this._context.moveTo(e,t);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,t),this._context.lineTo(e,t);else{var n=this._x*(1-this._t)+e*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,t)}}this._x=e,this._y=t}},Qs.prototype={constructor:Qs,scale:function(e){return 1===e?this:new Qs(this.k*e,this.x,this.y)},translate:function(e,t){return 0===e&0===t?this:new Qs(this.k,this.x+this.k*e,this.y+this.k*t)},apply:function(e){return[e[0]*this.k+this.x,e[1]*this.k+this.y]},applyX:function(e){return e*this.k+this.x},applyY:function(e){return e*this.k+this.y},invert:function(e){return[(e[0]-this.x)/this.k,(e[1]-this.y)/this.k]},invertX:function(e){return(e-this.x)/this.k},invertY:function(e){return(e-this.y)/this.k},rescaleX:function(e){return e.copy().domain(e.range().map(this.invertX,this).map(e.invert,e))},rescaleY:function(e){return e.copy().domain(e.range().map(this.invertY,this).map(e.invert,e))},toString:function(){return"translate("+this.x+","+this.y+") scale("+this.k+")"}};new Qs(1,0,0);Qs.prototype},3755:(e,t,n)=>{"use strict";t.J=void 0;var r=n(4830);function i(e){try{return decodeURIComponent(e)}catch(t){return e}}t.J=function(e){if(!e)return r.BLANK_URL;var t,n,o=i(e.trim());do{t=(o=i(o=(n=o,n.replace(r.ctrlCharactersRegex,"").replace(r.htmlEntitiesRegex,(function(e,t){return String.fromCharCode(t)}))).replace(r.htmlCtrlEntityRegex,"").replace(r.ctrlCharactersRegex,"").replace(r.whitespaceEscapeCharsRegex,"").trim())).match(r.ctrlCharactersRegex)||o.match(r.htmlEntitiesRegex)||o.match(r.htmlCtrlEntityRegex)||o.match(r.whitespaceEscapeCharsRegex)}while(t&&t.length>0);var a=o;if(!a)return r.BLANK_URL;if(function(e){return 
r.relativeFirstCharacters.indexOf(e[0])>-1}(a))return a;var s=a.trimStart(),l=s.match(r.urlSchemeRegex);if(!l)return a;var c=l[0].toLowerCase().trim();if(r.invalidProtocolRegex.test(c))return r.BLANK_URL;var u=s.replace(/\\/g,"/");if("mailto:"===c||c.includes("://"))return u;if("http:"===c||"https:"===c){if(!function(e){return URL.canParse(e)}(u))return r.BLANK_URL;var h=new URL(u);return h.protocol=h.protocol.toLowerCase(),h.hostname=h.hostname.toLowerCase(),h.toString()}return u}},3759:(e,t,n)=>{"use strict";n.d(t,{C0:()=>_,VA:()=>y,K2:()=>m,xA:()=>he,hH:()=>P,Dl:()=>Re,IU:()=>nt,Wt:()=>Qe,Y2:()=>qe,a$:()=>Ke,sb:()=>J,ME:()=>ft,UI:()=>X,Ch:()=>T,mW:()=>A,DB:()=>S,_3:()=>Me,EJ:()=>w,m7:()=>at,iN:()=>it,zj:()=>ce,D7:()=>ht,Gs:()=>vt,J$:()=>M,ab:()=>lt,Q2:()=>se,P$:()=>j,Wi:()=>Ne,H1:()=>ye,Rm:()=>v,QO:()=>Oe,Js:()=>bt,Xd:()=>E,VJ:()=>je,cL:()=>de,$i:()=>Q,jZ:()=>Se,oB:()=>pt,wZ:()=>oe,EI:()=>ot,SV:()=>rt,Nk:()=>le,XV:()=>dt,ke:()=>st,He:()=>x,UU:()=>ie,ot:()=>Ue,mj:()=>gt,tM:()=>Xe,H$:()=>K,B6:()=>ae});var r=n(446),i=n(1458),o=n(6453);const a=(e,t)=>{const n=i.A.parse(e),r={};for(const i in t)t[i]&&(r[i]=n[i]+t[i]);return(0,o.A)(e,r)};var s=n(6471);const l=function(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:50;const{r:r,g:o,b:a,a:l}=i.A.parse(e),{r:c,g:u,b:h,a:d}=i.A.parse(t),f=n/100,p=2*f-1,g=l-d,m=((p*g===-1?p:(p+g)/(1+p*g))+1)/2,y=1-m,b=r*m+c*y,v=o*m+u*y,x=a*m+h*y,k=l*f+d*(1-f);return(0,s.A)(b,v,x,k)},c=function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:100;const n=i.A.parse(e);return n.r=255-n.r,n.g=255-n.g,n.b=255-n.b,l(n,e,t)};var u,h=n(5554),d=n(6362),f=n(8194),p=n(7977),g=Object.defineProperty,m=(e,t)=>g(e,"name",{value:t,configurable:!0}),y=(e,t)=>{for(var n in 
t)g(e,n,{get:t[n],enumerable:!0})},b={trace:0,debug:1,info:2,warn:3,error:4,fatal:5},v={trace:m((function(){}),"trace"),debug:m((function(){}),"debug"),info:m((function(){}),"info"),warn:m((function(){}),"warn"),error:m((function(){}),"error"),fatal:m((function(){}),"fatal")},x=m((function(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:"fatal",t=b.fatal;"string"===typeof e?e.toLowerCase()in b&&(t=b[e]):"number"===typeof e&&(t=e),v.trace=()=>{},v.debug=()=>{},v.info=()=>{},v.warn=()=>{},v.error=()=>{},v.fatal=()=>{},t<=b.fatal&&(v.fatal=console.error?console.error.bind(console,k("FATAL"),"color: orange"):console.log.bind(console,"\x1b[35m",k("FATAL"))),t<=b.error&&(v.error=console.error?console.error.bind(console,k("ERROR"),"color: orange"):console.log.bind(console,"\x1b[31m",k("ERROR"))),t<=b.warn&&(v.warn=console.warn?console.warn.bind(console,k("WARN"),"color: orange"):console.log.bind(console,"\x1b[33m",k("WARN"))),t<=b.info&&(v.info=console.info?console.info.bind(console,k("INFO"),"color: lightblue"):console.log.bind(console,"\x1b[34m",k("INFO"))),t<=b.debug&&(v.debug=console.debug?console.debug.bind(console,k("DEBUG"),"color: lightgreen"):console.log.bind(console,"\x1b[32m",k("DEBUG"))),t<=b.trace&&(v.trace=console.debug?console.debug.bind(console,k("TRACE"),"color: lightgreen"):console.log.bind(console,"\x1b[32m",k("TRACE")))}),"setLogLevel"),k=m((e=>`%c${r().format("ss.SSS")} : ${e} : `),"format"),w=/^-{3}\s*[\n\r](.*?)[\n\r]-{3}\s*[\n\r]+/s,S=/%{2}{\s*(?:(\w+)\s*:|(\w+))\s*(?:(\w+)|((?:(?!}%{2}).|\r?\n)*))?\s*(?:}%{2})?/gi,C=/\s*%%.*\n/gm,_=class extends Error{static#e=(()=>m(this,"UnknownDiagramError"))();constructor(e){super(e),this.name="UnknownDiagramError"}},A={},T=m((function(e,t){e=e.replace(w,"").replace(S,"").replace(C,"\n");for(const[n,{detector:r}]of Object.entries(A)){if(r(e,t))return n}throw new _(`No diagram type detected matching given configuration for text: ${e}`)}),"detectType"),E=m((function(){for(var 
e=arguments.length,t=new Array(e),n=0;n{A[e]&&v.warn(`Detector with key ${e} already exists. Overwriting.`),A[e]={detector:t,loader:n},v.debug(`Detector with key ${e} added${n?" with loader":""}`)}),"addDetector"),M=m((e=>A[e].loader),"getDiagramLoader"),L=m((function(e,t){let{depth:n=2,clobber:r=!1}=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{};const i={depth:n,clobber:r};return Array.isArray(t)&&!Array.isArray(e)?(t.forEach((t=>L(e,t,i))),e):Array.isArray(t)&&Array.isArray(e)?(t.forEach((t=>{e.includes(t)||e.push(t)})),e):void 0===e||n<=0?void 0!==e&&null!==e&&"object"===typeof e&&"object"===typeof t?Object.assign(e,t):t:(void 0!==t&&"object"===typeof e&&"object"===typeof t&&Object.keys(t).forEach((i=>{"object"!==typeof t[i]||void 0!==e[i]&&"object"!==typeof e[i]?(r||"object"!==typeof e[i]&&"object"!==typeof t[i])&&(e[i]=t[i]):(void 0===e[i]&&(e[i]=Array.isArray(t[i])?[]:{}),e[i]=L(e[i],t[i],{depth:n-1,clobber:r}))})),e)}),"assignWithDepth"),P=L,O="#ffffff",$="#f2f2f2",B=m(((e,t)=>a(e,t?{s:-40,l:10}:{s:-40,l:-10})),"mkBorder"),D=class{static#e=(()=>m(this,"Theme"))();constructor(){this.background="#f4f4f4",this.primaryColor="#fff4dd",this.noteBkgColor="#fff5ad",this.noteTextColor="#333",this.THEME_COLOR_LIMIT=12,this.fontFamily='"trebuchet ms", verdana, arial, 
sans-serif',this.fontSize="16px"}updateColors(){if(this.primaryTextColor=this.primaryTextColor||(this.darkMode?"#eee":"#333"),this.secondaryColor=this.secondaryColor||a(this.primaryColor,{h:-120}),this.tertiaryColor=this.tertiaryColor||a(this.primaryColor,{h:180,l:5}),this.primaryBorderColor=this.primaryBorderColor||B(this.primaryColor,this.darkMode),this.secondaryBorderColor=this.secondaryBorderColor||B(this.secondaryColor,this.darkMode),this.tertiaryBorderColor=this.tertiaryBorderColor||B(this.tertiaryColor,this.darkMode),this.noteBorderColor=this.noteBorderColor||B(this.noteBkgColor,this.darkMode),this.noteBkgColor=this.noteBkgColor||"#fff5ad",this.noteTextColor=this.noteTextColor||"#333",this.secondaryTextColor=this.secondaryTextColor||c(this.secondaryColor),this.tertiaryTextColor=this.tertiaryTextColor||c(this.tertiaryColor),this.lineColor=this.lineColor||c(this.background),this.arrowheadColor=this.arrowheadColor||c(this.background),this.textColor=this.textColor||this.primaryTextColor,this.border2=this.border2||this.tertiaryBorderColor,this.nodeBkg=this.nodeBkg||this.primaryColor,this.mainBkg=this.mainBkg||this.primaryColor,this.nodeBorder=this.nodeBorder||this.primaryBorderColor,this.clusterBkg=this.clusterBkg||this.tertiaryColor,this.clusterBorder=this.clusterBorder||this.tertiaryBorderColor,this.defaultLinkColor=this.defaultLinkColor||this.lineColor,this.titleColor=this.titleColor||this.tertiaryTextColor,this.edgeLabelBackground=this.edgeLabelBackground||(this.darkMode?(0,h.A)(this.secondaryColor,30):this.secondaryColor),this.nodeTextColor=this.nodeTextColor||this.primaryTextColor,this.actorBorder=this.actorBorder||this.primaryBorderColor,this.actorBkg=this.actorBkg||this.mainBkg,this.actorTextColor=this.actorTextColor||this.primaryTextColor,this.actorLineColor=this.actorLineColor||this.actorBorder,this.labelBoxBkgColor=this.labelBoxBkgColor||this.actorBkg,this.signalColor=this.signalColor||this.textColor,this.signalTextColor=this.signalTextColor||this.textC
olor,this.labelBoxBorderColor=this.labelBoxBorderColor||this.actorBorder,this.labelTextColor=this.labelTextColor||this.actorTextColor,this.loopTextColor=this.loopTextColor||this.actorTextColor,this.activationBorderColor=this.activationBorderColor||(0,h.A)(this.secondaryColor,10),this.activationBkgColor=this.activationBkgColor||this.secondaryColor,this.sequenceNumberColor=this.sequenceNumberColor||c(this.lineColor),this.sectionBkgColor=this.sectionBkgColor||this.tertiaryColor,this.altSectionBkgColor=this.altSectionBkgColor||"white",this.sectionBkgColor=this.sectionBkgColor||this.secondaryColor,this.sectionBkgColor2=this.sectionBkgColor2||this.primaryColor,this.excludeBkgColor=this.excludeBkgColor||"#eeeeee",this.taskBorderColor=this.taskBorderColor||this.primaryBorderColor,this.taskBkgColor=this.taskBkgColor||this.primaryColor,this.activeTaskBorderColor=this.activeTaskBorderColor||this.primaryColor,this.activeTaskBkgColor=this.activeTaskBkgColor||(0,d.A)(this.primaryColor,23),this.gridColor=this.gridColor||"lightgrey",this.doneTaskBkgColor=this.doneTaskBkgColor||"lightgrey",this.doneTaskBorderColor=this.doneTaskBorderColor||"grey",this.critBorderColor=this.critBorderColor||"#ff8888",this.critBkgColor=this.critBkgColor||"red",this.todayLineColor=this.todayLineColor||"red",this.taskTextColor=this.taskTextColor||this.textColor,this.taskTextOutsideColor=this.taskTextOutsideColor||this.textColor,this.taskTextLightColor=this.taskTextLightColor||this.textColor,this.taskTextColor=this.taskTextColor||this.primaryTextColor,this.taskTextDarkColor=this.taskTextDarkColor||this.textColor,this.taskTextClickableColor=this.taskTextClickableColor||"#003163",this.personBorder=this.personBorder||this.primaryBorderColor,this.personBkg=this.personBkg||this.mainBkg,this.darkMode?(this.rowOdd=this.rowOdd||(0,h.A)(this.mainBkg,5)||"#ffffff",this.rowEven=this.rowEven||(0,h.A)(this.mainBkg,10)):(this.rowOdd=this.rowOdd||(0,d.A)(this.mainBkg,75)||"#ffffff",this.rowEven=this.rowEven||(0,d.A)(thi
s.mainBkg,5)),this.transitionColor=this.transitionColor||this.lineColor,this.transitionLabelColor=this.transitionLabelColor||this.textColor,this.stateLabelColor=this.stateLabelColor||this.stateBkg||this.primaryTextColor,this.stateBkg=this.stateBkg||this.mainBkg,this.labelBackgroundColor=this.labelBackgroundColor||this.stateBkg,this.compositeBackground=this.compositeBackground||this.background||this.tertiaryColor,this.altBackground=this.altBackground||this.tertiaryColor,this.compositeTitleBackground=this.compositeTitleBackground||this.mainBkg,this.compositeBorder=this.compositeBorder||this.nodeBorder,this.innerEndBackground=this.nodeBorder,this.errorBkgColor=this.errorBkgColor||this.tertiaryColor,this.errorTextColor=this.errorTextColor||this.tertiaryTextColor,this.transitionColor=this.transitionColor||this.lineColor,this.specialStateColor=this.lineColor,this.cScale0=this.cScale0||this.primaryColor,this.cScale1=this.cScale1||this.secondaryColor,this.cScale2=this.cScale2||this.tertiaryColor,this.cScale3=this.cScale3||a(this.primaryColor,{h:30}),this.cScale4=this.cScale4||a(this.primaryColor,{h:60}),this.cScale5=this.cScale5||a(this.primaryColor,{h:90}),this.cScale6=this.cScale6||a(this.primaryColor,{h:120}),this.cScale7=this.cScale7||a(this.primaryColor,{h:150}),this.cScale8=this.cScale8||a(this.primaryColor,{h:210,l:150}),this.cScale9=this.cScale9||a(this.primaryColor,{h:270}),this.cScale10=this.cScale10||a(this.primaryColor,{h:300}),this.cScale11=this.cScale11||a(this.primaryColor,{h:330}),this.darkMode)for(let t=0;t{this[t]=e[t]})),this.updateColors(),t.forEach((t=>{this[t]=e[t]}))}},z=m((e=>{const t=new D;return 
t.calculate(e),t}),"getThemeVariables"),I=class{static#e=(()=>m(this,"Theme"))();constructor(){this.background="#333",this.primaryColor="#1f2020",this.secondaryColor=(0,d.A)(this.primaryColor,16),this.tertiaryColor=a(this.primaryColor,{h:-160}),this.primaryBorderColor=c(this.background),this.secondaryBorderColor=B(this.secondaryColor,this.darkMode),this.tertiaryBorderColor=B(this.tertiaryColor,this.darkMode),this.primaryTextColor=c(this.primaryColor),this.secondaryTextColor=c(this.secondaryColor),this.tertiaryTextColor=c(this.tertiaryColor),this.lineColor=c(this.background),this.textColor=c(this.background),this.mainBkg="#1f2020",this.secondBkg="calculated",this.mainContrastColor="lightgrey",this.darkTextColor=(0,d.A)(c("#323D47"),10),this.lineColor="calculated",this.border1="#ccc",this.border2=(0,s.A)(255,255,255,.25),this.arrowheadColor="calculated",this.fontFamily='"trebuchet ms", verdana, arial, sans-serif',this.fontSize="16px",this.labelBackground="#181818",this.textColor="#ccc",this.THEME_COLOR_LIMIT=12,this.nodeBkg="calculated",this.nodeBorder="calculated",this.clusterBkg="calculated",this.clusterBorder="calculated",this.defaultLinkColor="calculated",this.titleColor="#F9FFFE",this.edgeLabelBackground="calculated",this.actorBorder="calculated",this.actorBkg="calculated",this.actorTextColor="calculated",this.actorLineColor="calculated",this.signalColor="calculated",this.signalTextColor="calculated",this.labelBoxBkgColor="calculated",this.labelBoxBorderColor="calculated",this.labelTextColor="calculated",this.loopTextColor="calculated",this.noteBorderColor="calculated",this.noteBkgColor="#fff5ad",this.noteTextColor="calculated",this.activationBorderColor="calculated",this.activationBkgColor="calculated",this.sequenceNumberColor="black",this.sectionBkgColor=(0,h.A)("#EAE8D9",30),this.altSectionBkgColor="calculated",this.sectionBkgColor2="#EAE8D9",this.excludeBkgColor=(0,h.A)(this.sectionBkgColor,10),this.taskBorderColor=(0,s.A)(255,255,255,70),this.taskBkgColor="c
alculated",this.taskTextColor="calculated",this.taskTextLightColor="calculated",this.taskTextOutsideColor="calculated",this.taskTextClickableColor="#003163",this.activeTaskBorderColor=(0,s.A)(255,255,255,50),this.activeTaskBkgColor="#81B1DB",this.gridColor="calculated",this.doneTaskBkgColor="calculated",this.doneTaskBorderColor="grey",this.critBorderColor="#E83737",this.critBkgColor="#E83737",this.taskTextDarkColor="calculated",this.todayLineColor="#DB5757",this.personBorder=this.primaryBorderColor,this.personBkg=this.mainBkg,this.archEdgeColor="calculated",this.archEdgeArrowColor="calculated",this.archEdgeWidth="3",this.archGroupBorderColor=this.primaryBorderColor,this.archGroupBorderWidth="2px",this.rowOdd=this.rowOdd||(0,d.A)(this.mainBkg,5)||"#ffffff",this.rowEven=this.rowEven||(0,h.A)(this.mainBkg,10),this.labelColor="calculated",this.errorBkgColor="#a44141",this.errorTextColor="#ddd"}updateColors(){this.secondBkg=(0,d.A)(this.mainBkg,16),this.lineColor=this.mainContrastColor,this.arrowheadColor=this.mainContrastColor,this.nodeBkg=this.mainBkg,this.nodeBorder=this.border1,this.clusterBkg=this.secondBkg,this.clusterBorder=this.border2,this.defaultLinkColor=this.lineColor,this.edgeLabelBackground=(0,d.A)(this.labelBackground,25),this.actorBorder=this.border1,this.actorBkg=this.mainBkg,this.actorTextColor=this.mainContrastColor,this.actorLineColor=this.actorBorder,this.signalColor=this.mainContrastColor,this.signalTextColor=this.mainContrastColor,this.labelBoxBkgColor=this.actorBkg,this.labelBoxBorderColor=this.actorBorder,this.labelTextColor=this.mainContrastColor,this.loopTextColor=this.mainContrastColor,this.noteBorderColor=this.secondaryBorderColor,this.noteBkgColor=this.secondBkg,this.noteTextColor=this.secondaryTextColor,this.activationBorderColor=this.border1,this.activationBkgColor=this.secondBkg,this.altSectionBkgColor=this.background,this.taskBkgColor=(0,d.A)(this.mainBkg,23),this.taskTextColor=this.darkTextColor,this.taskTextLightColor=this.mainContrast
Color,this.taskTextOutsideColor=this.taskTextLightColor,this.gridColor=this.mainContrastColor,this.doneTaskBkgColor=this.mainContrastColor,this.taskTextDarkColor=this.darkTextColor,this.archEdgeColor=this.lineColor,this.archEdgeArrowColor=this.lineColor,this.transitionColor=this.transitionColor||this.lineColor,this.transitionLabelColor=this.transitionLabelColor||this.textColor,this.stateLabelColor=this.stateLabelColor||this.stateBkg||this.primaryTextColor,this.stateBkg=this.stateBkg||this.mainBkg,this.labelBackgroundColor=this.labelBackgroundColor||this.stateBkg,this.compositeBackground=this.compositeBackground||this.background||this.tertiaryColor,this.altBackground=this.altBackground||"#555",this.compositeTitleBackground=this.compositeTitleBackground||this.mainBkg,this.compositeBorder=this.compositeBorder||this.nodeBorder,this.innerEndBackground=this.primaryBorderColor,this.specialStateColor="#f4f4f4",this.errorBkgColor=this.errorBkgColor||this.tertiaryColor,this.errorTextColor=this.errorTextColor||this.tertiaryTextColor,this.fillType0=this.primaryColor,this.fillType1=this.secondaryColor,this.fillType2=a(this.primaryColor,{h:64}),this.fillType3=a(this.secondaryColor,{h:64}),this.fillType4=a(this.primaryColor,{h:-64}),this.fillType5=a(this.secondaryColor,{h:-64}),this.fillType6=a(this.primaryColor,{h:128}),this.fillType7=a(this.secondaryColor,{h:128}),this.cScale1=this.cScale1||"#0b0000",this.cScale2=this.cScale2||"#4d1037",this.cScale3=this.cScale3||"#3f5258",this.cScale4=this.cScale4||"#4f2f1b",this.cScale5=this.cScale5||"#6e0a0a",this.cScale6=this.cScale6||"#3b0048",this.cScale7=this.cScale7||"#995a01",this.cScale8=this.cScale8||"#154706",this.cScale9=this.cScale9||"#161722",this.cScale10=this.cScale10||"#00296f",this.cScale11=this.cScale11||"#01629c",this.cScale12=this.cScale12||"#010029",this.cScale0=this.cScale0||this.primaryColor,this.cScale1=this.cScale1||this.secondaryColor,this.cScale2=this.cScale2||this.tertiaryColor,this.cScale3=this.cScale3||a(this.prim
aryColor,{h:30}),this.cScale4=this.cScale4||a(this.primaryColor,{h:60}),this.cScale5=this.cScale5||a(this.primaryColor,{h:90}),this.cScale6=this.cScale6||a(this.primaryColor,{h:120}),this.cScale7=this.cScale7||a(this.primaryColor,{h:150}),this.cScale8=this.cScale8||a(this.primaryColor,{h:210}),this.cScale9=this.cScale9||a(this.primaryColor,{h:270}),this.cScale10=this.cScale10||a(this.primaryColor,{h:300}),this.cScale11=this.cScale11||a(this.primaryColor,{h:330});for(let e=0;e{this[t]=e[t]})),this.updateColors(),t.forEach((t=>{this[t]=e[t]}))}},N=m((e=>{const t=new I;return t.calculate(e),t}),"getThemeVariables"),R=class{static#e=(()=>m(this,"Theme"))();constructor(){this.background="#f4f4f4",this.primaryColor="#ECECFF",this.secondaryColor=a(this.primaryColor,{h:120}),this.secondaryColor="#ffffde",this.tertiaryColor=a(this.primaryColor,{h:-160}),this.primaryBorderColor=B(this.primaryColor,this.darkMode),this.secondaryBorderColor=B(this.secondaryColor,this.darkMode),this.tertiaryBorderColor=B(this.tertiaryColor,this.darkMode),this.primaryTextColor=c(this.primaryColor),this.secondaryTextColor=c(this.secondaryColor),this.tertiaryTextColor=c(this.tertiaryColor),this.lineColor=c(this.background),this.textColor=c(this.background),this.background="white",this.mainBkg="#ECECFF",this.secondBkg="#ffffde",this.lineColor="#333333",this.border1="#9370DB",this.border2="#aaaa33",this.arrowheadColor="#333333",this.fontFamily='"trebuchet ms", verdana, arial, sans-serif',this.fontSize="16px",this.labelBackground="rgba(232,232,232, 
0.8)",this.textColor="#333",this.THEME_COLOR_LIMIT=12,this.nodeBkg="calculated",this.nodeBorder="calculated",this.clusterBkg="calculated",this.clusterBorder="calculated",this.defaultLinkColor="calculated",this.titleColor="calculated",this.edgeLabelBackground="calculated",this.actorBorder="calculated",this.actorBkg="calculated",this.actorTextColor="black",this.actorLineColor="calculated",this.signalColor="calculated",this.signalTextColor="calculated",this.labelBoxBkgColor="calculated",this.labelBoxBorderColor="calculated",this.labelTextColor="calculated",this.loopTextColor="calculated",this.noteBorderColor="calculated",this.noteBkgColor="#fff5ad",this.noteTextColor="calculated",this.activationBorderColor="#666",this.activationBkgColor="#f4f4f4",this.sequenceNumberColor="white",this.sectionBkgColor="calculated",this.altSectionBkgColor="calculated",this.sectionBkgColor2="calculated",this.excludeBkgColor="#eeeeee",this.taskBorderColor="calculated",this.taskBkgColor="calculated",this.taskTextLightColor="calculated",this.taskTextColor=this.taskTextLightColor,this.taskTextDarkColor="calculated",this.taskTextOutsideColor=this.taskTextDarkColor,this.taskTextClickableColor="calculated",this.activeTaskBorderColor="calculated",this.activeTaskBkgColor="calculated",this.gridColor="calculated",this.doneTaskBkgColor="calculated",this.doneTaskBorderColor="calculated",this.critBorderColor="calculated",this.critBkgColor="calculated",this.todayLineColor="calculated",this.sectionBkgColor=(0,s.A)(102,102,255,.49),this.altSectionBkgColor="white",this.sectionBkgColor2="#fff400",this.taskBorderColor="#534fbc",this.taskBkgColor="#8a90dd",this.taskTextLightColor="white",this.taskTextColor="calculated",this.taskTextDarkColor="black",this.taskTextOutsideColor="calculated",this.taskTextClickableColor="#003163",this.activeTaskBorderColor="#534fbc",this.activeTaskBkgColor="#bfc7ff",this.gridColor="lightgrey",this.doneTaskBkgColor="lightgrey",this.doneTaskBorderColor="grey",this.critBorderColor="#f
f8888",this.critBkgColor="red",this.todayLineColor="red",this.personBorder=this.primaryBorderColor,this.personBkg=this.mainBkg,this.archEdgeColor="calculated",this.archEdgeArrowColor="calculated",this.archEdgeWidth="3",this.archGroupBorderColor=this.primaryBorderColor,this.archGroupBorderWidth="2px",this.rowOdd="calculated",this.rowEven="calculated",this.labelColor="black",this.errorBkgColor="#552222",this.errorTextColor="#552222",this.updateColors()}updateColors(){this.cScale0=this.cScale0||this.primaryColor,this.cScale1=this.cScale1||this.secondaryColor,this.cScale2=this.cScale2||this.tertiaryColor,this.cScale3=this.cScale3||a(this.primaryColor,{h:30}),this.cScale4=this.cScale4||a(this.primaryColor,{h:60}),this.cScale5=this.cScale5||a(this.primaryColor,{h:90}),this.cScale6=this.cScale6||a(this.primaryColor,{h:120}),this.cScale7=this.cScale7||a(this.primaryColor,{h:150}),this.cScale8=this.cScale8||a(this.primaryColor,{h:210}),this.cScale9=this.cScale9||a(this.primaryColor,{h:270}),this.cScale10=this.cScale10||a(this.primaryColor,{h:300}),this.cScale11=this.cScale11||a(this.primaryColor,{h:330}),this.cScalePeer1=this.cScalePeer1||(0,h.A)(this.secondaryColor,45),this.cScalePeer2=this.cScalePeer2||(0,h.A)(this.tertiaryColor,40);for(let e=0;e{"calculated"===this[e]&&(this[e]=void 0)})),"object"!==typeof e)return void this.updateColors();const t=Object.keys(e);t.forEach((t=>{this[t]=e[t]})),this.updateColors(),t.forEach((t=>{this[t]=e[t]}))}},j=m((e=>{const t=new R;return t.calculate(e),t}),"getThemeVariables"),q=class{static#e=(()=>m(this,"Theme"))();constructor(){this.background="#f4f4f4",this.primaryColor="#cde498",this.secondaryColor="#cdffb2",this.background="white",this.mainBkg="#cde498",this.secondBkg="#cdffb2",this.lineColor="green",this.border1="#13540c",this.border2="#6eaa49",this.arrowheadColor="green",this.fontFamily='"trebuchet ms", verdana, arial, 
sans-serif',this.fontSize="16px",this.tertiaryColor=(0,d.A)("#cde498",10),this.primaryBorderColor=B(this.primaryColor,this.darkMode),this.secondaryBorderColor=B(this.secondaryColor,this.darkMode),this.tertiaryBorderColor=B(this.tertiaryColor,this.darkMode),this.primaryTextColor=c(this.primaryColor),this.secondaryTextColor=c(this.secondaryColor),this.tertiaryTextColor=c(this.primaryColor),this.lineColor=c(this.background),this.textColor=c(this.background),this.THEME_COLOR_LIMIT=12,this.nodeBkg="calculated",this.nodeBorder="calculated",this.clusterBkg="calculated",this.clusterBorder="calculated",this.defaultLinkColor="calculated",this.titleColor="#333",this.edgeLabelBackground="#e8e8e8",this.actorBorder="calculated",this.actorBkg="calculated",this.actorTextColor="black",this.actorLineColor="calculated",this.signalColor="#333",this.signalTextColor="#333",this.labelBoxBkgColor="calculated",this.labelBoxBorderColor="#326932",this.labelTextColor="calculated",this.loopTextColor="calculated",this.noteBorderColor="calculated",this.noteBkgColor="#fff5ad",this.noteTextColor="calculated",this.activationBorderColor="#666",this.activationBkgColor="#f4f4f4",this.sequenceNumberColor="white",this.sectionBkgColor="#6eaa49",this.altSectionBkgColor="white",this.sectionBkgColor2="#6eaa49",this.excludeBkgColor="#eeeeee",this.taskBorderColor="calculated",this.taskBkgColor="#487e3a",this.taskTextLightColor="white",this.taskTextColor="calculated",this.taskTextDarkColor="black",this.taskTextOutsideColor="calculated",this.taskTextClickableColor="#003163",this.activeTaskBorderColor="calculated",this.activeTaskBkgColor="calculated",this.gridColor="lightgrey",this.doneTaskBkgColor="lightgrey",this.doneTaskBorderColor="grey",this.critBorderColor="#ff8888",this.critBkgColor="red",this.todayLineColor="red",this.personBorder=this.primaryBorderColor,this.personBkg=this.mainBkg,this.archEdgeColor="calculated",this.archEdgeArrowColor="calculated",this.archEdgeWidth="3",this.archGroupBorderColor=this.pr
imaryBorderColor,this.archGroupBorderWidth="2px",this.labelColor="black",this.errorBkgColor="#552222",this.errorTextColor="#552222"}updateColors(){this.actorBorder=(0,h.A)(this.mainBkg,20),this.actorBkg=this.mainBkg,this.labelBoxBkgColor=this.actorBkg,this.labelTextColor=this.actorTextColor,this.loopTextColor=this.actorTextColor,this.noteBorderColor=this.border2,this.noteTextColor=this.actorTextColor,this.actorLineColor=this.actorBorder,this.cScale0=this.cScale0||this.primaryColor,this.cScale1=this.cScale1||this.secondaryColor,this.cScale2=this.cScale2||this.tertiaryColor,this.cScale3=this.cScale3||a(this.primaryColor,{h:30}),this.cScale4=this.cScale4||a(this.primaryColor,{h:60}),this.cScale5=this.cScale5||a(this.primaryColor,{h:90}),this.cScale6=this.cScale6||a(this.primaryColor,{h:120}),this.cScale7=this.cScale7||a(this.primaryColor,{h:150}),this.cScale8=this.cScale8||a(this.primaryColor,{h:210}),this.cScale9=this.cScale9||a(this.primaryColor,{h:270}),this.cScale10=this.cScale10||a(this.primaryColor,{h:300}),this.cScale11=this.cScale11||a(this.primaryColor,{h:330}),this.cScalePeer1=this.cScalePeer1||(0,h.A)(this.secondaryColor,45),this.cScalePeer2=this.cScalePeer2||(0,h.A)(this.tertiaryColor,40);for(let e=0;e{this[t]=e[t]})),this.updateColors(),t.forEach((t=>{this[t]=e[t]}))}},H=m((e=>{const t=new q;return 
t.calculate(e),t}),"getThemeVariables"),W=class{static#e=(()=>m(this,"Theme"))();constructor(){this.primaryColor="#eee",this.contrast="#707070",this.secondaryColor=(0,d.A)(this.contrast,55),this.background="#ffffff",this.tertiaryColor=a(this.primaryColor,{h:-160}),this.primaryBorderColor=B(this.primaryColor,this.darkMode),this.secondaryBorderColor=B(this.secondaryColor,this.darkMode),this.tertiaryBorderColor=B(this.tertiaryColor,this.darkMode),this.primaryTextColor=c(this.primaryColor),this.secondaryTextColor=c(this.secondaryColor),this.tertiaryTextColor=c(this.tertiaryColor),this.lineColor=c(this.background),this.textColor=c(this.background),this.mainBkg="#eee",this.secondBkg="calculated",this.lineColor="#666",this.border1="#999",this.border2="calculated",this.note="#ffa",this.text="#333",this.critical="#d42",this.done="#bbb",this.arrowheadColor="#333333",this.fontFamily='"trebuchet ms", verdana, arial, sans-serif',this.fontSize="16px",this.THEME_COLOR_LIMIT=12,this.nodeBkg="calculated",this.nodeBorder="calculated",this.clusterBkg="calculated",this.clusterBorder="calculated",this.defaultLinkColor="calculated",this.titleColor="calculated",this.edgeLabelBackground="white",this.actorBorder="calculated",this.actorBkg="calculated",this.actorTextColor="calculated",this.actorLineColor=this.actorBorder,this.signalColor="calculated",this.signalTextColor="calculated",this.labelBoxBkgColor="calculated",this.labelBoxBorderColor="calculated",this.labelTextColor="calculated",this.loopTextColor="calculated",this.noteBorderColor="calculated",this.noteBkgColor="calculated",this.noteTextColor="calculated",this.activationBorderColor="#666",this.activationBkgColor="#f4f4f4",this.sequenceNumberColor="white",this.sectionBkgColor="calculated",this.altSectionBkgColor="white",this.sectionBkgColor2="calculated",this.excludeBkgColor="#eeeeee",this.taskBorderColor="calculated",this.taskBkgColor="calculated",this.taskTextLightColor="white",this.taskTextColor="calculated",this.taskTextDarkColor
="calculated",this.taskTextOutsideColor="calculated",this.taskTextClickableColor="#003163",this.activeTaskBorderColor="calculated",this.activeTaskBkgColor="calculated",this.gridColor="calculated",this.doneTaskBkgColor="calculated",this.doneTaskBorderColor="calculated",this.critBkgColor="calculated",this.critBorderColor="calculated",this.todayLineColor="calculated",this.personBorder=this.primaryBorderColor,this.personBkg=this.mainBkg,this.archEdgeColor="calculated",this.archEdgeArrowColor="calculated",this.archEdgeWidth="3",this.archGroupBorderColor=this.primaryBorderColor,this.archGroupBorderWidth="2px",this.rowOdd=this.rowOdd||(0,d.A)(this.mainBkg,75)||"#ffffff",this.rowEven=this.rowEven||"#f4f4f4",this.labelColor="black",this.errorBkgColor="#552222",this.errorTextColor="#552222"}updateColors(){this.secondBkg=(0,d.A)(this.contrast,55),this.border2=this.contrast,this.actorBorder=(0,d.A)(this.border1,23),this.actorBkg=this.mainBkg,this.actorTextColor=this.text,this.actorLineColor=this.actorBorder,this.signalColor=this.text,this.signalTextColor=this.text,this.labelBoxBkgColor=this.actorBkg,this.labelBoxBorderColor=this.actorBorder,this.labelTextColor=this.text,this.loopTextColor=this.text,this.noteBorderColor="#999",this.noteBkgColor="#666",this.noteTextColor="#fff",this.cScale0=this.cScale0||"#555",this.cScale1=this.cScale1||"#F4F4F4",this.cScale2=this.cScale2||"#555",this.cScale3=this.cScale3||"#BBB",this.cScale4=this.cScale4||"#777",this.cScale5=this.cScale5||"#999",this.cScale6=this.cScale6||"#DDD",this.cScale7=this.cScale7||"#FFF",this.cScale8=this.cScale8||"#DDD",this.cScale9=this.cScale9||"#BBB",this.cScale10=this.cScale10||"#999",this.cScale11=this.cScale11||"#777";for(let e=0;e{this[t]=e[t]})),this.updateColors(),t.forEach((t=>{this[t]=e[t]}))}},K={base:{getThemeVariables:z},dark:{getThemeVariables:N},default:{getThemeVariables:j},forest:{getThemeVariables:H},neutral:{getThemeVariables:m((e=>{const t=new W;return 
t.calculate(e),t}),"getThemeVariables")}},U={flowchart:{useMaxWidth:!0,titleTopMargin:25,subGraphTitleMargin:{top:0,bottom:0},diagramPadding:8,htmlLabels:!0,nodeSpacing:50,rankSpacing:50,curve:"basis",padding:15,defaultRenderer:"dagre-wrapper",wrappingWidth:200},sequence:{useMaxWidth:!0,hideUnusedParticipants:!1,activationWidth:10,diagramMarginX:50,diagramMarginY:10,actorMargin:50,width:150,height:65,boxMargin:10,boxTextMargin:5,noteMargin:10,messageMargin:35,messageAlign:"center",mirrorActors:!0,forceMenus:!1,bottomMarginAdj:1,rightAngles:!1,showSequenceNumbers:!1,actorFontSize:14,actorFontFamily:'"Open Sans", sans-serif',actorFontWeight:400,noteFontSize:14,noteFontFamily:'"trebuchet ms", verdana, arial, sans-serif',noteFontWeight:400,noteAlign:"center",messageFontSize:16,messageFontFamily:'"trebuchet ms", verdana, arial, sans-serif',messageFontWeight:400,wrap:!1,wrapPadding:10,labelBoxWidth:50,labelBoxHeight:20},gantt:{useMaxWidth:!0,titleTopMargin:25,barHeight:20,barGap:4,topPadding:50,rightPadding:75,leftPadding:75,gridLineStartPadding:35,fontSize:11,sectionFontSize:11,numberSectionStyles:4,axisFormat:"%Y-%m-%d",topAxis:!1,displayMode:"",weekday:"sunday"},journey:{useMaxWidth:!0,diagramMarginX:50,diagramMarginY:10,leftMargin:150,width:150,height:50,boxMargin:10,boxTextMargin:5,noteMargin:10,messageMargin:35,messageAlign:"center",bottomMarginAdj:1,rightAngles:!1,taskFontSize:14,taskFontFamily:'"Open Sans", 
sans-serif',taskMargin:50,activationWidth:10,textPlacement:"fo",actorColours:["#8FBC8F","#7CFC00","#00FFFF","#20B2AA","#B0E0E6","#FFFFE0"],sectionFills:["#191970","#8B008B","#4B0082","#2F4F4F","#800000","#8B4513","#00008B"],sectionColours:["#fff"]},class:{useMaxWidth:!0,titleTopMargin:25,arrowMarkerAbsolute:!1,dividerMargin:10,padding:5,textHeight:10,defaultRenderer:"dagre-wrapper",htmlLabels:!1,hideEmptyMembersBox:!1},state:{useMaxWidth:!0,titleTopMargin:25,dividerMargin:10,sizeUnit:5,padding:8,textHeight:10,titleShift:-15,noteMargin:10,forkWidth:70,forkHeight:7,miniPadding:2,fontSizeFactor:5.02,fontSize:24,labelHeight:16,edgeLengthFactor:"20",compositTitleSize:35,radius:5,defaultRenderer:"dagre-wrapper"},er:{useMaxWidth:!0,titleTopMargin:25,diagramPadding:20,layoutDirection:"TB",minEntityWidth:100,minEntityHeight:75,entityPadding:15,nodeSpacing:140,rankSpacing:80,stroke:"gray",fill:"honeydew",fontSize:12},pie:{useMaxWidth:!0,textPosition:.75},quadrantChart:{useMaxWidth:!0,chartWidth:500,chartHeight:500,titleFontSize:20,titlePadding:10,quadrantPadding:5,xAxisLabelPadding:5,yAxisLabelPadding:5,xAxisLabelFontSize:16,yAxisLabelFontSize:16,quadrantLabelFontSize:16,quadrantTextTopPadding:5,pointTextPadding:5,pointLabelFontSize:12,pointRadius:5,xAxisPosition:"top",yAxisPosition:"left",quadrantInternalBorderStrokeWidth:1,quadrantExternalBorderStrokeWidth:2},xyChart:{useMaxWidth:!0,width:700,height:500,titleFontSize:20,titlePadding:10,showTitle:!0,xAxis:{$ref:"#/$defs/XYChartAxisConfig",showLabel:!0,labelFontSize:14,labelPadding:5,showTitle:!0,titleFontSize:16,titlePadding:5,showTick:!0,tickLength:5,tickWidth:2,showAxisLine:!0,axisLineWidth:2},yAxis:{$ref:"#/$defs/XYChartAxisConfig",showLabel:!0,labelFontSize:14,labelPadding:5,showTitle:!0,titleFontSize:16,titlePadding:5,showTick:!0,tickLength:5,tickWidth:2,showAxisLine:!0,axisLineWidth:2},chartOrientation:"vertical",plotReservedSpacePercent:50},requirement:{useMaxWidth:!0,rect_fill:"#f9f9f9",text_color:"#333",rect_border_
size:"0.5px",rect_border_color:"#bbb",rect_min_width:200,rect_min_height:200,fontSize:14,rect_padding:10,line_height:20},mindmap:{useMaxWidth:!0,padding:10,maxNodeWidth:200},kanban:{useMaxWidth:!0,padding:8,sectionWidth:200,ticketBaseUrl:""},timeline:{useMaxWidth:!0,diagramMarginX:50,diagramMarginY:10,leftMargin:150,width:150,height:50,boxMargin:10,boxTextMargin:5,noteMargin:10,messageMargin:35,messageAlign:"center",bottomMarginAdj:1,rightAngles:!1,taskFontSize:14,taskFontFamily:'"Open Sans", sans-serif',taskMargin:50,activationWidth:10,textPlacement:"fo",actorColours:["#8FBC8F","#7CFC00","#00FFFF","#20B2AA","#B0E0E6","#FFFFE0"],sectionFills:["#191970","#8B008B","#4B0082","#2F4F4F","#800000","#8B4513","#00008B"],sectionColours:["#fff"],disableMulticolor:!1},gitGraph:{useMaxWidth:!0,titleTopMargin:25,diagramPadding:8,nodeLabel:{width:75,height:100,x:-25,y:0},mainBranchName:"main",mainBranchOrder:0,showCommitLabel:!0,showBranches:!0,rotateCommitLabel:!0,parallelCommits:!1,arrowMarkerAbsolute:!1},c4:{useMaxWidth:!0,diagramMarginX:50,diagramMarginY:10,c4ShapeMargin:50,c4ShapePadding:20,width:216,height:60,boxMargin:10,c4ShapeInRow:4,nextLinePaddingX:0,c4BoundaryInRow:2,personFontSize:14,personFontFamily:'"Open Sans", sans-serif',personFontWeight:"normal",external_personFontSize:14,external_personFontFamily:'"Open Sans", sans-serif',external_personFontWeight:"normal",systemFontSize:14,systemFontFamily:'"Open Sans", sans-serif',systemFontWeight:"normal",external_systemFontSize:14,external_systemFontFamily:'"Open Sans", sans-serif',external_systemFontWeight:"normal",system_dbFontSize:14,system_dbFontFamily:'"Open Sans", sans-serif',system_dbFontWeight:"normal",external_system_dbFontSize:14,external_system_dbFontFamily:'"Open Sans", sans-serif',external_system_dbFontWeight:"normal",system_queueFontSize:14,system_queueFontFamily:'"Open Sans", sans-serif',system_queueFontWeight:"normal",external_system_queueFontSize:14,external_system_queueFontFamily:'"Open Sans", 
sans-serif',external_system_queueFontWeight:"normal",boundaryFontSize:14,boundaryFontFamily:'"Open Sans", sans-serif',boundaryFontWeight:"normal",messageFontSize:12,messageFontFamily:'"Open Sans", sans-serif',messageFontWeight:"normal",containerFontSize:14,containerFontFamily:'"Open Sans", sans-serif',containerFontWeight:"normal",external_containerFontSize:14,external_containerFontFamily:'"Open Sans", sans-serif',external_containerFontWeight:"normal",container_dbFontSize:14,container_dbFontFamily:'"Open Sans", sans-serif',container_dbFontWeight:"normal",external_container_dbFontSize:14,external_container_dbFontFamily:'"Open Sans", sans-serif',external_container_dbFontWeight:"normal",container_queueFontSize:14,container_queueFontFamily:'"Open Sans", sans-serif',container_queueFontWeight:"normal",external_container_queueFontSize:14,external_container_queueFontFamily:'"Open Sans", sans-serif',external_container_queueFontWeight:"normal",componentFontSize:14,componentFontFamily:'"Open Sans", sans-serif',componentFontWeight:"normal",external_componentFontSize:14,external_componentFontFamily:'"Open Sans", sans-serif',external_componentFontWeight:"normal",component_dbFontSize:14,component_dbFontFamily:'"Open Sans", sans-serif',component_dbFontWeight:"normal",external_component_dbFontSize:14,external_component_dbFontFamily:'"Open Sans", sans-serif',external_component_dbFontWeight:"normal",component_queueFontSize:14,component_queueFontFamily:'"Open Sans", sans-serif',component_queueFontWeight:"normal",external_component_queueFontSize:14,external_component_queueFontFamily:'"Open Sans", 
sans-serif',external_component_queueFontWeight:"normal",wrap:!0,wrapPadding:10,person_bg_color:"#08427B",person_border_color:"#073B6F",external_person_bg_color:"#686868",external_person_border_color:"#8A8A8A",system_bg_color:"#1168BD",system_border_color:"#3C7FC0",system_db_bg_color:"#1168BD",system_db_border_color:"#3C7FC0",system_queue_bg_color:"#1168BD",system_queue_border_color:"#3C7FC0",external_system_bg_color:"#999999",external_system_border_color:"#8A8A8A",external_system_db_bg_color:"#999999",external_system_db_border_color:"#8A8A8A",external_system_queue_bg_color:"#999999",external_system_queue_border_color:"#8A8A8A",container_bg_color:"#438DD5",container_border_color:"#3C7FC0",container_db_bg_color:"#438DD5",container_db_border_color:"#3C7FC0",container_queue_bg_color:"#438DD5",container_queue_border_color:"#3C7FC0",external_container_bg_color:"#B3B3B3",external_container_border_color:"#A6A6A6",external_container_db_bg_color:"#B3B3B3",external_container_db_border_color:"#A6A6A6",external_container_queue_bg_color:"#B3B3B3",external_container_queue_border_color:"#A6A6A6",component_bg_color:"#85BBF0",component_border_color:"#78A8D8",component_db_bg_color:"#85BBF0",component_db_border_color:"#78A8D8",component_queue_bg_color:"#85BBF0",component_queue_border_color:"#78A8D8",external_component_bg_color:"#CCCCCC",external_component_border_color:"#BFBFBF",external_component_db_bg_color:"#CCCCCC",external_component_db_border_color:"#BFBFBF",external_component_queue_bg_color:"#CCCCCC",external_component_queue_border_color:"#BFBFBF"},sankey:{useMaxWidth:!0,width:600,height:400,linkColor:"gradient",nodeAlignment:"justify",showValues:!0,prefix:"",suffix:""},block:{useMaxWidth:!0,padding:8},packet:{useMaxWidth:!0,rowHeight:32,bitWidth:32,bitsPerRow:32,showBits:!0,paddingX:5,paddingY:5},architecture:{useMaxWidth:!0,padding:40,iconSize:80,fontSize:16},radar:{useMaxWidth:!0,width:600,height:600,marginTop:50,marginRight:50,marginBottom:50,marginLeft:50,axisScaleFactor:1,ax
isLabelFactor:1.05,curveTension:.17},theme:"default",look:"classic",handDrawnSeed:0,layout:"dagre",maxTextSize:5e4,maxEdges:500,darkMode:!1,fontFamily:'"trebuchet ms", verdana, arial, sans-serif;',logLevel:5,securityLevel:"strict",startOnLoad:!0,arrowMarkerAbsolute:!1,secure:["secure","securityLevel","startOnLoad","maxTextSize","suppressErrorRendering","maxEdges"],legacyMathML:!1,forceLegacyMathML:!1,deterministicIds:!1,fontSize:16,markdownAutoWrap:!0,suppressErrorRendering:!1},V={...U,deterministicIDSeed:void 0,elk:{mergeEdges:!1,nodePlacementStrategy:"BRANDES_KOEPF"},themeCSS:void 0,themeVariables:K.default.getThemeVariables(),sequence:{...U.sequence,messageFont:m((function(){return{fontFamily:this.messageFontFamily,fontSize:this.messageFontSize,fontWeight:this.messageFontWeight}}),"messageFont"),noteFont:m((function(){return{fontFamily:this.noteFontFamily,fontSize:this.noteFontSize,fontWeight:this.noteFontWeight}}),"noteFont"),actorFont:m((function(){return{fontFamily:this.actorFontFamily,fontSize:this.actorFontSize,fontWeight:this.actorFontWeight}}),"actorFont")},class:{hideEmptyMembersBox:!1},gantt:{...U.gantt,tickInterval:void 0,useWidth:void 0},c4:{...U.c4,useWidth:void 
0,personFont:m((function(){return{fontFamily:this.personFontFamily,fontSize:this.personFontSize,fontWeight:this.personFontWeight}}),"personFont"),external_personFont:m((function(){return{fontFamily:this.external_personFontFamily,fontSize:this.external_personFontSize,fontWeight:this.external_personFontWeight}}),"external_personFont"),systemFont:m((function(){return{fontFamily:this.systemFontFamily,fontSize:this.systemFontSize,fontWeight:this.systemFontWeight}}),"systemFont"),external_systemFont:m((function(){return{fontFamily:this.external_systemFontFamily,fontSize:this.external_systemFontSize,fontWeight:this.external_systemFontWeight}}),"external_systemFont"),system_dbFont:m((function(){return{fontFamily:this.system_dbFontFamily,fontSize:this.system_dbFontSize,fontWeight:this.system_dbFontWeight}}),"system_dbFont"),external_system_dbFont:m((function(){return{fontFamily:this.external_system_dbFontFamily,fontSize:this.external_system_dbFontSize,fontWeight:this.external_system_dbFontWeight}}),"external_system_dbFont"),system_queueFont:m((function(){return{fontFamily:this.system_queueFontFamily,fontSize:this.system_queueFontSize,fontWeight:this.system_queueFontWeight}}),"system_queueFont"),external_system_queueFont:m((function(){return{fontFamily:this.external_system_queueFontFamily,fontSize:this.external_system_queueFontSize,fontWeight:this.external_system_queueFontWeight}}),"external_system_queueFont"),containerFont:m((function(){return{fontFamily:this.containerFontFamily,fontSize:this.containerFontSize,fontWeight:this.containerFontWeight}}),"containerFont"),external_containerFont:m((function(){return{fontFamily:this.external_containerFontFamily,fontSize:this.external_containerFontSize,fontWeight:this.external_containerFontWeight}}),"external_containerFont"),container_dbFont:m((function(){return{fontFamily:this.container_dbFontFamily,fontSize:this.container_dbFontSize,fontWeight:this.container_dbFontWeight}}),"container_dbFont"),external_container_dbFont:m((function()
{return{fontFamily:this.external_container_dbFontFamily,fontSize:this.external_container_dbFontSize,fontWeight:this.external_container_dbFontWeight}}),"external_container_dbFont"),container_queueFont:m((function(){return{fontFamily:this.container_queueFontFamily,fontSize:this.container_queueFontSize,fontWeight:this.container_queueFontWeight}}),"container_queueFont"),external_container_queueFont:m((function(){return{fontFamily:this.external_container_queueFontFamily,fontSize:this.external_container_queueFontSize,fontWeight:this.external_container_queueFontWeight}}),"external_container_queueFont"),componentFont:m((function(){return{fontFamily:this.componentFontFamily,fontSize:this.componentFontSize,fontWeight:this.componentFontWeight}}),"componentFont"),external_componentFont:m((function(){return{fontFamily:this.external_componentFontFamily,fontSize:this.external_componentFontSize,fontWeight:this.external_componentFontWeight}}),"external_componentFont"),component_dbFont:m((function(){return{fontFamily:this.component_dbFontFamily,fontSize:this.component_dbFontSize,fontWeight:this.component_dbFontWeight}}),"component_dbFont"),external_component_dbFont:m((function(){return{fontFamily:this.external_component_dbFontFamily,fontSize:this.external_component_dbFontSize,fontWeight:this.external_component_dbFontWeight}}),"external_component_dbFont"),component_queueFont:m((function(){return{fontFamily:this.component_queueFontFamily,fontSize:this.component_queueFontSize,fontWeight:this.component_queueFontWeight}}),"component_queueFont"),external_component_queueFont:m((function(){return{fontFamily:this.external_component_queueFontFamily,fontSize:this.external_component_queueFontSize,fontWeight:this.external_component_queueFontWeight}}),"external_component_queueFont"),boundaryFont:m((function(){return{fontFamily:this.boundaryFontFamily,fontSize:this.boundaryFontSize,fontWeight:this.boundaryFontWeight}}),"boundaryFont"),messageFont:m((function(){return{fontFamily:this.messageFontFami
ly,fontSize:this.messageFontSize,fontWeight:this.messageFontWeight}}),"messageFont")},pie:{...U.pie,useWidth:984},xyChart:{...U.xyChart,useWidth:void 0},requirement:{...U.requirement,useWidth:void 0},packet:{...U.packet},radar:{...U.radar}},Y=m((function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";return Object.keys(e).reduce(((n,r)=>Array.isArray(e[r])?n:"object"===typeof e[r]&&null!==e[r]?[...n,t+r,...Y(e[r],"")]:[...n,t+r]),[])}),"keyify"),G=new Set(Y(V,"")),X=V,Q=m((e=>{if(v.debug("sanitizeDirective called with",e),"object"===typeof e&&null!=e)if(Array.isArray(e))e.forEach((e=>Q(e)));else{for(const t of Object.keys(e)){if(v.debug("Checking key",t),t.startsWith("__")||t.includes("proto")||t.includes("constr")||!G.has(t)||null==e[t]){v.debug("sanitize deleting key: ",t),delete e[t];continue}if("object"===typeof e[t]){v.debug("sanitizing object",t),Q(e[t]);continue}const n=["themeCSS","fontFamily","altFontFamily"];for(const r of n)t.includes(r)&&(v.debug("sanitizing css option",t),e[t]=Z(e[t]))}if(e.themeVariables)for(const t of Object.keys(e.themeVariables)){const n=e.themeVariables[t];n?.match&&!n.match(/^[\d "#%(),.;A-Za-z]+$/)&&(e.themeVariables[t]="")}v.debug("After sanitization",e)}}),"sanitizeDirective"),Z=m((e=>{let t=0,n=0;for(const r of e){if(t{let n=P({},e),r={};for(const i of t)ue(i),r=P(r,i);if(n=P(n,r),r.theme&&r.theme in K){const e=P({},u),t=P(e.themeVariables||{},r.themeVariables);n.theme&&n.theme in K&&(n.themeVariables=K[n.theme].getThemeVariables(t))}return 
me(ne=n),ne}),"updateCurrentConfig"),ie=m((e=>(ee=P({},J),ee=P(ee,e),e.theme&&K[e.theme]&&(ee.themeVariables=K[e.theme].getThemeVariables(e.themeVariables)),re(ee,te),ee)),"setSiteConfig"),oe=m((e=>{u=P({},e)}),"saveConfigFromInitialize"),ae=m((e=>(ee=P(ee,e),re(ee,te),ee)),"updateSiteConfig"),se=m((()=>P({},ee)),"getSiteConfig"),le=m((e=>(me(e),P(ne,e),ce())),"setConfig"),ce=m((()=>P({},ne)),"getConfig"),ue=m((e=>{e&&(["secure",...ee.secure??[]].forEach((t=>{Object.hasOwn(e,t)&&(v.debug(`Denied attempt to modify a secure key ${t}`,e[t]),delete e[t])})),Object.keys(e).forEach((t=>{t.startsWith("__")&&delete e[t]})),Object.keys(e).forEach((t=>{"string"===typeof e[t]&&(e[t].includes("<")||e[t].includes(">")||e[t].includes("url(data:"))&&delete e[t],"object"===typeof e[t]&&ue(e[t])})))}),"sanitize"),he=m((e=>{Q(e),e.fontFamily&&!e.themeVariables?.fontFamily&&(e.themeVariables={...e.themeVariables,fontFamily:e.fontFamily}),te.push(e),re(ee,te)}),"addDirective"),de=m((function(){re(arguments.length>0&&void 0!==arguments[0]?arguments[0]:ee,te=[])}),"reset"),fe={LAZY_LOAD_DEPRECATED:"The configuration options lazyLoadedDiagrams and loadExternalDiagramsAtStartup are deprecated. 
Please use registerExternalDiagrams instead."},pe={},ge=m((e=>{pe[e]||(v.warn(fe[e]),pe[e]=!0)}),"issueWarning"),me=m((e=>{e&&(e.lazyLoadedDiagrams||e.loadExternalDiagramsAtStartup)&&ge("LAZY_LOAD_DEPRECATED")}),"checkConfig"),ye=//gi,be=m((e=>{if(!e)return[""];return Ee(e).replace(/\\n/g,"#br#").split("#br#")}),"getRows"),ve=(()=>{let e=!1;return()=>{e||(xe(),e=!0)}})();function xe(){const e="data-temp-href-target";p.A.addHook("beforeSanitizeAttributes",(t=>{t instanceof Element&&"A"===t.tagName&&t.hasAttribute("target")&&t.setAttribute(e,t.getAttribute("target")??"")})),p.A.addHook("afterSanitizeAttributes",(t=>{t instanceof Element&&"A"===t.tagName&&t.hasAttribute(e)&&(t.setAttribute("target",t.getAttribute(e)??""),t.removeAttribute(e),"_blank"===t.getAttribute("target")&&t.setAttribute("rel","noopener"))}))}m(xe,"setupDompurifyHooks");var ke=m((e=>{ve();return p.A.sanitize(e)}),"removeScript"),we=m(((e,t)=>{if(!1!==t.flowchart?.htmlLabels){const n=t.securityLevel;"antiscript"===n||"strict"===n?e=ke(e):"loose"!==n&&(e=(e=(e=Ee(e)).replace(//g,">")).replace(/=/g,"="),e=Te(e))}return e}),"sanitizeMore"),Se=m(((e,t)=>e?e=t.dompurifyConfig?p.A.sanitize(we(e,t),t.dompurifyConfig).toString():p.A.sanitize(we(e,t),{FORBID_TAGS:["style"]}).toString():e),"sanitizeText"),Ce=m(((e,t)=>"string"===typeof e?Se(e,t):e.flat().map((e=>Se(e,t)))),"sanitizeTextOrArray"),_e=m((e=>ye.test(e)),"hasBreaks"),Ae=m((e=>e.split(ye)),"splitBreaks"),Te=m((e=>e.replace(/#br#/g,"
    ")),"placeholderToBreak"),Ee=m((e=>e.replace(ye,"#br#")),"breakToPlaceholder"),Fe=m((e=>{let t="";return e&&(t=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search,t=t.replaceAll(/\(/g,"\\("),t=t.replaceAll(/\)/g,"\\)")),t}),"getUrl"),Me=m((e=>!1!==e&&!["false","null","0"].includes(String(e).trim().toLowerCase())),"evaluate"),Le=m((function(){for(var e=arguments.length,t=new Array(e),n=0;n!isNaN(e)));return Math.max(...r)}),"getMax"),Pe=m((function(){for(var e=arguments.length,t=new Array(e),n=0;n!isNaN(e)));return Math.min(...r)}),"getMin"),Oe=m((function(e){const t=e.split(/(,)/),n=[];for(let r=0;r0&&r+1Math.max(0,e.split(t).length-1)),"countOccurrence"),Be=m(((e,t)=>{const n=$e(e,"~"),r=$e(t,"~");return 1===n&&1===r}),"shouldCombineSets"),De=m((e=>{const t=$e(e,"~");let n=!1;if(t<=1)return e;t%2!==0&&e.startsWith("~")&&(e=e.substring(1),n=!0);const r=[...e];let i=r.indexOf("~"),o=r.lastIndexOf("~");for(;-1!==i&&-1!==o&&i!==o;)r[i]="<",r[o]=">",i=r.indexOf("~"),o=r.lastIndexOf("~");return n&&r.unshift("~"),r.join("")}),"processSet"),ze=m((()=>void 0!==window.MathMLElement),"isMathMLSupported"),Ie=/\$\$(.*)\$\$/g,Ne=m((e=>(e.match(Ie)?.length??0)>0),"hasKatex"),Re=m((async(e,t)=>{e=await je(e,t);const n=document.createElement("div");n.innerHTML=e,n.id="katex-temp",n.style.visibility="hidden",n.style.position="absolute",n.style.top="0";const r=document.querySelector("body");r?.insertAdjacentElement("beforeend",n);const i={width:n.clientWidth,height:n.clientHeight};return n.remove(),i}),"calculateMathMLDimensions"),je=m((async(e,t)=>{if(!Ne(e))return e;if(!(ze()||t.legacyMathML||t.forceLegacyMathML))return e.replace(Ie,"MathML is unsupported in this environment.");const{default:r}=await n.e(349).then(n.bind(n,349)),i=t.forceLegacyMathML||!ze()&&t.legacyMathML?"htmlAndMathml":"mathml";return e.split(ye).map((e=>Ne(e)?`
    ${e}
    `:`
    ${e}
    `)).join("").replace(Ie,((e,t)=>r.renderToString(t,{throwOnError:!0,displayMode:!0,output:i}).replace(/\n/g," ").replace(//g,"")))}),"renderKatex"),qe={getRows:be,sanitizeText:Se,sanitizeTextOrArray:Ce,hasBreaks:_e,splitBreaks:Ae,lineBreakRegex:ye,removeScript:ke,getUrl:Fe,evaluate:Me,getMax:Le,getMin:Pe},He=m((function(e,t){for(let n of t)e.attr(n[0],n[1])}),"d3Attrs"),We=m((function(e,t,n){let r=new Map;return n?(r.set("width","100%"),r.set("style",`max-width: ${t}px;`)):(r.set("height",e),r.set("width",t)),r}),"calculateSvgSizeAttrs"),Ke=m((function(e,t,n,r){const i=We(t,n,r);He(e,i)}),"configureSvgSize"),Ue=m((function(e,t,n,r){const i=t.node().getBBox(),o=i.width,a=i.height;v.info(`SVG bounds: ${o}x${a}`,i);let s=0,l=0;v.info(`Graph bounds: ${s}x${l}`,e),s=o+2*n,l=a+2*n,v.info(`Calculated bounds: ${s}x${l}`),Ke(t,l,s,r);const c=`${i.x-n} ${i.y-n} ${i.width+2*n} ${i.height+2*n}`;t.attr("viewBox",c)}),"setupGraphViewbox"),Ve={},Ye=m(((e,t,n)=>{let r="";return e in Ve&&Ve[e]?r=Ve[e](n):v.warn(`No theme found for ${e}`),` & {\n font-family: ${n.fontFamily};\n font-size: ${n.fontSize};\n fill: ${n.textColor}\n }\n @keyframes edge-animation-frame {\n from {\n stroke-dashoffset: 0;\n }\n }\n @keyframes dash {\n to {\n stroke-dashoffset: 0;\n }\n }\n & .edge-animation-slow {\n stroke-dasharray: 9,5 !important;\n stroke-dashoffset: 900;\n animation: dash 50s linear infinite;\n stroke-linecap: round;\n }\n & .edge-animation-fast {\n stroke-dasharray: 9,5 !important;\n stroke-dashoffset: 900;\n animation: dash 20s linear infinite;\n stroke-linecap: round;\n }\n /* Classes common for multiple diagrams */\n\n & .error-icon {\n fill: ${n.errorBkgColor};\n }\n & .error-text {\n fill: ${n.errorTextColor};\n stroke: ${n.errorTextColor};\n }\n\n & .edge-thickness-normal {\n stroke-width: 1px;\n }\n & .edge-thickness-thick {\n stroke-width: 3.5px\n }\n & .edge-pattern-solid {\n stroke-dasharray: 0;\n }\n & .edge-thickness-invisible {\n stroke-width: 0;\n fill: none;\n }\n & 
.edge-pattern-dashed{\n stroke-dasharray: 3;\n }\n .edge-pattern-dotted {\n stroke-dasharray: 2;\n }\n\n & .marker {\n fill: ${n.lineColor};\n stroke: ${n.lineColor};\n }\n & .marker.cross {\n stroke: ${n.lineColor};\n }\n\n & svg {\n font-family: ${n.fontFamily};\n font-size: ${n.fontSize};\n }\n & p {\n margin: 0\n }\n\n ${r}\n\n ${t}\n`}),"getStyles"),Ge=m(((e,t)=>{void 0!==t&&(Ve[e]=t)}),"addStylesForDiagram"),Xe=Ye,Qe={};y(Qe,{clear:()=>nt,getAccDescription:()=>at,getAccTitle:()=>it,getDiagramTitle:()=>lt,setAccDescription:()=>ot,setAccTitle:()=>rt,setDiagramTitle:()=>st});var Ze="",Je="",et="",tt=m((e=>Se(e,ce())),"sanitizeText"),nt=m((()=>{Ze="",et="",Je=""}),"clear"),rt=m((e=>{Ze=tt(e).replace(/^\s+/g,"")}),"setAccTitle"),it=m((()=>Ze),"getAccTitle"),ot=m((e=>{et=tt(e).replace(/\n\s+/g,"\n")}),"setAccDescription"),at=m((()=>et),"getAccDescription"),st=m((e=>{Je=tt(e)}),"setDiagramTitle"),lt=m((()=>Je),"getDiagramTitle"),ct=v,ut=x,ht=ce,dt=le,ft=J,pt=m((e=>Se(e,ht())),"sanitizeText"),gt=Ue,mt=m((()=>Qe),"getCommonDb"),yt={},bt=m(((e,t,n)=>{yt[e]&&ct.warn(`Diagram with id ${e} already registered. 
Overwriting.`),yt[e]=t,n&&F(e,n),Ge(e,t.styles),t.injectUtils?.(ct,ut,ht,pt,gt,mt(),(()=>{}))}),"registerDiagram"),vt=m((e=>{if(e in yt)return yt[e];throw new xt(e)}),"getDiagram"),xt=class extends Error{static#e=(()=>m(this,"DiagramNotFoundError"))();constructor(e){super(`Diagram ${e} not found.`)}}},3763:(e,t,n)=>{"use strict";e.exports=n(4983)},3815:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(9172);const i=function(e,t){return t?(0,r.A)(e,t,{clone:!1}):e}},3817:()=>{Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t 
]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},3903:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=function(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=9007199254740991}},3957:(e,t,n)=>{"use strict";n.d(t,{D:()=>o});var r=n(3759),i=n(3638),o=(0,r.K2)((e=>{const{securityLevel:t}=(0,r.D7)();let n=(0,i.Ltv)("body");if("sandbox"===t){const t=(0,i.Ltv)(`#i${e}`),r=t.node()?.contentDocument??document;n=(0,i.Ltv)(r.body)}return n.select(`#${e}`)}),"selectSvgElement")},3994:()=>{!function(e){var t=e.util.clone(e.languages.javascript),n=/(?:\s|\/\/.*(?!.)|\/\*(?:[^*]|\*(?!\/))\*\/)/.source,r=/(?:\{(?:\{(?:\{[^{}]*\}|[^{}])*\}|[^{}])*\})/.source,i=/(?:\{*\.{3}(?:[^{}]|)*\})/.source;function o(e,t){return e=e.replace(//g,(function(){return 
n})).replace(//g,(function(){return r})).replace(//g,(function(){return i})),RegExp(e,t)}i=o(i).source,e.languages.jsx=e.languages.extend("markup",t),e.languages.jsx.tag.pattern=o(/<\/?(?:[\w.:-]+(?:+(?:[\w.:$-]+(?:=(?:"(?:\\[\s\S]|[^\\"])*"|'(?:\\[\s\S]|[^\\'])*'|[^\s{'"/>=]+|))?|))**\/?)?>/.source),e.languages.jsx.tag.inside.tag.pattern=/^<\/?[^\s>\/]*/,e.languages.jsx.tag.inside["attr-value"].pattern=/=(?!\{)(?:"(?:\\[\s\S]|[^\\"])*"|'(?:\\[\s\S]|[^\\'])*'|[^\s'">]+)/,e.languages.jsx.tag.inside.tag.inside["class-name"]=/^[A-Z]\w*(?:\.[A-Z]\w*)*$/,e.languages.jsx.tag.inside.comment=t.comment,e.languages.insertBefore("inside","attr-name",{spread:{pattern:o(//.source),inside:e.languages.jsx}},e.languages.jsx.tag),e.languages.insertBefore("inside","special-attr",{script:{pattern:o(/=/.source),alias:"language-javascript",inside:{"script-punctuation":{pattern:/^=(?=\{)/,alias:"punctuation"},rest:e.languages.jsx}}},e.languages.jsx.tag);var a=function(e){return e?"string"===typeof e?e:"string"===typeof e.content?e.content:e.content.map(a).join(""):""},s=function(t){for(var n=[],r=0;r0&&n[n.length-1].tagName===a(i.content[0].content[1])&&n.pop():"/>"===i.content[i.content.length-1].content||n.push({tagName:a(i.content[0].content[1]),openedBraces:0}):n.length>0&&"punctuation"===i.type&&"{"===i.content?n[n.length-1].openedBraces++:n.length>0&&n[n.length-1].openedBraces>0&&"punctuation"===i.type&&"}"===i.content?n[n.length-1].openedBraces--:o=!0),(o||"string"===typeof i)&&n.length>0&&0===n[n.length-1].openedBraces){var l=a(i);r0&&("string"===typeof t[r-1]||"plain-text"===t[r-1].type)&&(l=a(t[r-1])+l,t.splice(r-1,1),r--),t[r]=new e.Token("plain-text",l,null,l)}i.content&&"string"!==typeof i.content&&s(i.content)}};e.hooks.add("after-tokenize",(function(e){"jsx"!==e.language&&"tsx"!==e.language||s(e.tokens)}))}(Prism)},4067:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(4802),i=n(7664);const o=function(e){if(!(0,i.A)(e))return!1;var t=(0,r.A)(e);return"[object 
Function]"==t||"[object GeneratorFunction]"==t||"[object AsyncFunction]"==t||"[object Proxy]"==t}},4176:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M10 10.02h5V21h-5zM17 21h3c1.1 0 2-.9 2-2v-9h-5zm3-18H5c-1.1 0-2 .9-2 2v3h19V5c0-1.1-.9-2-2-2M3 19c0 1.1.9 2 2 2h3V10H3z"}),"TableChart")},4186:(e,t,n)=>{"use strict";n.d(t,{A:()=>a});var r=n(7170),i=n(5481);const o=class{constructor(){this.type=i.Z.ALL}get(){return this.type}set(e){if(this.type&&this.type!==e)throw new Error("Cannot change both RGB and HSL channels at the same time");this.type=e}reset(){this.type=i.Z.ALL}is(e){return this.type===e}};const a=new class{constructor(e,t){this.color=t,this.changed=!1,this.data=e,this.type=new o}set(e,t){return this.color=t,this.changed=!1,this.data=e,this.type.type=i.Z.ALL,this}_ensureHSL(){const e=this.data,{h:t,s:n,l:i}=e;void 0===t&&(e.h=r.A.channel.rgb2hsl(e,"h")),void 0===n&&(e.s=r.A.channel.rgb2hsl(e,"s")),void 0===i&&(e.l=r.A.channel.rgb2hsl(e,"l"))}_ensureRGB(){const e=this.data,{r:t,g:n,b:i}=e;void 0===t&&(e.r=r.A.channel.hsl2rgb(e,"r")),void 0===n&&(e.g=r.A.channel.hsl2rgb(e,"g")),void 0===i&&(e.b=r.A.channel.hsl2rgb(e,"b"))}get r(){const e=this.data,t=e.r;return this.type.is(i.Z.HSL)||void 0===t?(this._ensureHSL(),r.A.channel.hsl2rgb(e,"r")):t}get g(){const e=this.data,t=e.g;return this.type.is(i.Z.HSL)||void 0===t?(this._ensureHSL(),r.A.channel.hsl2rgb(e,"g")):t}get b(){const e=this.data,t=e.b;return this.type.is(i.Z.HSL)||void 0===t?(this._ensureHSL(),r.A.channel.hsl2rgb(e,"b")):t}get h(){const e=this.data,t=e.h;return this.type.is(i.Z.RGB)||void 0===t?(this._ensureRGB(),r.A.channel.rgb2hsl(e,"h")):t}get s(){const e=this.data,t=e.s;return this.type.is(i.Z.RGB)||void 0===t?(this._ensureRGB(),r.A.channel.rgb2hsl(e,"s")):t}get l(){const e=this.data,t=e.l;return this.type.is(i.Z.RGB)||void 0===t?(this._ensureRGB(),r.A.channel.rgb2hsl(e,"l")):t}get a(){return this.data.a}set 
r(e){this.type.set(i.Z.RGB),this.changed=!0,this.data.r=e}set g(e){this.type.set(i.Z.RGB),this.changed=!0,this.data.g=e}set b(e){this.type.set(i.Z.RGB),this.changed=!0,this.data.b=e}set h(e){this.type.set(i.Z.HSL),this.changed=!0,this.data.h=e}set s(e){this.type.set(i.Z.HSL),this.changed=!0,this.data.s=e}set l(e){this.type.set(i.Z.HSL),this.changed=!0,this.data.l=e}set a(e){this.changed=!0,this.data.a=e}}({r:0,g:0,b:0,a:0},"transparent")},4222:function(e,t,n){"use strict";var r=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e,t){var n=null;if(!e||"string"!==typeof e)return n;var r=(0,i.default)(e),o="function"===typeof t;return r.forEach((function(e){if("declaration"===e.type){var r=e.property,i=e.value;o?t(r,i,e):i&&((n=n||{})[r]=i)}})),n};var i=r(n(4403))},4288:(e,t)=>{"use strict";var n=Symbol.for("react.transitional.element"),r=Symbol.for("react.portal"),i=Symbol.for("react.fragment"),o=Symbol.for("react.strict_mode"),a=Symbol.for("react.profiler"),s=Symbol.for("react.consumer"),l=Symbol.for("react.context"),c=Symbol.for("react.forward_ref"),u=Symbol.for("react.suspense"),h=Symbol.for("react.memo"),d=Symbol.for("react.lazy"),f=Symbol.iterator;var p={isMounted:function(){return!1},enqueueForceUpdate:function(){},enqueueReplaceState:function(){},enqueueSetState:function(){}},g=Object.assign,m={};function y(e,t,n){this.props=e,this.context=t,this.refs=m,this.updater=n||p}function b(){}function v(e,t,n){this.props=e,this.context=t,this.refs=m,this.updater=n||p}y.prototype.isReactComponent={},y.prototype.setState=function(e,t){if("object"!==typeof e&&"function"!==typeof e&&null!=e)throw Error("takes an object of state variables to update or a function which returns an object of state variables.");this.updater.enqueueSetState(this,e,t,"setState")},y.prototype.forceUpdate=function(e){this.updater.enqueueForceUpdate(this,e,"forceUpdate")},b.prototype=y.prototype;var 
x=v.prototype=new b;x.constructor=v,g(x,y.prototype),x.isPureReactComponent=!0;var k=Array.isArray,w={H:null,A:null,T:null,S:null,V:null},S=Object.prototype.hasOwnProperty;function C(e,t,r,i,o,a){return r=a.ref,{$$typeof:n,type:e,key:t,ref:void 0!==r?r:null,props:a}}function _(e){return"object"===typeof e&&null!==e&&e.$$typeof===n}var A=/\/+/g;function T(e,t){return"object"===typeof e&&null!==e&&null!=e.key?function(e){var t={"=":"=0",":":"=2"};return"$"+e.replace(/[=:]/g,(function(e){return t[e]}))}(""+e.key):t.toString(36)}function E(){}function F(e,t,i,o,a){var s=typeof e;"undefined"!==s&&"boolean"!==s||(e=null);var l,c,u=!1;if(null===e)u=!0;else switch(s){case"bigint":case"string":case"number":u=!0;break;case"object":switch(e.$$typeof){case n:case r:u=!0;break;case d:return F((u=e._init)(e._payload),t,i,o,a)}}if(u)return a=a(e),u=""===o?"."+T(e,0):o,k(a)?(i="",null!=u&&(i=u.replace(A,"$&/")+"/"),F(a,t,i,"",(function(e){return e}))):null!=a&&(_(a)&&(l=a,c=i+(null==a.key||e&&e.key===a.key?"":(""+a.key).replace(A,"$&/")+"/")+u,a=C(l.type,c,void 0,0,0,l.props)),t.push(a)),1;u=0;var h,p=""===o?".":o+":";if(k(e))for(var g=0;g{"use strict";n.d(t,{A:()=>o});var r=n(8187),i="object"==typeof self&&self&&self.Object===Object&&self;const o=r.A||i||Function("return this")()},4327:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M21 19V5c0-1.1-.9-2-2-2H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2M8.5 13.5l2.5 3.01L14.5 12l4.5 6H5z"}),"Image")},4391:(e,t,n)=>{"use strict";!function e(){if("undefined"!==typeof __REACT_DEVTOOLS_GLOBAL_HOOK__&&"function"===typeof __REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE)try{__REACT_DEVTOOLS_GLOBAL_HOOK__.checkDCE(e)}catch(t){console.error(t)}}(),e.exports=n(7004)},4403:e=>{var t=/\/\*[^*]*\*+([^/*][^*]*\*+)*\//g,n=/\n/g,r=/^\s*/,i=/^(\*?[-#/*\\\w]+(\[[0-9a-z_-]+\])?)\s*/,o=/^:\s*/,a=/^((?:'(?:\\'|.)*?'|"(?:\\"|.)*?"|\([^)]*?\)|[^};])+)/,s=/^[;\s]*/,l=/^\s+|\s+$/g,c="";function 
u(e){return e?e.replace(l,c):c}e.exports=function(e,l){if("string"!==typeof e)throw new TypeError("First argument must be a string");if(!e)return[];l=l||{};var h=1,d=1;function f(e){var t=e.match(n);t&&(h+=t.length);var r=e.lastIndexOf("\n");d=~r?e.length-r:d+e.length}function p(){var e={line:h,column:d};return function(t){return t.position=new g(e),v(),t}}function g(e){this.start=e,this.end={line:h,column:d},this.source=l.source}g.prototype.content=e;var m=[];function y(t){var n=new Error(l.source+":"+h+":"+d+": "+t);if(n.reason=t,n.filename=l.source,n.line=h,n.column=d,n.source=e,!l.silent)throw n;m.push(n)}function b(t){var n=t.exec(e);if(n){var r=n[0];return f(r),e=e.slice(r.length),n}}function v(){b(r)}function x(e){var t;for(e=e||[];t=k();)!1!==t&&e.push(t);return e}function k(){var t=p();if("/"==e.charAt(0)&&"*"==e.charAt(1)){for(var n=2;c!=e.charAt(n)&&("*"!=e.charAt(n)||"/"!=e.charAt(n+1));)++n;if(n+=2,c===e.charAt(n-1))return y("End of comment missing");var r=e.slice(2,n-2);return d+=2,f(r),e=e.slice(n),d+=2,t({type:"comment",comment:r})}}function w(){var e=p(),n=b(i);if(n){if(k(),!b(o))return y("property missing ':'");var r=b(a),l=e({type:"declaration",property:u(n[0].replace(t,c)),value:r?u(r[0].replace(t,c)):c});return b(s),l}}return v(),function(){var e,t=[];for(x(t);e=w();)!1!==e&&(t.push(e),x(t));return t}()}},4431:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=function(e){return function(){return e}}},4536:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M7.41 8.59 12 13.17l4.59-4.58L18 10l-6 6-6-6z"}),"KeyboardArrowDown")},4586:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(3493),i=n(3239);const o=function(e){return(0,i.A)(e)&&(0,r.A)(e)}},4634:e=>{function t(){return e.exports=t=Object.assign?Object.assign.bind():function(e){for(var t=1;t{"use strict";n.d(t,{A:()=>r});const r=function(e,t){var n=-1,r=e.length;for(t||(t=Array(r));++n{"use strict";n.d(t,{A:()=>d});var 
r=n(5622),i=Object.prototype,o=i.hasOwnProperty,a=i.toString,s=r.A?r.A.toStringTag:void 0;const l=function(e){var t=o.call(e,s),n=e[s];try{e[s]=void 0;var r=!0}catch(l){}var i=a.call(e);return r&&(t?e[s]=n:delete e[s]),i};var c=Object.prototype.toString;const u=function(e){return c.call(e)};var h=r.A?r.A.toStringTag:void 0;const d=function(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":h&&h in Object(e)?l(e):u(e)}},4830:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.BLANK_URL=t.relativeFirstCharacters=t.whitespaceEscapeCharsRegex=t.urlSchemeRegex=t.ctrlCharactersRegex=t.htmlCtrlEntityRegex=t.htmlEntitiesRegex=t.invalidProtocolRegex=void 0,t.invalidProtocolRegex=/^([^\w]*)(javascript|data|vbscript)/im,t.htmlEntitiesRegex=/&#(\w+)(^\w|;)?/g,t.htmlCtrlEntityRegex=/&(newline|tab);/gi,t.ctrlCharactersRegex=/[\u0000-\u001F\u007F-\u009F\u2000-\u200D\uFEFF]/gim,t.urlSchemeRegex=/^.+(:|:)/gim,t.whitespaceEscapeCharsRegex=/(\\|%5[cC])((%(6[eE]|72|74))|[nrt])/g,t.relativeFirstCharacters=[".","/"],t.BLANK_URL="about:blank"},4853:(e,t,n)=>{"use strict";n.d(t,{A:()=>s});var r=n(8587),i=n(8168);const o=["values","unit","step"],a=e=>{const t=Object.keys(e).map((t=>({key:t,val:e[t]})))||[];return t.sort(((e,t)=>e.val-t.val)),t.reduce(((e,t)=>(0,i.A)({},e,{[t.key]:t.val})),{})};function s(e){const{values:t={xs:0,sm:600,md:900,lg:1200,xl:1536},unit:n="px",step:s=5}=e,l=(0,r.A)(e,o),c=a(t),u=Object.keys(c);function h(e){return`@media (min-width:${"number"===typeof t[e]?t[e]:e}${n})`}function d(e){return`@media (max-width:${("number"===typeof t[e]?t[e]:e)-s/100}${n})`}function f(e,r){const i=u.indexOf(r);return`@media (min-width:${"number"===typeof t[e]?t[e]:e}${n}) and (max-width:${(-1!==i&&"number"===typeof t[u[i]]?t[u[i]]:r)-s/100}${n})`}return(0,i.A)({keys:u,values:c,up:h,down:d,between:f,only:function(e){return u.indexOf(e)+1{e.exports=function(e,t){if(null==e)return{};var n={};for(var r in 
e)if({}.hasOwnProperty.call(e,r)){if(-1!==t.indexOf(r))continue;n[r]=e[r]}return n},e.exports.__esModule=!0,e.exports.default=e.exports},4910:(e,t,n)=>{"use strict";n.d(t,{A:()=>h});var r=n(4802),i=n(3903),o=n(3239),a={};a["[object Float32Array]"]=a["[object Float64Array]"]=a["[object Int8Array]"]=a["[object Int16Array]"]=a["[object Int32Array]"]=a["[object Uint8Array]"]=a["[object Uint8ClampedArray]"]=a["[object Uint16Array]"]=a["[object Uint32Array]"]=!0,a["[object Arguments]"]=a["[object Array]"]=a["[object ArrayBuffer]"]=a["[object Boolean]"]=a["[object DataView]"]=a["[object Date]"]=a["[object Error]"]=a["[object Function]"]=a["[object Map]"]=a["[object Number]"]=a["[object Object]"]=a["[object RegExp]"]=a["[object Set]"]=a["[object String]"]=a["[object WeakMap]"]=!1;const s=function(e){return(0,o.A)(e)&&(0,i.A)(e.length)&&!!a[(0,r.A)(e)]};var l=n(7328),c=n(6662),u=c.A&&c.A.isTypedArray;const h=u?(0,l.A)(u):s},4943:(e,t,n)=>{"use strict";n.d(t,{A:()=>l});var r=n(4306),i="object"==typeof exports&&exports&&!exports.nodeType&&exports,o=i&&"object"==typeof module&&module&&!module.nodeType&&module,a=o&&o.exports===i?r.A.Buffer:void 0,s=a?a.allocUnsafe:void 0;const l=function(e,t){if(t)return e.slice();var n=e.length,r=s?s(n):new e.constructor(n);return e.copy(r),r}},4944:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M5 4v3h5.5v12h3V7H19V4z"}),"Title")},4962:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(7170),i=n(1458);const o=(e,t,n)=>{const o=i.A.parse(e),a=o[t],s=r.A.channel.clamp[t](a+n);return a!==s&&(o[t]=s),i.A.stringify(o)}},4983:(e,t)=>{"use strict";var n="function"===typeof 
Symbol&&Symbol.for,r=n?Symbol.for("react.element"):60103,i=n?Symbol.for("react.portal"):60106,o=n?Symbol.for("react.fragment"):60107,a=n?Symbol.for("react.strict_mode"):60108,s=n?Symbol.for("react.profiler"):60114,l=n?Symbol.for("react.provider"):60109,c=n?Symbol.for("react.context"):60110,u=n?Symbol.for("react.async_mode"):60111,h=n?Symbol.for("react.concurrent_mode"):60111,d=n?Symbol.for("react.forward_ref"):60112,f=n?Symbol.for("react.suspense"):60113,p=n?Symbol.for("react.suspense_list"):60120,g=n?Symbol.for("react.memo"):60115,m=n?Symbol.for("react.lazy"):60116,y=n?Symbol.for("react.block"):60121,b=n?Symbol.for("react.fundamental"):60117,v=n?Symbol.for("react.responder"):60118,x=n?Symbol.for("react.scope"):60119;function k(e){if("object"===typeof e&&null!==e){var t=e.$$typeof;switch(t){case r:switch(e=e.type){case u:case h:case o:case s:case a:case f:return e;default:switch(e=e&&e.$$typeof){case c:case d:case m:case g:case l:return e;default:return t}}case i:return t}}}function w(e){return k(e)===h}t.AsyncMode=u,t.ConcurrentMode=h,t.ContextConsumer=c,t.ContextProvider=l,t.Element=r,t.ForwardRef=d,t.Fragment=o,t.Lazy=m,t.Memo=g,t.Portal=i,t.Profiler=s,t.StrictMode=a,t.Suspense=f,t.isAsyncMode=function(e){return w(e)||k(e)===u},t.isConcurrentMode=w,t.isContextConsumer=function(e){return k(e)===c},t.isContextProvider=function(e){return k(e)===l},t.isElement=function(e){return"object"===typeof e&&null!==e&&e.$$typeof===r},t.isForwardRef=function(e){return k(e)===d},t.isFragment=function(e){return k(e)===o},t.isLazy=function(e){return k(e)===m},t.isMemo=function(e){return k(e)===g},t.isPortal=function(e){return k(e)===i},t.isProfiler=function(e){return k(e)===s},t.isStrictMode=function(e){return k(e)===a},t.isSuspense=function(e){return k(e)===f},t.isValidElementType=function(e){return"string"===typeof e||"function"===typeof e||e===o||e===h||e===s||e===a||e===f||e===p||"object"===typeof 
e&&null!==e&&(e.$$typeof===m||e.$$typeof===g||e.$$typeof===l||e.$$typeof===c||e.$$typeof===d||e.$$typeof===b||e.$$typeof===v||e.$$typeof===x||e.$$typeof===y)},t.typeOf=k},4989:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>r.A,private_createBreakpoints:()=>i.A,unstable_applyStyles:()=>o.A});var r=n(8280),i=n(4853),o=n(9703)},4994:e=>{e.exports=function(e){return e&&e.__esModule?e:{default:e}},e.exports.__esModule=!0,e.exports.default=e.exports},5009:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=function(e,t){return e===t||e!==e&&t!==t}},5037:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M7.41 15.41 12 10.83l4.59 4.58L18 14l-6-6-6 6z"}),"KeyboardArrowUp")},5043:(e,t,n)=>{"use strict";e.exports=n(4288)},5140:(e,t,n)=>{"use strict";n.d(t,{A:()=>d});var r=n(2476);const i=function(){this.__data__=new r.A,this.size=0};const o=function(e){var t=this.__data__,n=t.delete(e);return this.size=t.size,n};const a=function(e){return this.__data__.get(e)};const s=function(e){return this.__data__.has(e)};var l=n(8438),c=n(7840);const u=function(e,t){var n=this.__data__;if(n instanceof r.A){var i=n.__data__;if(!l.A||i.length<199)return i.push([e,t]),this.size=++n.size,this;n=this.__data__=new c.A(i)}return n.set(e,t),this.size=n.size,this};function h(e){var t=this.__data__=new r.A(e);this.size=t.size}h.prototype.clear=i,h.prototype.delete=o,h.prototype.get=a,h.prototype.has=s,h.prototype.set=u;const d=h},5172:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(9885),i=n(4306);const o=(0,r.A)(i.A,"Set")},5173:(e,t,n)=>{e.exports=n(1497)()},5386:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=Array.isArray},5481:(e,t,n)=>{"use strict";n.d(t,{Y:()=>i,Z:()=>o});var r=n(7170);const i={};for(let a=0;a<=255;a++)i[a]=r.A.unit.dec2hex(a);const o={ALL:0,RGB:1,HSL:2}},5540:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M3 17.25V21h3.75L17.81 9.94l-3.75-3.75zM20.71 
7.04c.39-.39.39-1.02 0-1.41l-2.34-2.34a.9959.9959 0 0 0-1.41 0l-1.83 1.83 3.75 3.75z"}),"Edit")},5554:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(4962);const i=(e,t)=>(0,r.A)(e,"l",-t)},5575:()=>{Prism.languages.markup={comment:{pattern://,greedy:!0},prolog:{pattern:/<\?[\s\S]+?\?>/,greedy:!0},doctype:{pattern:/"'[\]]|"[^"]*"|'[^']*')+(?:\[(?:[^<"'\]]|"[^"]*"|'[^']*'|<(?!!--)|)*\]\s*)?>/i,greedy:!0,inside:{"internal-subset":{pattern:/(^[^\[]*\[)[\s\S]+(?=\]>$)/,lookbehind:!0,greedy:!0,inside:null},string:{pattern:/"[^"]*"|'[^']*'/,greedy:!0},punctuation:/^$|[[\]]/,"doctype-tag":/^DOCTYPE/i,name:/[^\s<>'"]+/}},cdata:{pattern://i,greedy:!0},tag:{pattern:/<\/?(?!\d)[^\s>\/=$<%]+(?:\s(?:\s*[^\s>\/=]+(?:\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))|(?=[\s/>])))+)?\s*\/?>/,greedy:!0,inside:{tag:{pattern:/^<\/?[^\s>\/]+/,inside:{punctuation:/^<\/?/,namespace:/^[^\s>\/:]+:/}},"special-attr":[],"attr-value":{pattern:/=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+)/,inside:{punctuation:[{pattern:/^=/,alias:"attr-equals"},{pattern:/^(\s*)["']|["']$/,lookbehind:!0}]}},punctuation:/\/?>/,"attr-name":{pattern:/[^\s>\/]+/,inside:{namespace:/^[^\s>\/:]+:/}}}},entity:[{pattern:/&[\da-z]{1,8};/i,alias:"named-entity"},/&#x?[\da-f]{1,8};/i]},Prism.languages.markup.tag.inside["attr-value"].inside.entity=Prism.languages.markup.entity,Prism.languages.markup.doctype.inside["internal-subset"].inside=Prism.languages.markup,Prism.hooks.add("wrap",(function(e){"entity"===e.type&&(e.attributes.title=e.content.replace(/&/,"&"))})),Object.defineProperty(Prism.languages.markup.tag,"addInlined",{value:function(e,t){var n={};n["language-"+t]={pattern:/(^$)/i,lookbehind:!0,inside:Prism.languages[t]},n.cdata=/^$/i;var r={"included-cdata":{pattern://i,inside:n}};r["language-"+t]={pattern:/[\s\S]+/,inside:Prism.languages[t]};var i={};i[e]={pattern:RegExp(/(<__[^>]*>)(?:))*\]\]>|(?!)/.source.replace(/__/g,(function(){return 
e})),"i"),lookbehind:!0,greedy:!0,inside:r},Prism.languages.insertBefore("markup","cdata",i)}}),Object.defineProperty(Prism.languages.markup.tag,"addAttribute",{value:function(e,t){Prism.languages.markup.tag.inside["special-attr"].push({pattern:RegExp(/(^|["'\s])/.source+"(?:"+e+")"+/\s*=\s*(?:"[^"]*"|'[^']*'|[^\s'">=]+(?=[\s>]))/.source,"i"),lookbehind:!0,inside:{"attr-name":/^[^\s=]+/,"attr-value":{pattern:/=[\s\S]+/,inside:{value:{pattern:/(^=\s*(["']|(?!["'])))\S[\s\S]*(?=\2$)/,lookbehind:!0,alias:[t,"language-"+t],inside:Prism.languages[t]},punctuation:[{pattern:/^=/,alias:"attr-equals"},/"|'/]}}}})}}),Prism.languages.html=Prism.languages.markup,Prism.languages.mathml=Prism.languages.markup,Prism.languages.svg=Prism.languages.markup,Prism.languages.xml=Prism.languages.extend("markup",{}),Prism.languages.ssml=Prism.languages.xml,Prism.languages.atom=Prism.languages.xml,Prism.languages.rss=Prism.languages.xml},5622:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=n(4306).A.Symbol},5674:(e,t,n)=>{"use strict";n.d(t,{A:()=>r});const r=function(e,t){return function(n){return e(t(n))}}},5820:()=>{Prism.languages.mermaid={comment:{pattern:/%%.*/,greedy:!0},style:{pattern:/^([ \t]*(?:classDef|linkStyle|style)[ \t]+[\w$-]+[ \t]+)\w.*[^\s;]/m,lookbehind:!0,inside:{property:/\b\w[\w-]*(?=[ \t]*:)/,operator:/:/,punctuation:/,/}},"inter-arrow-label":{pattern:/([^<>ox.=-])(?:-[-.]|==)(?![<>ox.=-])[ \t]*(?:"[^"\r\n]*"|[^\s".=-](?:[^\r\n.=-]*[^\s.=-])?)[ \t]*(?:\.+->?|--+[->]|==+[=>])(?![<>ox.=-])/,lookbehind:!0,greedy:!0,inside:{arrow:{pattern:/(?:\.+->?|--+[->]|==+[=>])$/,alias:"operator"},label:{pattern:/^([\s\S]{2}[ 
\t]*)\S(?:[\s\S]*\S)?/,lookbehind:!0,alias:"property"},"arrow-head":{pattern:/^\S+/,alias:["arrow","operator"]}}},arrow:[{pattern:/(^|[^{}|o.-])[|}][|o](?:--|\.\.)[|o][|{](?![{}|o.-])/,lookbehind:!0,alias:"operator"},{pattern:/(^|[^<>ox.=-])(?:[ox]?|(?:==+|--+|-\.*-)[>ox]|===+|---+|-\.+-)(?![<>ox.=-])/,lookbehind:!0,alias:"operator"},{pattern:/(^|[^<>()x-])(?:--?(?:>>|[x>)])(?![<>()x])|(?:<<|[x<(])--?(?!-))/,lookbehind:!0,alias:"operator"},{pattern:/(^|[^<>|*o.-])(?:[*o]--|--[*o]|<\|?(?:--|\.\.)|(?:--|\.\.)\|?>|--|\.\.)(?![<>|*o.-])/,lookbehind:!0,alias:"operator"}],label:{pattern:/(^|[^|<])\|(?:[^\r\n"|]|"[^"\r\n]*")+\|/,lookbehind:!0,greedy:!0,alias:"property"},text:{pattern:/(?:[(\[{]+|\b>)(?:[^\r\n"()\[\]{}]|"[^"\r\n]*")+(?:[)\]}]+|>)/,alias:"string"},string:{pattern:/"[^"\r\n]*"/,greedy:!0},annotation:{pattern:/<<(?:abstract|choice|enumeration|fork|interface|join|service)>>|\[\[(?:choice|fork|join)\]\]/i,alias:"important"},keyword:[{pattern:/(^[ \t]*)(?:action|callback|class|classDef|classDiagram|click|direction|erDiagram|flowchart|gantt|gitGraph|graph|journey|link|linkStyle|pie|requirementDiagram|sequenceDiagram|stateDiagram|stateDiagram-v2|style|subgraph)(?![\w$-])/m,lookbehind:!0,greedy:!0},{pattern:/(^[ \t]*)(?:activate|alt|and|as|autonumber|deactivate|else|end(?:[ \t]+note)?|loop|opt|par|participant|rect|state|note[ \t]+(?:over|(?:left|right)[ \t]+of))(?![\w$-])/im,lookbehind:!0,greedy:!0}],entity:/#[a-z0-9]+;/,operator:{pattern:/(\w[ \t]*)&(?=[ \t]*\w)|:::|:/,lookbehind:!0},punctuation:/[(){};]/}},5869:(e,t,n)=>{"use strict";n.d(t,{A:()=>u});const r=function(e,t){for(var n=-1,r=Array(e);++n{"use strict";n.d(t,{A:()=>i});var r=/^(?:0|[1-9]\d*)$/;const i=function(e,t){var n=typeof e;return!!(t=null==t?9007199254740991:t)&&("number"==n||"symbol"!=n&&r.test(e))&&e>-1&&e%1==0&&e{"use strict";function n(e,t){var n=e.length;e.push(t);e:for(;0>>1,i=e[r];if(!(0>>1;ro(l,n))co(u,l)?(e[r]=u,e[c]=n,r=c):(e[r]=l,e[s]=n,r=s);else{if(!(co(u,n)))break 
e;e[r]=u,e[c]=n,r=c}}}return t}function o(e,t){var n=e.sortIndex-t.sortIndex;return 0!==n?n:e.id-t.id}if(t.unstable_now=void 0,"object"===typeof performance&&"function"===typeof performance.now){var a=performance;t.unstable_now=function(){return a.now()}}else{var s=Date,l=s.now();t.unstable_now=function(){return s.now()-l}}var c=[],u=[],h=1,d=null,f=3,p=!1,g=!1,m=!1,y=!1,b="function"===typeof setTimeout?setTimeout:null,v="function"===typeof clearTimeout?clearTimeout:null,x="undefined"!==typeof setImmediate?setImmediate:null;function k(e){for(var t=r(u);null!==t;){if(null===t.callback)i(u);else{if(!(t.startTime<=e))break;i(u),t.sortIndex=t.expirationTime,n(c,t)}t=r(u)}}function w(e){if(m=!1,k(e),!g)if(null!==r(c))g=!0,C||(C=!0,S());else{var t=r(u);null!==t&&P(w,t.startTime-e)}}var S,C=!1,_=-1,A=5,T=-1;function E(){return!!y||!(t.unstable_now()-Te&&E());){var a=d.callback;if("function"===typeof a){d.callback=null,f=d.priorityLevel;var s=a(d.expirationTime<=e);if(e=t.unstable_now(),"function"===typeof s){d.callback=s,k(e),n=!0;break t}d===r(c)&&i(c),k(e)}else i(c);d=r(c)}if(null!==d)n=!0;else{var l=r(u);null!==l&&P(w,l.startTime-e),n=!1}}break e}finally{d=null,f=o,p=!1}n=void 0}}finally{n?S():C=!1}}}if("function"===typeof x)S=function(){x(F)};else if("undefined"!==typeof MessageChannel){var M=new MessageChannel,L=M.port2;M.port1.onmessage=F,S=function(){L.postMessage(null)}}else S=function(){b(F,0)};function P(e,n){_=b((function(){e(t.unstable_now())}),n)}t.unstable_IdlePriority=5,t.unstable_ImmediatePriority=1,t.unstable_LowPriority=4,t.unstable_NormalPriority=3,t.unstable_Profiling=null,t.unstable_UserBlockingPriority=2,t.unstable_cancelCallback=function(e){e.callback=null},t.unstable_forceFrameRate=function(e){0>e||125a?(e.sortIndex=o,n(u,e),null===r(c)&&e===r(u)&&(m?(v(_),_=-1):m=!0,P(w,o-a))):(e.sortIndex=s,n(c,e),g||p||(g=!0,C||(C=!0,S()))),e},t.unstable_shouldYield=E,t.unstable_wrapCallback=function(e){var t=f;return function(){var n=f;f=t;try{return 
e.apply(this,arguments)}finally{f=n}}}},5920:(e,t,n)=>{"use strict";n.d(t,{A:()=>i});var r=n(9885);const i=function(){try{var e=(0,r.A)(Object,"defineProperty");return e({},"",{}),e}catch(t){}}()},6043:()=>{!function(e){var t=/\b(?:abstract|assert|boolean|break|byte|case|catch|char|class|const|continue|default|do|double|else|enum|exports|extends|final|finally|float|for|goto|if|implements|import|instanceof|int|interface|long|module|native|new|non-sealed|null|open|opens|package|permits|private|protected|provides|public|record(?!\s*[(){}[\]<>=%~.:,;?+\-*/&|^])|requires|return|sealed|short|static|strictfp|super|switch|synchronized|this|throw|throws|to|transient|transitive|try|uses|var|void|volatile|while|with|yield)\b/,n=/(?:[a-z]\w*\s*\.\s*)*(?:[A-Z]\w*\s*\.\s*)*/.source,r={pattern:RegExp(/(^|[^\w.])/.source+n+/[A-Z](?:[\d_A-Z]*[a-z]\w*)?\b/.source),lookbehind:!0,inside:{namespace:{pattern:/^[a-z]\w*(?:\s*\.\s*[a-z]\w*)*(?:\s*\.)?/,inside:{punctuation:/\./}},punctuation:/\./}};e.languages.java=e.languages.extend("clike",{string:{pattern:/(^|[^\\])"(?:\\.|[^"\\\r\n])*"/,lookbehind:!0,greedy:!0},"class-name":[r,{pattern:RegExp(/(^|[^\w.])/.source+n+/[A-Z]\w*(?=\s+\w+\s*[;,=()]|\s*(?:\[[\s,]*\]\s*)?::\s*new\b)/.source),lookbehind:!0,inside:r.inside},{pattern:RegExp(/(\b(?:class|enum|extends|implements|instanceof|interface|new|record|throws)\s+)/.source+n+/[A-Z]\w*\b/.source),lookbehind:!0,inside:r.inside}],keyword:t,function:[e.languages.clike.function,{pattern:/(::\s*)[a-z_]\w*/,lookbehind:!0}],number:/\b0b[01][01_]*L?\b|\b0x(?:\.[\da-f_p+-]+|[\da-f_]+(?:\.[\da-f_p+-]+)?)\b|(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.\d[\d_]*)(?:e[+-]?\d[\d_]*)?[dfl]?/i,operator:{pattern:/(^|[^.])(?:<<=?|>>>?=?|->|--|\+\+|&&|\|\||::|[?:~]|[-+*/%&|^!=<>]=?)/m,lookbehind:!0},constant:/\b[A-Z][A-Z_\d]+\b/}),e.languages.insertBefore("java","string",{"triple-quoted-string":{pattern:/"""[ 
\t]*[\r\n](?:(?:"|"")?(?:\\.|[^"\\]))*"""/,greedy:!0,alias:"string"},char:{pattern:/'(?:\\.|[^'\\\r\n]){1,6}'/,greedy:!0}}),e.languages.insertBefore("java","class-name",{annotation:{pattern:/(^|[^.])@\w+(?:\s*\.\s*\w+)*/,lookbehind:!0,alias:"punctuation"},generics:{pattern:/<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&))*>)*>)*>)*>/,inside:{"class-name":r,keyword:t,punctuation:/[<>(),.:]/,operator:/[?&|]/}},import:[{pattern:RegExp(/(\bimport\s+)/.source+n+/(?:[A-Z]\w*|\*)(?=\s*;)/.source),lookbehind:!0,inside:{namespace:r.inside.namespace,punctuation:/\./,operator:/\*/,"class-name":/\w+/}},{pattern:RegExp(/(\bimport\s+static\s+)/.source+n+/(?:\w+|\*)(?=\s*;)/.source),lookbehind:!0,alias:"static",inside:{namespace:r.inside.namespace,static:/\b\w+$/,punctuation:/\./,operator:/\*/,"class-name":/\w+/}}],namespace:{pattern:RegExp(/(\b(?:exports|import(?:\s+static)?|module|open|opens|package|provides|requires|to|transitive|uses|with)\s+)(?!)[a-z]\w*(?:\.[a-z]\w*)*\.?/.source.replace(//g,(function(){return t.source}))),lookbehind:!0,inside:{punctuation:/\./}}})}(Prism)},6105:e=>{!function(){if("undefined"!==typeof Prism){var t=Object.assign||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n]);return e},n={"remove-trailing":"boolean","remove-indent":"boolean","left-trim":"boolean","right-trim":"boolean","break-lines":"number",indent:"number","remove-initial-line-feed":"boolean","tabs-to-spaces":"number","spaces-to-tabs":"number"};r.prototype={setDefaults:function(e){this.defaults=t(this.defaults,e)},normalize:function(e,n){for(var r in n=t(this.defaults,n)){var i=r.replace(/-(\w)/g,(function(e,t){return t.toUpperCase()}));"normalize"!==r&&"setDefaults"!==i&&n[r]&&this[i]&&(e=this[i].call(this,e,n[r]))}return e},leftTrim:function(e){return e.replace(/^\s+/,"")},rightTrim:function(e){return e.replace(/\s+$/,"")},tabsToSpaces:function(e,t){return t=0|t||4,e.replace(/\t/g,new Array(++t).join(" 
"))},spacesToTabs:function(e,t){return t=0|t||4,e.replace(RegExp(" {"+t+"}","g"),"\t")},removeTrailing:function(e){return e.replace(/\s*?$/gm,"")},removeInitialLineFeed:function(e){return e.replace(/^(?:\r?\n|\r)/,"")},removeIndent:function(e){var t=e.match(/^[^\S\n\r]*(?=\S)/gm);return t&&t[0].length?(t.sort((function(e,t){return e.length-t.length})),t[0].length?e.replace(RegExp("^"+t[0],"gm"),""):e):e},indent:function(e,t){return e.replace(/^[^\S\n\r]*(?=\S)/gm,new Array(++t).join("\t")+"$&")},breakLines:function(e,t){t=!0===t?80:0|t||80;for(var n=e.split("\n"),r=0;rt&&(o[s]="\n"+o[s],a=l)}n[r]=o.join("")}return n.join("\n")}},e.exports&&(e.exports=r),Prism.plugins.NormalizeWhitespace=new r({"remove-trailing":!0,"remove-indent":!0,"left-trim":!0,"right-trim":!0}),Prism.hooks.add("before-sanity-check",(function(e){var t=Prism.plugins.NormalizeWhitespace;if((!e.settings||!1!==e.settings["whitespace-normalization"])&&Prism.util.isActive(e.element,"whitespace-normalization",!0))if(e.element&&e.element.parentNode||!e.code){var r=e.element.parentNode;if(e.code&&r&&"pre"===r.nodeName.toLowerCase()){for(var i in null==e.settings&&(e.settings={}),n)if(Object.hasOwnProperty.call(n,i)){var o=n[i];if(r.hasAttribute("data-"+i))try{var a=JSON.parse(r.getAttribute("data-"+i)||"true");typeof a===o&&(e.settings[i]=a)}catch(p){}}for(var s=r.childNodes,l="",c="",u=!1,h=0;h{"use strict";n.d(t,{A:()=>i});var r=Function.prototype.toString;const i=function(e){if(null!=e){try{return r.call(e)}catch(t){}try{return e+""}catch(t){}}return""}},6325:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M9.4 16.6 4.8 12l4.6-4.6L8 6l-6 6 6 6zm5.2 0 4.6-4.6-4.6-4.6L16 6l6 6-6 6z"}),"Code")},6356:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(1869),i=n(8635);const o=function(e){return(0,r.A)((function(t,n){var r=-1,o=n.length,a=o>1?n[o-1]:void 0,s=o>2?n[2]:void 0;for(a=e.length>3&&"function"==typeof a?(o--,a):void 
0,s&&(0,i.A)(n[0],n[1],s)&&(a=o<3?void 0:a,o=1),t=Object(t);++r{"use strict";n.d(t,{A:()=>i});var r=n(4962);const i=(e,t)=>(0,r.A)(e,"l",t)},6453:(e,t,n)=>{"use strict";n.d(t,{A:()=>o});var r=n(7170),i=n(1458);const o=(e,t)=>{const n=i.A.parse(e);for(const i in t)n[i]=r.A.channel.clamp[i](t[i]);return i.A.stringify(n)}},6471:(e,t,n)=>{"use strict";n.d(t,{A:()=>s});var r=n(7170),i=n(4186),o=n(1458),a=n(6453);const s=function(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:0,s=arguments.length>3&&void 0!==arguments[3]?arguments[3]:1;if("number"!==typeof e)return(0,a.A)(e,{a:t});const l=i.A.set({r:r.A.channel.clamp.r(e),g:r.A.channel.clamp.g(t),b:r.A.channel.clamp.b(n),a:r.A.channel.clamp.a(s)});return o.A.stringify(l)}},6581:(e,t,n)=>{"use strict";var r=n(4994);t.A=void 0;var i=r(n(39)),o=n(579);t.A=(0,i.default)((0,o.jsx)("path",{d:"M19 3H5c-1.11 0-2 .9-2 2v14c0 1.1.89 2 2 2h14c1.11 0 2-.9 2-2V5c0-1.1-.89-2-2-2m-9 14-5-5 1.41-1.41L10 14.17l7.59-7.59L19 8z"}),"CheckBox")},6662:(e,t,n)=>{"use strict";n.d(t,{A:()=>s});var r=n(8187),i="object"==typeof exports&&exports&&!exports.nodeType&&exports,o=i&&"object"==typeof module&&module&&!module.nodeType&&module,a=o&&o.exports===i&&r.A.process;const s=function(){try{var e=o&&o.require&&o.require("util").types;return e||a&&a.binding&&a.binding("util")}catch(t){}}()},6672:(e,t,n)=>{"use strict";var r=n(5043);function i(e){var t="https://react.dev/errors/"+e;if(1{"use strict";var r=n(8853),i=n(5043),o=n(7950);function a(e){var t="https://react.dev/errors/"+e;if(1I||(e.current=z[I],z[I]=null,I--)}function j(e,t){I++,z[I]=e.current,e.current=t}var q=N(null),H=N(null),W=N(null),K=N(null);function U(e,t){switch(j(W,t),j(H,e),j(q,null),t.nodeType){case 9:case 11:e=(e=t.documentElement)&&(e=e.namespaceURI)?ih(e):0;break;default:if(e=t.tagName,t=t.namespaceURI)e=oh(t=ih(t),e);else switch(e){case"svg":e=1;break;case"math":e=2;break;default:e=0}}R(q),j(q,e)}function V(){R(q),R(H),R(W)}function 
Y(e){null!==e.memoizedState&&j(K,e);var t=q.current,n=oh(t,e.type);t!==n&&(j(H,e),j(q,n))}function G(e){H.current===e&&(R(q),R(H)),K.current===e&&(R(K),Yh._currentValue=D)}var X=Object.prototype.hasOwnProperty,Q=r.unstable_scheduleCallback,Z=r.unstable_cancelCallback,J=r.unstable_shouldYield,ee=r.unstable_requestPaint,te=r.unstable_now,ne=r.unstable_getCurrentPriorityLevel,re=r.unstable_ImmediatePriority,ie=r.unstable_UserBlockingPriority,oe=r.unstable_NormalPriority,ae=r.unstable_LowPriority,se=r.unstable_IdlePriority,le=r.log,ce=r.unstable_setDisableYieldValue,ue=null,he=null;function de(e){if("function"===typeof le&&ce(e),he&&"function"===typeof he.setStrictMode)try{he.setStrictMode(ue,e)}catch(t){}}var fe=Math.clz32?Math.clz32:function(e){return 0===(e>>>=0)?32:31-(pe(e)/ge|0)|0},pe=Math.log,ge=Math.LN2;var me=256,ye=4194304;function be(e){var t=42&e;if(0!==t)return t;switch(e&-e){case 1:return 1;case 2:return 2;case 4:return 4;case 8:return 8;case 16:return 16;case 32:return 32;case 64:return 64;case 128:return 128;case 256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return 4194048&e;case 4194304:case 8388608:case 16777216:case 33554432:return 62914560&e;case 67108864:return 67108864;case 134217728:return 134217728;case 268435456:return 268435456;case 536870912:return 536870912;case 1073741824:return 0;default:return e}}function ve(e,t,n){var r=e.pendingLanes;if(0===r)return 0;var i=0,o=e.suspendedLanes,a=e.pingedLanes;e=e.warmLanes;var s=134217727&r;return 0!==s?0!==(r=s&~o)?i=be(r):0!==(a&=s)?i=be(a):n||0!==(n=s&~e)&&(i=be(n)):0!==(s=r&~o)?i=be(s):0!==a?i=be(a):n||0!==(n=r&~e)&&(i=be(n)),0===i?0:0!==t&&t!==i&&0===(t&o)&&((o=i&-i)>=(n=t&-t)||32===o&&0!==(4194048&n))?t:i}function xe(e,t){return 0===(e.pendingLanes&~(e.suspendedLanes&~e.pingedLanes)&t)}function ke(e,t){switch(e){case 1:case 2:case 4:case 8:case 64:return t+250;case 16:case 32:case 128:case 
256:case 512:case 1024:case 2048:case 4096:case 8192:case 16384:case 32768:case 65536:case 131072:case 262144:case 524288:case 1048576:case 2097152:return t+5e3;default:return-1}}function we(){var e=me;return 0===(4194048&(me<<=1))&&(me=256),e}function Se(){var e=ye;return 0===(62914560&(ye<<=1))&&(ye=4194304),e}function Ce(e){for(var t=[],n=0;31>n;n++)t.push(e);return t}function _e(e,t){e.pendingLanes|=t,268435456!==t&&(e.suspendedLanes=0,e.pingedLanes=0,e.warmLanes=0)}function Ae(e,t,n){e.pendingLanes|=t,e.suspendedLanes&=~t;var r=31-fe(t);e.entangledLanes|=t,e.entanglements[r]=1073741824|e.entanglements[r]|4194090&n}function Te(e,t){var n=e.entangledLanes|=t;for(e=e.entanglements;n;){var r=31-fe(n),i=1<)":-1--i||l[r]!==c[i]){var u="\n"+l[r].replace(" at new "," at ");return e.displayName&&u.includes("")&&(u=u.replace("",e.displayName)),u}}while(1<=r&&0<=i);break}}}finally{ot=!1,Error.prepareStackTrace=n}return(n=e?e.displayName||e.name:"")?it(n):""}function st(e){switch(e.tag){case 26:case 27:case 5:return it(e.type);case 16:return it("Lazy");case 13:return it("Suspense");case 19:return it("SuspenseList");case 0:case 15:return at(e.type,!1);case 11:return at(e.type.render,!1);case 1:return at(e.type,!0);case 31:return it("Activity");default:return""}}function lt(e){try{var t="";do{t+=st(e),e=e.return}while(e);return t}catch(n){return"\nError generating stack: "+n.message+"\n"+n.stack}}function ct(e){switch(typeof e){case"bigint":case"boolean":case"number":case"string":case"undefined":case"object":return e;default:return""}}function ut(e){var t=e.type;return(e=e.nodeName)&&"input"===e.toLowerCase()&&("checkbox"===t||"radio"===t)}function ht(e){e._valueTracker||(e._valueTracker=function(e){var t=ut(e)?"checked":"value",n=Object.getOwnPropertyDescriptor(e.constructor.prototype,t),r=""+e[t];if(!e.hasOwnProperty(t)&&"undefined"!==typeof n&&"function"===typeof n.get&&"function"===typeof n.set){var i=n.get,o=n.set;return 
Object.defineProperty(e,t,{configurable:!0,get:function(){return i.call(this)},set:function(e){r=""+e,o.call(this,e)}}),Object.defineProperty(e,t,{enumerable:n.enumerable}),{getValue:function(){return r},setValue:function(e){r=""+e},stopTracking:function(){e._valueTracker=null,delete e[t]}}}}(e))}function dt(e){if(!e)return!1;var t=e._valueTracker;if(!t)return!0;var n=t.getValue(),r="";return e&&(r=ut(e)?e.checked?"true":"false":e.value),(e=r)!==n&&(t.setValue(e),!0)}function ft(e){if("undefined"===typeof(e=e||("undefined"!==typeof document?document:void 0)))return null;try{return e.activeElement||e.body}catch(t){return e.body}}var pt=/[\n"\\]/g;function gt(e){return e.replace(pt,(function(e){return"\\"+e.charCodeAt(0).toString(16)+" "}))}function mt(e,t,n,r,i,o,a,s){e.name="",null!=a&&"function"!==typeof a&&"symbol"!==typeof a&&"boolean"!==typeof a?e.type=a:e.removeAttribute("type"),null!=t?"number"===a?(0===t&&""===e.value||e.value!=t)&&(e.value=""+ct(t)):e.value!==""+ct(t)&&(e.value=""+ct(t)):"submit"!==a&&"reset"!==a||e.removeAttribute("value"),null!=t?bt(e,a,ct(t)):null!=n?bt(e,a,ct(n)):null!=r&&e.removeAttribute("value"),null==i&&null!=o&&(e.defaultChecked=!!o),null!=i&&(e.checked=i&&"function"!==typeof i&&"symbol"!==typeof i),null!=s&&"function"!==typeof s&&"symbol"!==typeof s&&"boolean"!==typeof s?e.name=""+ct(s):e.removeAttribute("name")}function yt(e,t,n,r,i,o,a,s){if(null!=o&&"function"!==typeof o&&"symbol"!==typeof o&&"boolean"!==typeof o&&(e.type=o),null!=t||null!=n){if(!("submit"!==o&&"reset"!==o||void 0!==t&&null!==t))return;n=null!=n?""+ct(n):"",t=null!=t?""+ct(t):n,s||t===e.value||(e.value=t),e.defaultValue=t}r="function"!==typeof(r=null!=r?r:i)&&"symbol"!==typeof r&&!!r,e.checked=s?e.checked:!!r,e.defaultChecked=!!r,null!=a&&"function"!==typeof a&&"symbol"!==typeof a&&"boolean"!==typeof a&&(e.name=a)}function bt(e,t,n){"number"===t&&ft(e.ownerDocument)===e||e.defaultValue===""+n||(e.defaultValue=""+n)}function 
vt(e,t,n,r){if(e=e.options,t){t={};for(var i=0;i=Sn),An=String.fromCharCode(32),Tn=!1;function En(e,t){switch(e){case"keyup":return-1!==kn.indexOf(t.keyCode);case"keydown":return 229!==t.keyCode;case"keypress":case"mousedown":case"focusout":return!0;default:return!1}}function Fn(e){return"object"===typeof(e=e.detail)&&"data"in e?e.data:null}var Mn=!1;var Ln={color:!0,date:!0,datetime:!0,"datetime-local":!0,email:!0,month:!0,number:!0,password:!0,range:!0,search:!0,tel:!0,text:!0,time:!0,url:!0,week:!0};function Pn(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return"input"===t?!!Ln[e.type]:"textarea"===t}function On(e,t,n,r){Pt?Ot?Ot.push(r):Ot=[r]:Pt=r,0<(t=Wu(t,"onChange")).length&&(n=new Jt("onChange","change",null,n,r),e.push({event:n,listeners:t}))}var $n=null,Bn=null;function Dn(e){Du(e,0)}function zn(e){if(dt(He(e)))return e}function In(e,t){if("change"===e)return t}var Nn=!1;if(It){var Rn;if(It){var jn="oninput"in document;if(!jn){var qn=document.createElement("div");qn.setAttribute("oninput","return;"),jn="function"===typeof qn.oninput}Rn=jn}else Rn=!1;Nn=Rn&&(!document.documentMode||9=t)return{node:r,offset:t-e};e=n}e:{for(;r;){if(r.nextSibling){r=r.nextSibling;break e}r=r.parentNode}r=void 0}r=Qn(r)}}function Jn(e,t){return!(!e||!t)&&(e===t||(!e||3!==e.nodeType)&&(t&&3===t.nodeType?Jn(e,t.parentNode):"contains"in e?e.contains(t):!!e.compareDocumentPosition&&!!(16&e.compareDocumentPosition(t))))}function er(e){for(var t=ft((e=null!=e&&null!=e.ownerDocument&&null!=e.ownerDocument.defaultView?e.ownerDocument.defaultView:window).document);t instanceof e.HTMLIFrameElement;){try{var n="string"===typeof t.contentWindow.location.href}catch(r){n=!1}if(!n)break;t=ft((e=t.contentWindow).document)}return t}function tr(e){var t=e&&e.nodeName&&e.nodeName.toLowerCase();return t&&("input"===t&&("text"===e.type||"search"===e.type||"tel"===e.type||"url"===e.type||"password"===e.type)||"textarea"===t||"true"===e.contentEditable)}var nr=It&&"documentMode"in 
document&&11>=document.documentMode,rr=null,ir=null,or=null,ar=!1;function sr(e,t,n){var r=n.window===n?n.document:9===n.nodeType?n:n.ownerDocument;ar||null==rr||rr!==ft(r)||("selectionStart"in(r=rr)&&tr(r)?r={start:r.selectionStart,end:r.selectionEnd}:r={anchorNode:(r=(r.ownerDocument&&r.ownerDocument.defaultView||window).getSelection()).anchorNode,anchorOffset:r.anchorOffset,focusNode:r.focusNode,focusOffset:r.focusOffset},or&&Xn(or,r)||(or=r,0<(r=Wu(ir,"onSelect")).length&&(t=new Jt("onSelect","select",null,t,n),e.push({event:t,listeners:r}),t.target=rr)))}function lr(e,t){var n={};return n[e.toLowerCase()]=t.toLowerCase(),n["Webkit"+e]="webkit"+t,n["Moz"+e]="moz"+t,n}var cr={animationend:lr("Animation","AnimationEnd"),animationiteration:lr("Animation","AnimationIteration"),animationstart:lr("Animation","AnimationStart"),transitionrun:lr("Transition","TransitionRun"),transitionstart:lr("Transition","TransitionStart"),transitioncancel:lr("Transition","TransitionCancel"),transitionend:lr("Transition","TransitionEnd")},ur={},hr={};function dr(e){if(ur[e])return ur[e];if(!cr[e])return e;var t,n=cr[e];for(t in n)if(n.hasOwnProperty(t)&&t in hr)return ur[e]=n[t];return e}It&&(hr=document.createElement("div").style,"AnimationEvent"in window||(delete cr.animationend.animation,delete cr.animationiteration.animation,delete cr.animationstart.animation),"TransitionEvent"in window||delete cr.transitionend.transition);var fr=dr("animationend"),pr=dr("animationiteration"),gr=dr("animationstart"),mr=dr("transitionrun"),yr=dr("transitionstart"),br=dr("transitioncancel"),vr=dr("transitionend"),xr=new Map,kr="abort auxClick beforeToggle cancel canPlay canPlayThrough click close contextMenu copy cut drag dragEnd dragEnter dragExit dragLeave dragOver dragStart drop durationChange emptied encrypted ended error gotPointerCapture input invalid keyDown keyPress keyUp load loadedData loadedMetadata loadStart lostPointerCapture mouseDown mouseMove mouseOut mouseOver mouseUp paste pause 
play playing pointerCancel pointerDown pointerMove pointerOut pointerOver pointerUp progress rateChange reset resize seeked seeking stalled submit suspend timeUpdate touchCancel touchEnd touchStart volumeChange scroll toggle touchMove waiting wheel".split(" ");function wr(e,t){xr.set(e,t),Ye(t,[e])}kr.push("scrollEnd");var Sr=new WeakMap;function Cr(e,t){if("object"===typeof e&&null!==e){var n=Sr.get(e);return void 0!==n?n:(t={value:e,source:t,stack:lt(t)},Sr.set(e,t),t)}return{value:e,source:t,stack:lt(t)}}var _r=[],Ar=0,Tr=0;function Er(){for(var e=Ar,t=Tr=Ar=0;t>=a,i-=a,Qr=1<<32-fe(t)+i|n<o?o:8;var a=$.T,s={};$.T=s,ja(e,!1,t,n);try{var l=i(),c=$.S;if(null!==c&&c(s,l),null!==l&&"object"===typeof l&&"function"===typeof l.then)Ra(e,t,function(e,t){var n=[],r={status:"pending",value:null,reason:null,then:function(e){n.push(e)}};return e.then((function(){r.status="fulfilled",r.value=t;for(var e=0;ep?(g=h,h=null):g=h.sibling;var m=f(i,h,s[p],l);if(null===m){null===h&&(h=g);break}e&&h&&null===m.alternate&&t(i,h),a=o(m,a,p),null===u?c=m:u.sibling=m,u=m,h=g}if(p===s.length)return n(i,h),oi&&Jr(i,p),c;if(null===h){for(;pg?(m=p,p=null):m=p.sibling;var v=f(i,p,b.value,c);if(null===v){null===p&&(p=m);break}e&&p&&null===v.alternate&&t(i,p),s=o(v,s,g),null===h?u=v:h.sibling=v,h=v,p=m}if(b.done)return n(i,p),oi&&Jr(i,g),u;if(null===p){for(;!b.done;g++,b=l.next())null!==(b=d(i,b.value,c))&&(s=o(b,s,g),null===h?u=b:h.sibling=b,h=b);return oi&&Jr(i,g),u}for(p=r(p);!b.done;g++,b=l.next())null!==(b=y(p,i,g,b.value,c))&&(e&&null!==b.alternate&&p.delete(null===b.key?g:b.key),s=o(b,s,g),null===h?u=b:h.sibling=b,h=b);return e&&p.forEach((function(e){return t(i,e)})),oi&&Jr(i,g),u}(l,c,u=v.call(u),h)}if("function"===typeof u.then)return b(l,c,Qa(u),h);if(u.$$typeof===k)return b(l,c,Ti(l,u),h);Ja(l,u)}return"string"===typeof u&&""!==u||"number"===typeof u||"bigint"===typeof 
u?(u=""+u,null!==c&&6===c.tag?(n(l,c.sibling),(h=i(c,u)).return=l,l=h):(n(l,c),(h=qr(u,l.mode,h)).return=l,l=h),s(l)):n(l,c)}return function(e,t,n,r){try{Xa=0;var i=b(e,t,n,r);return Ga=null,i}catch(a){if(a===Ki||a===Vi)throw a;var o=Dr(29,a,null,e.mode);return o.lanes=r,o.return=e,o}}}var ns=ts(!0),rs=ts(!1),is=N(null),os=null;function as(e){var t=e.alternate;j(us,1&us.current),j(is,e),null===os&&(null===t||null!==po.current||null!==t.memoizedState)&&(os=e)}function ss(e){if(22===e.tag){if(j(us,us.current),j(is,e),null===os){var t=e.alternate;null!==t&&null!==t.memoizedState&&(os=e)}}else ls()}function ls(){j(us,us.current),j(is,is.current)}function cs(e){R(is),os===e&&(os=null),R(us)}var us=N(0);function hs(e){for(var t=e;null!==t;){if(13===t.tag){var n=t.memoizedState;if(null!==n&&(null===(n=n.dehydrated)||"$?"===n.data||mh(n)))return t}else if(19===t.tag&&void 0!==t.memoizedProps.revealOrder){if(0!==(128&t.flags))return t}else if(null!==t.child){t.child.return=t,t=t.child;continue}if(t===e)break;for(;null===t.sibling;){if(null===t.return||t.return===e)return null;t=t.return}t.sibling.return=t.return,t=t.sibling}return null}function ds(e,t,n,r){n=null===(n=n(r,t=e.memoizedState))||void 0===n?t:d({},t,n),e.memoizedState=n,0===e.lanes&&(e.updateQueue.baseState=n)}var fs={enqueueSetState:function(e,t,n){e=e._reactInternals;var r=$c(),i=io(r);i.payload=t,void 0!==n&&null!==n&&(i.callback=n),null!==(t=oo(e,i,r))&&(Dc(t,e,r),ao(t,e,r))},enqueueReplaceState:function(e,t,n){e=e._reactInternals;var r=$c(),i=io(r);i.tag=1,i.payload=t,void 0!==n&&null!==n&&(i.callback=n),null!==(t=oo(e,i,r))&&(Dc(t,e,r),ao(t,e,r))},enqueueForceUpdate:function(e,t){e=e._reactInternals;var n=$c(),r=io(n);r.tag=2,void 0!==t&&null!==t&&(r.callback=t),null!==(t=oo(e,r,n))&&(Dc(t,e,n),ao(t,e,n))}};function ps(e,t,n,r,i,o,a){return"function"===typeof(e=e.stateNode).shouldComponentUpdate?e.shouldComponentUpdate(r,o,a):!t.prototype||!t.prototype.isPureReactComponent||(!Xn(n,r)||!Xn(i,o))}function 
gs(e,t,n,r){e=t.state,"function"===typeof t.componentWillReceiveProps&&t.componentWillReceiveProps(n,r),"function"===typeof t.UNSAFE_componentWillReceiveProps&&t.UNSAFE_componentWillReceiveProps(n,r),t.state!==e&&fs.enqueueReplaceState(t,t.state,null)}function ms(e,t){var n=t;if("ref"in t)for(var r in n={},t)"ref"!==r&&(n[r]=t[r]);if(e=e.defaultProps)for(var i in n===t&&(n=d({},n)),e)void 0===n[i]&&(n[i]=e[i]);return n}var ys="function"===typeof reportError?reportError:function(e){if("object"===typeof window&&"function"===typeof window.ErrorEvent){var t=new window.ErrorEvent("error",{bubbles:!0,cancelable:!0,message:"object"===typeof e&&null!==e&&"string"===typeof e.message?String(e.message):String(e),error:e});if(!window.dispatchEvent(t))return}else if("object"===typeof process&&"function"===typeof process.emit)return void process.emit("uncaughtException",e);console.error(e)};function bs(e){ys(e)}function vs(e){console.error(e)}function xs(e){ys(e)}function ks(e,t){try{(0,e.onUncaughtError)(t.value,{componentStack:t.stack})}catch(n){setTimeout((function(){throw n}))}}function ws(e,t,n){try{(0,e.onCaughtError)(n.value,{componentStack:n.stack,errorBoundary:1===t.tag?t.stateNode:null})}catch(r){setTimeout((function(){throw r}))}}function Ss(e,t,n){return(n=io(n)).tag=3,n.payload={element:null},n.callback=function(){ks(e,t)},n}function Cs(e){return(e=io(e)).tag=3,e}function _s(e,t,n,r){var i=n.type.getDerivedStateFromError;if("function"===typeof i){var o=r.value;e.payload=function(){return i(o)},e.callback=function(){ws(t,n,r)}}var a=n.stateNode;null!==a&&"function"===typeof a.componentDidCatch&&(e.callback=function(){ws(t,n,r),"function"!==typeof i&&(null===Cc?Cc=new Set([this]):Cc.add(this));var e=r.stack;this.componentDidCatch(r.value,{componentStack:null!==e?e:""})})}var As=Error(a(461)),Ts=!1;function Es(e,t,n,r){t.child=null===e?rs(t,null,n,r):ns(t,e.child,n,r)}function Fs(e,t,n,r,i){n=n.render;var o=t.ref;if("ref"in r){var a={};for(var s in 
r)"ref"!==s&&(a[s]=r[s])}else a=r;return _i(t),r=Po(e,t,n,a,o,i),s=Do(),null===e||Ts?(oi&&s&&ti(t),t.flags|=1,Es(e,t,r,i),t.child):(zo(e,t,i),Gs(e,t,i))}function Ms(e,t,n,r,i){if(null===e){var o=n.type;return"function"!==typeof o||zr(o)||void 0!==o.defaultProps||null!==n.compare?((e=Rr(n.type,null,r,t,t.mode,i)).ref=t.ref,e.return=t,t.child=e):(t.tag=15,t.type=o,Ls(e,t,o,r,i))}if(o=e.child,!Xs(e,i)){var a=o.memoizedProps;if((n=null!==(n=n.compare)?n:Xn)(a,r)&&e.ref===t.ref)return Gs(e,t,i)}return t.flags|=1,(e=Ir(o,r)).ref=t.ref,e.return=t,t.child=e}function Ls(e,t,n,r,i){if(null!==e){var o=e.memoizedProps;if(Xn(o,r)&&e.ref===t.ref){if(Ts=!1,t.pendingProps=r=o,!Xs(e,i))return t.lanes=e.lanes,Gs(e,t,i);0!==(131072&e.flags)&&(Ts=!0)}}return Bs(e,t,n,r,i)}function Ps(e,t,n){var r=t.pendingProps,i=r.children,o=null!==e?e.memoizedState:null;if("hidden"===r.mode){if(0!==(128&t.flags)){if(r=null!==o?o.baseLanes|n:n,null!==e){for(i=t.child=e.child,o=0;null!==i;)o=o|i.lanes|i.childLanes,i=i.sibling;t.childLanes=o&~r}else t.childLanes=0,t.child=null;return Os(e,t,r,n)}if(0===(536870912&n))return t.lanes=t.childLanes=536870912,Os(e,t,null!==o?o.baseLanes|n:n,n);t.memoizedState={baseLanes:0,cachePool:null},null!==e&&Hi(0,null!==o?o.cachePool:null),null!==o?mo(t,o):yo(),ss(t)}else null!==o?(Hi(0,o.cachePool),mo(t,o),ls(),t.memoizedState=null):(null!==e&&Hi(0,null),yo(),ls());return Es(e,t,i,n),t.child}function Os(e,t,n,r){var i=qi();return i=null===i?null:{parent:Pi._currentValue,pool:i},t.memoizedState={baseLanes:n,cachePool:i},null!==e&&Hi(0,null),yo(),ss(t),null!==e&&Si(e,t,r,!0),null}function $s(e,t){var n=t.ref;if(null===n)null!==e&&null!==e.ref&&(t.flags|=4194816);else{if("function"!==typeof n&&"object"!==typeof n)throw Error(a(284));null!==e&&e.ref===n||(t.flags|=4194816)}}function Bs(e,t,n,r,i){return _i(t),n=Po(e,t,n,r,void 0,i),r=Do(),null===e||Ts?(oi&&r&&ti(t),t.flags|=1,Es(e,t,n,i),t.child):(zo(e,t,i),Gs(e,t,i))}function Ds(e,t,n,r,i,o){return 
_i(t),t.updateQueue=null,n=$o(t,r,n,i),Oo(e),r=Do(),null===e||Ts?(oi&&r&&ti(t),t.flags|=1,Es(e,t,n,o),t.child):(zo(e,t,o),Gs(e,t,o))}function zs(e,t,n,r,i){if(_i(t),null===t.stateNode){var o=$r,a=n.contextType;"object"===typeof a&&null!==a&&(o=Ai(a)),o=new n(r,o),t.memoizedState=null!==o.state&&void 0!==o.state?o.state:null,o.updater=fs,t.stateNode=o,o._reactInternals=t,(o=t.stateNode).props=r,o.state=t.memoizedState,o.refs={},no(t),a=n.contextType,o.context="object"===typeof a&&null!==a?Ai(a):$r,o.state=t.memoizedState,"function"===typeof(a=n.getDerivedStateFromProps)&&(ds(t,n,a,r),o.state=t.memoizedState),"function"===typeof n.getDerivedStateFromProps||"function"===typeof o.getSnapshotBeforeUpdate||"function"!==typeof o.UNSAFE_componentWillMount&&"function"!==typeof o.componentWillMount||(a=o.state,"function"===typeof o.componentWillMount&&o.componentWillMount(),"function"===typeof o.UNSAFE_componentWillMount&&o.UNSAFE_componentWillMount(),a!==o.state&&fs.enqueueReplaceState(o,o.state,null),uo(t,r,o,i),co(),o.state=t.memoizedState),"function"===typeof o.componentDidMount&&(t.flags|=4194308),r=!0}else if(null===e){o=t.stateNode;var s=t.memoizedProps,l=ms(n,s);o.props=l;var c=o.context,u=n.contextType;a=$r,"object"===typeof u&&null!==u&&(a=Ai(u));var h=n.getDerivedStateFromProps;u="function"===typeof h||"function"===typeof o.getSnapshotBeforeUpdate,s=t.pendingProps!==s,u||"function"!==typeof o.UNSAFE_componentWillReceiveProps&&"function"!==typeof o.componentWillReceiveProps||(s||c!==a)&&gs(t,o,r,a),to=!1;var d=t.memoizedState;o.state=d,uo(t,r,o,i),co(),c=t.memoizedState,s||d!==c||to?("function"===typeof h&&(ds(t,n,h,r),c=t.memoizedState),(l=to||ps(t,n,l,r,d,c,a))?(u||"function"!==typeof o.UNSAFE_componentWillMount&&"function"!==typeof o.componentWillMount||("function"===typeof o.componentWillMount&&o.componentWillMount(),"function"===typeof o.UNSAFE_componentWillMount&&o.UNSAFE_componentWillMount()),"function"===typeof 
o.componentDidMount&&(t.flags|=4194308)):("function"===typeof o.componentDidMount&&(t.flags|=4194308),t.memoizedProps=r,t.memoizedState=c),o.props=r,o.state=c,o.context=a,r=l):("function"===typeof o.componentDidMount&&(t.flags|=4194308),r=!1)}else{o=t.stateNode,ro(e,t),u=ms(n,a=t.memoizedProps),o.props=u,h=t.pendingProps,d=o.context,c=n.contextType,l=$r,"object"===typeof c&&null!==c&&(l=Ai(c)),(c="function"===typeof(s=n.getDerivedStateFromProps)||"function"===typeof o.getSnapshotBeforeUpdate)||"function"!==typeof o.UNSAFE_componentWillReceiveProps&&"function"!==typeof o.componentWillReceiveProps||(a!==h||d!==l)&&gs(t,o,r,l),to=!1,d=t.memoizedState,o.state=d,uo(t,r,o,i),co();var f=t.memoizedState;a!==h||d!==f||to||null!==e&&null!==e.dependencies&&Ci(e.dependencies)?("function"===typeof s&&(ds(t,n,s,r),f=t.memoizedState),(u=to||ps(t,n,u,r,d,f,l)||null!==e&&null!==e.dependencies&&Ci(e.dependencies))?(c||"function"!==typeof o.UNSAFE_componentWillUpdate&&"function"!==typeof o.componentWillUpdate||("function"===typeof o.componentWillUpdate&&o.componentWillUpdate(r,f,l),"function"===typeof o.UNSAFE_componentWillUpdate&&o.UNSAFE_componentWillUpdate(r,f,l)),"function"===typeof o.componentDidUpdate&&(t.flags|=4),"function"===typeof o.getSnapshotBeforeUpdate&&(t.flags|=1024)):("function"!==typeof o.componentDidUpdate||a===e.memoizedProps&&d===e.memoizedState||(t.flags|=4),"function"!==typeof o.getSnapshotBeforeUpdate||a===e.memoizedProps&&d===e.memoizedState||(t.flags|=1024),t.memoizedProps=r,t.memoizedState=f),o.props=r,o.state=f,o.context=l,r=u):("function"!==typeof o.componentDidUpdate||a===e.memoizedProps&&d===e.memoizedState||(t.flags|=4),"function"!==typeof o.getSnapshotBeforeUpdate||a===e.memoizedProps&&d===e.memoizedState||(t.flags|=1024),r=!1)}return o=r,$s(e,t),r=0!==(128&t.flags),o||r?(o=t.stateNode,n=r&&"function"!==typeof 
n.getDerivedStateFromError?null:o.render(),t.flags|=1,null!==e&&r?(t.child=ns(t,e.child,null,i),t.child=ns(t,null,n,i)):Es(e,t,n,i),t.memoizedState=o.state,e=t.child):e=Gs(e,t,i),e}function Is(e,t,n,r){return fi(),t.flags|=256,Es(e,t,n,r),t.child}var Ns={dehydrated:null,treeContext:null,retryLane:0,hydrationErrors:null};function Rs(e){return{baseLanes:e,cachePool:Wi()}}function js(e,t,n){return e=null!==e?e.childLanes&~n:0,t&&(e|=mc),e}function qs(e,t,n){var r,i=t.pendingProps,o=!1,s=0!==(128&t.flags);if((r=s)||(r=(null===e||null!==e.memoizedState)&&0!==(2&us.current)),r&&(o=!0,t.flags&=-129),r=0!==(32&t.flags),t.flags&=-33,null===e){if(oi){if(o?as(t):ls(),oi){var l,c=ii;if(l=c){e:{for(l=c,c=si;8!==l.nodeType;){if(!c){c=null;break e}if(null===(l=yh(l.nextSibling))){c=null;break e}}c=l}null!==c?(t.memoizedState={dehydrated:c,treeContext:null!==Xr?{id:Qr,overflow:Zr}:null,retryLane:536870912,hydrationErrors:null},(l=Dr(18,null,null,0)).stateNode=c,l.return=t,t.child=l,ri=t,ii=null,l=!0):l=!1}l||ci(t)}if(null!==(c=t.memoizedState)&&null!==(c=c.dehydrated))return mh(c)?t.lanes=32:t.lanes=536870912,null;cs(t)}return c=i.children,i=i.fallback,o?(ls(),c=Ws({mode:"hidden",children:c},o=t.mode),i=jr(i,o,n,null),c.return=t,i.return=t,c.sibling=i,t.child=c,(o=t.child).memoizedState=Rs(n),o.childLanes=js(e,r,n),t.memoizedState=Ns,i):(as(t),Hs(t,c))}if(null!==(l=e.memoizedState)&&null!==(c=l.dehydrated)){if(s)256&t.flags?(as(t),t.flags&=-257,t=Ks(e,t,n)):null!==t.memoizedState?(ls(),t.child=e.child,t.flags|=128,t=null):(ls(),o=i.fallback,c=t.mode,i=Ws({mode:"visible",children:i.children},c),(o=jr(o,c,n,null)).flags|=2,i.return=t,o.return=t,i.sibling=o,t.child=i,ns(t,e.child,null,n),(i=t.child).memoizedState=Rs(n),i.childLanes=js(e,r,n),t.memoizedState=Ns,t=o);else if(as(t),mh(c)){if(r=c.nextSibling&&c.nextSibling.dataset)var u=r.dgst;r=u,(i=Error(a(419))).stack="",i.digest=r,gi({value:i,source:null,stack:null}),t=Ks(e,t,n)}else 
if(Ts||Si(e,t,n,!1),r=0!==(n&e.childLanes),Ts||r){if(null!==(r=rc)&&(0!==(i=0!==((i=0!==(42&(i=n&-n))?1:Ee(i))&(r.suspendedLanes|n))?0:i)&&i!==l.retryLane))throw l.retryLane=i,Lr(e,i),Dc(r,e,i),As;"$?"===c.data||Vc(),t=Ks(e,t,n)}else"$?"===c.data?(t.flags|=192,t.child=e.child,t=null):(e=l.treeContext,ii=yh(c.nextSibling),ri=t,oi=!0,ai=null,si=!1,null!==e&&(Yr[Gr++]=Qr,Yr[Gr++]=Zr,Yr[Gr++]=Xr,Qr=e.id,Zr=e.overflow,Xr=t),(t=Hs(t,i.children)).flags|=4096);return t}return o?(ls(),o=i.fallback,c=t.mode,u=(l=e.child).sibling,(i=Ir(l,{mode:"hidden",children:i.children})).subtreeFlags=65011712&l.subtreeFlags,null!==u?o=Ir(u,o):(o=jr(o,c,n,null)).flags|=2,o.return=t,i.return=t,i.sibling=o,t.child=i,i=o,o=t.child,null===(c=e.child.memoizedState)?c=Rs(n):(null!==(l=c.cachePool)?(u=Pi._currentValue,l=l.parent!==u?{parent:u,pool:u}:l):l=Wi(),c={baseLanes:c.baseLanes|n,cachePool:l}),o.memoizedState=c,o.childLanes=js(e,r,n),t.memoizedState=Ns,i):(as(t),e=(n=e.child).sibling,(n=Ir(n,{mode:"visible",children:i.children})).return=t,n.sibling=null,null!==e&&(null===(r=t.deletions)?(t.deletions=[e],t.flags|=16):r.push(e)),t.child=n,t.memoizedState=null,n)}function Hs(e,t){return(t=Ws({mode:"visible",children:t},e.mode)).return=e,e.child=t}function Ws(e,t){return(e=Dr(22,e,null,t)).lanes=0,e.stateNode={_visibility:1,_pendingMarkers:null,_retryCache:null,_transitions:null},e}function Ks(e,t,n){return ns(t,e.child,null,n),(e=Hs(t,t.pendingProps.children)).flags|=2,t.memoizedState=null,e}function Us(e,t,n){e.lanes|=t;var r=e.alternate;null!==r&&(r.lanes|=t),ki(e.return,t,n)}function Vs(e,t,n,r,i){var o=e.memoizedState;null===o?e.memoizedState={isBackwards:t,rendering:null,renderingStartTime:0,last:r,tail:n,tailMode:i}:(o.isBackwards=t,o.rendering=null,o.renderingStartTime=0,o.last=r,o.tail=n,o.tailMode=i)}function Ys(e,t,n){var 
r=t.pendingProps,i=r.revealOrder,o=r.tail;if(Es(e,t,r.children,n),0!==(2&(r=us.current)))r=1&r|2,t.flags|=128;else{if(null!==e&&0!==(128&e.flags))e:for(e=t.child;null!==e;){if(13===e.tag)null!==e.memoizedState&&Us(e,n,t);else if(19===e.tag)Us(e,n,t);else if(null!==e.child){e.child.return=e,e=e.child;continue}if(e===t)break e;for(;null===e.sibling;){if(null===e.return||e.return===t)break e;e=e.return}e.sibling.return=e.return,e=e.sibling}r&=1}switch(j(us,r),i){case"forwards":for(n=t.child,i=null;null!==n;)null!==(e=n.alternate)&&null===hs(e)&&(i=n),n=n.sibling;null===(n=i)?(i=t.child,t.child=null):(i=n.sibling,n.sibling=null),Vs(t,!1,i,n,o);break;case"backwards":for(n=null,i=t.child,t.child=null;null!==i;){if(null!==(e=i.alternate)&&null===hs(e)){t.child=i;break}e=i.sibling,i.sibling=n,n=i,i=e}Vs(t,!0,n,null,o);break;case"together":Vs(t,!1,null,null,void 0);break;default:t.memoizedState=null}return t.child}function Gs(e,t,n){if(null!==e&&(t.dependencies=e.dependencies),fc|=t.lanes,0===(n&t.childLanes)){if(null===e)return null;if(Si(e,t,n,!1),0===(n&t.childLanes))return null}if(null!==e&&t.child!==e.child)throw Error(a(153));if(null!==t.child){for(n=Ir(e=t.child,e.pendingProps),t.child=n,n.return=t;null!==e.sibling;)e=e.sibling,(n=n.sibling=Ir(e,e.pendingProps)).return=t;n.sibling=null}return t.child}function Xs(e,t){return 0!==(e.lanes&t)||!(null===(e=e.dependencies)||!Ci(e))}function Qs(e,t,n){if(null!==e)if(e.memoizedProps!==t.pendingProps)Ts=!0;else{if(!Xs(e,n)&&0===(128&t.flags))return Ts=!1,function(e,t,n){switch(t.tag){case 3:U(t,t.stateNode.containerInfo),vi(0,Pi,e.memoizedState.cache),fi();break;case 27:case 5:Y(t);break;case 4:U(t,t.stateNode.containerInfo);break;case 10:vi(0,t.type,t.memoizedProps.value);break;case 13:var r=t.memoizedState;if(null!==r)return null!==r.dehydrated?(as(t),t.flags|=128,null):0!==(n&t.child.childLanes)?qs(e,t,n):(as(t),null!==(e=Gs(e,t,n))?e.sibling:null);as(t);break;case 19:var 
i=0!==(128&e.flags);if((r=0!==(n&t.childLanes))||(Si(e,t,n,!1),r=0!==(n&t.childLanes)),i){if(r)return Ys(e,t,n);t.flags|=128}if(null!==(i=t.memoizedState)&&(i.rendering=null,i.tail=null,i.lastEffect=null),j(us,us.current),r)break;return null;case 22:case 23:return t.lanes=0,Ps(e,t,n);case 24:vi(0,Pi,e.memoizedState.cache)}return Gs(e,t,n)}(e,t,n);Ts=0!==(131072&e.flags)}else Ts=!1,oi&&0!==(1048576&t.flags)&&ei(t,Vr,t.index);switch(t.lanes=0,t.tag){case 16:e:{e=t.pendingProps;var r=t.elementType,i=r._init;if(r=i(r._payload),t.type=r,"function"!==typeof r){if(void 0!==r&&null!==r){if((i=r.$$typeof)===w){t.tag=11,t=Fs(null,t,r,e,n);break e}if(i===_){t.tag=14,t=Ms(null,t,r,e,n);break e}}throw t=P(r)||r,Error(a(306,t,""))}zr(r)?(e=ms(r,e),t.tag=1,t=zs(null,t,r,e,n)):(t.tag=0,t=Bs(null,t,r,e,n))}return t;case 0:return Bs(e,t,t.type,t.pendingProps,n);case 1:return zs(e,t,r=t.type,i=ms(r,t.pendingProps),n);case 3:e:{if(U(t,t.stateNode.containerInfo),null===e)throw Error(a(387));r=t.pendingProps;var o=t.memoizedState;i=o.element,ro(e,t),uo(t,r,null,n);var s=t.memoizedState;if(r=s.cache,vi(0,Pi,r),r!==o.cache&&wi(t,[Pi],n,!0),co(),r=s.element,o.isDehydrated){if(o={element:r,isDehydrated:!1,cache:s.cache},t.updateQueue.baseState=o,t.memoizedState=o,256&t.flags){t=Is(e,t,r,n);break e}if(r!==i){gi(i=Cr(Error(a(424)),t)),t=Is(e,t,r,n);break e}if(9===(e=t.stateNode.containerInfo).nodeType)e=e.body;else e="HTML"===e.nodeName?e.ownerDocument.body:e;for(ii=yh(e.firstChild),ri=t,oi=!0,ai=null,si=!0,n=rs(t,null,r,n),t.child=n;n;)n.flags=-3&n.flags|4096,n=n.sibling}else{if(fi(),r===i){t=Gs(e,t,n);break e}Es(e,t,r,n)}t=t.child}return t;case 26:return $s(e,t),null===e?(n=Eh(t.type,null,t.pendingProps,null))?t.memoizedState=n:oi||(n=t.type,e=t.pendingProps,(r=rh(W.current).createElement(n))[Pe]=t,r[Oe]=e,eh(r,n,e),Ke(r),t.stateNode=r):t.memoizedState=Eh(t.type,e.memoizedProps,t.pendingProps,e.memoizedState),null;case 27:return 
Y(t),null===e&&oi&&(r=t.stateNode=xh(t.type,t.pendingProps,W.current),ri=t,si=!0,i=ii,fh(t.type)?(bh=i,ii=yh(r.firstChild)):ii=i),Es(e,t,t.pendingProps.children,n),$s(e,t),null===e&&(t.flags|=4194304),t.child;case 5:return null===e&&oi&&((i=r=ii)&&(null!==(r=function(e,t,n,r){for(;1===e.nodeType;){var i=n;if(e.nodeName.toLowerCase()!==t.toLowerCase()){if(!r&&("INPUT"!==e.nodeName||"hidden"!==e.type))break}else if(r){if(!e[Ne])switch(t){case"meta":if(!e.hasAttribute("itemprop"))break;return e;case"link":if("stylesheet"===(o=e.getAttribute("rel"))&&e.hasAttribute("data-precedence"))break;if(o!==i.rel||e.getAttribute("href")!==(null==i.href||""===i.href?null:i.href)||e.getAttribute("crossorigin")!==(null==i.crossOrigin?null:i.crossOrigin)||e.getAttribute("title")!==(null==i.title?null:i.title))break;return e;case"style":if(e.hasAttribute("data-precedence"))break;return e;case"script":if(((o=e.getAttribute("src"))!==(null==i.src?null:i.src)||e.getAttribute("type")!==(null==i.type?null:i.type)||e.getAttribute("crossorigin")!==(null==i.crossOrigin?null:i.crossOrigin))&&o&&e.hasAttribute("async")&&!e.hasAttribute("itemprop"))break;return e;default:return e}}else{if("input"!==t||"hidden"!==e.type)return e;var o=null==i.name?null:""+i.name;if("hidden"===i.type&&e.getAttribute("name")===o)return e}if(null===(e=yh(e.nextSibling)))break}return null}(r,t.type,t.pendingProps,si))?(t.stateNode=r,ri=t,ii=yh(r.firstChild),si=!1,i=!0):i=!1),i||ci(t)),Y(t),i=t.type,o=t.pendingProps,s=null!==e?e.memoizedProps:null,r=o.children,ah(i,o)?r=null:null!==s&&ah(i,s)&&(t.flags|=32),null!==t.memoizedState&&(i=Po(e,t,Bo,null,null,n),Yh._currentValue=i),$s(e,t),Es(e,t,r,n),t.child;case 6:return null===e&&oi&&((e=n=ii)&&(null!==(n=function(e,t,n){if(""===t)return null;for(;3!==e.nodeType;){if((1!==e.nodeType||"INPUT"!==e.nodeName||"hidden"!==e.type)&&!n)return null;if(null===(e=yh(e.nextSibling)))return null}return 
e}(n,t.pendingProps,si))?(t.stateNode=n,ri=t,ii=null,e=!0):e=!1),e||ci(t)),null;case 13:return qs(e,t,n);case 4:return U(t,t.stateNode.containerInfo),r=t.pendingProps,null===e?t.child=ns(t,null,r,n):Es(e,t,r,n),t.child;case 11:return Fs(e,t,t.type,t.pendingProps,n);case 7:return Es(e,t,t.pendingProps,n),t.child;case 8:case 12:return Es(e,t,t.pendingProps.children,n),t.child;case 10:return r=t.pendingProps,vi(0,t.type,r.value),Es(e,t,r.children,n),t.child;case 9:return i=t.type._context,r=t.pendingProps.children,_i(t),r=r(i=Ai(i)),t.flags|=1,Es(e,t,r,n),t.child;case 14:return Ms(e,t,t.type,t.pendingProps,n);case 15:return Ls(e,t,t.type,t.pendingProps,n);case 19:return Ys(e,t,n);case 31:return r=t.pendingProps,n=t.mode,r={mode:r.mode,children:r.children},null===e?((n=Ws(r,n)).ref=t.ref,t.child=n,n.return=t,t=n):((n=Ir(e.child,r)).ref=t.ref,t.child=n,n.return=t,t=n),t;case 22:return Ps(e,t,n);case 24:return _i(t),r=Ai(Pi),null===e?(null===(i=qi())&&(i=rc,o=Oi(),i.pooledCache=o,o.refCount++,null!==o&&(i.pooledCacheLanes|=n),i=o),t.memoizedState={parent:r,cache:i},no(t),vi(0,Pi,i)):(0!==(e.lanes&n)&&(ro(e,t),uo(t,null,null,n),co()),i=e.memoizedState,o=t.memoizedState,i.parent!==r?(i={parent:r,cache:r},t.memoizedState=i,0===t.lanes&&(t.memoizedState=t.updateQueue.baseState=i),vi(0,Pi,r)):(r=o.cache,vi(0,Pi,r),r!==i.cache&&wi(t,[Pi],n,!0))),Es(e,t,t.pendingProps.children,n),t.child;case 29:throw t.pendingProps}throw Error(a(156,t.tag))}function Zs(e){e.flags|=4}function Js(e,t){if("stylesheet"!==t.type||0!==(4&t.state.loading))e.flags&=-16777217;else if(e.flags|=16777216,!jh(t)){if(null!==(t=is.current)&&((4194048&oc)===oc?null!==os:(62914560&oc)!==oc&&0===(536870912&oc)||t!==os))throw Zi=Yi,Ui;e.flags|=8192}}function el(e,t){null!==t&&(e.flags|=4),16384&e.flags&&(t=22!==e.tag?Se():536870912,e.lanes|=t,yc|=t)}function tl(e,t){if(!oi)switch(e.tailMode){case"hidden":t=e.tail;for(var 
n=null;null!==t;)null!==t.alternate&&(n=t),t=t.sibling;null===n?e.tail=null:n.sibling=null;break;case"collapsed":n=e.tail;for(var r=null;null!==n;)null!==n.alternate&&(r=n),n=n.sibling;null===r?t||null===e.tail?e.tail=null:e.tail.sibling=null:r.sibling=null}}function nl(e){var t=null!==e.alternate&&e.alternate.child===e.child,n=0,r=0;if(t)for(var i=e.child;null!==i;)n|=i.lanes|i.childLanes,r|=65011712&i.subtreeFlags,r|=65011712&i.flags,i.return=e,i=i.sibling;else for(i=e.child;null!==i;)n|=i.lanes|i.childLanes,r|=i.subtreeFlags,r|=i.flags,i.return=e,i=i.sibling;return e.subtreeFlags|=r,e.childLanes=n,t}function rl(e,t,n){var r=t.pendingProps;switch(ni(t),t.tag){case 31:case 16:case 15:case 0:case 11:case 7:case 8:case 12:case 9:case 14:case 1:return nl(t),null;case 3:return n=t.stateNode,r=null,null!==e&&(r=e.memoizedState.cache),t.memoizedState.cache!==r&&(t.flags|=2048),xi(Pi),V(),n.pendingContext&&(n.context=n.pendingContext,n.pendingContext=null),null!==e&&null!==e.child||(di(t)?Zs(t):null===e||e.memoizedState.isDehydrated&&0===(256&t.flags)||(t.flags|=1024,pi())),nl(t),null;case 26:return n=t.memoizedState,null===e?(Zs(t),null!==n?(nl(t),Js(t,n)):(nl(t),t.flags&=-16777217)):n?n!==e.memoizedState?(Zs(t),nl(t),Js(t,n)):(nl(t),t.flags&=-16777217):(e.memoizedProps!==r&&Zs(t),nl(t),t.flags&=-16777217),null;case 27:G(t),n=W.current;var i=t.type;if(null!==e&&null!=t.stateNode)e.memoizedProps!==r&&Zs(t);else{if(!r){if(null===t.stateNode)throw Error(a(166));return nl(t),null}e=q.current,di(t)?ui(t):(e=xh(i,r,n),t.stateNode=e,Zs(t))}return nl(t),null;case 5:if(G(t),n=t.type,null!==e&&null!=t.stateNode)e.memoizedProps!==r&&Zs(t);else{if(!r){if(null===t.stateNode)throw Error(a(166));return nl(t),null}if(e=q.current,di(t))ui(t);else{switch(i=rh(W.current),e){case 1:e=i.createElementNS("http://www.w3.org/2000/svg",n);break;case 
2:e=i.createElementNS("http://www.w3.org/1998/Math/MathML",n);break;default:switch(n){case"svg":e=i.createElementNS("http://www.w3.org/2000/svg",n);break;case"math":e=i.createElementNS("http://www.w3.org/1998/Math/MathML",n);break;case"script":(e=i.createElement("div")).innerHTML="\n * ^\n * ```\n *\n * @type {State}\n */\n function continuationRawTagOpen(code) {\n if (code === 47) {\n effects.consume(code);\n buffer = '';\n return continuationRawEndTag;\n }\n return continuation(code);\n }\n\n /**\n * In raw continuation, after ` | \n * ^^^^^^\n * ```\n *\n * @type {State}\n */\n function continuationRawEndTag(code) {\n if (code === 62) {\n const name = buffer.toLowerCase();\n if (htmlRawNames.includes(name)) {\n effects.consume(code);\n return continuationClose;\n }\n return continuation(code);\n }\n if (asciiAlpha(code) && buffer.length < 8) {\n // Always the case.\n effects.consume(code);\n buffer += String.fromCharCode(code);\n return continuationRawEndTag;\n }\n return continuation(code);\n }\n\n /**\n * In cdata continuation, after `]`, expecting `]>`.\n *\n * ```markdown\n * > | &<]]>\n * ^\n * ```\n *\n * @type {State}\n */\n function continuationCdataInside(code) {\n if (code === 93) {\n effects.consume(code);\n return continuationDeclarationInside;\n }\n return continuation(code);\n }\n\n /**\n * In declaration or instruction continuation, at `>`.\n *\n * ```markdown\n * > | \n * ^\n * > | \n * ^\n * > | \n * ^\n * > | \n * ^\n * > | &<]]>\n * ^\n * ```\n *\n * @type {State}\n */\n function continuationDeclarationInside(code) {\n if (code === 62) {\n effects.consume(code);\n return continuationClose;\n }\n\n // More dashes.\n if (code === 45 && marker === 2) {\n effects.consume(code);\n return continuationDeclarationInside;\n }\n return continuation(code);\n }\n\n /**\n * In closed continuation: everything we get until the eol/eof is part of it.\n *\n * ```markdown\n * > | \n * ^\n * ```\n *\n * @type {State}\n */\n function continuationClose(code) {\n 
if (code === null || markdownLineEnding(code)) {\n effects.exit(\"htmlFlowData\");\n return continuationAfter(code);\n }\n effects.consume(code);\n return continuationClose;\n }\n\n /**\n * Done.\n *\n * ```markdown\n * > | \n * ^\n * ```\n *\n * @type {State}\n */\n function continuationAfter(code) {\n effects.exit(\"htmlFlow\");\n // // Feel free to interrupt.\n // tokenizer.interrupt = false\n // // No longer concrete.\n // tokenizer.concrete = false\n return ok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeNonLazyContinuationStart(effects, ok, nok) {\n const self = this;\n return start;\n\n /**\n * At eol, before continuation.\n *\n * ```markdown\n * > | * ```js\n * ^\n * | b\n * ```\n *\n * @type {State}\n */\n function start(code) {\n if (markdownLineEnding(code)) {\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return after;\n }\n return nok(code);\n }\n\n /**\n * A continuation.\n *\n * ```markdown\n * | * ```js\n * > | b\n * ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n return self.parser.lazy[self.now().line] ? nok(code) : ok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeBlankLineBefore(effects, ok, nok) {\n return start;\n\n /**\n * Before eol, expecting blank line.\n *\n * ```markdown\n * > | \r\n ) : (\r\n
    \r\n \r\n
    \r\n )}\r\n \r\n {connectionStatusMessage}\r\n \r\n {isWebSocketConnected ? 'Send' : 'Reconnecting...'}\r\n \r\n \r\n \r\n \r\n \r\n );\r\n});\r\n\r\nexport default InputArea;","import React, {useEffect, useState} from 'react';\r\n import {useDispatch, useSelector} from 'react-redux';\r\n import styled from 'styled-components';\r\n import {fetchAppConfig} from '../services/appConfig';\r\n import {isArchive} from '../utils/constants';\r\n import {logger} from '../utils/logger';\r\n import {useWebSocket} from '../hooks/useWebSocket';\r\n import {addMessage} from '../store/slices/messageSlice';\r\n import MessageList from './MessageList';\r\n import InputArea from './InputArea';\r\n import Spinner from './common/Spinner';\r\n import {Message, MessageType} from '../types/messages';\r\n import {WebSocketService} from '../services/websocket';\r\n import {RootState} from '../store';\r\n const LOG_PREFIX = '[ChatInterface]';\r\n interface WebSocketMessage {\r\n data: string;\r\n isHtml: boolean;\r\n timestamp: number;\r\n }\r\n interface ChatInterfaceProps {\r\n sessionId?: string;\r\n websocket: WebSocketService;\r\n isConnected: boolean;\r\n }\r\n const ChatContainer = styled.div`\r\n display: flex;\r\n flex-direction: column;\r\n height: 100vh;\r\n /* Add test id */\r\n &[data-testid] {\r\n outline: none;\r\n }\r\n `;\r\n const ChatInterface: React.FC = ({\r\n sessionId: propSessionId,\r\n websocket,\r\n isConnected,\r\n }) => {\r\n const DEBUG = process.env.NODE_ENV === 'development';\r\n const debugLog = (message: string, data?: any) => {\r\n logger.debug(`${LOG_PREFIX} ${message}`, data);\r\n };\r\n const [messages, setMessages] = useState([]);\r\n const [sessionId] = useState(() => propSessionId || window.location.hash.slice(1) || 'new');\r\n const dispatch = useDispatch();\r\n const ws = useWebSocket(sessionId);\r\n const appConfig = useSelector((state: RootState) => state.config);\r\n useEffect(() => {\r\n\r\n if (isArchive) return;\r\n let mounted = true;\r\n 
const loadAppConfig = async () => {\r\n if (!sessionId) return;\r\n try {\r\n\r\n const config = await fetchAppConfig(sessionId);\r\n if (mounted && config) {\r\n console.info(`${LOG_PREFIX} App config loaded successfully`, config);\r\n } else {\r\n if (mounted) {\r\n console.warn(`${LOG_PREFIX} Could not load app config, using defaults`);\r\n }\r\n }\r\n } catch (error) {\r\n if (mounted) {\r\n console.error(`${LOG_PREFIX} Failed to fetch app config:`, error);\r\n }\r\n }\r\n };\r\n loadAppConfig();\r\n return () => {\r\n mounted = false;\r\n };\r\n }, [sessionId]);\r\n\r\n useEffect(() => {\r\n\r\n if (isArchive) return;\r\n\r\n let isComponentMounted = true;\r\n const handleMessage = (data: WebSocketMessage) => {\r\n if (!isComponentMounted) return;\r\n if (data.isHtml) {\r\n const newMessage = {\r\n id: `${Date.now()}`,\r\n content: data.data || '',\r\n type: 'assistant' as MessageType,\r\n\r\n timestamp: data.timestamp,\r\n isHtml: true,\r\n rawHtml: data.data,\r\n version: data.timestamp,\r\n sanitized: false\r\n };\r\n if (isComponentMounted) {\r\n setMessages(prev => [...prev, newMessage]);\r\n }\r\n dispatch(addMessage(newMessage));\r\n return;\r\n }\r\n\r\n if (!data.data || typeof data.data !== 'string') {\r\n return;\r\n }\r\n\r\n if (data.data.includes('\"type\":\"connect\"')) {\r\n return;\r\n }\r\n\r\n const firstCommaIndex = data.data.indexOf(',');\r\n const secondCommaIndex = firstCommaIndex > -1 ? 
data.data.indexOf(',', firstCommaIndex + 1) : -1;\r\n if (firstCommaIndex === -1 || secondCommaIndex === -1) {\r\n console.error(`${LOG_PREFIX} Invalid message format received:`, data.data);\r\n return;\r\n }\r\n const id = data.data.substring(0, firstCommaIndex);\r\n const version = data.data.substring(firstCommaIndex + 1, secondCommaIndex);\r\n const content = data.data.substring(secondCommaIndex + 1);\r\n const timestamp = Date.now();\r\n const messageObject = {\r\n id: `${id}-${timestamp}`,\r\n content: content,\r\n version: parseInt(version, 10) || timestamp,\r\n type: id.startsWith('u') ? 'user' : id.startsWith('s') ? 'system' : 'assistant' as MessageType,\r\n timestamp,\r\n isHtml: false,\r\n rawHtml: null,\r\n sanitized: false\r\n };\r\n dispatch(addMessage(messageObject));\r\n };\r\n websocket.addMessageHandler(handleMessage);\r\n return () => {\r\n isComponentMounted = false;\r\n websocket.removeMessageHandler(handleMessage);\r\n };\r\n }, [DEBUG, dispatch, isConnected, sessionId, websocket, ws.readyState]);\r\n const handleSendMessage = (msg: string) => {\r\n console.info(`${LOG_PREFIX} Sending message - length: ${msg.length}`, {\r\n sessionId,\r\n isConnected\r\n });\r\n ws.send(msg);\r\n };\r\n return isArchive ? (\r\n \r\n \r\n {!isConnected && (\r\n
    \r\n \r\n Connecting...\r\n
    \r\n )}\r\n
    \r\n ) : (\r\n \r\n \r\n \r\n \r\n );\r\n };\r\n export default ChatInterface;","// Import and re-export ThemeName type\r\n\r\nimport type {BaseTheme, ColorThemeName, LayoutTheme, LayoutThemeName} from '../types/theme';\r\n\r\nexport type { ColorThemeName, LayoutThemeName };\r\n\r\nconst themeLogger = {\r\n styles: {\r\n theme: 'color: #4CAF50; font-weight: bold',\r\n action: 'color: #2196F3; font-weight: bold',\r\n },\r\n log(action: string, themeName: string) {\r\n console.groupCollapsed(\r\n `%cTheme %c${action} %c${themeName}`,\r\n this.styles.theme,\r\n this.styles.action,\r\n this.styles.theme\r\n );\r\n console.groupEnd();\r\n }\r\n};\r\n\r\ninterface ThemeSizing {\r\n spacing: {\r\n xs: string;\r\n sm: string;\r\n md: string;\r\n lg: string;\r\n xl: string;\r\n };\r\n borderRadius: {\r\n sm: string;\r\n md: string;\r\n lg: string;\r\n };\r\n console: {\r\n minHeight: string;\r\n maxHeight: string;\r\n padding: string;\r\n };\r\n}\r\n\r\ninterface ThemeTypography {\r\n fontFamily: string;\r\n fontSize: {\r\n xs: string;\r\n sm: string;\r\n md: string;\r\n lg: string;\r\n xl: string;\r\n };\r\n fontWeight: {\r\n regular: number;\r\n medium: number;\r\n bold: number;\r\n };\r\n console: {\r\n fontFamily: string;\r\n fontSize: string;\r\n lineHeight: string;\r\n };\r\n}\r\n\r\ntype ExtendedTheme = BaseTheme;\r\n\r\nconst baseTheme: Omit = {\r\n _init() {\r\n themeLogger.log('initialized', 'base');\r\n },\r\n shadows: {\r\n small: '0 1px 3px rgba(0, 0, 0, 0.12)',\r\n medium: '0 4px 6px rgba(0, 0, 0, 0.15)',\r\n large: '0 10px 20px rgba(0, 0, 0, 0.20)'\r\n },\r\n transitions: {\r\n default: '0.3s ease',\r\n fast: '0.15s ease',\r\n slow: '0.5s ease'\r\n },\r\n config: {\r\n stickyInput: true,\r\n inputCnt: 0\r\n },\r\n logging: {\r\n colors: {\r\n error: '#FF3B30',\r\n warning: '#FF9500',\r\n info: '#007AFF',\r\n debug: '#5856D6',\r\n success: '#34C759',\r\n trace: '#8E8E93',\r\n verbose: '#C7C7CC',\r\n system: '#48484A',\r\n critical: '#FF3B30'\r\n },\r\n 
fontSize: {\r\n normal: '0.9rem',\r\n large: '1.1rem',\r\n small: '0.8rem',\r\n system: '0.85rem',\r\n critical: '1.2rem'\r\n },\r\n padding: {\r\n message: '0.5rem',\r\n container: '1rem',\r\n timestamp: '0.25rem'\r\n },\r\n background: {\r\n error: '#FFE5E5',\r\n warning: '#FFF3E0',\r\n info: '#E3F2FD',\r\n debug: '#F3E5F5',\r\n success: '#E8F5E9',\r\n system: '#FAFAFA',\r\n critical: '#FFEBEE'\r\n },\r\n border: {\r\n radius: '4px',\r\n style: 'solid',\r\n width: '1px'\r\n },\r\n timestamp: {\r\n format: 'HH:mm:ss',\r\n color: '#8E8E93',\r\n show: true\r\n },\r\n display: {\r\n maxLines: 0,\r\n }\r\n },\r\n sizing: {\r\n spacing: {\r\n xs: '0.25rem',\r\n sm: '0.5rem',\r\n md: '1rem',\r\n lg: '1.5rem',\r\n xl: '2rem',\r\n },\r\n borderRadius: {\r\n sm: '0.25rem',\r\n md: '0.5rem',\r\n lg: '1rem',\r\n },\r\n console: {\r\n minHeight: '200px',\r\n maxHeight: '500px',\r\n padding: '1rem',\r\n },\r\n },\r\n typography: {\r\n fontFamily: \"'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif\",\r\n families: {\r\n primary: \"'Outfit', system-ui, -apple-system, BlinkMacSystemFont, sans-serif\",\r\n heading: \"'Space Grotesk', system-ui, sans-serif\",\r\n secondary: \"system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif\", // Example secondary\r\n mono: \"'IBM Plex Mono', 'Fira Code', monospace\",\r\n display: \"'Syne', system-ui, sans-serif\",\r\n },\r\n monoFontFamily: \"'Fira Code', 'Consolas', monospace\",\r\n fontSize: {\r\n '2xl': '1.75rem', // clamp(2.5rem, 5vw, 3.5rem) - Adjusted base for 2xl\r\n xs: '0.75rem',\r\n\r\n sm: '0.875rem',\r\n\r\n md: '1rem',\r\n\r\n lg: '1.125rem',\r\n\r\n xl: '1.25rem',\r\n\r\n },\r\n fontWeight: {\r\n light: 300,\r\n regular: 400,\r\n medium: 500,\r\n semibold: 600,\r\n bold: 700,\r\n extrabold: 800,\r\n },\r\n lineHeight: {\r\n tight: '1.15',\r\n normal: '1.65',\r\n relaxed: '1.85',\r\n },\r\n letterSpacing: {\r\n tight: 
'-0.04em',\r\n normal: '-0.02em',\r\n wide: '0.04em',\r\n wider: '0.08em',\r\n },\r\n console: {\r\n fontFamily: \"'Fira Code', Consolas, Monaco, 'Courier New', monospace\",\r\n fontSize: '0.9rem',\r\n lineHeight: '1.6',\r\n },\r\n },\r\n};\r\n\r\nexport const mainTheme: BaseTheme = {\r\n name: 'main' as ColorThemeName,\r\n colors: {\r\n primary: '#007AFF',\r\n secondary: '#5856D6',\r\n background: '#FFFFFF',\r\n surface: '#F2F2F7',\r\n text: {\r\n primary: '#000000',\r\n secondary: '#6E6E73',\r\n },\r\n border: '#C6C6C8',\r\n error: '#FF3B30',\r\n success: '#34C759',\r\n warning: '#FF9500',\r\n info: '#007AFF', // Adjusted to be same as primary for this theme\r\n primaryDark: '#0056b3',\r\n secondaryDark: '#4240aa', // Darker purple\r\n errorDark: '#D9362B', // Darker red\r\n successDark: '#28A745', // Darker green\r\n critical: '#FF3B30',\r\n\r\n disabled: '#E5E5EA',\r\n\r\n\r\n hover: '#0056b3', // Using primaryDark for hover\r\n\r\n },\r\n ...baseTheme,\r\n};\r\n\r\nexport const nightTheme: ExtendedTheme = {\r\n name: 'night' as ColorThemeName,\r\n colors: {\r\n primary: '#0A84FF',\r\n secondary: '#5E5CE6',\r\n background: '#000000',\r\n surface: '#1C1C1E',\r\n text: {\r\n primary: '#FFFFFF',\r\n secondary: '#98989F',\r\n },\r\n border: '#38383A',\r\n error: '#FF453A',\r\n success: '#32D74B',\r\n warning: '#FF9F0A',\r\n info: '#5E5CE6',\r\n primaryDark: '#0063cc', // Darker blue\r\n secondaryDark: '#4b49b8', // Darker purple\r\n errorDark: '#E53E30', // Darker red\r\n successDark: '#27C13F', // Darker green\r\n critical: '#FF453A',\r\n\r\n disabled: '#2C2C2E',\r\n hover: '#0063cc',\r\n\r\n },\r\n ...baseTheme,\r\n};\r\n\r\nexport const forestTheme: ExtendedTheme = {\r\n name: 'forest' as ColorThemeName,\r\n colors: {\r\n primary: '#2D6A4F',\r\n secondary: '#40916C',\r\n background: '#081C15',\r\n surface: '#1B4332',\r\n text: {\r\n primary: '#D8F3DC',\r\n secondary: '#95D5B2',\r\n },\r\n border: '#2D6A4F',\r\n error: '#D62828',\r\n success: '#52B788',\r\n 
warning: '#F77F00',\r\n info: '#4895EF',\r\n primaryDark: '#1E4D38', // Darker green\r\n secondaryDark: '#2F6D50', // Darker secondary green\r\n errorDark: '#B82323', // Darker red\r\n successDark: '#3E8E6A', // Darker success green\r\n critical: '#D62828',\r\n\r\n disabled: '#2D3B35',\r\n hover: '#1E4D38',\r\n\r\n },\r\n ...baseTheme,\r\n};\r\n\r\nexport const ponyTheme: ExtendedTheme = {\r\n name: 'pony' as ColorThemeName,\r\n colors: {\r\n primary: '#FF69B4',\r\n secondary: '#FFB6C1',\r\n background: '#FFF0F5',\r\n surface: '#FFE4E1',\r\n text: {\r\n primary: '#DB7093',\r\n secondary: '#C71585',\r\n },\r\n border: '#FFB6C1',\r\n error: '#FF1493',\r\n success: '#FF69B4',\r\n warning: '#FFB6C1',\r\n info: '#DB7093',\r\n primaryDark: '#E55EA4', // Darker pink\r\n secondaryDark: '#E5A0AD', // Darker light pink\r\n errorDark: '#D9127F', // Darker deep pink\r\n successDark: '#E55EA4', // Darker pink (same as primaryDark for this theme)\r\n critical: '#FF1493',\r\n\r\n disabled: '#F8E1E7',\r\n hover: '#E55EA4',\r\n\r\n },\r\n ...baseTheme,\r\n};\r\n\r\nexport const alienTheme: ExtendedTheme = {\r\n name: 'alien' as ColorThemeName,\r\n colors: {\r\n primary: '#39FF14',\r\n secondary: '#00FF00',\r\n background: '#0A0A0A',\r\n surface: '#1A1A1A',\r\n text: {\r\n primary: '#39FF14',\r\n secondary: '#00FF00',\r\n },\r\n border: '#008000',\r\n error: '#FF0000',\r\n success: '#39FF14',\r\n warning: '#FFFF00',\r\n info: '#00FFFF',\r\n primaryDark: '#2ECF0F', // Darker green\r\n secondaryDark: '#00CF00', // Darker bright green\r\n errorDark: '#CF0000', // Darker red\r\n successDark: '#2ECF0F', // Darker success green\r\n critical: '#FF0000',\r\n\r\n disabled: '#1C1C1C',\r\n hover: '#2ECF0F',\r\n\r\n },\r\n ...baseTheme,\r\n};\r\n\r\nexport const themes = {\r\n default: {\r\n ...mainTheme,\r\n name: 'default' as ColorThemeName,\r\n colors: {\r\n ...mainTheme.colors,\r\n }\r\n },\r\n main: mainTheme,\r\n night: nightTheme,\r\n forest: forestTheme,\r\n pony: ponyTheme,\r\n alien: 
alienTheme,\r\n // New themes will be added below\r\n synthwave: {} as ExtendedTheme, // Placeholder\r\n paper: {} as ExtendedTheme, // Placeholder\r\n sunset: {\r\n name: 'sunset' as ColorThemeName,\r\n colors: {\r\n primary: '#FF6B6B',\r\n secondary: '#FFA07A',\r\n background: '#2C3E50',\r\n surface: '#34495E',\r\n text: {\r\n primary: '#ECF0F1',\r\n secondary: '#BDC3C7',\r\n },\r\n border: '#95A5A6',\r\n error: '#E74C3C',\r\n success: '#2ECC71',\r\n warning: '#F1C40F',\r\n info: '#3498DB',\r\n primaryDark: '#D65B5B', // Darker red\r\n secondaryDark: '#E08A6A', // Darker light red\r\n errorDark: '#C0392B', // Darker error red\r\n successDark: '#27AE60', // Darker green\r\n disabled: '#7F8C8D',\r\n critical: '#E74C3C',\r\n hover: '#D65B5B',\r\n\r\n },\r\n ...baseTheme,\r\n },\r\n ocean: {\r\n name: 'ocean' as ColorThemeName,\r\n colors: {\r\n primary: '#00B4D8',\r\n secondary: '#48CAE4',\r\n background: '#03045E',\r\n surface: '#023E8A',\r\n text: {\r\n primary: '#CAF0F8',\r\n secondary: '#90E0EF',\r\n },\r\n border: '#0077B6',\r\n error: '#FF6B6B',\r\n success: '#2ECC71',\r\n warning: '#FFB703',\r\n info: '#48CAE4',\r\n primaryDark: '#0093C0', // Darker blue\r\n secondaryDark: '#3EAFC7', // Darker light blue\r\n errorDark: '#D65B5B', // Darker red\r\n successDark: '#27AE60', // Darker green\r\n disabled: '#415A77',\r\n hover: '#0077B6',\r\n critical: '#FF6B6B',\r\n\r\n },\r\n ...baseTheme,\r\n },\r\n cyberpunk: {\r\n name: 'cyberpunk' as ColorThemeName,\r\n colors: {\r\n primary: '#FF00FF',\r\n secondary: '#00FFFF',\r\n background: '#0D0221',\r\n surface: '#1A1A2E',\r\n text: {\r\n primary: '#FF00FF',\r\n secondary: '#00FFFF',\r\n },\r\n border: '#FF00FF',\r\n error: '#FF0000',\r\n success: '#00FF00',\r\n warning: '#FFD700',\r\n info: '#00FFFF',\r\n primaryDark: '#D100D1', // Darker magenta\r\n secondaryDark: '#00D1D1', // Darker cyan\r\n errorDark: '#D10000', // Darker red\r\n successDark: '#00D100', // Darker green\r\n disabled: '#4A4A4A',\r\n hover: 
'#FF69B4',\r\n critical: '#FF0000',\r\n\r\n },\r\n ...baseTheme,\r\n },\r\n};\r\n// Add new themes to the export\r\nthemes.synthwave = {\r\n name: 'synthwave' as ColorThemeName,\r\n colors: {\r\n primary: '#FF00FF', // Magenta\r\n secondary: '#00FFFF', // Cyan\r\n background: '#1A1A2E', // Dark Indigo\r\n surface: '#2A2A3E', // Slightly Lighter Indigo\r\n text: {\r\n primary: '#00FFFF', // Cyan\r\n secondary: '#FF00FF', // Magenta\r\n },\r\n border: '#FF00FF', // Magenta\r\n error: '#FF3366', // Hot Pink\r\n success: '#00FF7F', // Spring Green\r\n warning: '#FFFF66', // Canary Yellow\r\n info: '#3399FF', // Bright Blue\r\n primaryDark: '#CC00CC',\r\n secondaryDark: '#00CCCC',\r\n errorDark: '#D92B58',\r\n successDark: '#00CC66',\r\n critical: '#FF3366',\r\n disabled: '#4A4A5E',\r\n hover: '#CC00CC',\r\n },\r\n ...baseTheme,\r\n};\r\nthemes.paper = {\r\n name: 'paper' as ColorThemeName,\r\n colors: {\r\n primary: '#5D737E', // Desaturated Blue/Grey\r\n secondary: '#8C7A6B', // Muted Brown\r\n background: '#FDFBF7', // Off-white, parchment like\r\n surface: '#F5F2EB', // Slightly darker off-white\r\n text: {\r\n primary: '#4A4A4A', // Dark Grey\r\n secondary: '#7B7B7B', // Medium Grey\r\n },\r\n border: '#DCDCDC', // Light Grey\r\n error: '#C94E4E', // Muted Red\r\n success: '#6A994E', // Muted Green\r\n warning: '#D4A26A', // Muted Orange\r\n info: '#7E9CB9', // Muted Blue\r\n primaryDark: '#4A5C66',\r\n secondaryDark: '#706053',\r\n errorDark: '#A84040',\r\n successDark: '#537A3E',\r\n critical: '#C94E4E',\r\n disabled: '#E0E0E0',\r\n hover: '#4A5C66',\r\n },\r\n ...baseTheme,\r\n};\r\n\r\n\r\nexport const defaultLayoutTheme: LayoutTheme = {\r\n name: 'default',\r\n // Base layout settings inherited from baseTheme\r\n sizing: baseTheme.sizing,\r\n typography: baseTheme.typography,\r\n};\r\n\r\nexport const compactLayoutTheme: LayoutTheme = {\r\n name: 'compact',\r\n // Inherit base sizing and typography, then override for compactness\r\n sizing: {\r\n 
...baseTheme.sizing,\r\n spacing: {\r\n xs: '0.125rem',\r\n sm: '0.25rem',\r\n md: '0.5rem',\r\n lg: '1rem',\r\n xl: '1.5rem',\r\n },\r\n },\r\n typography: {\r\n ...baseTheme.typography,\r\n fontSize: {\r\n xs: '0.65rem',\r\n sm: '0.75rem',\r\n md: '0.875rem',\r\n lg: '1rem',\r\n xl: '1.125rem',\r\n '2xl': '1.5rem',\r\n },\r\n lineHeight: {\r\n tight: '1.1',\r\n normal: '1.5',\r\n relaxed: '1.7',\r\n }\r\n }\r\n};\r\n\r\nexport const spaciousLayoutTheme: LayoutTheme = {\r\n name: 'spacious',\r\n sizing: {\r\n ...baseTheme.sizing,\r\n spacing: {\r\n xs: '0.5rem',\r\n sm: '0.75rem',\r\n md: '1.25rem',\r\n lg: '2rem',\r\n xl: '2.5rem',\r\n },\r\n },\r\n typography: {\r\n ...baseTheme.typography,\r\n fontSize: {\r\n xs: '0.875rem',\r\n sm: '1rem',\r\n md: '1.125rem',\r\n lg: '1.375rem',\r\n xl: '1.625rem',\r\n '2xl': '2rem',\r\n },\r\n }\r\n};\r\nexport const ultraCompactLayoutTheme: LayoutTheme = {\r\n name: 'ultra-compact',\r\n sizing: {\r\n ...baseTheme.sizing,\r\n spacing: {\r\n xs: '0.0625rem', // 1px\r\n sm: '0.125rem', // 2px\r\n md: '0.25rem', // 4px\r\n lg: '0.5rem', // 8px\r\n xl: '0.75rem', // 12px\r\n },\r\n },\r\n typography: {\r\n ...baseTheme.typography,\r\n fontSize: {\r\n xs: '0.6rem',\r\n sm: '0.7rem',\r\n md: '0.8rem',\r\n lg: '0.9rem',\r\n xl: '1rem',\r\n '2xl': '1.25rem',\r\n },\r\n lineHeight: {\r\n tight: '1.0',\r\n normal: '1.3',\r\n relaxed: '1.5',\r\n }\r\n }\r\n};\r\nexport const contentFocusedLayoutTheme: LayoutTheme = {\r\n name: 'content-focused',\r\n sizing: {\r\n ...baseTheme.sizing,\r\n spacing: { // Slightly more generous than default for readability\r\n xs: '0.3rem',\r\n sm: '0.6rem',\r\n md: '1.1rem',\r\n lg: '1.6rem',\r\n xl: '2.2rem',\r\n },\r\n console: {\r\n ...baseTheme.sizing.console,\r\n maxHeight: '600px', // Allow more console content\r\n }\r\n },\r\n typography: {\r\n ...baseTheme.typography,\r\n fontSize: { // Slightly larger base for readability\r\n xs: '0.8rem',\r\n sm: '0.9rem',\r\n md: '1.05rem',\r\n lg: '1.2rem',\r\n 
xl: '1.35rem',\r\n '2xl': '1.85rem',\r\n },\r\n lineHeight: { // More generous line height for readability\r\n tight: '1.2',\r\n normal: '1.7',\r\n relaxed: '1.9',\r\n }\r\n }\r\n};\r\n\r\n\r\nexport const layoutThemes: Record = {\r\n default: defaultLayoutTheme,\r\n compact: compactLayoutTheme,\r\n spacious: spaciousLayoutTheme,\r\n 'ultra-compact': ultraCompactLayoutTheme,\r\n 'content-focused': contentFocusedLayoutTheme,\r\n};\r\n\r\n\r\nexport const logThemeChange = (from: ColorThemeName, to: ColorThemeName) => {\r\n themeLogger.log('changed', `${from} → ${to}`);\r\n};","import type {DefaultTheme} from 'styled-components';\nimport {createGlobalStyle} from 'styled-components';\n\nconst logStyleChange = (component: string, property: string, value: any) => {\n\n if (process.env.NODE_ENV !== 'development') {\n return;\n }\n const timestamp = new Date().toISOString();\n const criticalEvents = [\n 'theme-transition',\n 'theme-change',\n 'font-load',\n 'style-init',\n 'accessibility-violation'\n ];\n\n if (criticalEvents.some(event => property.includes(event))) {\n console.log(`[${timestamp}] GlobalStyles: ${component} - ${property}:`, value);\n }\n};\n\nlogStyleChange('GlobalStyles', 'style-init', 'Styles initialized');\n\nexport const GlobalStyles = createGlobalStyle<{ theme: DefaultTheme; }>`\n /* Improved scrollbar styling */\n ::-webkit-scrollbar {\n width: 10px;\n }\n\n ::-webkit-scrollbar-track {\n background: ${({theme}) => theme.colors.background};\n border-radius: 4px;\n }\n\n ::-webkit-scrollbar-thumb {\n background: ${({theme}) => theme.colors.primary + '40'};\n border-radius: 4px;\n border: 2px solid ${({theme}) => theme.colors.background};\n\n &:hover {\n background: ${({theme}) => theme.colors.primary + '60'};\n }\n }\n\n :root {\n /* Fallback Theme variables - these will be overridden by ThemeProvider */\n /* Color related fallbacks (can be minimal as ThemeProvider sets them) */\n /* Font weights */\n --font-weight-light: 300; /* Fallback */\n 
--font-weight-regular: 400; /* Fallback */\n --font-weight-medium: 500; /* Fallback */\n --font-weight-semibold: 600; /* Fallback */\n --font-weight-bold: 700; /* Fallback */\n --font-weight-extrabold: 800; /* Fallback */\n\n /* Font families */\n --font-primary: 'Outfit', system-ui, -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, \"Helvetica Neue\", Arial, sans-serif; /* Fallback to match baseTheme */\n --font-heading: 'Space Grotesk', system-ui, sans-serif; /* Fallback to match baseTheme */\n --font-mono: 'IBM Plex Mono', 'Fira Code', monospace; /* Fallback to match baseTheme */\n --font-display: 'Syne', system-ui, sans-serif; /* Fallback to match baseTheme */\n\n /* Font sizes */\n --font-size-xs: 0.75rem; /* Fallback */\n --font-size-sm: 0.875rem; /* Fallback */\n --font-size-md: 1rem; /* Fallback */\n --font-size-lg: 1.125rem; /* Fallback */\n --font-size-xl: 1.25rem; /* Fallback */\n --font-size-2xl: 1.5rem; /* Fallback */\n\n /* Line heights */\n --line-height-tight: 1.2; /* Fallback */\n --line-height-normal: 1.6; /* Fallback */\n --line-height-relaxed: 1.8; /* Fallback */\n\n /* Letter spacing */\n --letter-spacing-tight: -0.02em; /* Fallback */\n --letter-spacing-normal: normal; /* Fallback */\n --letter-spacing-wide: 0.02em; /* Fallback */\n --letter-spacing-wider: 0.04em; /* Fallback */\n\n /* Sizing */\n --spacing-xs: 0.25rem; /* Fallback */\n --spacing-sm: 0.5rem; /* Fallback */\n --spacing-md: 1rem; /* Fallback */\n --spacing-lg: 1.5rem; /* Fallback */\n --spacing-xl: 2rem; /* Fallback */\n --border-radius-sm: 0.25rem; /* Fallback */\n --border-radius-md: 0.5rem; /* Fallback */\n --border-radius-lg: 1rem; /* Fallback */\n }\n /*\n The :root variables above serve as fallbacks.\n ThemeProvider.tsx will inject a ")},this.getStyleTags=function(){if(e.sealed)throw Oa(2);return e._emitSheetCSS()},this.getStyleElement=function(){var t;if(e.sealed)throw Oa(2);var n=e.instance.toString();if(!n)return[];var 
i=((t={})[jo]="",t[Ho]=Wo,t.dangerouslySetInnerHTML={__html:n},t),o=Ka();return o&&(i.nonce=o),[r.createElement("style",Ii({},i,{key:"sc-0-0"}))]},this.seal=function(){e.sealed=!0},this.instance=new Za({isServer:!0}),this.sealed=!1}e.prototype.collectStyles=function(e){if(this.sealed)throw Oa(2);return r.createElement(ls,{sheet:this.instance},e)},e.prototype.interleaveWithNodeStream=function(e){throw Oa(3)}})(),"__sc-".concat(jo,"__");const Fs="[AppConfig]",Ms=(()=>{const e={NODE_ENV:"production",PUBLIC_URL:"",WDS_SOCKET_HOST:void 0,WDS_SOCKET_PATH:void 0,WDS_SOCKET_PORT:void 0,FAST_REFRESH:!0}.REACT_APP_API_URL||window.location.origin+window.location.pathname;return e.endsWith("/")?e:e+"/"})();let Ls=null;const Ps=window.location.pathname.includes("/archive/"),Os="theme",$s=()=>{const e=localStorage.getItem(Os);return(e=>"string"===typeof e&&["default","main","night","forest","pony","alien","sunset","ocean","cyberpunk"].includes(e))(e)?e:null},Bs=e=>{localStorage.setItem(Os,e)};const Ds=new class{constructor(){let e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};this.prefix=void 0,this.level=void 0,this.prefix=e.prefix||"",this.level=e.level||"info"}debug(e){}info(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r1?t-1:0),r=1;r1?t-1:0),r=1;r{var t;const n=(0,r.useRef)({attempts:0,lastAttempt:0}),[i,o]=(0,r.useState)(!1),[a,s]=(0,r.useState)(null),[l,c]=(0,r.useState)(!1),u=k(),h=(0,r.useRef)(0);return(0,r.useEffect)((()=>{let t,r=!1;const a=()=>Math.min(1e3*Math.pow(2,n.current.attempts),6e4),l=Ci((()=>{if(r)return;clearTimeout(t);const o=Date.now();o-n.current.lastAttempt<1e3||(n.current.lastAttempt=o,n.current.attempts++,zi.connect(e),t=setTimeout((()=>{i||r||g(new Error("Connection timeout"))}),5e3))}),100);n.current={attempts:0,lastAttempt:0},h.current=0;const d=e=>{c(!0),n.current={attempts:e,lastAttempt:Date.now()}};if(!e)return void console.error("[WebSocket] Critical error: No sessionId provided, connection aborted");const 
f=e=>{null!==e&&void 0!==e&&e.id&&null!==e&&void 0!==e&&e.version?u(Ur(e)):console.warn("[WebSocket] Received message with missing id or version:",e)},p=e=>{o(e),e?(s(null),c(!1),h.current=0,n.current.attempts=0,console.log("[WebSocket] Connected successfully at",(new Date).toISOString())):r||(console.warn("[WebSocket] Disconnected unexpectedly at",(new Date).toISOString()),setTimeout(l,a()))},g=e=>{if(r)return;if(s(e),n.current.attempts>=10)return void console.error("[WebSocket] Maximum reconnection attempts reached:",n.current.attempts);console.error(`[WebSocket] Connection error (attempt ${n.current.attempts}):`,e.message);const t=a();console.log("[WebSocket] Attempting reconnection in",t/1e3,"seconds"),setTimeout(l,t),c(!0)};return zi.addMessageHandler(f),zi.addConnectionHandler(p),zi.addErrorHandler(g),zi.on("reconnecting",d),zi.connect(e),()=>{r=!0,clearTimeout(t),console.log("[WebSocket] Disconnecting at",(new Date).toISOString()),zi.removeMessageHandler(f),zi.removeConnectionHandler(p),zi.removeErrorHandler(g),zi.off("reconnecting",d),zi.disconnect(),t&&clearTimeout(t)}}),[e,u]),{error:a,isReconnecting:l,readyState:null===(t=zi.ws)||void 0===t?void 0:t.readyState,send:e=>zi.send(e),isConnected:i}},Is=e=>{const t=k(),n=C((e=>e.ui.theme));r.useEffect((()=>{const e=$s();e&&e!==n&&(console.info("Theme loaded from storage:",e),t(Zr(e)))}),[]);const i=(0,r.useCallback)((e=>{console.info("Theme changed:",{from:n,to:e}),t(Zr(e)),Bs(e)}),[t]);return r.useEffect((()=>{const t=$s();e&&!n&&e!==t&&i(e)}),[e,n,i]),[n,i]};var Ns=n(2999),Rs=n.n(Ns),js=n(579);const qs=Ts.div` + display: inline-flex; + align-items: center; + justify-content: center; +`,Hs=e=>{let{size:t="medium",className:n="","aria-label":i="Loading..."}=e;(0,r.useEffect)((()=>()=>{0}),[t]);const o="medium"!==t?t:"";return(0,js.jsx)(qs,{children:(0,js.jsx)("div",{role:"status",className:`spinner-border ${o} 
${n}`.trim(),children:(0,js.jsx)("span",{className:"sr-only",children:i})})})},Ws=!1,Ks="message-list-"+Math.random().toString(36).substr(2,9),Us=(e,t)=>{if("text-submit"!==t)"link"!==t?"run"!==t?"regen"!==t?"stop"!==t?zi.send(`!${e},${t}`):zi.send(`!${e},stop`):zi.send(`!${e},regen`):zi.send(`!${e},run`):zi.send(`!${e},link`);else{const t=document.querySelector(`.reply-input[data-id="${e}"]`);if(t){const n=t.value;if(!n.trim())return;const r=`!${e},userTxt,${encodeURIComponent(n)}`;zi.send(r),t.value="",t.style.height="auto"}}},Vs=e=>{let{messages:t}=e;const n=C((e=>e.ui.theme)),i=`message-list-container${Ps?" archive-mode":""} theme-${n}`;r.useEffect((()=>{c.current&&c.current.setAttribute("data-theme",n)}),[n]);const o=r.useCallback((e=>e.filter((e=>e.id&&!e.id.startsWith("z"))).filter((e=>{var t;return(null===(t=e.content)||void 0===t?void 0:t.length)>0}))),[]),a=C((e=>e.ui.verboseMode)),s=C((e=>e.messages.messages),((e,t)=>(null===e||void 0===e?void 0:e.length)===(null===t||void 0===t?void 0:t.length)&&(null===e||void 0===e?void 0:e.every(((e,n)=>e.id===t[n].id&&e.version===t[n].version))))),l=r.useMemo((()=>Array.isArray(t)?t:Array.isArray(s)?s:[]),[t,s]),c=(0,r.useRef)(null),u=r.useMemo((()=>{const e={};return l.forEach((t=>{var n;null!==(n=t.id)&&void 0!==n&&n.startsWith("z")&&(e[t.id]=t.version||0)})),e}),[l]),h=r.useMemo((()=>o(l).map((e=>{let t=e.content||"";t&&e.id&&!e.id.startsWith("z")&&(t=function(e,t){let n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:new Set;if(!e||"string"!==typeof e)return console.warn("[MessageList] Invalid content passed to expandMessageReferences:",e),"";if(!Array.isArray(t))return console.warn("[MessageList] Invalid messages array passed to expandMessageReferences"),e;const r=document.createElement("div");r.innerHTML=e;const i=[r];for(;i.length>0;){const e=i.shift();if(!e)continue;const r=e.getAttribute("message-id");if(r&&!n.has(r)&&r.startsWith("z")){n.add(r);const 
i=t.find((e=>e.id===r));if(i)try{i.content?e.innerHTML=i.content:(console.warn("[MessageList] Referenced message has no content. ID:",r),e.innerHTML='Referenced content unavailable')}catch(o){console.error("[MessageList] Error expanding message reference:",o,{messageID:r}),e.innerHTML='Error expanding reference'}else e.innerHTML='Referenced message not found'}Array.from(e.children).forEach((e=>{e instanceof HTMLElement&&i.push(e)}))}return r.innerHTML}(t,l));const n=document.createElement("div");n.innerHTML=t;return n.querySelectorAll('[class*="verbose"]').forEach((e=>{var t;const n=document.createElement("span");n.className="verbose-wrapper"+(a?" verbose-visible":""),null===(t=e.parentNode)||void 0===t||t.insertBefore(n,e),n.appendChild(e)})),t=n.innerHTML,{...e,content:t}}))),[l,u,a]);(0,r.useEffect)((()=>{let e=!0,t=null;return c.current?(t=new IntersectionObserver((n=>{e&&n.forEach((n=>{if(n.isIntersecting){const r=n.target;"CODE"===r.tagName&&requestIdleCallback((()=>{e&&(r.classList.contains("language-none")||r.closest(".token")||Rs().highlightElement(r))})),t&&t.unobserve(r)}}))})),c.current.querySelectorAll("pre code").forEach((e=>{t&&t.observe(e)})),()=>{e=!1,t&&(t.disconnect(),t=null)}):()=>{e=!1}}),[h]);const d=r.useCallback(Ci((()=>{try{if(!c.current)return;Ws,Li().forEach((e=>{_i.set(e.containerId,e);const t=document.getElementById(e.containerId);t&&Oi(t)})),Bi(),$i(),requestIdleCallback((()=>{c.current&&c.current.querySelectorAll("pre code:not(.prismjs-processed)").forEach((e=>{e instanceof HTMLElement&&null!==e.offsetParent&&(Rs().highlightElement(e),e.classList.add("prismjs-processed"))}))})),(e=>{if(e)try{const t=e.querySelectorAll(".mermaid:not(.mermaid-processed)");t.length>0&&t.forEach(((e,t)=>{if(e instanceof HTMLElement&&null!==e.offsetParent){var n;const r=`mermaid-${Date.now()}-${t}`,i=(null===(n=e.textContent)||void 0===n?void 0:n.trim())||"";if(!i)return console.warn("[Mermaid] Empty diagram source, skipping render"),void 
e.classList.add("mermaid-error","mermaid-empty");e.innerHTML="",Hr.render(r,i).then((t=>{let{svg:n}=t;e.innerHTML=n,e.classList.add("mermaid-processed")})).catch((t=>{console.warn("[Mermaid] Failed to render diagram:",(null===t||void 0===t?void 0:t.message)||"Unknown error",e),e.classList.add("mermaid-error"),e.textContent=i}))}}))}catch(t){console.error("[Mermaid] Failed to render mermaid diagrams:",t)}})(c.current)}catch(e){console.error("[MessageList] Error during post-render update:",e,"Container:",Ks)}}),250),[]);Is(),r.useEffect((()=>{d()}),[h,d]),r.useEffect((()=>{if(!c.current)return;const e=new MutationObserver((e=>{let t=!1;e.forEach((e=>{"childList"===e.type&&e.addedNodes.forEach((e=>{e instanceof HTMLElement&&(e.querySelector(".tabs-container")||e.classList.contains("tabs-container"))&&(t=!0)}))})),t&&d()}));return e.observe(c.current,{childList:!0,subtree:!0}),()=>e.disconnect()}),[d]);const f=r.useCallback((e=>{const t=e.target;t.closest(".tab-button")&&t.closest(".tabs")||(e=>{const t=e.target,{messageId:n,action:r}=(e=>{var t,n,r,i,o,a;const s=null!==(t=null!==(n=null!==(r=e.getAttribute("data-message-id"))&&void 0!==r?r:null===(i=e.closest("[data-message-id]"))||void 0===i?void 0:i.getAttribute("data-message-id"))&&void 0!==n?n:e.getAttribute("data-id"))&&void 0!==t?t:void 0;let l=null!==(o=null!==(a=e.getAttribute("data-message-action"))&&void 0!==a?a:e.getAttribute("data-action"))&&void 0!==o?o:void 0;return 
l||(e.classList.contains("href-link")||e.closest(".href-link")?l="link":e.classList.contains("play-button")?l="run":e.classList.contains("regen-button")?l="regen":e.classList.contains("cancel-button")?l="stop":e.classList.contains("text-submit-button")&&(l="text-submit")),{messageId:s,action:l}})(t);n&&r&&(e.preventDefault(),e.stopPropagation(),Us(n,r))})(e)}),[]);return(0,js.jsxs)("div",{"data-testid":"message-list",id:"message-list-container",ref:c,className:i,children:[0===l.length&&(0,js.jsx)("div",{className:"message-list-loading",children:(0,js.jsx)(Hs,{size:"large","aria-label":"Loading messages..."})}),h.map((e=>(0,js.jsxs)("div",{className:`message-item ${e.type}`,"data-testid":`message-${e.id}`,id:`message-${e.id}`,children:[(0,js.jsx)("div",{className:"message-content message-body",onClick:Ps?void 0:f,"data-testid":`message-content-${e.id}`,dangerouslySetInnerHTML:{__html:e.content}}),"assistant"===e.type&&(0,js.jsxs)("div",{className:"reply-form",children:[(0,js.jsx)("textarea",{className:"reply-input","data-id":e.id,placeholder:"Type your reply...",onKeyDown:t=>{"Enter"!==t.key||t.shiftKey||(t.preventDefault(),Us(e.id,"text-submit"))}}),(0,js.jsx)("button",{className:"text-submit-button","data-id":e.id,"data-message-action":"text-submit",children:"Send"})]})]},e.id)))]})};const Ys=/^[$_\p{ID_Start}][$_\u{200C}\u{200D}\p{ID_Continue}]*$/u,Gs=/^[$_\p{ID_Start}][-$_\u{200C}\u{200D}\p{ID_Continue}]*$/u,Xs={};function Qs(e,t){return((t||Xs).jsx?Gs:Ys).test(e)}const Zs=/[ \t\n\f\r]/g;function Js(e){return""===e.replace(Zs,"")}class el{constructor(e,t,n){this.normal=t,this.property=e,n&&(this.space=n)}}function tl(e,t){const n={},r={};for(const i of e)Object.assign(n,i.property),Object.assign(r,i.normal);return new el(n,r,t)}function nl(e){return e.toLowerCase()}el.prototype.normal={},el.prototype.property={},el.prototype.space=void 0;class 
rl{constructor(e,t){this.attribute=t,this.property=e}}rl.prototype.attribute="",rl.prototype.booleanish=!1,rl.prototype.boolean=!1,rl.prototype.commaOrSpaceSeparated=!1,rl.prototype.commaSeparated=!1,rl.prototype.defined=!1,rl.prototype.mustUseProperty=!1,rl.prototype.number=!1,rl.prototype.overloadedBoolean=!1,rl.prototype.property="",rl.prototype.spaceSeparated=!1,rl.prototype.space=void 0;let il=0;const ol=dl(),al=dl(),sl=dl(),ll=dl(),cl=dl(),ul=dl(),hl=dl();function dl(){return 2**++il}const fl=Object.keys(e);class pl extends rl{constructor(t,n,r,i){let o=-1;if(super(t,n),gl(this,"space",i),"number"===typeof r)for(;++o"role"===t?t:"aria-"+t.slice(4).toLowerCase()});function bl(e,t){return t in e?e[t]:t}function vl(e,t){return bl(e,t.toLowerCase())}const xl=ml({attributes:{acceptcharset:"accept-charset",classname:"class",htmlfor:"for",httpequiv:"http-equiv"},mustUseProperty:["checked","multiple","muted","selected"],properties:{abbr:null,accept:ul,acceptCharset:cl,accessKey:cl,action:null,allow:null,allowFullScreen:ol,allowPaymentRequest:ol,allowUserMedia:ol,alt:null,as:null,async:ol,autoCapitalize:null,autoComplete:cl,autoFocus:ol,autoPlay:ol,blocking:cl,capture:null,charSet:null,checked:ol,cite:null,className:cl,cols:ll,colSpan:null,content:null,contentEditable:al,controls:ol,controlsList:cl,coords:ll|ul,crossOrigin:null,data:null,dateTime:null,decoding:null,default:ol,defer:ol,dir:null,dirName:null,disabled:ol,download:sl,draggable:al,encType:null,enterKeyHint:null,fetchPriority:null,form:null,formAction:null,formEncType:null,formMethod:null,formNoValidate:ol,formTarget:null,headers:cl,height:ll,hidden:ol,high:ll,href:null,hrefLang:null,htmlFor:cl,httpEquiv:cl,id:null,imageSizes:null,imageSrcSet:null,inert:ol,inputMode:null,integrity:null,is:null,isMap:ol,itemId:null,itemProp:cl,itemRef:cl,itemScope:ol,itemType:cl,kind:null,label:null,lang:null,language:null,list:null,loading:null,loop:ol,low:ll,manifest:null,max:null,maxLength:ll,media:null,method:null,min:nul
l,minLength:ll,multiple:ol,muted:ol,name:null,nonce:null,noModule:ol,noValidate:ol,onAbort:null,onAfterPrint:null,onAuxClick:null,onBeforeMatch:null,onBeforePrint:null,onBeforeToggle:null,onBeforeUnload:null,onBlur:null,onCancel:null,onCanPlay:null,onCanPlayThrough:null,onChange:null,onClick:null,onClose:null,onContextLost:null,onContextMenu:null,onContextRestored:null,onCopy:null,onCueChange:null,onCut:null,onDblClick:null,onDrag:null,onDragEnd:null,onDragEnter:null,onDragExit:null,onDragLeave:null,onDragOver:null,onDragStart:null,onDrop:null,onDurationChange:null,onEmptied:null,onEnded:null,onError:null,onFocus:null,onFormData:null,onHashChange:null,onInput:null,onInvalid:null,onKeyDown:null,onKeyPress:null,onKeyUp:null,onLanguageChange:null,onLoad:null,onLoadedData:null,onLoadedMetadata:null,onLoadEnd:null,onLoadStart:null,onMessage:null,onMessageError:null,onMouseDown:null,onMouseEnter:null,onMouseLeave:null,onMouseMove:null,onMouseOut:null,onMouseOver:null,onMouseUp:null,onOffline:null,onOnline:null,onPageHide:null,onPageShow:null,onPaste:null,onPause:null,onPlay:null,onPlaying:null,onPopState:null,onProgress:null,onRateChange:null,onRejectionHandled:null,onReset:null,onResize:null,onScroll:null,onScrollEnd:null,onSecurityPolicyViolation:null,onSeeked:null,onSeeking:null,onSelect:null,onSlotChange:null,onStalled:null,onStorage:null,onSubmit:null,onSuspend:null,onTimeUpdate:null,onToggle:null,onUnhandledRejection:null,onUnload:null,onVolumeChange:null,onWaiting:null,onWheel:null,open:ol,optimum:ll,pattern:null,ping:cl,placeholder:null,playsInline:ol,popover:null,popoverTarget:null,popoverTargetAction:null,poster:null,preload:null,readOnly:ol,referrerPolicy:null,rel:cl,required:ol,reversed:ol,rows:ll,rowSpan:ll,sandbox:cl,scope:null,scoped:ol,seamless:ol,selected:ol,shadowRootClonable:ol,shadowRootDelegatesFocus:ol,shadowRootMode:null,shape:null,size:ll,sizes:null,slot:null,span:ll,spellCheck:al,src:null,srcDoc:null,srcLang:null,srcSet:null,start:ll,step:null,sty
le:null,tabIndex:ll,target:null,title:null,translate:null,type:null,typeMustMatch:ol,useMap:null,value:al,width:ll,wrap:null,writingSuggestions:null,align:null,aLink:null,archive:cl,axis:null,background:null,bgColor:null,border:ll,borderColor:null,bottomMargin:ll,cellPadding:null,cellSpacing:null,char:null,charOff:null,classId:null,clear:null,code:null,codeBase:null,codeType:null,color:null,compact:ol,declare:ol,event:null,face:null,frame:null,frameBorder:null,hSpace:ll,leftMargin:ll,link:null,longDesc:null,lowSrc:null,marginHeight:ll,marginWidth:ll,noResize:ol,noHref:ol,noShade:ol,noWrap:ol,object:null,profile:null,prompt:null,rev:null,rightMargin:ll,rules:null,scheme:null,scrolling:al,standby:null,summary:null,text:null,topMargin:ll,valueType:null,version:null,vAlign:null,vLink:null,vSpace:ll,allowTransparency:null,autoCorrect:null,autoSave:null,disablePictureInPicture:ol,disableRemotePlayback:ol,prefix:null,property:null,results:ll,security:null,unselectable:null},space:"html",transform:vl}),kl=ml({attributes:{accentHeight:"accent-height",alignmentBaseline:"alignment-baseline",arabicForm:"arabic-form",baselineShift:"baseline-shift",capHeight:"cap-height",className:"class",clipPath:"clip-path",clipRule:"clip-rule",colorInterpolation:"color-interpolation",colorInterpolationFilters:"color-interpolation-filters",colorProfile:"color-profile",colorRendering:"color-rendering",crossOrigin:"crossorigin",dataType:"datatype",dominantBaseline:"dominant-baseline",enableBackground:"enable-background",fillOpacity:"fill-opacity",fillRule:"fill-rule",floodColor:"flood-color",floodOpacity:"flood-opacity",fontFamily:"font-family",fontSize:"font-size",fontSizeAdjust:"font-size-adjust",fontStretch:"font-stretch",fontStyle:"font-style",fontVariant:"font-variant",fontWeight:"font-weight",glyphName:"glyph-name",glyphOrientationHorizontal:"glyph-orientation-horizontal",glyphOrientationVertical:"glyph-orientation-vertical",hrefLang:"hreflang",horizAdvX:"horiz-adv-x",horizOriginX:"horiz-or
igin-x",horizOriginY:"horiz-origin-y",imageRendering:"image-rendering",letterSpacing:"letter-spacing",lightingColor:"lighting-color",markerEnd:"marker-end",markerMid:"marker-mid",markerStart:"marker-start",navDown:"nav-down",navDownLeft:"nav-down-left",navDownRight:"nav-down-right",navLeft:"nav-left",navNext:"nav-next",navPrev:"nav-prev",navRight:"nav-right",navUp:"nav-up",navUpLeft:"nav-up-left",navUpRight:"nav-up-right",onAbort:"onabort",onActivate:"onactivate",onAfterPrint:"onafterprint",onBeforePrint:"onbeforeprint",onBegin:"onbegin",onCancel:"oncancel",onCanPlay:"oncanplay",onCanPlayThrough:"oncanplaythrough",onChange:"onchange",onClick:"onclick",onClose:"onclose",onCopy:"oncopy",onCueChange:"oncuechange",onCut:"oncut",onDblClick:"ondblclick",onDrag:"ondrag",onDragEnd:"ondragend",onDragEnter:"ondragenter",onDragExit:"ondragexit",onDragLeave:"ondragleave",onDragOver:"ondragover",onDragStart:"ondragstart",onDrop:"ondrop",onDurationChange:"ondurationchange",onEmptied:"onemptied",onEnd:"onend",onEnded:"onended",onError:"onerror",onFocus:"onfocus",onFocusIn:"onfocusin",onFocusOut:"onfocusout",onHashChange:"onhashchange",onInput:"oninput",onInvalid:"oninvalid",onKeyDown:"onkeydown",onKeyPress:"onkeypress",onKeyUp:"onkeyup",onLoad:"onload",onLoadedData:"onloadeddata",onLoadedMetadata:"onloadedmetadata",onLoadStart:"onloadstart",onMessage:"onmessage",onMouseDown:"onmousedown",onMouseEnter:"onmouseenter",onMouseLeave:"onmouseleave",onMouseMove:"onmousemove",onMouseOut:"onmouseout",onMouseOver:"onmouseover",onMouseUp:"onmouseup",onMouseWheel:"onmousewheel",onOffline:"onoffline",onOnline:"ononline",onPageHide:"onpagehide",onPageShow:"onpageshow",onPaste:"onpaste",onPause:"onpause",onPlay:"onplay",onPlaying:"onplaying",onPopState:"onpopstate",onProgress:"onprogress",onRateChange:"onratechange",onRepeat:"onrepeat",onReset:"onreset",onResize:"onresize",onScroll:"onscroll",onSeeked:"onseeked",onSeeking:"onseeking",onSelect:"onselect",onShow:"onshow",onStalled:"onstalled",onSt
orage:"onstorage",onSubmit:"onsubmit",onSuspend:"onsuspend",onTimeUpdate:"ontimeupdate",onToggle:"ontoggle",onUnload:"onunload",onVolumeChange:"onvolumechange",onWaiting:"onwaiting",onZoom:"onzoom",overlinePosition:"overline-position",overlineThickness:"overline-thickness",paintOrder:"paint-order",panose1:"panose-1",pointerEvents:"pointer-events",referrerPolicy:"referrerpolicy",renderingIntent:"rendering-intent",shapeRendering:"shape-rendering",stopColor:"stop-color",stopOpacity:"stop-opacity",strikethroughPosition:"strikethrough-position",strikethroughThickness:"strikethrough-thickness",strokeDashArray:"stroke-dasharray",strokeDashOffset:"stroke-dashoffset",strokeLineCap:"stroke-linecap",strokeLineJoin:"stroke-linejoin",strokeMiterLimit:"stroke-miterlimit",strokeOpacity:"stroke-opacity",strokeWidth:"stroke-width",tabIndex:"tabindex",textAnchor:"text-anchor",textDecoration:"text-decoration",textRendering:"text-rendering",transformOrigin:"transform-origin",typeOf:"typeof",underlinePosition:"underline-position",underlineThickness:"underline-thickness",unicodeBidi:"unicode-bidi",unicodeRange:"unicode-range",unitsPerEm:"units-per-em",vAlphabetic:"v-alphabetic",vHanging:"v-hanging",vIdeographic:"v-ideographic",vMathematical:"v-mathematical",vectorEffect:"vector-effect",vertAdvY:"vert-adv-y",vertOriginX:"vert-origin-x",vertOriginY:"vert-origin-y",wordSpacing:"word-spacing",writingMode:"writing-mode",xHeight:"x-height",playbackOrder:"playbackorder",timelineBegin:"timelinebegin"},properties:{about:hl,accentHeight:ll,accumulate:null,additive:null,alignmentBaseline:null,alphabetic:ll,amplitude:ll,arabicForm:null,ascent:ll,attributeName:null,attributeType:null,azimuth:ll,bandwidth:null,baselineShift:null,baseFrequency:null,baseProfile:null,bbox:null,begin:null,bias:ll,by:null,calcMode:null,capHeight:ll,className:cl,clip:null,clipPath:null,clipPathUnits:null,clipRule:null,color:null,colorInterpolation:null,colorInterpolationFilters:null,colorProfile:null,colorRendering:null,con
tent:null,contentScriptType:null,contentStyleType:null,crossOrigin:null,cursor:null,cx:null,cy:null,d:null,dataType:null,defaultAction:null,descent:ll,diffuseConstant:ll,direction:null,display:null,dur:null,divisor:ll,dominantBaseline:null,download:ol,dx:null,dy:null,edgeMode:null,editable:null,elevation:ll,enableBackground:null,end:null,event:null,exponent:ll,externalResourcesRequired:null,fill:null,fillOpacity:ll,fillRule:null,filter:null,filterRes:null,filterUnits:null,floodColor:null,floodOpacity:null,focusable:null,focusHighlight:null,fontFamily:null,fontSize:null,fontSizeAdjust:null,fontStretch:null,fontStyle:null,fontVariant:null,fontWeight:null,format:null,fr:null,from:null,fx:null,fy:null,g1:ul,g2:ul,glyphName:ul,glyphOrientationHorizontal:null,glyphOrientationVertical:null,glyphRef:null,gradientTransform:null,gradientUnits:null,handler:null,hanging:ll,hatchContentUnits:null,hatchUnits:null,height:null,href:null,hrefLang:null,horizAdvX:ll,horizOriginX:ll,horizOriginY:ll,id:null,ideographic:ll,imageRendering:null,initialVisibility:null,in:null,in2:null,intercept:ll,k:ll,k1:ll,k2:ll,k3:ll,k4:ll,kernelMatrix:hl,kernelUnitLength:null,keyPoints:null,keySplines:null,keyTimes:null,kerning:null,lang:null,lengthAdjust:null,letterSpacing:null,lightingColor:null,limitingConeAngle:ll,local:null,markerEnd:null,markerMid:null,markerStart:null,markerHeight:null,markerUnits:null,markerWidth:null,mask:null,maskContentUnits:null,maskUnits:null,mathematical:null,max:null,media:null,mediaCharacterEncoding:null,mediaContentEncodings:null,mediaSize:ll,mediaTime:null,method:null,min:null,mode:null,name:null,navDown:null,navDownLeft:null,navDownRight:null,navLeft:null,navNext:null,navPrev:null,navRight:null,navUp:null,navUpLeft:null,navUpRight:null,numOctaves:null,observer:null,offset:null,onAbort:null,onActivate:null,onAfterPrint:null,onBeforePrint:null,onBegin:null,onCancel:null,onCanPlay:null,onCanPlayThrough:null,onChange:null,onClick:null,onClose:null,onCopy:null,onCueChange:
null,onCut:null,onDblClick:null,onDrag:null,onDragEnd:null,onDragEnter:null,onDragExit:null,onDragLeave:null,onDragOver:null,onDragStart:null,onDrop:null,onDurationChange:null,onEmptied:null,onEnd:null,onEnded:null,onError:null,onFocus:null,onFocusIn:null,onFocusOut:null,onHashChange:null,onInput:null,onInvalid:null,onKeyDown:null,onKeyPress:null,onKeyUp:null,onLoad:null,onLoadedData:null,onLoadedMetadata:null,onLoadStart:null,onMessage:null,onMouseDown:null,onMouseEnter:null,onMouseLeave:null,onMouseMove:null,onMouseOut:null,onMouseOver:null,onMouseUp:null,onMouseWheel:null,onOffline:null,onOnline:null,onPageHide:null,onPageShow:null,onPaste:null,onPause:null,onPlay:null,onPlaying:null,onPopState:null,onProgress:null,onRateChange:null,onRepeat:null,onReset:null,onResize:null,onScroll:null,onSeeked:null,onSeeking:null,onSelect:null,onShow:null,onStalled:null,onStorage:null,onSubmit:null,onSuspend:null,onTimeUpdate:null,onToggle:null,onUnload:null,onVolumeChange:null,onWaiting:null,onZoom:null,opacity:null,operator:null,order:null,orient:null,orientation:null,origin:null,overflow:null,overlay:null,overlinePosition:ll,overlineThickness:ll,paintOrder:null,panose1:null,path:null,pathLength:ll,patternContentUnits:null,patternTransform:null,patternUnits:null,phase:null,ping:cl,pitch:null,playbackOrder:null,pointerEvents:null,points:null,pointsAtX:ll,pointsAtY:ll,pointsAtZ:ll,preserveAlpha:null,preserveAspectRatio:null,primitiveUnits:null,propagate:null,property:hl,r:null,radius:null,referrerPolicy:null,refX:null,refY:null,rel:hl,rev:hl,renderingIntent:null,repeatCount:null,repeatDur:null,requiredExtensions:hl,requiredFeatures:hl,requiredFonts:hl,requiredFormats:hl,resource:null,restart:null,result:null,rotate:null,rx:null,ry:null,scale:null,seed:null,shapeRendering:null,side:null,slope:null,snapshotTime:null,specularConstant:ll,specularExponent:ll,spreadMethod:null,spacing:null,startOffset:null,stdDeviation:null,stemh:null,stemv:null,stitchTiles:null,stopColor:null,stopOp
acity:null,strikethroughPosition:ll,strikethroughThickness:ll,string:null,stroke:null,strokeDashArray:hl,strokeDashOffset:null,strokeLineCap:null,strokeLineJoin:null,strokeMiterLimit:ll,strokeOpacity:ll,strokeWidth:null,style:null,surfaceScale:ll,syncBehavior:null,syncBehaviorDefault:null,syncMaster:null,syncTolerance:null,syncToleranceDefault:null,systemLanguage:hl,tabIndex:ll,tableValues:null,target:null,targetX:ll,targetY:ll,textAnchor:null,textDecoration:null,textRendering:null,textLength:null,timelineBegin:null,title:null,transformBehavior:null,type:null,typeOf:hl,to:null,transform:null,transformOrigin:null,u1:null,u2:null,underlinePosition:ll,underlineThickness:ll,unicode:null,unicodeBidi:null,unicodeRange:null,unitsPerEm:ll,values:null,vAlphabetic:ll,vMathematical:ll,vectorEffect:null,vHanging:ll,vIdeographic:ll,version:null,vertAdvY:ll,vertOriginX:ll,vertOriginY:ll,viewBox:null,viewTarget:null,visibility:null,width:null,widths:null,wordSpacing:null,writingMode:null,x:null,x1:null,x2:null,xChannelSelector:null,xHeight:ll,y:null,y1:null,y2:null,yChannelSelector:null,z:null,zoomAndPan:null},space:"svg",transform:bl}),wl=ml({properties:{xLinkActuate:null,xLinkArcRole:null,xLinkHref:null,xLinkRole:null,xLinkShow:null,xLinkTitle:null,xLinkType:null},space:"xlink",transform:(e,t)=>"xlink:"+t.slice(5).toLowerCase()}),Sl=ml({attributes:{xmlnsxlink:"xmlns:xlink"},properties:{xmlnsXLink:null,xmlns:null},space:"xmlns",transform:vl}),Cl=ml({properties:{xmlBase:null,xmlLang:null,xmlSpace:null},space:"xml",transform:(e,t)=>"xml:"+t.slice(3).toLowerCase()}),_l=tl([yl,xl,wl,Sl,Cl],"html"),Al=tl([yl,kl,wl,Sl,Cl],"svg"),Tl=/[A-Z]/g,El=/-[a-z]/g,Fl=/^data[-\w.:]+$/i;function Ml(e){return"-"+e.toLowerCase()}function Ll(e){return e.charAt(1).toUpperCase()}const 
Pl={classId:"classID",dataType:"datatype",itemId:"itemID",strokeDashArray:"strokeDasharray",strokeDashOffset:"strokeDashoffset",strokeLineCap:"strokeLinecap",strokeLineJoin:"strokeLinejoin",strokeMiterLimit:"strokeMiterlimit",typeOf:"typeof",xLinkActuate:"xlinkActuate",xLinkArcRole:"xlinkArcrole",xLinkHref:"xlinkHref",xLinkRole:"xlinkRole",xLinkShow:"xlinkShow",xLinkTitle:"xlinkTitle",xLinkType:"xlinkType",xmlnsXLink:"xmlnsXlink"};var Ol=n(7294);const $l=Dl("end"),Bl=Dl("start");function Dl(e){return function(t){const n=t&&t.position&&t.position[e]||{};if("number"===typeof n.line&&n.line>0&&"number"===typeof n.column&&n.column>0)return{line:n.line,column:n.column,offset:"number"===typeof n.offset&&n.offset>-1?n.offset:void 0}}}function zl(e){return e&&"object"===typeof e?"position"in e||"type"in e?Nl(e.position):"start"in e||"end"in e?Nl(e):"line"in e||"column"in e?Il(e):"":""}function Il(e){return Rl(e&&e.line)+":"+Rl(e&&e.column)}function Nl(e){return Il(e&&e.start)+"-"+Il(e&&e.end)}function Rl(e){return e&&"number"===typeof e?e:1}class jl extends Error{constructor(e,t,n){super(),"string"===typeof t&&(n=t,t=void 0);let r="",i={},o=!1;if(t&&(i="line"in t&&"column"in t||"start"in t&&"end"in t?{place:t}:"type"in t?{ancestors:[t],place:t.position}:{...t}),"string"===typeof e?r=e:!i.cause&&e&&(o=!0,r=e.message,i.cause=e),!i.ruleId&&!i.source&&"string"===typeof n){const e=n.indexOf(":");-1===e?i.ruleId=n:(i.source=n.slice(0,e),i.ruleId=n.slice(e+1))}if(!i.place&&i.ancestors&&i.ancestors){const e=i.ancestors[i.ancestors.length-1];e&&(i.place=e.position)}const a=i.place&&"start"in i.place?i.place.start:i.place;this.ancestors=i.ancestors||void 0,this.cause=i.cause||void 0,this.column=a?a.column:void 0,this.fatal=void 0,this.file,this.message=r,this.line=a?a.line:void 0,this.name=zl(i.place)||"1:1",this.place=i.place||void 0,this.reason=this.message,this.ruleId=i.ruleId||void 0,this.source=i.source||void 0,this.stack=o&&i.cause&&"string"===typeof 
i.cause.stack?i.cause.stack:"",this.actual,this.expected,this.note,this.url}}jl.prototype.file="",jl.prototype.name="",jl.prototype.reason="",jl.prototype.message="",jl.prototype.stack="",jl.prototype.column=void 0,jl.prototype.line=void 0,jl.prototype.ancestors=void 0,jl.prototype.cause=void 0,jl.prototype.fatal=void 0,jl.prototype.place=void 0,jl.prototype.ruleId=void 0,jl.prototype.source=void 0;const ql={}.hasOwnProperty,Hl=new Map,Wl=/[A-Z]/g,Kl=new Set(["table","tbody","thead","tfoot","tr"]),Ul=new Set(["td","th"]),Vl="https://github.com/syntax-tree/hast-util-to-jsx-runtime";function Yl(e,t){if(!t||void 0===t.Fragment)throw new TypeError("Expected `Fragment` in options");const n=t.filePath||void 0;let r;if(t.development){if("function"!==typeof t.jsxDEV)throw new TypeError("Expected `jsxDEV` in options when `development: true`");r=function(e,t){return n;function n(n,r,i,o){const a=Array.isArray(i.children),s=Bl(n);return t(r,i,o,a,{columnNumber:s?s.column-1:void 0,fileName:e,lineNumber:s?s.line:void 0},void 0)}}(n,t.jsxDEV)}else{if("function"!==typeof t.jsx)throw new TypeError("Expected `jsx` in production options");if("function"!==typeof t.jsxs)throw new TypeError("Expected `jsxs` in production options");r=function(e,t,n){return r;function r(e,r,i,o){const a=Array.isArray(i.children)?n:t;return o?a(r,i,o):a(r,i)}}(0,t.jsx,t.jsxs)}const i={Fragment:t.Fragment,ancestors:[],components:t.components||{},create:r,elementAttributeNameCase:t.elementAttributeNameCase||"react",evaluater:t.createEvaluater?t.createEvaluater():void 0,filePath:n,ignoreInvalidStyle:t.ignoreInvalidStyle||!1,passKeys:!1!==t.passKeys,passNode:t.passNode||!1,schema:"svg"===t.space?Al:_l,stylePropertyNameCase:t.stylePropertyNameCase||"dom",tableCellAlignToStyle:!1!==t.tableCellAlignToStyle},o=Gl(i,e,void 0);return o&&"string"!==typeof o?o:i.create(e,i.Fragment,{children:o||void 0},void 0)}function Gl(e,t,n){return"element"===t.type?function(e,t,n){const r=e.schema;let 
i=r;"svg"===t.tagName.toLowerCase()&&"html"===r.space&&(i=Al,e.schema=i);e.ancestors.push(t);const o=ec(e,t.tagName,!1),a=function(e,t){const n={};let r,i;for(i in t.properties)if("children"!==i&&ql.call(t.properties,i)){const o=Jl(e,i,t.properties[i]);if(o){const[i,a]=o;e.tableCellAlignToStyle&&"align"===i&&"string"===typeof a&&Ul.has(t.tagName)?r=a:n[i]=a}}if(r){(n.style||(n.style={}))["css"===e.stylePropertyNameCase?"text-align":"textAlign"]=r}return n}(e,t);let s=Zl(e,t);Kl.has(t.tagName)&&(s=s.filter((function(e){return"string"!==typeof e||!("object"===typeof(t=e)?"text"===t.type&&Js(t.value):Js(t));var t})));return Xl(e,a,o,t),Ql(a,s),e.ancestors.pop(),e.schema=r,e.create(t,o,a,n)}(e,t,n):"mdxFlowExpression"===t.type||"mdxTextExpression"===t.type?function(e,t){if(t.data&&t.data.estree&&e.evaluater){const n=t.data.estree.body[0];return n.type,e.evaluater.evaluateExpression(n.expression)}tc(e,t.position)}(e,t):"mdxJsxFlowElement"===t.type||"mdxJsxTextElement"===t.type?function(e,t,n){const r=e.schema;let i=r;"svg"===t.name&&"html"===r.space&&(i=Al,e.schema=i);e.ancestors.push(t);const o=null===t.name?e.Fragment:ec(e,t.name,!0),a=function(e,t){const n={};for(const r of t.attributes)if("mdxJsxExpressionAttribute"===r.type)if(r.data&&r.data.estree&&e.evaluater){const t=r.data.estree.body[0];t.type;const i=t.expression;i.type;const o=i.properties[0];o.type,Object.assign(n,e.evaluater.evaluateExpression(o.argument))}else tc(e,t.position);else{const i=r.name;let o;if(r.value&&"object"===typeof r.value)if(r.value.data&&r.value.data.estree&&e.evaluater){const t=r.value.data.estree.body[0];t.type,o=e.evaluater.evaluateExpression(t.expression)}else tc(e,t.position);else o=null===r.value||r.value;n[i]=o}return n}(e,t),s=Zl(e,t);return Xl(e,a,o,t),Ql(a,s),e.ancestors.pop(),e.schema=r,e.create(t,o,a,n)}(e,t,n):"mdxjsEsm"===t.type?function(e,t){if(t.data&&t.data.estree&&e.evaluater)return 
e.evaluater.evaluateProgram(t.data.estree);tc(e,t.position)}(e,t):"root"===t.type?function(e,t,n){const r={};return Ql(r,Zl(e,t)),e.create(t,e.Fragment,r,n)}(e,t,n):"text"===t.type?function(e,t){return t.value}(0,t):void 0}function Xl(e,t,n,r){"string"!==typeof n&&n!==e.Fragment&&e.passNode&&(t.node=r)}function Ql(e,t){if(t.length>0){const n=t.length>1?t:t[0];n&&(e.children=n)}}function Zl(e,t){const n=[];let r=-1;const i=e.passKeys?new Map:Hl;for(;++r4&&"data"===n.slice(0,4)&&Fl.test(t)){if("-"===t.charAt(4)){const e=t.slice(5).replace(El,Ll);r="data"+e.charAt(0).toUpperCase()+e.slice(1)}else{const e=t.slice(4);if(!El.test(e)){let n=e.replace(Tl,Ml);"-"!==n.charAt(0)&&(n="-"+n),t="data"+n}}i=pl}return new i(r,t)}(e.schema,t);if(!(null===n||void 0===n||"number"===typeof n&&Number.isNaN(n))){if(Array.isArray(n)&&(n=r.commaSeparated?function(e,t){const n=t||{};return(""===e[e.length-1]?[...e,""]:e).join((n.padRight?" ":"")+","+(!1===n.padLeft?"":" ")).trim()}(n):n.join(" ").trim()),"style"===r.property){let t="object"===typeof n?n:function(e,t){try{return Ol(t,{reactCompat:!0})}catch(n){if(e.ignoreInvalidStyle)return{};const t=n,r=new jl("Cannot parse `style` attribute",{ancestors:e.ancestors,cause:t,ruleId:"style",source:"hast-util-to-jsx-runtime"});throw r.file=e.filePath||void 0,r.url=Vl+"#cannot-parse-style-attribute",r}}(e,String(n));return"css"===e.stylePropertyNameCase&&(t=function(e){const t={};let n;for(n in e)ql.call(e,n)&&(t[nc(n)]=e[n]);return t}(t)),["style",t]}return["react"===e.elementAttributeNameCase&&r.space?Pl[r.property]||r.property:r.attribute,n]}}function ec(e,t,n){let r;if(n)if(t.includes(".")){const e=t.split(".");let n,i=-1;for(;++ii?0:i+t:t>i?i:t,n=n>0?n:0,r.length<1e4)o=Array.from(r),o.unshift(t,n),e.splice(...o);else for(n&&e.splice(t,n);a0?(cc(e,e.length,0,t),e):t}class hc{constructor(e){this.left=e?[...e]:[],this.right=[]}get(e){if(e<0||e>=this.left.length+this.right.length)throw new RangeError("Cannot access index `"+e+"` in a splice 
buffer of size `"+(this.left.length+this.right.length)+"`");return ethis.left.length?this.right.slice(this.right.length-n+this.left.length,this.right.length-e+this.left.length).reverse():this.left.slice(e).concat(this.right.slice(this.right.length-n+this.left.length).reverse())}splice(e,t,n){const r=t||0;this.setCursor(Math.trunc(e));const i=this.right.splice(this.right.length-r,Number.POSITIVE_INFINITY);return n&&dc(this.left,n),i.reverse()}pop(){return this.setCursor(Number.POSITIVE_INFINITY),this.left.pop()}push(e){this.setCursor(Number.POSITIVE_INFINITY),this.left.push(e)}pushMany(e){this.setCursor(Number.POSITIVE_INFINITY),dc(this.left,e)}unshift(e){this.setCursor(0),this.right.push(e)}unshiftMany(e){this.setCursor(0),dc(this.right,e.reverse())}setCursor(e){if(!(e===this.left.length||e>this.left.length&&0===this.right.length||e<0&&0===this.left.length))if(e-1&&e.test(String.fromCharCode(t))}}function Pc(e,t,n,r){const i=r?r-1:Number.POSITIVE_INFINITY;let o=0;return function(r){if(Ec(r))return e.enter(n),a(r);return t(r)};function a(r){return Ec(r)&&o++o))return;const n=t.events.length;let i,s,l=n;for(;l--;)if("exit"===t.events[l][0]&&"chunkFlow"===t.events[l][1].type){if(i){s=t.events[l][1].end;break}i=!0}for(y(a),e=n;er;){const r=n[i];t.containerState=r[1],r[0].exit.call(t,e)}n.length=r}function b(){r.write([null]),i=void 0,r=void 0,t.containerState._closeFlow=void 0}}},Bc={tokenize:function(e,t,n){return Pc(e,e.attempt(this.parser.constructs.document,t,n),"linePrefix",this.parser.constructs.disable.null.includes("codeIndented")?void 0:4)}};const Dc={partial:!0,tokenize:function(e,t,n){return function(t){return Ec(t)?Pc(e,r,"linePrefix")(t):r(t)};function r(e){return null===e||Ac(e)?t(e):n(e)}}};const zc={resolve:function(e){return fc(e),e},tokenize:function(e,t){let n;return function(t){return e.enter("content"),n=e.enter("chunkContent",{contentType:"content"}),r(t)};function r(t){return null===t?i(t):Ac(t)?e.check(Ic,o,i)(t):(e.consume(t),r)}function 
i(n){return e.exit("chunkContent"),e.exit("content"),t(n)}function o(t){return e.consume(t),e.exit("chunkContent"),n.next=e.enter("chunkContent",{contentType:"content",previous:n}),n=n.next,r}}},Ic={partial:!0,tokenize:function(e,t,n){const r=this;return function(t){return e.exit("chunkContent"),e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),Pc(e,i,"linePrefix")};function i(i){if(null===i||Ac(i))return n(i);const o=r.events[r.events.length-1];return!r.parser.constructs.disable.null.includes("codeIndented")&&o&&"linePrefix"===o[1].type&&o[2].sliceSerialize(o[1],!0).length>=4?t(i):e.interrupt(r.parser.constructs.flow,n,t)(i)}}};const Nc={tokenize:function(e){const t=this,n=e.attempt(Dc,(function(r){if(null===r)return void e.consume(r);return e.enter("lineEndingBlank"),e.consume(r),e.exit("lineEndingBlank"),t.currentConstruct=void 0,n}),e.attempt(this.parser.constructs.flowInitial,r,Pc(e,e.attempt(this.parser.constructs.flow,r,e.attempt(zc,r)),"linePrefix")));return n;function r(r){if(null!==r)return e.enter("lineEnding"),e.consume(r),e.exit("lineEnding"),t.currentConstruct=void 0,n;e.consume(r)}}};const Rc={resolveAll:Wc()},jc=Hc("string"),qc=Hc("text");function Hc(e){return{resolveAll:Wc("text"===e?Kc:void 0),tokenize:function(t){const n=this,r=this.parser.constructs[e],i=t.attempt(r,o,a);return o;function o(e){return l(e)?i(e):a(e)}function a(e){if(null!==e)return t.enter("data"),t.consume(e),s;t.consume(e)}function s(e){return l(e)?(t.exit("data"),i(e)):(t.consume(e),s)}function l(e){if(null===e)return!0;const t=r[e];let i=-1;if(t)for(;++i=3&&(null===o||Ac(o))?(e.exit("thematicBreak"),t(o)):n(o)}function a(t){return t===r?(e.consume(t),i++,a):(e.exit("thematicBreakSequence"),Ec(t)?Pc(e,o,"whitespace")(t):o(t))}}};const Vc={continuation:{tokenize:function(e,t,n){const r=this;return r.containerState._closeFlow=void 0,e.check(Dc,i,o);function i(n){return 
r.containerState.furtherBlankLines=r.containerState.furtherBlankLines||r.containerState.initialBlankLine,Pc(e,t,"listItemIndent",r.containerState.size+1)(n)}function o(n){return r.containerState.furtherBlankLines||!Ec(n)?(r.containerState.furtherBlankLines=void 0,r.containerState.initialBlankLine=void 0,a(n)):(r.containerState.furtherBlankLines=void 0,r.containerState.initialBlankLine=void 0,e.attempt(Gc,t,a)(n))}function a(i){return r.containerState._closeFlow=!0,r.interrupt=void 0,Pc(e,e.attempt(Vc,t,n),"linePrefix",r.parser.constructs.disable.null.includes("codeIndented")?void 0:4)(i)}}},exit:function(e){e.exit(this.containerState.type)},name:"list",tokenize:function(e,t,n){const r=this,i=r.events[r.events.length-1];let o=i&&"linePrefix"===i[1].type?i[2].sliceSerialize(i[1],!0).length:0,a=0;return function(t){const i=r.containerState.type||(42===t||43===t||45===t?"listUnordered":"listOrdered");if("listUnordered"===i?!r.containerState.marker||t===r.containerState.marker:Sc(t)){if(r.containerState.type||(r.containerState.type=i,e.enter(i,{_container:!0})),"listUnordered"===i)return e.enter("listItemPrefix"),42===t||45===t?e.check(Uc,n,l)(t):l(t);if(!r.interrupt||49===t)return e.enter("listItemPrefix"),e.enter("listItemValue"),s(t)}return n(t)};function s(t){return Sc(t)&&++a<10?(e.consume(t),s):(!r.interrupt||a<2)&&(r.containerState.marker?t===r.containerState.marker:41===t||46===t)?(e.exit("listItemValue"),l(t)):n(t)}function l(t){return e.enter("listItemMarker"),e.consume(t),e.exit("listItemMarker"),r.containerState.marker=r.containerState.marker||t,e.check(Dc,r.interrupt?n:c,e.attempt(Yc,h,u))}function c(e){return r.containerState.initialBlankLine=!0,o++,h(e)}function u(t){return Ec(t)?(e.enter("listItemPrefixWhitespace"),e.consume(t),e.exit("listItemPrefixWhitespace"),h):n(t)}function h(n){return r.containerState.size=o+r.sliceSerialize(e.exit("listItemPrefix"),!0).length,t(n)}}},Yc={partial:!0,tokenize:function(e,t,n){const r=this;return 
Pc(e,(function(e){const i=r.events[r.events.length-1];return!Ec(e)&&i&&"listItemPrefixWhitespace"===i[1].type?t(e):n(e)}),"listItemPrefixWhitespace",r.parser.constructs.disable.null.includes("codeIndented")?void 0:5)}},Gc={partial:!0,tokenize:function(e,t,n){const r=this;return Pc(e,(function(e){const i=r.events[r.events.length-1];return i&&"listItemIndent"===i[1].type&&i[2].sliceSerialize(i[1],!0).length===r.containerState.size?t(e):n(e)}),"listItemIndent",r.containerState.size+1)}};const Xc={continuation:{tokenize:function(e,t,n){const r=this;return function(t){if(Ec(t))return Pc(e,i,"linePrefix",r.parser.constructs.disable.null.includes("codeIndented")?void 0:4)(t);return i(t)};function i(r){return e.attempt(Xc,t,n)(r)}}},exit:function(e){e.exit("blockQuote")},name:"blockQuote",tokenize:function(e,t,n){const r=this;return function(t){if(62===t){const n=r.containerState;return n.open||(e.enter("blockQuote",{_container:!0}),n.open=!0),e.enter("blockQuotePrefix"),e.enter("blockQuoteMarker"),e.consume(t),e.exit("blockQuoteMarker"),i}return n(t)};function i(n){return Ec(n)?(e.enter("blockQuotePrefixWhitespace"),e.consume(n),e.exit("blockQuotePrefixWhitespace"),e.exit("blockQuotePrefix"),t):(e.exit("blockQuotePrefix"),t(n))}}};function Qc(e,t,n,r,i,o,a,s,l){const c=l||Number.POSITIVE_INFINITY;let u=0;return function(t){if(60===t)return e.enter(r),e.enter(i),e.enter(o),e.consume(t),e.exit(o),h;if(null===t||32===t||41===t||wc(t))return n(t);return e.enter(r),e.enter(a),e.enter(s),e.enter("chunkString",{contentType:"string"}),p(t)};function h(n){return 62===n?(e.enter(o),e.consume(n),e.exit(o),e.exit(i),e.exit(r),t):(e.enter(s),e.enter("chunkString",{contentType:"string"}),d(n))}function d(t){return 62===t?(e.exit("chunkString"),e.exit(s),h(t)):null===t||60===t||Ac(t)?n(t):(e.consume(t),92===t?f:d)}function f(t){return 60===t||62===t||92===t?(e.consume(t),d):d(t)}function p(i){return 
u||null!==i&&41!==i&&!Tc(i)?u999||null===h||91===h||93===h&&!s||94===h&&!l&&"_hiddenFootnoteSupport"in a.parser.constructs?n(h):93===h?(e.exit(o),e.enter(i),e.consume(h),e.exit(i),e.exit(r),t):Ac(h)?(e.enter("lineEnding"),e.consume(h),e.exit("lineEnding"),c):(e.enter("chunkString",{contentType:"string"}),u(h))}function u(t){return null===t||91===t||93===t||Ac(t)||l++>999?(e.exit("chunkString"),c(t)):(e.consume(t),s||(s=!Ec(t)),92===t?h:u)}function h(t){return 91===t||92===t||93===t?(e.consume(t),l++,u):u(t)}}function Jc(e,t,n,r,i,o){let a;return function(t){if(34===t||39===t||40===t)return e.enter(r),e.enter(i),e.consume(t),e.exit(i),a=40===t?41:t,s;return n(t)};function s(n){return n===a?(e.enter(i),e.consume(n),e.exit(i),e.exit(r),t):(e.enter(o),l(n))}function l(t){return t===a?(e.exit(o),s(a)):null===t?n(t):Ac(t)?(e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),Pc(e,l,"linePrefix")):(e.enter("chunkString",{contentType:"string"}),c(t))}function c(t){return t===a||null===t||Ac(t)?(e.exit("chunkString"),l(t)):(e.consume(t),92===t?u:c)}function u(t){return t===a||92===t?(e.consume(t),c):c(t)}}function eu(e,t){let n;return function r(i){if(Ac(i))return e.enter("lineEnding"),e.consume(i),e.exit("lineEnding"),n=!0,r;if(Ec(i))return Pc(e,r,n?"linePrefix":"lineSuffix")(i);return t(i)}}function tu(e){return e.replace(/[\t\n\r ]+/g," ").replace(/^ | $/g,"").toLowerCase().toUpperCase()}const nu={name:"definition",tokenize:function(e,t,n){const r=this;let i;return function(t){return e.enter("definition"),function(t){return Zc.call(r,e,o,n,"definitionLabel","definitionLabelMarker","definitionLabelString")(t)}(t)};function o(t){return i=tu(r.sliceSerialize(r.events[r.events.length-1][1]).slice(1,-1)),58===t?(e.enter("definitionMarker"),e.consume(t),e.exit("definitionMarker"),a):n(t)}function a(t){return Tc(t)?eu(e,s)(t):s(t)}function s(t){return 
Qc(e,l,n,"definitionDestination","definitionDestinationLiteral","definitionDestinationLiteralMarker","definitionDestinationRaw","definitionDestinationString")(t)}function l(t){return e.attempt(ru,c,c)(t)}function c(t){return Ec(t)?Pc(e,u,"whitespace")(t):u(t)}function u(o){return null===o||Ac(o)?(e.exit("definition"),r.parser.defined.push(i),t(o)):n(o)}}},ru={partial:!0,tokenize:function(e,t,n){return function(t){return Tc(t)?eu(e,r)(t):n(t)};function r(t){return Jc(e,i,n,"definitionTitle","definitionTitleMarker","definitionTitleString")(t)}function i(t){return Ec(t)?Pc(e,o,"whitespace")(t):o(t)}function o(e){return null===e||Ac(e)?t(e):n(e)}}};const iu={name:"codeIndented",tokenize:function(e,t,n){const r=this;return function(t){return e.enter("codeIndented"),Pc(e,i,"linePrefix",5)(t)};function i(e){const t=r.events[r.events.length-1];return t&&"linePrefix"===t[1].type&&t[2].sliceSerialize(t[1],!0).length>=4?o(e):n(e)}function o(t){return null===t?s(t):Ac(t)?e.attempt(ou,o,s)(t):(e.enter("codeFlowValue"),a(t))}function a(t){return null===t||Ac(t)?(e.exit("codeFlowValue"),o(t)):(e.consume(t),a)}function s(n){return e.exit("codeIndented"),t(n)}}},ou={partial:!0,tokenize:function(e,t,n){const r=this;return i;function i(t){return r.parser.lazy[r.now().line]?n(t):Ac(t)?(e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),i):Pc(e,o,"linePrefix",5)(t)}function o(e){const o=r.events[r.events.length-1];return o&&"linePrefix"===o[1].type&&o[2].sliceSerialize(o[1],!0).length>=4?t(e):Ac(e)?i(e):n(e)}}};const au={name:"headingAtx",resolve:function(e,t){let n,r,i=e.length-2,o=3;"whitespace"===e[o][1].type&&(o+=2);i-2>o&&"whitespace"===e[i][1].type&&(i-=2);"atxHeadingSequence"===e[i][1].type&&(o===i-1||i-4>o&&"whitespace"===e[i-2][1].type)&&(i-=o+1===i?2:4);i>o&&(n={type:"atxHeadingText",start:e[o][1].start,end:e[i][1].end},r={type:"chunkText",start:e[o][1].start,end:e[i][1].end,contentType:"text"},cc(e,o,i-o+1,[["enter",n,t],["enter",r,t],["exit",r,t],["exit",n,t]]));return 
e},tokenize:function(e,t,n){let r=0;return function(t){return e.enter("atxHeading"),function(t){return e.enter("atxHeadingSequence"),i(t)}(t)};function i(t){return 35===t&&r++<6?(e.consume(t),i):null===t||Tc(t)?(e.exit("atxHeadingSequence"),o(t)):n(t)}function o(n){return 35===n?(e.enter("atxHeadingSequence"),a(n)):null===n||Ac(n)?(e.exit("atxHeading"),t(n)):Ec(n)?Pc(e,o,"whitespace")(n):(e.enter("atxHeadingText"),s(n))}function a(t){return 35===t?(e.consume(t),a):(e.exit("atxHeadingSequence"),o(t))}function s(t){return null===t||35===t||Tc(t)?(e.exit("atxHeadingText"),o(t)):(e.consume(t),s)}}};const su={name:"setextUnderline",resolveTo:function(e,t){let n,r,i,o=e.length;for(;o--;)if("enter"===e[o][0]){if("content"===e[o][1].type){n=o;break}"paragraph"===e[o][1].type&&(r=o)}else"content"===e[o][1].type&&e.splice(o,1),i||"definition"!==e[o][1].type||(i=o);const a={type:"setextHeading",start:{...e[n][1].start},end:{...e[e.length-1][1].end}};e[r][1].type="setextHeadingText",i?(e.splice(r,0,["enter",a,t]),e.splice(i+1,0,["exit",e[n][1],t]),e[n][1].end={...e[i][1].end}):e[n][1]=a;return e.push(["exit",a,t]),e},tokenize:function(e,t,n){const r=this;let i;return function(t){let a,s=r.events.length;for(;s--;)if("lineEnding"!==r.events[s][1].type&&"linePrefix"!==r.events[s][1].type&&"content"!==r.events[s][1].type){a="paragraph"===r.events[s][1].type;break}if(!r.parser.lazy[r.now().line]&&(r.interrupt||a))return e.enter("setextHeadingLine"),i=t,function(t){return e.enter("setextHeadingLineSequence"),o(t)}(t);return n(t)};function o(t){return t===i?(e.consume(t),o):(e.exit("setextHeadingLineSequence"),Ec(t)?Pc(e,a,"lineSuffix")(t):a(t))}function a(r){return null===r||Ac(r)?(e.exit("setextHeadingLine"),t(r)):n(r)}}};const 
lu=["address","article","aside","base","basefont","blockquote","body","caption","center","col","colgroup","dd","details","dialog","dir","div","dl","dt","fieldset","figcaption","figure","footer","form","frame","frameset","h1","h2","h3","h4","h5","h6","head","header","hr","html","iframe","legend","li","link","main","menu","menuitem","nav","noframes","ol","optgroup","option","p","param","search","section","summary","table","tbody","td","tfoot","th","thead","title","tr","track","ul"],cu=["pre","script","style","textarea"],uu={concrete:!0,name:"htmlFlow",resolveTo:function(e){let t=e.length;for(;t--&&("enter"!==e[t][0]||"htmlFlow"!==e[t][1].type););t>1&&"linePrefix"===e[t-2][1].type&&(e[t][1].start=e[t-2][1].start,e[t+1][1].start=e[t-2][1].start,e.splice(t-2,2));return e},tokenize:function(e,t,n){const r=this;let i,o,a,s,l;return function(t){return function(t){return e.enter("htmlFlow"),e.enter("htmlFlowData"),e.consume(t),c}(t)};function c(s){return 33===s?(e.consume(s),u):47===s?(e.consume(s),o=!0,f):63===s?(e.consume(s),i=3,r.interrupt?t:$):vc(s)?(e.consume(s),a=String.fromCharCode(s),p):n(s)}function u(o){return 45===o?(e.consume(o),i=2,h):91===o?(e.consume(o),i=5,s=0,d):vc(o)?(e.consume(o),i=4,r.interrupt?t:$):n(o)}function h(i){return 45===i?(e.consume(i),r.interrupt?t:$):n(i)}function d(i){const o="CDATA[";return i===o.charCodeAt(s++)?(e.consume(i),6===s?r.interrupt?t:A:d):n(i)}function f(t){return vc(t)?(e.consume(t),a=String.fromCharCode(t),p):n(t)}function p(s){if(null===s||47===s||62===s||Tc(s)){const l=47===s,c=a.toLowerCase();return l||o||!cu.includes(c)?lu.includes(a.toLowerCase())?(i=6,l?(e.consume(s),g):r.interrupt?t(s):A(s)):(i=7,r.interrupt&&!r.parser.lazy[r.now().line]?n(s):o?m(s):y(s)):(i=1,r.interrupt?t(s):A(s))}return 45===s||xc(s)?(e.consume(s),a+=String.fromCharCode(s),p):n(s)}function g(i){return 62===i?(e.consume(i),r.interrupt?t:A):n(i)}function m(t){return Ec(t)?(e.consume(t),m):C(t)}function y(t){return 
47===t?(e.consume(t),C):58===t||95===t||vc(t)?(e.consume(t),b):Ec(t)?(e.consume(t),y):C(t)}function b(t){return 45===t||46===t||58===t||95===t||xc(t)?(e.consume(t),b):v(t)}function v(t){return 61===t?(e.consume(t),x):Ec(t)?(e.consume(t),v):y(t)}function x(t){return null===t||60===t||61===t||62===t||96===t?n(t):34===t||39===t?(e.consume(t),l=t,k):Ec(t)?(e.consume(t),x):w(t)}function k(t){return t===l?(e.consume(t),l=null,S):null===t||Ac(t)?n(t):(e.consume(t),k)}function w(t){return null===t||34===t||39===t||47===t||60===t||61===t||62===t||96===t||Tc(t)?v(t):(e.consume(t),w)}function S(e){return 47===e||62===e||Ec(e)?y(e):n(e)}function C(t){return 62===t?(e.consume(t),_):n(t)}function _(t){return null===t||Ac(t)?A(t):Ec(t)?(e.consume(t),_):n(t)}function A(t){return 45===t&&2===i?(e.consume(t),M):60===t&&1===i?(e.consume(t),L):62===t&&4===i?(e.consume(t),B):63===t&&3===i?(e.consume(t),$):93===t&&5===i?(e.consume(t),O):!Ac(t)||6!==i&&7!==i?null===t||Ac(t)?(e.exit("htmlFlowData"),T(t)):(e.consume(t),A):(e.exit("htmlFlowData"),e.check(hu,D,T)(t))}function T(t){return e.check(du,E,D)(t)}function E(t){return e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),F}function F(t){return null===t||Ac(t)?T(t):(e.enter("htmlFlowData"),A(t))}function M(t){return 45===t?(e.consume(t),$):A(t)}function L(t){return 47===t?(e.consume(t),a="",P):A(t)}function P(t){if(62===t){const n=a.toLowerCase();return cu.includes(n)?(e.consume(t),B):A(t)}return vc(t)&&a.length<8?(e.consume(t),a+=String.fromCharCode(t),P):A(t)}function O(t){return 93===t?(e.consume(t),$):A(t)}function $(t){return 62===t?(e.consume(t),B):45===t&&2===i?(e.consume(t),$):A(t)}function B(t){return null===t||Ac(t)?(e.exit("htmlFlowData"),D(t)):(e.consume(t),B)}function D(n){return e.exit("htmlFlow"),t(n)}}},hu={partial:!0,tokenize:function(e,t,n){return function(r){return e.enter("lineEnding"),e.consume(r),e.exit("lineEnding"),e.attempt(Dc,t,n)}}},du={partial:!0,tokenize:function(e,t,n){const r=this;return 
function(t){if(Ac(t))return e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),i;return n(t)};function i(e){return r.parser.lazy[r.now().line]?n(e):t(e)}}};const fu={partial:!0,tokenize:function(e,t,n){const r=this;return function(t){if(null===t)return n(t);return e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),i};function i(e){return r.parser.lazy[r.now().line]?n(e):t(e)}}},pu={concrete:!0,name:"codeFenced",tokenize:function(e,t,n){const r=this,i={partial:!0,tokenize:function(e,t,n){let i=0;return a;function a(t){return e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),l}function l(t){return e.enter("codeFencedFence"),Ec(t)?Pc(e,c,"linePrefix",r.parser.constructs.disable.null.includes("codeIndented")?void 0:4)(t):c(t)}function c(t){return t===o?(e.enter("codeFencedFenceSequence"),u(t)):n(t)}function u(t){return t===o?(i++,e.consume(t),u):i>=s?(e.exit("codeFencedFenceSequence"),Ec(t)?Pc(e,h,"whitespace")(t):h(t)):n(t)}function h(r){return null===r||Ac(r)?(e.exit("codeFencedFence"),t(r)):n(r)}}};let o,a=0,s=0;return function(t){return function(t){const n=r.events[r.events.length-1];return a=n&&"linePrefix"===n[1].type?n[2].sliceSerialize(n[1],!0).length:0,o=t,e.enter("codeFenced"),e.enter("codeFencedFence"),e.enter("codeFencedFenceSequence"),l(t)}(t)};function l(t){return t===o?(s++,e.consume(t),l):s<3?n(t):(e.exit("codeFencedFenceSequence"),Ec(t)?Pc(e,c,"whitespace")(t):c(t))}function c(n){return null===n||Ac(n)?(e.exit("codeFencedFence"),r.interrupt?t(n):e.check(fu,f,b)(n)):(e.enter("codeFencedFenceInfo"),e.enter("chunkString",{contentType:"string"}),u(n))}function u(t){return null===t||Ac(t)?(e.exit("chunkString"),e.exit("codeFencedFenceInfo"),c(t)):Ec(t)?(e.exit("chunkString"),e.exit("codeFencedFenceInfo"),Pc(e,h,"whitespace")(t)):96===t&&t===o?n(t):(e.consume(t),u)}function h(t){return null===t||Ac(t)?c(t):(e.enter("codeFencedFenceMeta"),e.enter("chunkString",{contentType:"string"}),d(t))}function d(t){return 
null===t||Ac(t)?(e.exit("chunkString"),e.exit("codeFencedFenceMeta"),c(t)):96===t&&t===o?n(t):(e.consume(t),d)}function f(t){return e.attempt(i,b,p)(t)}function p(t){return e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),g}function g(t){return a>0&&Ec(t)?Pc(e,m,"linePrefix",a+1)(t):m(t)}function m(t){return null===t||Ac(t)?e.check(fu,f,b)(t):(e.enter("codeFlowValue"),y(t))}function y(t){return null===t||Ac(t)?(e.exit("codeFlowValue"),m(t)):(e.consume(t),y)}function b(n){return e.exit("codeFenced"),t(n)}}};const gu=document.createElement("i");function mu(e){const t="&"+e+";";gu.innerHTML=t;const n=gu.textContent;return(59!==n.charCodeAt(n.length-1)||"semi"===e)&&(n!==t&&n)}const yu={name:"characterReference",tokenize:function(e,t,n){const r=this;let i,o,a=0;return function(t){return e.enter("characterReference"),e.enter("characterReferenceMarker"),e.consume(t),e.exit("characterReferenceMarker"),s};function s(t){return 35===t?(e.enter("characterReferenceMarkerNumeric"),e.consume(t),e.exit("characterReferenceMarkerNumeric"),l):(e.enter("characterReferenceValue"),i=31,o=xc,c(t))}function l(t){return 88===t||120===t?(e.enter("characterReferenceMarkerHexadecimal"),e.consume(t),e.exit("characterReferenceMarkerHexadecimal"),e.enter("characterReferenceValue"),i=6,o=Cc,c):(e.enter("characterReferenceValue"),i=7,o=Sc,c(t))}function c(s){if(59===s&&a){const i=e.exit("characterReferenceValue");return o!==xc||mu(r.sliceSerialize(i))?(e.enter("characterReferenceMarker"),e.consume(s),e.exit("characterReferenceMarker"),e.exit("characterReference"),t):n(s)}return o(s)&&a++1&&e[u][1].end.offset-e[u][1].start.offset>1?2:1;const 
h={...e[n][1].end},d={...e[u][1].start};Eu(h,-s),Eu(d,s),o={type:s>1?"strongSequence":"emphasisSequence",start:h,end:{...e[n][1].end}},a={type:s>1?"strongSequence":"emphasisSequence",start:{...e[u][1].start},end:d},i={type:s>1?"strongText":"emphasisText",start:{...e[n][1].end},end:{...e[u][1].start}},r={type:s>1?"strong":"emphasis",start:{...o.start},end:{...a.end}},e[n][1].end={...o.start},e[u][1].start={...a.end},l=[],e[n][1].end.offset-e[n][1].start.offset&&(l=uc(l,[["enter",e[n][1],t],["exit",e[n][1],t]])),l=uc(l,[["enter",r,t],["enter",o,t],["exit",o,t],["enter",i,t]]),l=uc(l,xu(t.parser.constructs.insideSpan.null,e.slice(n+1,u),t)),l=uc(l,[["exit",i,t],["enter",a,t],["exit",a,t],["exit",r,t]]),e[u][1].end.offset-e[u][1].start.offset?(c=2,l=uc(l,[["enter",e[u][1],t],["exit",e[u][1],t]])):c=0,cc(e,n-1,u-n+3,l),u=n+l.length-c-2;break}u=-1;for(;++u-1){const e=a[0];"string"===typeof e?a[0]=e.slice(r):a.shift()}o>0&&a.push(e[i].slice(0,o))}return a}(a,e)}function p(){const{_bufferIndex:e,_index:t,line:n,column:i,offset:o}=r;return{_bufferIndex:e,_index:t,line:n,column:i,offset:o}}function g(){let e;for(;r._index13&&n<32||n>126&&n<160||n>55295&&n<57344||n>64975&&n<65008||65535===(65535&n)||65534===(65535&n)||n>1114111?"\ufffd":String.fromCodePoint(n)}const Uu=/\\([!-/:-@[-`{-~])|&(#(?:\d{1,7}|x[\da-f]{1,6})|[\da-z]{1,31});/gi;function Vu(e,t,n){if(t)return t;if(35===n.charCodeAt(0)){const e=n.charCodeAt(1),t=120===e||88===e;return Ku(n.slice(t?2:1),t?16:10)}return mu(n)||e}const Yu={}.hasOwnProperty;function Gu(e,n,r){return"string"!==typeof n&&(r=n,n=void 0),function(e){const 
t={transforms:[],canContainEols:["emphasis","fragment","heading","paragraph","strong"],enter:{autolink:o(te),autolinkProtocol:_,autolinkEmail:_,atxHeading:o(Q),blockQuote:o(U),characterEscape:_,characterReference:_,codeFenced:o(V),codeFencedFenceInfo:a,codeFencedFenceMeta:a,codeIndented:o(V,a),codeText:o(Y,a),codeTextData:_,data:_,codeFlowValue:_,definition:o(G),definitionDestinationString:a,definitionLabelString:a,definitionTitleString:a,emphasis:o(X),hardBreakEscape:o(Z),hardBreakTrailing:o(Z),htmlFlow:o(J,a),htmlFlowData:_,htmlText:o(J,a),htmlTextData:_,image:o(ee),label:a,link:o(te),listItem:o(re),listItemValue:d,listOrdered:o(ne,h),listUnordered:o(ne),paragraph:o(ie),reference:N,referenceString:a,resourceDestinationString:a,resourceTitleString:a,setextHeading:o(Q),strong:o(oe),thematicBreak:o(se)},exit:{atxHeading:l(),atxHeadingSequence:k,autolink:l(),autolinkEmail:K,autolinkProtocol:W,blockQuote:l(),characterEscapeValue:A,characterReferenceMarkerHexadecimal:j,characterReferenceMarkerNumeric:j,characterReferenceValue:q,characterReference:H,codeFenced:l(m),codeFencedFence:g,codeFencedFenceInfo:f,codeFencedFenceMeta:p,codeFlowValue:A,codeIndented:l(y),codeText:l(L),codeTextData:A,data:A,definition:l(),definitionDestinationString:x,definitionLabelString:b,definitionTitleString:v,emphasis:l(),hardBreakEscape:l(E),hardBreakTrailing:l(E),htmlFlow:l(F),htmlFlowData:A,htmlText:l(M),htmlTextData:A,image:l(O),label:B,labelText:$,lineEnding:T,link:l(P),listItem:l(),listOrdered:l(),listUnordered:l(),paragraph:l(),referenceString:R,resourceDestinationString:D,resourceTitleString:z,resource:I,setextHeading:l(C),setextHeadingLineSequence:S,setextHeadingText:w,strong:l(),thematicBreak:l()}};Qu(t,(e||{}).mdastExtensions||[]);const n={};return r;function r(e){let r={type:"root",children:[]};const o={stack:[r],tokenStack:[],config:t,enter:s,exit:c,buffer:a,resume:u,data:n},l=[];let h=-1;for(;++h0){const e=o.tokenStack[o.tokenStack.length-1];(e[1]||Ju).call(o,void 
0,e[0])}for(r.position={start:Xu(e.length>0?e[0][1].start:{line:1,column:1,offset:0}),end:Xu(e.length>0?e[e.length-2][1].end:{line:1,column:1,offset:0})},h=-1;++h((e,t)=>{const n=(t,n)=>(e.set(n,t),t),r=i=>{if(e.has(i))return e.get(i);const[o,a]=t[i];switch(o){case 0:case-1:return n(a,i);case 1:{const e=n([],i);for(const t of a)e.push(r(t));return e}case 2:{const e=n({},i);for(const[t,n]of a)e[r(t)]=r(n);return e}case 3:return n(new Date(a),i);case 4:{const{source:e,flags:t}=a;return n(new RegExp(e,t),i)}case 5:{const e=n(new Map,i);for(const[t,n]of a)e.set(r(t),r(n));return e}case 6:{const e=n(new Set,i);for(const t of a)e.add(r(t));return e}case 7:{const{name:e,message:t}=a;return n(new th[e](t),i)}case 8:return n(BigInt(a),i);case"BigInt":return n(Object(BigInt(a)),i);case"ArrayBuffer":return n(new Uint8Array(a).buffer,a);case"DataView":{const{buffer:e}=new Uint8Array(a);return n(new DataView(e),a)}}return n(new th[o](a),i)};return r})(new Map,e)(0),rh="",{toString:ih}={},{keys:oh}=Object,ah=e=>{const t=typeof e;if("object"!==t||!e)return[0,t];const n=ih.call(e).slice(8,-1);switch(n){case"Array":return[1,rh];case"Object":return[2,rh];case"Date":return[3,rh];case"RegExp":return[4,rh];case"Map":return[5,rh];case"Set":return[6,rh];case"DataView":return[1,n]}return n.includes("Array")?[1,n]:n.includes("Error")?[7,n]:[2,n]},sh=e=>{let[t,n]=e;return 0===t&&("function"===n||"symbol"===n)},lh=function(e){let{json:t,lossy:n}=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};const r=[];return((e,t,n,r)=>{const i=(e,t)=>{const i=r.push(e)-1;return n.set(t,i),i},o=r=>{if(n.has(r))return n.get(r);let[a,s]=ah(r);switch(a){case 0:{let t=r;switch(s){case"bigint":a=8,t=r.toString();break;case"function":case"symbol":if(e)throw new TypeError("unable to serialize "+s);t=null;break;case"undefined":return i([-1],r)}return i([a,t],r)}case 1:{if(s){let e=r;return"DataView"===s?e=new Uint8Array(r.buffer):"ArrayBuffer"===s&&(e=new Uint8Array(r)),i([s,[...e]],r)}const 
e=[],t=i([a,e],r);for(const n of r)e.push(o(n));return t}case 2:{if(s)switch(s){case"BigInt":return i([s,r.toString()],r);case"Boolean":case"Number":case"String":return i([s,r.valueOf()],r)}if(t&&"toJSON"in r)return o(r.toJSON());const n=[],l=i([a,n],r);for(const t of oh(r))!e&&sh(ah(r[t]))||n.push([o(t),o(r[t])]);return l}case 3:return i([a,r.toISOString()],r);case 4:{const{source:e,flags:t}=r;return i([a,{source:e,flags:t}],r)}case 5:{const t=[],n=i([a,t],r);for(const[i,a]of r)(e||!sh(ah(i))&&!sh(ah(a)))&&t.push([o(i),o(a)]);return n}case 6:{const t=[],n=i([a,t],r);for(const i of r)!e&&sh(ah(i))||t.push(o(i));return n}}const{message:l}=r;return i([a,{name:s,message:l}],r)};return o})(!(t||n),!!t,new Map,r)(e),r},ch="function"===typeof structuredClone?(e,t)=>t&&("json"in t||"lossy"in t)?nh(lh(e,t)):structuredClone(e):(e,t)=>nh(lh(e,t));function uh(e){const t=[];let n=-1,r=0,i=0;for(;++n55295&&o<57344){const t=e.charCodeAt(n+1);o<56320&&t>56319&&t<57344?(a=String.fromCharCode(o,t),i=1):a="\ufffd"}else a=String.fromCharCode(o);a&&(t.push(e.slice(r,n),encodeURIComponent(a)),r=n+i+1,a=""),i&&(n+=i,i=0)}return t.join("")+e.slice(r)}function hh(e,t){const n=[{type:"text",value:"\u21a9"}];return t>1&&n.push({type:"element",tagName:"sup",properties:{},children:[{type:"text",value:String(t)}]}),n}function dh(e,t){return"Back to reference "+(e+1)+(t>1?"-"+t:"")}const fh=function(e){if(null===e||void 0===e)return gh;if("function"===typeof e)return ph(e);if("object"===typeof e)return Array.isArray(e)?function(e){const t=[];let n=-1;for(;++n":"")+")"})}return u;function u(){let c,u,h,d=yh;if((!t||o(i,s,l[l.length-1]||void 0))&&(d=function(e){if(Array.isArray(e))return e;if("number"===typeof e)return[bh,e];return null===e||void 0===e?yh:[e]}(n(i,l)),d[0]===vh))return d;if("children"in i&&i.children){const t=i;if(t.children&&"skip"!==d[0])for(u=(r?t.children.length:-1)+a,h=l.concat(t);u>-1&&u1:t}function Ch(e){const t=String(e),n=/\r?\n|\r/g;let r=n.exec(t),i=0;const 
o=[];for(;r;)o.push(_h(t.slice(i,r.index),i>0,!0),r[0]),i=r.index+r[0].length,r=n.exec(t);return o.push(_h(t.slice(i),i>0,!1)),o.join("")}function _h(e,t,n){let r=0,i=e.length;if(t){let t=e.codePointAt(r);for(;9===t||32===t;)r++,t=e.codePointAt(r)}if(n){let t=e.codePointAt(i-1);for(;9===t||32===t;)i--,t=e.codePointAt(i-1)}return i>r?e.slice(r,i):""}const Ah={blockquote:function(e,t){const n={type:"element",tagName:"blockquote",properties:{},children:e.wrap(e.all(t),!0)};return e.patch(t,n),e.applyData(t,n)},break:function(e,t){const n={type:"element",tagName:"br",properties:{},children:[]};return e.patch(t,n),[e.applyData(t,n),{type:"text",value:"\n"}]},code:function(e,t){const n=t.value?t.value+"\n":"",r={};t.lang&&(r.className=["language-"+t.lang]);let i={type:"element",tagName:"code",properties:r,children:[{type:"text",value:n}]};return t.meta&&(i.data={meta:t.meta}),e.patch(t,i),i=e.applyData(t,i),i={type:"element",tagName:"pre",properties:{},children:[i]},e.patch(t,i),i},delete:function(e,t){const n={type:"element",tagName:"del",properties:{},children:e.all(t)};return e.patch(t,n),e.applyData(t,n)},emphasis:function(e,t){const n={type:"element",tagName:"em",properties:{},children:e.all(t)};return e.patch(t,n),e.applyData(t,n)},footnoteReference:function(e,t){const n="string"===typeof e.options.clobberPrefix?e.options.clobberPrefix:"user-content-",r=String(t.identifier).toUpperCase(),i=uh(r.toLowerCase()),o=e.footnoteOrder.indexOf(r);let a,s=e.footnoteCounts.get(r);void 0===s?(s=0,e.footnoteOrder.push(r),a=e.footnoteOrder.length):a=o+1,s+=1,e.footnoteCounts.set(r,s);const l={type:"element",tagName:"a",properties:{href:"#"+n+"fn-"+i,id:n+"fnref-"+i+(s>1?"-"+s:""),dataFootnoteRef:!0,ariaDescribedBy:["footnote-label"]},children:[{type:"text",value:String(a)}]};e.patch(t,l);const c={type:"element",tagName:"sup",properties:{},children:[l]};return e.patch(t,c),e.applyData(t,c)},heading:function(e,t){const 
n={type:"element",tagName:"h"+t.depth,properties:{},children:e.all(t)};return e.patch(t,n),e.applyData(t,n)},html:function(e,t){if(e.options.allowDangerousHtml){const n={type:"raw",value:t.value};return e.patch(t,n),e.applyData(t,n)}},imageReference:function(e,t){const n=String(t.identifier).toUpperCase(),r=e.definitionById.get(n);if(!r)return wh(e,t);const i={src:uh(r.url||""),alt:t.alt};null!==r.title&&void 0!==r.title&&(i.title=r.title);const o={type:"element",tagName:"img",properties:i,children:[]};return e.patch(t,o),e.applyData(t,o)},image:function(e,t){const n={src:uh(t.url)};null!==t.alt&&void 0!==t.alt&&(n.alt=t.alt),null!==t.title&&void 0!==t.title&&(n.title=t.title);const r={type:"element",tagName:"img",properties:n,children:[]};return e.patch(t,r),e.applyData(t,r)},inlineCode:function(e,t){const n={type:"text",value:t.value.replace(/\r?\n|\r/g," ")};e.patch(t,n);const r={type:"element",tagName:"code",properties:{},children:[n]};return e.patch(t,r),e.applyData(t,r)},linkReference:function(e,t){const n=String(t.identifier).toUpperCase(),r=e.definitionById.get(n);if(!r)return wh(e,t);const i={href:uh(r.url||"")};null!==r.title&&void 0!==r.title&&(i.title=r.title);const o={type:"element",tagName:"a",properties:i,children:e.all(t)};return e.patch(t,o),e.applyData(t,o)},link:function(e,t){const n={href:uh(t.url)};null!==t.title&&void 0!==t.title&&(n.title=t.title);const r={type:"element",tagName:"a",properties:n,children:e.all(t)};return e.patch(t,r),e.applyData(t,r)},listItem:function(e,t,n){const r=e.all(t),i=n?function(e){let t=!1;if("list"===e.type){t=e.spread||!1;const n=e.children;let r=-1;for(;!t&&++r0&&n.children.unshift({type:"text",value:" "}),n.children.unshift({type:"element",tagName:"input",properties:{type:"checkbox",checked:t.checked,disabled:!0},children:[]}),o.className=["task-list-item"]}let s=-1;for(;++s0){const 
r={type:"element",tagName:"tbody",properties:{},children:e.wrap(n,!0)},o=Bl(t.children[1]),a=$l(t.children[t.children.length-1]);o&&a&&(r.position={start:o,end:a}),i.push(r)}const o={type:"element",tagName:"table",properties:{},children:e.wrap(i,!0)};return e.patch(t,o),e.applyData(t,o)},tableCell:function(e,t){const n={type:"element",tagName:"td",properties:{},children:e.all(t)};return e.patch(t,n),e.applyData(t,n)},tableRow:function(e,t,n){const r=n?n.children:void 0,i=0===(r?r.indexOf(t):1)?"th":"td",o=n&&"table"===n.type?n.align:void 0,a=o?o.length:t.children.length;let s=-1;const l=[];for(;++s0&&n.push({type:"text",value:"\n"}),n}function $h(e){let t=0,n=e.charCodeAt(t);for(;9===n||32===n;)t++,n=e.charCodeAt(t);return e.slice(t)}function Bh(e,t){const n=function(e,t){const n=t||Fh,r=new Map,i=new Map,o=new Map,a={...Ah,...n.handlers},s={all:function(e){const t=[];if("children"in e){const n=e.children;let r=-1;for(;++r0&&h.push({type:"text",value:" "});let e="string"===typeof n?n:n(l,u);"string"===typeof e&&(e={type:"text",value:e}),h.push({type:"element",tagName:"a",properties:{href:"#"+t+"fnref-"+c+(u>1?"-"+u:""),dataFootnoteBackref:"",ariaLabel:"string"===typeof r?r:r(l,u),className:["data-footnote-backref"]},children:Array.isArray(e)?e:[e]})}const f=o[o.length-1];if(f&&"element"===f.type&&"p"===f.tagName){const e=f.children[f.children.length-1];e&&"text"===e.type?e.value+=" ":f.children.push({type:"text",value:" "}),f.children.push(...h)}else o.push(...h);const 
p={type:"element",tagName:"li",properties:{id:t+"fn-"+c},children:e.wrap(o,!0)};e.patch(i,p),s.push(p)}if(0!==s.length)return{type:"element",tagName:"section",properties:{dataFootnotes:!0,className:["footnotes"]},children:[{type:"element",tagName:o,properties:{...ch(a),id:"footnote-label"},children:[{type:"text",value:i}]},{type:"text",value:"\n"},{type:"element",tagName:"ol",properties:{},children:e.wrap(s,!0)},{type:"text",value:"\n"}]}}(n),o=Array.isArray(r)?{type:"root",children:r}:r||{type:"root",children:[]};return i&&o.children.push({type:"text",value:"\n"},i),o}function Dh(e,t){return e&&"run"in e?async function(n,r){const i=Bh(n,{file:r,...t});await e.run(i,r)}:function(n,r){return Bh(n,{file:r,...e||t})}}function zh(e){if(e)throw e}var Ih=n(3240);function Nh(e){if("object"!==typeof e||null===e)return!1;const t=Object.getPrototypeOf(e);return(null===t||t===Object.prototype||null===Object.getPrototypeOf(t))&&!(Symbol.toStringTag in e)&&!(Symbol.iterator in e)}function Rh(){const e=[],t={run:function(){for(var t=arguments.length,n=new Array(t),r=0;r1?l-1:0),u=1;ui.length;let c;l&&i.push(o);try{c=e.apply(this,i)}catch(r){if(l&&n)throw r;return o(r)}l||(c&&c.then&&"function"===typeof c.then?c.then(a,o):c instanceof Error?o(c):a(c))}function o(e){if(!n){n=!0;for(var r=arguments.length,i=new Array(r>1?r-1:0),o=1;oe.length){for(;o--;)if(47===e.codePointAt(o)){if(n){r=o+1;break}}else i<0&&(n=!0,i=o+1);return i<0?"":e.slice(r,i)}if(t===e)return"";let a=-1,s=t.length-1;for(;o--;)if(47===e.codePointAt(o)){if(n){r=o+1;break}}else a<0&&(n=!0,a=o+1),s>-1&&(e.codePointAt(o)===t.codePointAt(s--)?s<0&&(i=o):(s=-1,i=a));r===i?i=a:i<0&&(i=e.length);return e.slice(r,i)},dirname:function(e){if(qh(e),0===e.length)return".";let t,n=-1,r=e.length;for(;--r;)if(47===e.codePointAt(r)){if(t){n=r;break}}else t||(t=!0);return n<0?47===e.codePointAt(0)?"/":".":1===n&&47===e.codePointAt(0)?"//":e.slice(0,n)},extname:function(e){qh(e);let t,n=e.length,r=-1,i=0,o=-1,a=0;for(;n--;){const 
s=e.codePointAt(n);if(47!==s)r<0&&(t=!0,r=n+1),46===s?o<0?o=n:1!==a&&(a=1):o>-1&&(a=-1);else if(t){i=n+1;break}}if(o<0||r<0||0===a||1===a&&o===r-1&&o===i+1)return"";return e.slice(o,r)},join:function(){let e,t=-1;for(var n=arguments.length,r=new Array(n),i=0;i2){if(r=i.lastIndexOf("/"),r!==i.length-1){r<0?(i="",o=0):(i=i.slice(0,r),o=i.length-1-i.lastIndexOf("/")),a=l,s=0;continue}}else if(i.length>0){i="",o=0,a=l,s=0;continue}t&&(i=i.length>0?i+"/..":"..",o=2)}else i.length>0?i+="/"+e.slice(a+1,l):i=e.slice(a+1,l),o=l-a-1;a=l,s=0}else 46===n&&s>-1?s++:s=-1}return i}(e,!t);0!==n.length||t||(n=".");n.length>0&&47===e.codePointAt(e.length-1)&&(n+="/");return t?"/"+n:n}(e)},sep:"/"};function qh(e){if("string"!==typeof e)throw new TypeError("Path must be a string. Received "+JSON.stringify(e))}const Hh={cwd:function(){return"/"}};function Wh(e){return Boolean(null!==e&&"object"===typeof e&&"href"in e&&e.href&&"protocol"in e&&e.protocol&&void 0===e.auth)}function Kh(e){if("string"===typeof e)e=new URL(e);else if(!Wh(e)){const t=new TypeError('The "path" argument must be of type string or an instance of URL. 
Received `'+e+"`");throw t.code="ERR_INVALID_ARG_TYPE",t}if("file:"!==e.protocol){const e=new TypeError("The URL must be of scheme file");throw e.code="ERR_INVALID_URL_SCHEME",e}return function(e){if(""!==e.hostname){const e=new TypeError('File URL host must be "localhost" or empty on darwin');throw e.code="ERR_INVALID_FILE_URL_HOST",e}const t=e.pathname;let n=-1;for(;++n1?r-1:0),o=1;o0){let[r,...o]=n;const a=t[i][1];Nh(a)&&Nh(r)&&(r=Ih(!0,a,r)),t[i]=[e,r,...o]}}}}const ed=(new Jh).freeze();function td(e,t){if("function"!==typeof t)throw new TypeError("Cannot `"+e+"` without `parser`")}function nd(e,t){if("function"!==typeof t)throw new TypeError("Cannot `"+e+"` without `compiler`")}function rd(e,t){if(t)throw new Error("Cannot call `"+e+"` on a frozen processor.\nCreate a new processor first, by calling it: use `processor()` instead of `processor`.")}function id(e){if(!Nh(e)||"string"!==typeof e.type)throw new TypeError("Expected node, got `"+e+"`")}function od(e,t,n){if(!n)throw new Error("`"+e+"` finished async. 
Use `"+t+"` instead")}function ad(e){return function(e){return Boolean(e&&"object"===typeof e&&"message"in e&&"messages"in e)}(e)?e:new Vh(e)}const sd=[],ld={allowDangerousHtml:!0},cd=/^(https?|ircs?|mailto|xmpp)$/i,ud=[{from:"astPlugins",id:"remove-buggy-html-in-markdown-parser"},{from:"allowDangerousHtml",id:"remove-buggy-html-in-markdown-parser"},{from:"allowNode",id:"replace-allownode-allowedtypes-and-disallowedtypes",to:"allowElement"},{from:"allowedTypes",id:"replace-allownode-allowedtypes-and-disallowedtypes",to:"allowedElements"},{from:"disallowedTypes",id:"replace-allownode-allowedtypes-and-disallowedtypes",to:"disallowedElements"},{from:"escapeHtml",id:"remove-buggy-html-in-markdown-parser"},{from:"includeElementIndex",id:"#remove-includeelementindex"},{from:"includeNodeIndex",id:"change-includenodeindex-to-includeelementindex"},{from:"linkTarget",id:"remove-linktarget"},{from:"plugins",id:"change-plugins-to-remarkplugins",to:"remarkPlugins"},{from:"rawSourcePos",id:"#remove-rawsourcepos"},{from:"renderers",id:"change-renderers-to-components",to:"components"},{from:"source",id:"change-source-to-children",to:"children"},{from:"sourcePos",id:"#remove-sourcepos"},{from:"transformImageUri",id:"#add-urltransform",to:"urlTransform"},{from:"transformLinkUri",id:"#add-urltransform",to:"urlTransform"}];function hd(e){const t=dd(e),n=fd(e);return pd(t.runSync(t.parse(n),n),e)}function dd(e){const t=e.rehypePlugins||sd,n=e.remarkPlugins||sd,r=e.remarkRehypeOptions?{...e.remarkRehypeOptions,...ld}:ld;return ed().use(eh).use(n).use(Dh,r).use(t)}function fd(e){const t=e.children||"",n=new Vh;return"string"===typeof t&&(n.value=t),n}function pd(e,t){const n=t.allowedElements,r=t.allowElement,i=t.components,o=t.disallowedElements,a=t.skipHtml,s=t.unwrapDisallowed,l=t.urlTransform||gd;for(const c of ud)Object.hasOwn(t,c.from)&&(c.from,c.to&&c.to,c.id);return 
t.className&&(e={type:"element",tagName:"div",properties:{className:t.className},children:"root"===e.type?e.children:[e]}),kh(e,(function(e,t,i){if("raw"===e.type&&i&&"number"===typeof t)return a?i.children.splice(t,1):i.children[t]={type:"text",value:e.value},t;if("element"===e.type){let t;for(t in ic)if(Object.hasOwn(ic,t)&&Object.hasOwn(e.properties,t)){const n=e.properties[t],r=ic[t];(null===r||r.includes(e.tagName))&&(e.properties[t]=l(String(n||""),t,e))}}if("element"===e.type){let a=n?!n.includes(e.tagName):!!o&&o.includes(e.tagName);if(!a&&r&&"number"===typeof t&&(a=!r(e,t,i)),a&&i&&"number"===typeof t)return s&&e.children?i.children.splice(t,1,...e.children):i.children.splice(t,1),t}})),Yl(e,{Fragment:js.Fragment,components:i,ignoreInvalidStyle:!0,jsx:js.jsx,jsxs:js.jsxs,passKeys:!0,passNode:!0})}function gd(e){const t=e.indexOf(":"),n=e.indexOf("?"),r=e.indexOf("#"),i=e.indexOf("/");return-1===t||-1!==i&&t>i||-1!==n&&t>n||-1!==r&&t>r||cd.test(e.slice(0,t))?e:""}function md(e,t){const n=String(e);if("string"!==typeof t)throw new TypeError("Expected character");let r=0,i=n.indexOf(t);for(;-1!==i;)r++,i=n.indexOf(t,i+t.length);return r}function yd(e,t,n){const r=fh((n||{}).ignore||[]),i=function(e){const t=[];if(!Array.isArray(e))throw new TypeError("Expected find and replace tuple or list of tuples");const n=!e[0]||Array.isArray(e[0])?e:[e];let r=-1;for(;++r0?{type:"text",value:o}:void 0),!1===o?r.lastIndex=n+1:(s!==n&&u.push({type:"text",value:e.value.slice(s,n)}),Array.isArray(o)?u.push(...o):o&&u.push(o),s=n+h[0].length,c=!0),!r.global)break;h=r.exec(e.value)}c?(s?\]}]+$/.exec(e);if(!t)return[e,void 0];e=e.slice(0,t.index);let n=t[0],r=n.indexOf(")");const i=md(e,"(");let o=md(e,")");for(;-1!==r&&i>o;)e+=n.slice(0,r+1),n=n.slice(r+1),r=n.indexOf(")"),o++;return[e,n]}(n+r);if(!a[0])return!1;const s={type:"link",title:null,url:o+t+a[0],children:[{type:"text",value:t+a[0]}]};return a[1]?[s,{type:"text",value:a[1]}]:s}function 
Md(e,t,n,r){return!(!Ld(r,!0)||/[-\d_]$/.test(n))&&{type:"link",title:null,url:"mailto:"+t+"@"+n,children:[{type:"text",value:t+"@"+n}]}}function Ld(e,t){const n=e.input.charCodeAt(e.index-1);return(0===e.index||Mc(n)||Fc(n))&&(!t||47!==n)}function Pd(){this.buffer()}function Od(e){this.enter({type:"footnoteReference",identifier:"",label:""},e)}function $d(){this.buffer()}function Bd(e){this.enter({type:"footnoteDefinition",identifier:"",label:"",children:[]},e)}function Dd(e){const t=this.resume(),n=this.stack[this.stack.length-1];n.type,n.identifier=tu(this.sliceSerialize(e)).toLowerCase(),n.label=t}function zd(e){this.exit(e)}function Id(e){const t=this.resume(),n=this.stack[this.stack.length-1];n.type,n.identifier=tu(this.sliceSerialize(e)).toLowerCase(),n.label=t}function Nd(e){this.exit(e)}function Rd(e,t,n,r){const i=n.createTracker(r);let o=i.move("[^");const a=n.enter("footnoteReference"),s=n.enter("reference");return o+=i.move(n.safe(n.associationId(e),{after:"]",before:o})),s(),a(),o+=i.move("]"),o}function jd(e){let t=!1;return e&&e.firstLineBlank&&(t=!0),{handlers:{footnoteDefinition:function(e,n,r,i){const o=r.createTracker(i);let a=o.move("[^");const s=r.enter("footnoteDefinition"),l=r.enter("label");a+=o.move(r.safe(r.associationId(e),{before:a,after:"]"})),l(),a+=o.move("]:"),e.children&&e.children.length>0&&(o.shift(4),a+=o.move((t?"\n":" ")+r.indentLines(r.containerFlow(e,o.current()),t?Hd:qd)));return s(),a},footnoteReference:Rd},unsafe:[{character:"[",inConstruct:["label","phrasing","reference"]}]}}function qd(e,t,n){return 0===t?e:Hd(e,t,n)}function Hd(e,t,n){return(n?"":" ")+e}Rd.peek=function(){return"["};const Wd=["autolink","destinationLiteral","destinationRaw","reference","titleQuote","titleApostrophe"];function Kd(e){this.enter({type:"delete",children:[]},e)}function Ud(e){this.exit(e)}function Vd(e,t,n,r){const i=n.createTracker(r),o=n.enter("strikethrough");let a=i.move("~~");return 
a+=n.containerPhrasing(e,{...i.current(),before:a,after:"~"}),a+=i.move("~~"),o(),a}function Yd(e){return e.length}function Gd(e){const t="string"===typeof e?e.codePointAt(0):0;return 67===t||99===t?99:76===t||108===t?108:82===t||114===t?114:0}function Xd(e,t,n){return">"+(n?"":" ")+e}function Qd(e,t,n){if("string"===typeof t&&(t=[t]),!t||0===t.length)return n;let r=-1;for(;++r",...l.current()})),c+=l.move(">")):(s=n.enter("destinationRaw"),c+=l.move(n.safe(e.url,{before:c,after:e.title?" ":")",...l.current()}))),s(),e.title&&(s=n.enter(`title${o}`),c+=l.move(" "+i),c+=l.move(n.safe(e.title,{before:c,after:i,...l.current()})),c+=l.move(i),s()),c+=l.move(")"),a(),c}function sf(e,t,n,r){const i=e.referenceType,o=n.enter("imageReference");let a=n.enter("label");const s=n.createTracker(r);let l=s.move("![");const c=n.safe(e.alt,{before:l,after:"]",...s.current()});l+=s.move(c+"]["),a();const u=n.stack;n.stack=[],a=n.enter("reference");const h=n.safe(n.associationId(e),{before:l,after:"]",...s.current()});return a(),n.stack=u,o(),"full"!==i&&c&&c===h?"shortcut"===i?l=l.slice(0,-1):l+=s.move("]"):l+=s.move(h+"]"),l}function lf(e,t,n){let r=e.value||"",i="`",o=-1;for(;new RegExp("(^|[^`])"+i+"([^`]|$)").test(r);)i+="`";for(/[^ \r\n]/.test(r)&&(/^[ \r\n]/.test(r)&&/[ \r\n]$/.test(r)||/^`|`$/.test(r))&&(r=" "+r+" ");++o\u007F]/.test(e.url))}function uf(e,t,n,r){const i=ef(n),o='"'===i?"Quote":"Apostrophe",a=n.createTracker(r);let s,l;if(cf(e,n)){const t=n.stack;n.stack=[],s=n.enter("autolink");let r=a.move("<");return r+=a.move(n.containerPhrasing(e,{before:r,after:">",...a.current()})),r+=a.move(">"),s(),n.stack=t,r}s=n.enter("link"),l=n.enter("label");let c=a.move("[");return c+=a.move(n.containerPhrasing(e,{before:c,after:"](",...a.current()})),c+=a.move("]("),l(),!e.url&&e.title||/[\0- 
\u007F]/.test(e.url)?(l=n.enter("destinationLiteral"),c+=a.move("<"),c+=a.move(n.safe(e.url,{before:c,after:">",...a.current()})),c+=a.move(">")):(l=n.enter("destinationRaw"),c+=a.move(n.safe(e.url,{before:c,after:e.title?" ":")",...a.current()}))),l(),e.title&&(l=n.enter(`title${o}`),c+=a.move(" "+i),c+=a.move(n.safe(e.title,{before:c,after:i,...a.current()})),c+=a.move(i),l()),c+=a.move(")"),s(),c}function hf(e,t,n,r){const i=e.referenceType,o=n.enter("linkReference");let a=n.enter("label");const s=n.createTracker(r);let l=s.move("[");const c=n.containerPhrasing(e,{before:l,after:"]",...s.current()});l+=s.move(c+"]["),a();const u=n.stack;n.stack=[],a=n.enter("reference");const h=n.safe(n.associationId(e),{before:l,after:"]",...s.current()});return a(),n.stack=u,o(),"full"!==i&&c&&c===h?"shortcut"===i?l=l.slice(0,-1):l+=s.move("]"):l+=s.move(h+"]"),l}function df(e){const t=e.options.bullet||"*";if("*"!==t&&"+"!==t&&"-"!==t)throw new Error("Cannot serialize items with `"+t+"` for `options.bullet`, expected `*`, `+`, or `-`");return t}function ff(e){const t=e.options.rule||"*";if("*"!==t&&"-"!==t&&"_"!==t)throw new Error("Cannot serialize rules with `"+t+"` for `options.rule`, expected `*`, `-`, or `_`");return t}Vd.peek=function(){return"~"},rf.peek=function(e,t,n){return n.options.emphasis||"*"},of.peek=function(){return"<"},af.peek=function(){return"!"},sf.peek=function(){return"!"},lf.peek=function(){return"`"},uf.peek=function(e,t,n){return cf(e,n)?"<":"["},hf.peek=function(){return"["};const pf=fh(["break","delete","emphasis","footnote","footnoteReference","image","imageReference","inlineCode","inlineMath","link","linkReference","mdxJsxTextElement","mdxTextExpression","strong","text","textDirective"]);function gf(e,t,n,r){const i=function(e){const t=e.options.strong||"*";if("*"!==t&&"_"!==t)throw new Error("Cannot serialize strong with `"+t+"` for `options.strong`, expected `*`, or `_`");return t}(n),o=n.enter("strong"),a=n.createTracker(r),s=a.move(i+i);let 
l=a.move(n.containerPhrasing(e,{after:i,before:s,...a.current()}));const c=l.charCodeAt(0),u=nf(r.before.charCodeAt(r.before.length-1),c,i);u.inside&&(l=tf(c)+l.slice(1));const h=l.charCodeAt(l.length-1),d=nf(r.after.charCodeAt(0),h,i);d.inside&&(l=l.slice(0,-1)+tf(h));const f=a.move(i+i);return o(),n.attentionEncodeSurroundingInfo={after:d.outside,before:u.outside},s+l+f}gf.peek=function(e,t,n){return n.options.strong||"*"};const mf={blockquote:function(e,t,n,r){const i=n.enter("blockquote"),o=n.createTracker(r);o.move("> "),o.shift(2);const a=n.indentLines(n.containerFlow(e,o.current()),Xd);return i(),a},break:Zd,code:function(e,t,n,r){const i=function(e){const t=e.options.fence||"`";if("`"!==t&&"~"!==t)throw new Error("Cannot serialize code with `"+t+"` for `options.fence`, expected `` ` `` or `~`");return t}(n),o=e.value||"",a="`"===i?"GraveAccent":"Tilde";if(function(e,t){return Boolean(!1===t.options.fences&&e.value&&!e.lang&&/[^ \r\n]/.test(e.value)&&!/^[\t ]*(?:[\r\n]|$)|(?:^|[\r\n])[\t ]*$/.test(e.value))}(e,n)){const e=n.enter("codeIndented"),t=n.indentLines(o,Jd);return e(),t}const s=n.createTracker(r),l=i.repeat(Math.max(function(e,t){const n=String(e);let r=n.indexOf(t),i=r,o=0,a=0;if("string"!==typeof t)throw new TypeError("Expected substring");for(;-1!==r;)r===i?++o>a&&(a=o):o=1,i=r+t.length,r=n.indexOf(t,i);return a}(o,i)+1,3)),c=n.enter("codeFenced");let u=s.move(l);if(e.lang){const t=n.enter(`codeFencedLang${a}`);u+=s.move(n.safe(e.lang,{before:u,after:" ",encode:["`"],...s.current()})),t()}if(e.lang&&e.meta){const t=n.enter(`codeFencedMeta${a}`);u+=s.move(" "),u+=s.move(n.safe(e.meta,{before:u,after:"\n",encode:["`"],...s.current()})),t()}return u+=s.move("\n"),o&&(u+=s.move(o+"\n")),u+=s.move(l),c(),u},definition:function(e,t,n,r){const i=ef(n),o='"'===i?"Quote":"Apostrophe",a=n.enter("definition");let s=n.enter("label");const l=n.createTracker(r);let c=l.move("[");return 
c+=l.move(n.safe(n.associationId(e),{before:c,after:"]",...l.current()})),c+=l.move("]: "),s(),!e.url||/[\0- \u007F]/.test(e.url)?(s=n.enter("destinationLiteral"),c+=l.move("<"),c+=l.move(n.safe(e.url,{before:c,after:">",...l.current()})),c+=l.move(">")):(s=n.enter("destinationRaw"),c+=l.move(n.safe(e.url,{before:c,after:e.title?" ":"\n",...l.current()}))),s(),e.title&&(s=n.enter(`title${o}`),c+=l.move(" "+i),c+=l.move(n.safe(e.title,{before:c,after:i,...l.current()})),c+=l.move(i),s()),a(),c},emphasis:rf,hardBreak:Zd,heading:function(e,t,n,r){const i=Math.max(Math.min(6,e.depth||1),1),o=n.createTracker(r);if(function(e,t){let n=!1;return kh(e,(function(e){if("value"in e&&/\r?\n|\r/.test(e.value)||"break"===e.type)return n=!0,vh})),Boolean((!e.depth||e.depth<3)&&ac(e)&&(t.options.setext||n))}(e,n)){const t=n.enter("headingSetext"),r=n.enter("phrasing"),a=n.containerPhrasing(e,{...o.current(),before:"\n",after:"\n"});return r(),t(),a+"\n"+(1===i?"=":"-").repeat(a.length-(Math.max(a.lastIndexOf("\r"),a.lastIndexOf("\n"))+1))}const a="#".repeat(i),s=n.enter("headingAtx"),l=n.enter("phrasing");o.move(a+" ");let c=n.containerPhrasing(e,{before:"# ",after:"\n",...o.current()});return/^[\t ]/.test(c)&&(c=tf(c.charCodeAt(0))+c.slice(1)),c=c?a+" "+c:a,n.options.closeAtx&&(c+=" "+a),l(),s(),c},html:of,image:af,imageReference:sf,inlineCode:lf,link:uf,linkReference:hf,list:function(e,t,n,r){const i=n.enter("list"),o=n.bulletCurrent;let a=e.ordered?function(e){const t=e.options.bulletOrdered||".";if("."!==t&&")"!==t)throw new Error("Cannot serialize items with `"+t+"` for `options.bulletOrdered`, expected `.` or `)`");return t}(n):df(n);const s=e.ordered?"."===a?")":".":function(e){const t=df(e),n=e.options.bulletOther;if(!n)return"*"===t?"-":"*";if("*"!==n&&"+"!==n&&"-"!==n)throw new Error("Cannot serialize items with `"+n+"` for `options.bulletOther`, expected `*`, `+`, or `-`");if(n===t)throw new Error("Expected `bullet` (`"+t+"`) and `bulletOther` (`"+n+"`) to be 
different");return n}(n);let l=!(!t||!n.bulletLastUsed)&&a===n.bulletLastUsed;if(!e.ordered){const t=e.children?e.children[0]:void 0;if("*"!==a&&"-"!==a||!t||t.children&&t.children[0]||"list"!==n.stack[n.stack.length-1]||"listItem"!==n.stack[n.stack.length-2]||"list"!==n.stack[n.stack.length-3]||"listItem"!==n.stack[n.stack.length-4]||0!==n.indexStack[n.indexStack.length-1]||0!==n.indexStack[n.indexStack.length-2]||0!==n.indexStack[n.indexStack.length-3]||(l=!0),ff(n)===a&&t){let t=-1;for(;++t-1?t.start:1)+(!1===n.options.incrementListMarker?0:t.children.indexOf(e))+o);let a=o.length+1;("tab"===i||"mixed"===i&&(t&&"list"===t.type&&t.spread||e.spread))&&(a=4*Math.ceil(a/4));const s=n.createTracker(r);s.move(o+" ".repeat(a-o.length)),s.shift(a);const l=n.enter("listItem"),c=n.indentLines(n.containerFlow(e,s.current()),(function(e,t,n){if(t)return(n?"":" ".repeat(a))+e;return(n?o:o+" ".repeat(a-o.length))+e}));return l(),c},paragraph:function(e,t,n,r){const i=n.enter("paragraph"),o=n.enter("phrasing"),a=n.containerPhrasing(e,r);return o(),i(),a},root:function(e,t,n,r){return(e.children.some((function(e){return pf(e)}))?n.containerPhrasing:n.containerFlow).call(n,e,r)},strong:gf,text:function(e,t,n,r){return n.safe(e.value,r)},thematicBreak:function(e,t,n){const r=(ff(n)+(n.options.ruleSpaces?" 
":"")).repeat(function(e){const t=e.options.ruleRepetition||3;if(t<3)throw new Error("Cannot serialize rules with repetition `"+t+"` for `options.ruleRepetition`, expected `3` or more");return t}(n));return n.options.ruleSpaces?r.slice(0,-1):r}};function yf(e){const t=e._align;this.enter({type:"table",align:t.map((function(e){return"none"===e?null:e})),children:[]},e),this.data.inTable=!0}function bf(e){this.exit(e),this.data.inTable=void 0}function vf(e){this.enter({type:"tableRow",children:[]},e)}function xf(e){this.exit(e)}function kf(e){this.enter({type:"tableCell",children:[]},e)}function wf(e){let t=this.resume();this.data.inTable&&(t=t.replace(/\\([\\|])/g,Sf));const n=this.stack[this.stack.length-1];n.type,n.value=t,this.exit(e)}function Sf(e,t){return"|"===t?t:e}function Cf(e){const t=e||{},n=t.tableCellPadding,r=t.tablePipeAlign,i=t.stringLength,o=n?" ":"|";return{unsafe:[{character:"\r",inConstruct:"tableCell"},{character:"\n",inConstruct:"tableCell"},{atBreak:!0,character:"|",after:"[\t :-]"},{character:"|",inConstruct:"tableCell"},{atBreak:!0,character:":",after:"-"},{atBreak:!0,character:"-",after:"[:|-]"}],handlers:{inlineCode:function(e,t,n){let r=mf.inlineCode(e,t,n);n.stack.includes("tableCell")&&(r=r.replace(/\|/g,"\\$&"));return r},table:function(e,t,n,r){return s(function(e,t,n){const r=e.children;let i=-1;const o=[],a=t.enter("table");for(;++ic&&(c=e[u].length);++ol[o])&&(l[o]=e)}t.push(a)}a[u]=t,s[u]=r}var h;let d=-1;if("object"===typeof r&&"length"in r)for(;++dl[d]&&(l[d]=i),p[d]=i),f[d]=a}a.splice(1,0,f),s.splice(1,0,p),u=-1;const g=[];for(;++u0&&!n&&(e[e.length-1][1]._gfmAutolinkLiteralWalkedInto=!0),n}Df[43]=Bf,Df[45]=Bf,Df[46]=Bf,Df[95]=Bf,Df[72]=[Bf,$f],Df[104]=[Bf,$f],Df[87]=[Bf,Of],Df[119]=[Bf,Of];const Hf={tokenize:function(e,t,n){const r=this;return Pc(e,(function(e){const i=r.events[r.events.length-1];return 
i&&"gfmFootnoteDefinitionIndent"===i[1].type&&4===i[2].sliceSerialize(i[1],!0).length?t(e):n(e)}),"gfmFootnoteDefinitionIndent",5)},partial:!0};function Wf(e,t,n){const r=this;let i=r.events.length;const o=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let a;for(;i--;){const e=r.events[i][1];if("labelImage"===e.type){a=e;break}if("gfmFootnoteCall"===e.type||"labelLink"===e.type||"label"===e.type||"image"===e.type||"link"===e.type)break}return function(i){if(!a||!a._balanced)return n(i);const s=tu(r.sliceSerialize({start:a.end,end:r.now()}));if(94!==s.codePointAt(0)||!o.includes(s.slice(1)))return n(i);return e.enter("gfmFootnoteCallLabelMarker"),e.consume(i),e.exit("gfmFootnoteCallLabelMarker"),t(i)}}function Kf(e,t){let n,r=e.length;for(;r--;)if("labelImage"===e[r][1].type&&"enter"===e[r][0]){n=e[r][1];break}e[r+1][1].type="data",e[r+3][1].type="gfmFootnoteCallLabelMarker";const i={type:"gfmFootnoteCall",start:Object.assign({},e[r+3][1].start),end:Object.assign({},e[e.length-1][1].end)},o={type:"gfmFootnoteCallMarker",start:Object.assign({},e[r+3][1].end),end:Object.assign({},e[r+3][1].end)};o.end.column++,o.end.offset++,o.end._bufferIndex++;const a={type:"gfmFootnoteCallString",start:Object.assign({},o.end),end:Object.assign({},e[e.length-1][1].start)},s={type:"chunkString",contentType:"string",start:Object.assign({},a.start),end:Object.assign({},a.end)},l=[e[r+1],e[r+2],["enter",i,t],e[r+3],e[r+4],["enter",o,t],["exit",o,t],["enter",a,t],["enter",s,t],["exit",s,t],["exit",a,t],e[e.length-2],e[e.length-1],["exit",i,t]];return e.splice(r,e.length-r+1,...l),e}function Uf(e,t,n){const r=this,i=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let o,a=0;return function(t){return e.enter("gfmFootnoteCall"),e.enter("gfmFootnoteCallLabelMarker"),e.consume(t),e.exit("gfmFootnoteCallLabelMarker"),s};function s(t){return 
94!==t?n(t):(e.enter("gfmFootnoteCallMarker"),e.consume(t),e.exit("gfmFootnoteCallMarker"),e.enter("gfmFootnoteCallString"),e.enter("chunkString").contentType="string",l)}function l(s){if(a>999||93===s&&!o||null===s||91===s||Tc(s))return n(s);if(93===s){e.exit("chunkString");const o=e.exit("gfmFootnoteCallString");return i.includes(tu(r.sliceSerialize(o)))?(e.enter("gfmFootnoteCallLabelMarker"),e.consume(s),e.exit("gfmFootnoteCallLabelMarker"),e.exit("gfmFootnoteCall"),t):n(s)}return Tc(s)||(o=!0),a++,e.consume(s),92===s?c:l}function c(t){return 91===t||92===t||93===t?(e.consume(t),a++,l):l(t)}}function Vf(e,t,n){const r=this,i=r.parser.gfmFootnotes||(r.parser.gfmFootnotes=[]);let o,a,s=0;return function(t){return e.enter("gfmFootnoteDefinition")._container=!0,e.enter("gfmFootnoteDefinitionLabel"),e.enter("gfmFootnoteDefinitionLabelMarker"),e.consume(t),e.exit("gfmFootnoteDefinitionLabelMarker"),l};function l(t){return 94===t?(e.enter("gfmFootnoteDefinitionMarker"),e.consume(t),e.exit("gfmFootnoteDefinitionMarker"),e.enter("gfmFootnoteDefinitionLabelString"),e.enter("chunkString").contentType="string",c):n(t)}function c(t){if(s>999||93===t&&!a||null===t||91===t||Tc(t))return n(t);if(93===t){e.exit("chunkString");const n=e.exit("gfmFootnoteDefinitionLabelString");return o=tu(r.sliceSerialize(n)),e.enter("gfmFootnoteDefinitionLabelMarker"),e.consume(t),e.exit("gfmFootnoteDefinitionLabelMarker"),e.exit("gfmFootnoteDefinitionLabel"),h}return Tc(t)||(a=!0),s++,e.consume(t),92===t?u:c}function u(t){return 91===t||92===t||93===t?(e.consume(t),s++,c):c(t)}function h(t){return 58===t?(e.enter("definitionMarker"),e.consume(t),e.exit("definitionMarker"),i.includes(o)||i.push(o),Pc(e,d,"gfmFootnoteDefinitionWhitespace")):n(t)}function d(e){return t(e)}}function Yf(e,t,n){return e.check(Dc,t,e.attempt(Hf,t,n))}function Gf(e){e.exit("gfmFootnoteDefinition")}function Xf(e){let t=(e||{}).singleTilde;const n={name:"strikethrough",tokenize:function(e,n,r){const 
i=this.previous,o=this.events;let a=0;return function(t){if(126===i&&"characterEscape"!==o[o.length-1][1].type)return r(t);return e.enter("strikethroughSequenceTemporary"),s(t)};function s(o){const l=Au(i);if(126===o)return a>1?r(o):(e.consume(o),a++,s);if(a<2&&!t)return r(o);const c=e.exit("strikethroughSequenceTemporary"),u=Au(o);return c._open=!u||2===u&&Boolean(l),c._close=!l||2===l&&Boolean(u),n(o)}},resolveAll:function(e,t){let n=-1;for(;++n0;)t-=1,n.push(e.slice(this.map[t][0]+this.map[t][1]),this.map[t][2]),e.length=this.map[t][0];n.push(e.slice()),e.length=0;let r=n.pop();for(;r;){for(const t of r)e.push(t);r=n.pop()}this.map.length=0}}function Zf(e,t){let n=!1;const r=[];for(;t-1;){const e=r.events[t][1].type;if("lineEnding"!==e&&"linePrefix"!==e)break;t--}const i=t>-1?r.events[t][1].type:null,o="tableHead"===i||"tableRow"===i?x:s;if(o===x&&r.parser.lazy[r.now().line])return n(e);return o(e)};function s(t){return e.enter("tableHead"),e.enter("tableRow"),function(e){if(124===e)return l(e);return i=!0,a+=1,l(e)}(t)}function l(t){return null===t?n(t):Ac(t)?a>1?(a=0,r.interrupt=!0,e.exit("tableRow"),e.enter("lineEnding"),e.consume(t),e.exit("lineEnding"),h):n(t):Ec(t)?Pc(e,l,"whitespace")(t):(a+=1,i&&(i=!1,o+=1),124===t?(e.enter("tableCellDivider"),e.consume(t),e.exit("tableCellDivider"),i=!0,l):(e.enter("data"),c(t)))}function c(t){return null===t||124===t||Tc(t)?(e.exit("data"),l(t)):(e.consume(t),92===t?u:c)}function u(t){return 92===t||124===t?(e.consume(t),c):c(t)}function h(t){return r.interrupt=!1,r.parser.lazy[r.now().line]?n(t):(e.enter("tableDelimiterRow"),i=!1,Ec(t)?Pc(e,d,"linePrefix",r.parser.constructs.disable.null.includes("codeIndented")?void 0:4)(t):d(t))}function d(t){return 45===t||58===t?p(t):124===t?(i=!0,e.enter("tableCellDivider"),e.consume(t),e.exit("tableCellDivider"),f):v(t)}function f(t){return Ec(t)?Pc(e,p,"whitespace")(t):p(t)}function p(t){return 
58===t?(a+=1,i=!0,e.enter("tableDelimiterMarker"),e.consume(t),e.exit("tableDelimiterMarker"),g):45===t?(a+=1,g(t)):null===t||Ac(t)?b(t):v(t)}function g(t){return 45===t?(e.enter("tableDelimiterFiller"),m(t)):v(t)}function m(t){return 45===t?(e.consume(t),m):58===t?(i=!0,e.exit("tableDelimiterFiller"),e.enter("tableDelimiterMarker"),e.consume(t),e.exit("tableDelimiterMarker"),y):(e.exit("tableDelimiterFiller"),y(t))}function y(t){return Ec(t)?Pc(e,b,"whitespace")(t):b(t)}function b(n){return 124===n?d(n):(null===n||Ac(n))&&i&&o===a?(e.exit("tableDelimiterRow"),e.exit("tableHead"),t(n)):v(n)}function v(e){return n(e)}function x(t){return e.enter("tableRow"),k(t)}function k(n){return 124===n?(e.enter("tableCellDivider"),e.consume(n),e.exit("tableCellDivider"),k):null===n||Ac(n)?(e.exit("tableRow"),t(n)):Ec(n)?Pc(e,k,"whitespace")(n):(e.enter("data"),w(n))}function w(t){return null===t||124===t||Tc(t)?(e.exit("data"),k(t)):(e.consume(t),92===t?S:w)}function S(t){return 92===t||124===t?(e.consume(t),w):w(t)}}function ep(e,t){let n,r,i,o=-1,a=!0,s=0,l=[0,0,0,0],c=[0,0,0,0],u=!1,h=0;const d=new Qf;for(;++on[2]+1){const t=n[2]+1,r=n[3]-n[2]-1;e.add(t,r,[])}}e.add(n[3]+1,0,[["exit",a,t]])}return void 0!==i&&(o.end=Object.assign({},rp(t.events,i)),e.add(i,0,[["exit",o,t]]),o=void 0),o}function np(e,t,n,r,i){const o=[],a=rp(t.events,n);i&&(i.end=Object.assign({},a),o.push(["exit",i,t])),r.end=Object.assign({},a),o.push(["exit",r,t]),e.add(n+1,0,o)}function rp(e,t){const n=e[t],r="enter"===n[0]?"start":"end";return n[1][r]}const ip={name:"tasklistCheck",tokenize:function(e,t,n){const r=this;return function(t){if(null!==r.previous||!r._gfmTasklistFirstContentOfListItem)return n(t);return e.enter("taskListCheck"),e.enter("taskListCheckMarker"),e.consume(t),e.exit("taskListCheckMarker"),i};function i(t){return 
Tc(t)?(e.enter("taskListCheckValueUnchecked"),e.consume(t),e.exit("taskListCheckValueUnchecked"),o):88===t||120===t?(e.enter("taskListCheckValueChecked"),e.consume(t),e.exit("taskListCheckValueChecked"),o):n(t)}function o(t){return 93===t?(e.enter("taskListCheckMarker"),e.consume(t),e.exit("taskListCheckMarker"),e.exit("taskListCheck"),a):n(t)}function a(r){return Ac(r)?t(r):Ec(r)?e.check({tokenize:op},t,n)(r):n(r)}}};function op(e,t,n){return Pc(e,(function(e){return null===e?n(e):t(e)}),"whitespace")}const ap={};function sp(e){const t=e||ap,n=this.data(),r=n.micromarkExtensions||(n.micromarkExtensions=[]),i=n.fromMarkdownExtensions||(n.fromMarkdownExtensions=[]),o=n.toMarkdownExtensions||(n.toMarkdownExtensions=[]);r.push(function(e){return mc([{text:Df},{document:{91:{name:"gfmFootnoteDefinition",tokenize:Vf,continuation:{tokenize:Yf},exit:Gf}},text:{91:{name:"gfmFootnoteCall",tokenize:Uf},93:{name:"gfmPotentialFootnoteCall",add:"after",tokenize:Wf,resolveTo:Kf}}},Xf(e),{flow:{null:{name:"table",tokenize:Jf,resolveAll:ep}}},{text:{91:ip}}])}(t)),i.push([{transforms:[Ed],enter:{literalAutolink:wd,literalAutolinkEmail:Sd,literalAutolinkHttp:Sd,literalAutolinkWww:Sd},exit:{literalAutolink:Td,literalAutolinkEmail:Ad,literalAutolinkHttp:Cd,literalAutolinkWww:_d}},{enter:{gfmFootnoteCallString:Pd,gfmFootnoteCall:Od,gfmFootnoteDefinitionLabelString:$d,gfmFootnoteDefinition:Bd},exit:{gfmFootnoteCallString:Dd,gfmFootnoteCall:zd,gfmFootnoteDefinitionLabelString:Id,gfmFootnoteDefinition:Nd}},{canContainEols:["delete"],enter:{strikethrough:Kd},exit:{strikethrough:Ud}},{enter:{table:yf,tableData:kf,tableHeader:kf,tableRow:vf},exit:{codeText:wf,table:bf,tableData:xf,tableHeader:xf,tableRow:xf}},{exit:{taskListCheckValueChecked:_f,taskListCheckValueUnchecked:_f,paragraph:Af}}]),o.push(function(e){return{extensions:[{unsafe:[{character:"@",before:"[+\\-.\\w]",after:"[\\-.\\w]",inConstruct:xd,notInConstruct:kd},{character:".",before:"[Ww]",after:"[\\-.\\w]",inConstruct:xd,notInCo
nstruct:kd},{character:":",before:"[ps]",after:"\\/",inConstruct:xd,notInConstruct:kd}]},jd(e),{unsafe:[{character:"~",inConstruct:"phrasing",notInConstruct:Wd}],handlers:{delete:Vd}},Cf(e),{unsafe:[{atBreak:!0,character:"-",after:"[:|-]"}],handlers:{listItem:Tf}}]}}(t))}var lp=n(7020),cp=n(7107),up=n(6325),hp=n(8054),dp=n(463),fp=n(8212),pp=n(4944),gp=n(4176),mp=n(6581),yp=n(4327),bp=n(7260),vp=n(5037),xp=n(4536),kp=n(5540);const wp=Ts.button` + position: absolute; + top: -12px; + right: 24px; + width: 24px; + height: 24px; + border-radius: 50%; + background: ${e=>{let{theme:t}=e;return t.colors.surface}}; + border: 1px solid ${e=>{let{theme:t}=e;return t.colors.border}}; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + color: ${e=>{let{theme:t}=e;return t.colors.text}}; + transition: all 0.2s ease; + &:hover { + background: ${e=>{let{theme:t}=e;return t.colors.hover}}; + transform: translateY(-1px); + } +`,Sp=Ts.div` + padding: 0.75rem; + background: ${e=>{let{theme:t}=e;return t.colors.surface}}dd; + border-top: 1px solid ${e=>{let{theme:t}=e;return t.colors.border}}; + display: flex; + align-items: center; + justify-content: center; + cursor: pointer; + position: sticky; + bottom: 0; + backdrop-filter: blur(16px); + &:hover { + background: ${e=>{let{theme:t}=e;return t.colors.hover}}; + } +`,Cp=Ts.div` + padding: 0.5rem; + border: 1px solid ${e=>e.theme.colors.border}; + border-radius: 0 0 ${e=>e.theme.sizing.borderRadius.md} ${e=>e.theme.sizing.borderRadius.md}; + background: ${e=>e.theme.colors.background}; + min-height: 120px; + max-height: ${e=>{let{theme:t}=e;return t.sizing.console.maxHeight}}; + overflow-y: auto; + pre { + background: ${e=>e.theme.colors.surface}; + padding: 1rem; + border-radius: ${e=>e.theme.sizing.borderRadius.sm}; + overflow-x: auto; + } + code { + font-family: monospace; + } +`,_p=!1,Ap=(e,t)=>{_p},Tp=(e,t)=>{console.error(`[InputArea] ${e}`,t)},Ep=Ts.div` + padding: 1.5rem; + background-color: 
${e=>e.theme.colors.surface}; + /* Add test id */ + &[data-testid] { + outline: none; + + } + border-top: 1px solid ${e=>e.theme.colors.border}; + display: ${e=>{let{theme:t,$hide:n}=e;return n?"none":"block"}}; + position: sticky; + bottom: 0; + z-index: 10; + backdrop-filter: blur(16px) saturate(180%); + box-shadow: 0 -4px 16px rgba(0, 0, 0, 0.15); + background: ${e=>{let{theme:t}=e;return`linear-gradient(to top,\n\n ${t.colors.surface}dd,\n ${t.colors.background}aa\n )`}}; +`,Fp=Ts.form` + display: flex; + gap: 1rem; + align-items: flex-start; +`,Mp=Ts.div` + display: flex; + gap: 0.25rem; + padding: 0.5rem; + flex-wrap: wrap; + background: ${e=>{let{theme:t}=e;return t.colors.surface}}; + border: 1px solid ${e=>{let{theme:t}=e;return t.colors.border}}; + border-bottom: none; + border-radius: ${e=>{let{theme:t}=e;return t.sizing.borderRadius.md}} + + ${e=>{let{theme:t}=e;return t.sizing.borderRadius.md}} 0 0; + /* Toolbar sections */ + .toolbar-section { + display: flex; + gap: 0.25rem; + padding: 0 0.5rem; + border-right: 1px solid ${e=>{let{theme:t}=e;return t.colors.border}}; + &:last-child { + border-right: none; + } + } +`,Lp=Ts.button` + padding: 0.5rem; + background: transparent; + border: none; + border-radius: ${e=>{let{theme:t}=e;return t.sizing.borderRadius.sm}}; + cursor: pointer; + color: ${e=>{let{theme:t}=e;return t.colors.text}}; + &:hover { + background: ${e=>{let{theme:t}=e;return t.colors.hover}}; + } + &.active { + color: ${e=>{let{theme:t}=e;return t.colors.primary}}; + } +`,Pp=Ts.textarea` + width: 100%; + padding: 0.5rem; + border-radius: ${e=>e.theme.sizing.borderRadius.md}; + border: 1px solid ${e=>e.theme.colors.border}; + font-family: inherit; + resize: vertical; + min-height: 40px; + max-height: ${e=>{let{theme:t}=e;return t.sizing.console.maxHeight}}; + border-radius: 0 0 ${e=>e.theme.sizing.borderRadius.md} ${e=>e.theme.sizing.borderRadius.md}; + transition: all 0.3s ease; + background: ${e=>{let{theme:t}=e;return 
t.colors.background}}; + + &:focus { + outline: none; + border-color: ${e=>e.theme.colors.primary}; + box-shadow: 0 0 0 2px ${e=>{let{theme:t}=e;return`${t.colors.primary}40`}}; + transform: translateY(-1px); + } + &:disabled { + background-color: ${e=>e.theme.colors.disabled}; + cursor: not-allowed; + } +`,Op=Ts.button` + padding: 0.75rem 1.5rem; + background: ${e=>{let{theme:t}=e;return`linear-gradient(135deg,\n\n ${t.colors.primary},\n\n ${t.colors.primaryDark}\n )`}}; + color: white; + border: none; + border-radius: ${e=>e.theme.sizing.borderRadius.md}; + cursor: pointer; + transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); + font-weight: ${e=>{let{theme:t}=e;return t.typography.fontWeight.medium}}; + text-transform: uppercase; + letter-spacing: 0.5px; + position: relative; + overflow: hidden; + min-width: 120px; + + &:disabled { + opacity: 0.5; + cursor: not-allowed; + } + &:hover:not(:disabled) { + background: ${e=>{let{theme:t}=e;return`linear-gradient(135deg,\n ${t.colors.primaryDark},\n ${t.colors.primary}\n )`}}; + transform: translateY(-2px); + box-shadow: 0 8px 16px ${e=>{let{theme:t}=e;return t.colors.primary+"40"}}; + } + + &:active:not(:disabled) { + transform: translateY(0); + } + + &:after { + content: ''; + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; + background: linear-gradient(rgba(255, 255, 255, 0.2), transparent); + pointer-events: none; + } +`,$p=(0,r.memo)((function(e){let{onSendMessage:t,isWebSocketConnected:n=!0}=e;const[i,o]=(0,r.useState)(""),[a,s]=(0,r.useState)(!1),[l,c]=(0,r.useState)(!1),u=C((e=>e.config)),h=C((e=>e.messages.messages)),[d,f]=(0,r.useState)(!1),p=(0,r.useCallback)((()=>{c((e=>{const t=!e;return t||setTimeout((()=>{var e;return null===(e=g.current)||void 0===e?void 0:e.focus()}),0),t}))}),[]),g=r.useRef(null),m=u.inputCnt>0&&h.length>u.inputCnt;r.useEffect((()=>{a&&Rs().highlightAll()}),[a,i]);const y=(0,r.useCallback)((e=>{const t=g.current;if(t){const 
n=t.selectionStart,r=t.selectionEnd,i=t.value.substring(n,r),a=e.replace("$1",i||"text");o((e=>e.substring(0,n)+a+e.substring(r))),setTimeout((()=>{const e=n+a.indexOf(i||"text");t.focus(),t.setSelectionRange(e,e+(i||"text").length)}),0)}}),[]),b=(0,r.useCallback)((()=>{const e="\n| Header 1 | Header 2 | Header 3 |\n|----------|----------|----------|\n| Cell 1 | Cell 2 | Cell 3 |\n| Cell 4 | Cell 5 | Cell 6 |\n".trim()+"\n";y(e)}),[y]),v=(0,r.useCallback)((e=>{e.preventDefault(),!d&&n&&(i.trim()?(f(!0),Promise.resolve(t(i)).finally((()=>{o(""),f(!1)})).catch((e=>{Tp("Failed to send message",e)}))):Ap())}),[i,t,d,n,_p]),x=(0,r.useCallback)((e=>{const t=e.target.value;o(t)}),[]),k=(0,r.useCallback)((e=>{"Enter"===e.key&&!e.shiftKey&&n&&(e.preventDefault(),v(e))}),[v,n]);r.useEffect((()=>{try{var e;null===(e=g.current)||void 0===e||e.focus()}catch(t){Tp("Failed to focus input on mount",t)}return()=>{}}),[u]);const w=n?null:(0,js.jsx)("div",{style:{color:"red",fontSize:"0.8rem",marginTop:"0.5rem",display:"flex",alignItems:"center",justifyContent:"center"},children:"\u26a0\ufe0f Connection lost. Reconnecting... 
(Your message will be preserved)"});return l?(0,js.jsxs)(Ep,{$hide:m,"data-testid":"input-container",id:"chat-input-container",className:"collapsed",children:[(0,js.jsx)(wp,{onClick:p,title:"Expand input area","data-testid":"expand-input",children:(0,js.jsx)(vp.A,{fontSize:"small"})}),(0,js.jsxs)(Sp,{onClick:p,children:["Click to expand input",w]})]}):(0,js.jsxs)(Ep,{$hide:m,"data-testid":"input-container",id:"chat-input-container",className:"expanded",children:[(0,js.jsx)(wp,{onClick:p,title:"Collapse input area","data-testid":"collapse-input",children:(0,js.jsx)(xp.A,{fontSize:"small"})}),(0,js.jsx)("div",{className:"input-area-content",children:(0,js.jsx)(Fp,{onSubmit:v,children:(0,js.jsxs)("div",{style:{width:"100%"},children:[(0,js.jsxs)(Mp,{children:[(0,js.jsx)("div",{className:"toolbar-section",children:(0,js.jsx)(Lp,{type:"button",onClick:()=>{const e=!a;Ci((()=>s(e)),150)()},title:a?"Edit":"Preview",className:a?"active":"",children:a?(0,js.jsx)(kp.A,{fontSize:"small"}):(0,js.jsx)(bp.A,{fontSize:"small"})})}),(0,js.jsxs)("div",{className:"toolbar-section",children:[(0,js.jsx)(Lp,{type:"button",onClick:()=>y("# $1"),title:"Heading",children:(0,js.jsx)(pp.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:()=>y("**$1**"),title:"Bold",children:(0,js.jsx)(lp.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:()=>y("*$1*"),title:"Italic",children:(0,js.jsx)(cp.A,{fontSize:"small"})})]}),(0,js.jsxs)("div",{className:"toolbar-section",children:[(0,js.jsx)(Lp,{type:"button",onClick:()=>y("`$1`"),title:"Inline Code",children:(0,js.jsx)(up.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:()=>y("```\n$1\n```"),title:"Code Block",children:(0,js.jsxs)("div",{style:{display:"flex"},children:[(0,js.jsx)(up.A,{fontSize:"small",style:{marginRight:"2px"}}),(0,js.jsx)(up.A,{fontSize:"small"})]})})]}),(0,js.jsxs)("div",{className:"toolbar-section",children:[(0,js.jsx)(Lp,{type:"button",onClick:()=>y("- $1"),title:"Bullet 
List",children:(0,js.jsx)(hp.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:()=>y("> $1"),title:"Quote",children:(0,js.jsx)(dp.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:()=>y("- [ ] $1"),title:"Task List",children:(0,js.jsx)(mp.A,{fontSize:"small"})})]}),(0,js.jsxs)("div",{className:"toolbar-section",children:[(0,js.jsx)(Lp,{type:"button",onClick:()=>y("[$1](url)"),title:"Link",children:(0,js.jsx)(fp.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:()=>y("![$1](image-url)"),title:"Image",children:(0,js.jsx)(yp.A,{fontSize:"small"})}),(0,js.jsx)(Lp,{type:"button",onClick:b,title:"Table",children:(0,js.jsx)(gp.A,{fontSize:"small"})})]})]}),(0,js.jsx)("div",{className:"input-modes",children:a?(0,js.jsx)("div",{style:{display:"block",transition:"opacity 0.2s ease"},children:(0,js.jsx)(Cp,{children:(0,js.jsx)(hd,{remarkPlugins:[sp],components:{code(e){let{node:t,className:n,children:r,...i}=e;return(0,js.jsx)("pre",{className:n,children:(0,js.jsx)("code",{...i,children:r})})}},children:i})})}):(0,js.jsx)("div",{style:{display:"block",transition:"opacity 0.2s ease"},children:(0,js.jsx)(Pp,{ref:g,"data-testid":"chat-input",id:"chat-input",value:i,onChange:x,onKeyPress:k,placeholder:n?"Type a message... (Markdown supported)":"Connection lost. 
Reconnecting...",rows:3,"aria-label":"Message input",disabled:d})})}),w,(0,js.jsx)(Op,{type:"submit","data-testid":"send-button",id:"send-message-button",disabled:d||!i.trim()||!n,"aria-label":"Send message",children:n?"Send":"Reconnecting..."})]})})})]})})),Bp=$p,Dp="[ChatInterface]",zp=Ts.div` + display: flex; + flex-direction: column; + height: 100vh; + /* Add test id */ + &[data-testid] { + outline: none; + } + `,Ip=e=>{let{sessionId:t,websocket:n,isConnected:i}=e;const[o,a]=(0,r.useState)([]),[s]=(0,r.useState)((()=>t||window.location.hash.slice(1)||"new")),l=k(),c=zs(s);C((e=>e.config));(0,r.useEffect)((()=>{let e=!0;return(async()=>{if(s)try{const t=await async function(e){let t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"appInfo";return Ls||(console.info(`${Fs} Fetching app config from ${t} for session: ${e}`),Ls=fetch(`${Ms}${t}?session=${e}`,{headers:{Accept:"application/json"}}).then((e=>{if(!e.ok)throw new Error(`Failed to fetch app config: ${e.status} ${e.statusText}`);const t=e.headers.get("content-type");if(!t||!t.includes("application/json")&&!t.includes("text/json"))throw new Error(`Expected JSON response but got ${t}`);return e.json()})).then((e=>(console.info(`${Fs} Received app config:`,e),yi.dispatch(Ke(e)),e))).catch((e=>(console.error(`${Fs} Failed to fetch app config:`,e),Ls=null,{applicationName:"Chat App",inputCnt:0,stickyInput:!0,loadImages:!0,showMenubar:!0}))),Ls)}(s);e&&t?console.info(`${Dp} App config loaded successfully`,t):e&&console.warn(`${Dp} Could not load app config, using defaults`)}catch(t){e&&console.error(`${Dp} Failed to fetch app config:`,t)}})(),()=>{e=!1}}),[s]),(0,r.useEffect)((()=>{let e=!0;const t=t=>{if(!e)return;if(t.isHtml){const n={id:`${Date.now()}`,content:t.data||"",type:"assistant",timestamp:t.timestamp,isHtml:!0,rawHtml:t.data,version:t.timestamp,sanitized:!1};return e&&a((e=>[...e,n])),void l(Ur(n))}if(!t.data||"string"!==typeof t.data)return;if(t.data.includes('"type":"connect"'))return;const 
n=t.data.indexOf(","),r=n>-1?t.data.indexOf(",",n+1):-1;if(-1===n||-1===r)return void console.error(`${Dp} Invalid message format received:`,t.data);const i=t.data.substring(0,n),o=t.data.substring(n+1,r),s=t.data.substring(r+1),c=Date.now(),u={id:`${i}-${c}`,content:s,version:parseInt(o,10)||c,type:i.startsWith("u")?"user":i.startsWith("s")?"system":"assistant",timestamp:c,isHtml:!1,rawHtml:null,sanitized:!1};l(Ur(u))};return n.addMessageHandler(t),()=>{e=!1,n.removeMessageHandler(t)}}),[!1,l,i,s,n,c.readyState]);return(0,js.jsxs)(zp,{"data-testid":"chat-container",id:"chat-container",children:[(0,js.jsx)(Vs,{}),(0,js.jsx)(Bp,{onSendMessage:e=>{console.info(`${Dp} Sending message - length: ${e.length}`,{sessionId:s,isConnected:i}),c.send(e)},isWebSocketConnected:c.isConnected})]})},Np={styles:{theme:"color: #4CAF50; font-weight: bold",action:"color: #2196F3; font-weight: bold"},log(e,t){console.groupCollapsed(`%cTheme %c${e} %c${t}`,this.styles.theme,this.styles.action,this.styles.theme),console.groupEnd()}},Rp={_init(){Np.log("initialized","base")},shadows:{small:"0 1px 3px rgba(0, 0, 0, 0.12)",medium:"0 4px 6px rgba(0, 0, 0, 0.15)",large:"0 10px 20px rgba(0, 0, 0, 0.20)"},transitions:{default:"0.3s ease",fast:"0.15s ease",slow:"0.5s 
ease"},config:{stickyInput:!0,inputCnt:0},logging:{colors:{error:"#FF3B30",warning:"#FF9500",info:"#007AFF",debug:"#5856D6",success:"#34C759",trace:"#8E8E93",verbose:"#C7C7CC",system:"#48484A",critical:"#FF3B30"},fontSize:{normal:"0.9rem",large:"1.1rem",small:"0.8rem",system:"0.85rem",critical:"1.2rem"},padding:{message:"0.5rem",container:"1rem",timestamp:"0.25rem"},background:{error:"#FFE5E5",warning:"#FFF3E0",info:"#E3F2FD",debug:"#F3E5F5",success:"#E8F5E9",system:"#FAFAFA",critical:"#FFEBEE"},border:{radius:"4px",style:"solid",width:"1px"},timestamp:{format:"HH:mm:ss",color:"#8E8E93",show:!0},display:{maxLines:0}},sizing:{spacing:{xs:"0.25rem",sm:"0.5rem",md:"1rem",lg:"1.5rem",xl:"2rem"},borderRadius:{sm:"0.25rem",md:"0.5rem",lg:"1rem"},console:{minHeight:"200px",maxHeight:"500px",padding:"1rem"}},typography:{fontFamily:"'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif",families:{primary:"'Outfit', system-ui, -apple-system, BlinkMacSystemFont, sans-serif",heading:"'Space Grotesk', system-ui, sans-serif",secondary:"system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif",mono:"'IBM Plex Mono', 'Fira Code', monospace",display:"'Syne', system-ui, sans-serif"},monoFontFamily:"'Fira Code', 'Consolas', monospace",fontSize:{"2xl":"1.75rem",xs:"0.75rem",sm:"0.875rem",md:"1rem",lg:"1.125rem",xl:"1.25rem"},fontWeight:{light:300,regular:400,medium:500,semibold:600,bold:700,extrabold:800},lineHeight:{tight:"1.15",normal:"1.65",relaxed:"1.85"},letterSpacing:{tight:"-0.04em",normal:"-0.02em",wide:"0.04em",wider:"0.08em"},console:{fontFamily:"'Fira Code', Consolas, Monaco, 'Courier New', 
monospace",fontSize:"0.9rem",lineHeight:"1.6"}}},jp={name:"main",colors:{primary:"#007AFF",secondary:"#5856D6",background:"#FFFFFF",surface:"#F2F2F7",text:{primary:"#000000",secondary:"#6E6E73"},border:"#C6C6C8",error:"#FF3B30",success:"#34C759",warning:"#FF9500",info:"#007AFF",primaryDark:"#0056b3",secondaryDark:"#4240aa",errorDark:"#D9362B",successDark:"#28A745",critical:"#FF3B30",disabled:"#E5E5EA",hover:"#0056b3"},...Rp},qp={name:"night",colors:{primary:"#0A84FF",secondary:"#5E5CE6",background:"#000000",surface:"#1C1C1E",text:{primary:"#FFFFFF",secondary:"#98989F"},border:"#38383A",error:"#FF453A",success:"#32D74B",warning:"#FF9F0A",info:"#5E5CE6",primaryDark:"#0063cc",secondaryDark:"#4b49b8",errorDark:"#E53E30",successDark:"#27C13F",critical:"#FF453A",disabled:"#2C2C2E",hover:"#0063cc"},...Rp},Hp={name:"forest",colors:{primary:"#2D6A4F",secondary:"#40916C",background:"#081C15",surface:"#1B4332",text:{primary:"#D8F3DC",secondary:"#95D5B2"},border:"#2D6A4F",error:"#D62828",success:"#52B788",warning:"#F77F00",info:"#4895EF",primaryDark:"#1E4D38",secondaryDark:"#2F6D50",errorDark:"#B82323",successDark:"#3E8E6A",critical:"#D62828",disabled:"#2D3B35",hover:"#1E4D38"},...Rp},Wp={name:"pony",colors:{primary:"#FF69B4",secondary:"#FFB6C1",background:"#FFF0F5",surface:"#FFE4E1",text:{primary:"#DB7093",secondary:"#C71585"},border:"#FFB6C1",error:"#FF1493",success:"#FF69B4",warning:"#FFB6C1",info:"#DB7093",primaryDark:"#E55EA4",secondaryDark:"#E5A0AD",errorDark:"#D9127F",successDark:"#E55EA4",critical:"#FF1493",disabled:"#F8E1E7",hover:"#E55EA4"},...Rp},Kp={name:"alien",colors:{primary:"#39FF14",secondary:"#00FF00",background:"#0A0A0A",surface:"#1A1A1A",text:{primary:"#39FF14",secondary:"#00FF00"},border:"#008000",error:"#FF0000",success:"#39FF14",warning:"#FFFF00",info:"#00FFFF",primaryDark:"#2ECF0F",secondaryDark:"#00CF00",errorDark:"#CF0000",successDark:"#2ECF0F",critical:"#FF0000",disabled:"#1C1C1C",hover:"#2ECF0F"},...Rp},Up={default:{...jp,name:"default",colors:{...jp
.colors}},main:jp,night:qp,forest:Hp,pony:Wp,alien:Kp,synthwave:{},paper:{},sunset:{name:"sunset",colors:{primary:"#FF6B6B",secondary:"#FFA07A",background:"#2C3E50",surface:"#34495E",text:{primary:"#ECF0F1",secondary:"#BDC3C7"},border:"#95A5A6",error:"#E74C3C",success:"#2ECC71",warning:"#F1C40F",info:"#3498DB",primaryDark:"#D65B5B",secondaryDark:"#E08A6A",errorDark:"#C0392B",successDark:"#27AE60",disabled:"#7F8C8D",critical:"#E74C3C",hover:"#D65B5B"},...Rp},ocean:{name:"ocean",colors:{primary:"#00B4D8",secondary:"#48CAE4",background:"#03045E",surface:"#023E8A",text:{primary:"#CAF0F8",secondary:"#90E0EF"},border:"#0077B6",error:"#FF6B6B",success:"#2ECC71",warning:"#FFB703",info:"#48CAE4",primaryDark:"#0093C0",secondaryDark:"#3EAFC7",errorDark:"#D65B5B",successDark:"#27AE60",disabled:"#415A77",hover:"#0077B6",critical:"#FF6B6B"},...Rp},cyberpunk:{name:"cyberpunk",colors:{primary:"#FF00FF",secondary:"#00FFFF",background:"#0D0221",surface:"#1A1A2E",text:{primary:"#FF00FF",secondary:"#00FFFF"},border:"#FF00FF",error:"#FF0000",success:"#00FF00",warning:"#FFD700",info:"#00FFFF",primaryDark:"#D100D1",secondaryDark:"#00D1D1",errorDark:"#D10000",successDark:"#00D100",disabled:"#4A4A4A",hover:"#FF69B4",critical:"#FF0000"},...Rp}};Up.synthwave={name:"synthwave",colors:{primary:"#FF00FF",secondary:"#00FFFF",background:"#1A1A2E",surface:"#2A2A3E",text:{primary:"#00FFFF",secondary:"#FF00FF"},border:"#FF00FF",error:"#FF3366",success:"#00FF7F",warning:"#FFFF66",info:"#3399FF",primaryDark:"#CC00CC",secondaryDark:"#00CCCC",errorDark:"#D92B58",successDark:"#00CC66",critical:"#FF3366",disabled:"#4A4A5E",hover:"#CC00CC"},...Rp},Up.paper={name:"paper",colors:{primary:"#5D737E",secondary:"#8C7A6B",background:"#FDFBF7",surface:"#F5F2EB",text:{primary:"#4A4A4A",secondary:"#7B7B7B"},border:"#DCDCDC",error:"#C94E4E",success:"#6A994E",warning:"#D4A26A",info:"#7E9CB9",primaryDark:"#4A5C66",secondaryDark:"#706053",errorDark:"#A84040",successDark:"#537A3E",critical:"#C94E4E",disabled:"#E0E0E0",hov
er:"#4A5C66"},...Rp};const Vp={name:"default",sizing:Rp.sizing,typography:Rp.typography},Yp={name:"compact",sizing:{...Rp.sizing,spacing:{xs:"0.125rem",sm:"0.25rem",md:"0.5rem",lg:"1rem",xl:"1.5rem"}},typography:{...Rp.typography,fontSize:{xs:"0.65rem",sm:"0.75rem",md:"0.875rem",lg:"1rem",xl:"1.125rem","2xl":"1.5rem"},lineHeight:{tight:"1.1",normal:"1.5",relaxed:"1.7"}}},Gp={name:"spacious",sizing:{...Rp.sizing,spacing:{xs:"0.5rem",sm:"0.75rem",md:"1.25rem",lg:"2rem",xl:"2.5rem"}},typography:{...Rp.typography,fontSize:{xs:"0.875rem",sm:"1rem",md:"1.125rem",lg:"1.375rem",xl:"1.625rem","2xl":"2rem"}}},Xp={name:"ultra-compact",sizing:{...Rp.sizing,spacing:{xs:"0.0625rem",sm:"0.125rem",md:"0.25rem",lg:"0.5rem",xl:"0.75rem"}},typography:{...Rp.typography,fontSize:{xs:"0.6rem",sm:"0.7rem",md:"0.8rem",lg:"0.9rem",xl:"1rem","2xl":"1.25rem"},lineHeight:{tight:"1.0",normal:"1.3",relaxed:"1.5"}}},Qp={name:"content-focused",sizing:{...Rp.sizing,spacing:{xs:"0.3rem",sm:"0.6rem",md:"1.1rem",lg:"1.6rem",xl:"2.2rem"},console:{...Rp.sizing.console,maxHeight:"600px"}},typography:{...Rp.typography,fontSize:{xs:"0.8rem",sm:"0.9rem",md:"1.05rem",lg:"1.2rem",xl:"1.35rem","2xl":"1.85rem"},lineHeight:{tight:"1.2",normal:"1.7",relaxed:"1.9"}}},Zp={default:Vp,compact:Yp,spacious:Gp,"ultra-compact":Xp,"content-focused":Qp},Jp=(function(e){for(var t=[],n=1;n{let{theme:t}=e;return t.colors.background}}; + border-radius: 4px; + } + + ::-webkit-scrollbar-thumb { + background: ${e=>{let{theme:t}=e;return t.colors.primary+"40"}}; + border-radius: 4px; + border: 2px solid ${e=>{let{theme:t}=e;return t.colors.background}}; + + &:hover { + background: ${e=>{let{theme:t}=e;return t.colors.primary+"60"}}; + } + } + + :root { + /* Fallback Theme variables - these will be overridden by ThemeProvider */ + /* Color related fallbacks (can be minimal as ThemeProvider sets them) */ + /* Font weights */ + --font-weight-light: 300; /* Fallback */ + --font-weight-regular: 400; /* Fallback */ + 
--font-weight-medium: 500; /* Fallback */ + --font-weight-semibold: 600; /* Fallback */ + --font-weight-bold: 700; /* Fallback */ + --font-weight-extrabold: 800; /* Fallback */ + + /* Font families */ + --font-primary: 'Outfit', system-ui, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif; /* Fallback to match baseTheme */ + --font-heading: 'Space Grotesk', system-ui, sans-serif; /* Fallback to match baseTheme */ + --font-mono: 'IBM Plex Mono', 'Fira Code', monospace; /* Fallback to match baseTheme */ + --font-display: 'Syne', system-ui, sans-serif; /* Fallback to match baseTheme */ + + /* Font sizes */ + --font-size-xs: 0.75rem; /* Fallback */ + --font-size-sm: 0.875rem; /* Fallback */ + --font-size-md: 1rem; /* Fallback */ + --font-size-lg: 1.125rem; /* Fallback */ + --font-size-xl: 1.25rem; /* Fallback */ + --font-size-2xl: 1.5rem; /* Fallback */ + + /* Line heights */ + --line-height-tight: 1.2; /* Fallback */ + --line-height-normal: 1.6; /* Fallback */ + --line-height-relaxed: 1.8; /* Fallback */ + + /* Letter spacing */ + --letter-spacing-tight: -0.02em; /* Fallback */ + --letter-spacing-normal: normal; /* Fallback */ + --letter-spacing-wide: 0.02em; /* Fallback */ + --letter-spacing-wider: 0.04em; /* Fallback */ + + /* Sizing */ + --spacing-xs: 0.25rem; /* Fallback */ + --spacing-sm: 0.5rem; /* Fallback */ + --spacing-md: 1rem; /* Fallback */ + --spacing-lg: 1.5rem; /* Fallback */ + --spacing-xl: 2rem; /* Fallback */ + --border-radius-sm: 0.25rem; /* Fallback */ + --border-radius-md: 0.5rem; /* Fallback */ + --border-radius-lg: 1rem; /* Fallback */ + } + /* + The :root variables above serve as fallbacks. 
+ ThemeProvider.tsx will inject a `;\n };\n\n collectStyles(children: any): React.JSX.Element {\n if (this.sealed) {\n throw styledError(2);\n }\n\n return {children};\n }\n\n getStyleTags = (): string => {\n if (this.sealed) {\n throw styledError(2);\n }\n\n return this._emitSheetCSS();\n };\n\n getStyleElement = () => {\n if (this.sealed) {\n throw styledError(2);\n }\n\n const css = this.instance.toString();\n if (!css) return [];\n\n const props = {\n [SC_ATTR]: '',\n [SC_ATTR_VERSION]: SC_VERSION,\n dangerouslySetInnerHTML: {\n __html: css,\n },\n };\n\n const nonce = getNonce();\n if (nonce) {\n (props as any).nonce = nonce;\n }\n\n // v4 returned an array for this fn, so we'll do the same for v5 for backward compat\n return [
    \n * ^\n * |\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return effects.attempt(blankLine, ok, nok);\n }\n}","/**\n * @import {\n * Code,\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { factorySpace } from 'micromark-factory-space';\nimport { markdownLineEnding, markdownSpace } from 'micromark-util-character';\n/** @type {Construct} */\nconst nonLazyContinuation = {\n partial: true,\n tokenize: tokenizeNonLazyContinuation\n};\n\n/** @type {Construct} */\nexport const codeFenced = {\n concrete: true,\n name: 'codeFenced',\n tokenize: tokenizeCodeFenced\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeCodeFenced(effects, ok, nok) {\n const self = this;\n /** @type {Construct} */\n const closeStart = {\n partial: true,\n tokenize: tokenizeCloseStart\n };\n let initialPrefix = 0;\n let sizeOpen = 0;\n /** @type {NonNullable} */\n let marker;\n return start;\n\n /**\n * Start of code.\n *\n * ```markdown\n * > | ~~~js\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function start(code) {\n // To do: parse whitespace like `markdown-rs`.\n return beforeSequenceOpen(code);\n }\n\n /**\n * In opening fence, after prefix, at sequence.\n *\n * ```markdown\n * > | ~~~js\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function beforeSequenceOpen(code) {\n const tail = self.events[self.events.length - 1];\n initialPrefix = tail && tail[1].type === \"linePrefix\" ? 
tail[2].sliceSerialize(tail[1], true).length : 0;\n marker = code;\n effects.enter(\"codeFenced\");\n effects.enter(\"codeFencedFence\");\n effects.enter(\"codeFencedFenceSequence\");\n return sequenceOpen(code);\n }\n\n /**\n * In opening fence sequence.\n *\n * ```markdown\n * > | ~~~js\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function sequenceOpen(code) {\n if (code === marker) {\n sizeOpen++;\n effects.consume(code);\n return sequenceOpen;\n }\n if (sizeOpen < 3) {\n return nok(code);\n }\n effects.exit(\"codeFencedFenceSequence\");\n return markdownSpace(code) ? factorySpace(effects, infoBefore, \"whitespace\")(code) : infoBefore(code);\n }\n\n /**\n * In opening fence, after the sequence (and optional whitespace), before info.\n *\n * ```markdown\n * > | ~~~js\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function infoBefore(code) {\n if (code === null || markdownLineEnding(code)) {\n effects.exit(\"codeFencedFence\");\n return self.interrupt ? 
ok(code) : effects.check(nonLazyContinuation, atNonLazyBreak, after)(code);\n }\n effects.enter(\"codeFencedFenceInfo\");\n effects.enter(\"chunkString\", {\n contentType: \"string\"\n });\n return info(code);\n }\n\n /**\n * In info.\n *\n * ```markdown\n * > | ~~~js\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function info(code) {\n if (code === null || markdownLineEnding(code)) {\n effects.exit(\"chunkString\");\n effects.exit(\"codeFencedFenceInfo\");\n return infoBefore(code);\n }\n if (markdownSpace(code)) {\n effects.exit(\"chunkString\");\n effects.exit(\"codeFencedFenceInfo\");\n return factorySpace(effects, metaBefore, \"whitespace\")(code);\n }\n if (code === 96 && code === marker) {\n return nok(code);\n }\n effects.consume(code);\n return info;\n }\n\n /**\n * In opening fence, after info and whitespace, before meta.\n *\n * ```markdown\n * > | ~~~js eval\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function metaBefore(code) {\n if (code === null || markdownLineEnding(code)) {\n return infoBefore(code);\n }\n effects.enter(\"codeFencedFenceMeta\");\n effects.enter(\"chunkString\", {\n contentType: \"string\"\n });\n return meta(code);\n }\n\n /**\n * In meta.\n *\n * ```markdown\n * > | ~~~js eval\n * ^\n * | alert(1)\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function meta(code) {\n if (code === null || markdownLineEnding(code)) {\n effects.exit(\"chunkString\");\n effects.exit(\"codeFencedFenceMeta\");\n return infoBefore(code);\n }\n if (code === 96 && code === marker) {\n return nok(code);\n }\n effects.consume(code);\n return meta;\n }\n\n /**\n * At eol/eof in code, before a non-lazy closing fence or content.\n *\n * ```markdown\n * > | ~~~js\n * ^\n * > | alert(1)\n * ^\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function atNonLazyBreak(code) {\n return effects.attempt(closeStart, after, contentBefore)(code);\n }\n\n /**\n * Before code content, not a closing fence, at eol.\n *\n * 
```markdown\n * | ~~~js\n * > | alert(1)\n * ^\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function contentBefore(code) {\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return contentStart;\n }\n\n /**\n * Before code content, not a closing fence.\n *\n * ```markdown\n * | ~~~js\n * > | alert(1)\n * ^\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function contentStart(code) {\n return initialPrefix > 0 && markdownSpace(code) ? factorySpace(effects, beforeContentChunk, \"linePrefix\", initialPrefix + 1)(code) : beforeContentChunk(code);\n }\n\n /**\n * Before code content, after optional prefix.\n *\n * ```markdown\n * | ~~~js\n * > | alert(1)\n * ^\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function beforeContentChunk(code) {\n if (code === null || markdownLineEnding(code)) {\n return effects.check(nonLazyContinuation, atNonLazyBreak, after)(code);\n }\n effects.enter(\"codeFlowValue\");\n return contentChunk(code);\n }\n\n /**\n * In code content.\n *\n * ```markdown\n * | ~~~js\n * > | alert(1)\n * ^^^^^^^^\n * | ~~~\n * ```\n *\n * @type {State}\n */\n function contentChunk(code) {\n if (code === null || markdownLineEnding(code)) {\n effects.exit(\"codeFlowValue\");\n return beforeContentChunk(code);\n }\n effects.consume(code);\n return contentChunk;\n }\n\n /**\n * After code.\n *\n * ```markdown\n * | ~~~js\n * | alert(1)\n * > | ~~~\n * ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n effects.exit(\"codeFenced\");\n return ok(code);\n }\n\n /**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\n function tokenizeCloseStart(effects, ok, nok) {\n let size = 0;\n return startBefore;\n\n /**\n *\n *\n * @type {State}\n */\n function startBefore(code) {\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return start;\n }\n\n /**\n * Before closing fence, at optional whitespace.\n *\n * ```markdown\n * | ~~~js\n * | alert(1)\n * > | ~~~\n 
* ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n // Always populated by defaults.\n\n // To do: `enter` here or in next state?\n effects.enter(\"codeFencedFence\");\n return markdownSpace(code) ? factorySpace(effects, beforeSequenceClose, \"linePrefix\", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code) : beforeSequenceClose(code);\n }\n\n /**\n * In closing fence, after optional whitespace, at sequence.\n *\n * ```markdown\n * | ~~~js\n * | alert(1)\n * > | ~~~\n * ^\n * ```\n *\n * @type {State}\n */\n function beforeSequenceClose(code) {\n if (code === marker) {\n effects.enter(\"codeFencedFenceSequence\");\n return sequenceClose(code);\n }\n return nok(code);\n }\n\n /**\n * In closing fence sequence.\n *\n * ```markdown\n * | ~~~js\n * | alert(1)\n * > | ~~~\n * ^\n * ```\n *\n * @type {State}\n */\n function sequenceClose(code) {\n if (code === marker) {\n size++;\n effects.consume(code);\n return sequenceClose;\n }\n if (size >= sizeOpen) {\n effects.exit(\"codeFencedFenceSequence\");\n return markdownSpace(code) ? 
factorySpace(effects, sequenceCloseAfter, \"whitespace\")(code) : sequenceCloseAfter(code);\n }\n return nok(code);\n }\n\n /**\n * After closing fence sequence, after optional whitespace.\n *\n * ```markdown\n * | ~~~js\n * | alert(1)\n * > | ~~~\n * ^\n * ```\n *\n * @type {State}\n */\n function sequenceCloseAfter(code) {\n if (code === null || markdownLineEnding(code)) {\n effects.exit(\"codeFencedFence\");\n return ok(code);\n }\n return nok(code);\n }\n }\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeNonLazyContinuation(effects, ok, nok) {\n const self = this;\n return start;\n\n /**\n *\n *\n * @type {State}\n */\n function start(code) {\n if (code === null) {\n return nok(code);\n }\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return lineStart;\n }\n\n /**\n *\n *\n * @type {State}\n */\n function lineStart(code) {\n return self.parser.lazy[self.now().line] ? nok(code) : ok(code);\n }\n}","/// \n\n/* eslint-env browser */\n\nconst element = document.createElement('i')\n\n/**\n * @param {string} value\n * @returns {string | false}\n */\nexport function decodeNamedCharacterReference(value) {\n const characterReference = '&' + value + ';'\n element.innerHTML = characterReference\n const character = element.textContent\n\n // Some named character references do not require the closing semicolon\n // (`¬`, for instance), which leads to situations where parsing the assumed\n // named reference of `¬it;` will result in the string `¬it;`.\n // When we encounter a trailing semicolon after parsing, and the character\n // reference to decode was not a semicolon (`;`), we can assume that the\n // matching was not complete.\n if (\n // @ts-expect-error: TypeScript is wrong that `textContent` on elements can\n // yield `null`.\n character.charCodeAt(character.length - 1) === 59 /* `;` */ &&\n value !== 'semi'\n ) {\n return false\n }\n\n // If the decoded string is equal to 
the input, the character reference was\n // not valid.\n // @ts-expect-error: TypeScript is wrong that `textContent` on elements can\n // yield `null`.\n return character === characterReference ? false : character\n}\n","/**\n * @import {\n * Code,\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { decodeNamedCharacterReference } from 'decode-named-character-reference';\nimport { asciiAlphanumeric, asciiDigit, asciiHexDigit } from 'micromark-util-character';\n/** @type {Construct} */\nexport const characterReference = {\n name: 'characterReference',\n tokenize: tokenizeCharacterReference\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeCharacterReference(effects, ok, nok) {\n const self = this;\n let size = 0;\n /** @type {number} */\n let max;\n /** @type {(code: Code) => boolean} */\n let test;\n return start;\n\n /**\n * Start of character reference.\n *\n * ```markdown\n * > | a&b\n * ^\n * > | a{b\n * ^\n * > | a b\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"characterReference\");\n effects.enter(\"characterReferenceMarker\");\n effects.consume(code);\n effects.exit(\"characterReferenceMarker\");\n return open;\n }\n\n /**\n * After `&`, at `#` for numeric references or alphanumeric for named\n * references.\n *\n * ```markdown\n * > | a&b\n * ^\n * > | a{b\n * ^\n * > | a b\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (code === 35) {\n effects.enter(\"characterReferenceMarkerNumeric\");\n effects.consume(code);\n effects.exit(\"characterReferenceMarkerNumeric\");\n return numeric;\n }\n effects.enter(\"characterReferenceValue\");\n max = 31;\n test = asciiAlphanumeric;\n return value(code);\n }\n\n /**\n * After `#`, at `x` for hexadecimals or digit for decimals.\n *\n * ```markdown\n * > | a{b\n * ^\n * > | a b\n * ^\n * ```\n *\n * @type {State}\n */\n function numeric(code) {\n if 
(code === 88 || code === 120) {\n effects.enter(\"characterReferenceMarkerHexadecimal\");\n effects.consume(code);\n effects.exit(\"characterReferenceMarkerHexadecimal\");\n effects.enter(\"characterReferenceValue\");\n max = 6;\n test = asciiHexDigit;\n return value;\n }\n effects.enter(\"characterReferenceValue\");\n max = 7;\n test = asciiDigit;\n return value(code);\n }\n\n /**\n * After markers (`&#x`, `&#`, or `&`), in value, before `;`.\n *\n * The character reference kind defines what and how many characters are\n * allowed.\n *\n * ```markdown\n * > | a&b\n * ^^^\n * > | a{b\n * ^^^\n * > | a b\n * ^\n * ```\n *\n * @type {State}\n */\n function value(code) {\n if (code === 59 && size) {\n const token = effects.exit(\"characterReferenceValue\");\n if (test === asciiAlphanumeric && !decodeNamedCharacterReference(self.sliceSerialize(token))) {\n return nok(code);\n }\n\n // To do: `markdown-rs` uses a different name:\n // `CharacterReferenceMarkerSemi`.\n effects.enter(\"characterReferenceMarker\");\n effects.consume(code);\n effects.exit(\"characterReferenceMarker\");\n effects.exit(\"characterReference\");\n return ok;\n }\n if (test(code) && size++ < max) {\n effects.consume(code);\n return value;\n }\n return nok(code);\n }\n}","/**\n * @import {\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { asciiPunctuation } from 'micromark-util-character';\n/** @type {Construct} */\nexport const characterEscape = {\n name: 'characterEscape',\n tokenize: tokenizeCharacterEscape\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeCharacterEscape(effects, ok, nok) {\n return start;\n\n /**\n * Start of character escape.\n *\n * ```markdown\n * > | a\\*b\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"characterEscape\");\n effects.enter(\"escapeMarker\");\n effects.consume(code);\n effects.exit(\"escapeMarker\");\n return 
inside;\n }\n\n /**\n * After `\\`, at punctuation.\n *\n * ```markdown\n * > | a\\*b\n * ^\n * ```\n *\n * @type {State}\n */\n function inside(code) {\n // ASCII punctuation.\n if (asciiPunctuation(code)) {\n effects.enter(\"characterEscapeValue\");\n effects.consume(code);\n effects.exit(\"characterEscapeValue\");\n effects.exit(\"characterEscape\");\n return ok;\n }\n return nok(code);\n }\n}","/**\n * @import {\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { factorySpace } from 'micromark-factory-space';\nimport { markdownLineEnding } from 'micromark-util-character';\n/** @type {Construct} */\nexport const lineEnding = {\n name: 'lineEnding',\n tokenize: tokenizeLineEnding\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeLineEnding(effects, ok) {\n return start;\n\n /** @type {State} */\n function start(code) {\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return factorySpace(effects, ok, \"linePrefix\");\n }\n}","/**\n * @import {Event, Resolver, TokenizeContext} from 'micromark-util-types'\n */\n\n/**\n * Call all `resolveAll`s.\n *\n * @param {ReadonlyArray<{resolveAll?: Resolver | undefined}>} constructs\n * List of constructs, optionally with `resolveAll`s.\n * @param {Array} events\n * List of events.\n * @param {TokenizeContext} context\n * Context used by `tokenize`.\n * @returns {Array}\n * Changed events.\n */\nexport function resolveAll(constructs, events, context) {\n /** @type {Array} */\n const called = []\n let index = -1\n\n while (++index < constructs.length) {\n const resolve = constructs[index].resolveAll\n\n if (resolve && !called.includes(resolve)) {\n events = resolve(events, context)\n called.push(resolve)\n }\n }\n\n return events\n}\n","/**\n * @import {\n * Construct,\n * Event,\n * Resolver,\n * State,\n * TokenizeContext,\n * Tokenizer,\n * Token\n * } from 
'micromark-util-types'\n */\n\nimport { factoryDestination } from 'micromark-factory-destination';\nimport { factoryLabel } from 'micromark-factory-label';\nimport { factoryTitle } from 'micromark-factory-title';\nimport { factoryWhitespace } from 'micromark-factory-whitespace';\nimport { markdownLineEndingOrSpace } from 'micromark-util-character';\nimport { push, splice } from 'micromark-util-chunked';\nimport { normalizeIdentifier } from 'micromark-util-normalize-identifier';\nimport { resolveAll } from 'micromark-util-resolve-all';\n/** @type {Construct} */\nexport const labelEnd = {\n name: 'labelEnd',\n resolveAll: resolveAllLabelEnd,\n resolveTo: resolveToLabelEnd,\n tokenize: tokenizeLabelEnd\n};\n\n/** @type {Construct} */\nconst resourceConstruct = {\n tokenize: tokenizeResource\n};\n/** @type {Construct} */\nconst referenceFullConstruct = {\n tokenize: tokenizeReferenceFull\n};\n/** @type {Construct} */\nconst referenceCollapsedConstruct = {\n tokenize: tokenizeReferenceCollapsed\n};\n\n/** @type {Resolver} */\nfunction resolveAllLabelEnd(events) {\n let index = -1;\n /** @type {Array} */\n const newEvents = [];\n while (++index < events.length) {\n const token = events[index][1];\n newEvents.push(events[index]);\n if (token.type === \"labelImage\" || token.type === \"labelLink\" || token.type === \"labelEnd\") {\n // Remove the marker.\n const offset = token.type === \"labelImage\" ? 
4 : 2;\n token.type = \"data\";\n index += offset;\n }\n }\n\n // If the events are equal, we don't have to copy newEvents to events\n if (events.length !== newEvents.length) {\n splice(events, 0, events.length, newEvents);\n }\n return events;\n}\n\n/** @type {Resolver} */\nfunction resolveToLabelEnd(events, context) {\n let index = events.length;\n let offset = 0;\n /** @type {Token} */\n let token;\n /** @type {number | undefined} */\n let open;\n /** @type {number | undefined} */\n let close;\n /** @type {Array} */\n let media;\n\n // Find an opening.\n while (index--) {\n token = events[index][1];\n if (open) {\n // If we see another link, or inactive link label, we’ve been here before.\n if (token.type === \"link\" || token.type === \"labelLink\" && token._inactive) {\n break;\n }\n\n // Mark other link openings as inactive, as we can’t have links in\n // links.\n if (events[index][0] === 'enter' && token.type === \"labelLink\") {\n token._inactive = true;\n }\n } else if (close) {\n if (events[index][0] === 'enter' && (token.type === \"labelImage\" || token.type === \"labelLink\") && !token._balanced) {\n open = index;\n if (token.type !== \"labelLink\") {\n offset = 2;\n break;\n }\n }\n } else if (token.type === \"labelEnd\") {\n close = index;\n }\n }\n const group = {\n type: events[open][1].type === \"labelLink\" ? 
\"link\" : \"image\",\n start: {\n ...events[open][1].start\n },\n end: {\n ...events[events.length - 1][1].end\n }\n };\n const label = {\n type: \"label\",\n start: {\n ...events[open][1].start\n },\n end: {\n ...events[close][1].end\n }\n };\n const text = {\n type: \"labelText\",\n start: {\n ...events[open + offset + 2][1].end\n },\n end: {\n ...events[close - 2][1].start\n }\n };\n media = [['enter', group, context], ['enter', label, context]];\n\n // Opening marker.\n media = push(media, events.slice(open + 1, open + offset + 3));\n\n // Text open.\n media = push(media, [['enter', text, context]]);\n\n // Always populated by defaults.\n\n // Between.\n media = push(media, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + offset + 4, close - 3), context));\n\n // Text close, marker close, label close.\n media = push(media, [['exit', text, context], events[close - 2], events[close - 1], ['exit', label, context]]);\n\n // Reference, resource, or so.\n media = push(media, events.slice(close + 1));\n\n // Media close.\n media = push(media, [['exit', group, context]]);\n splice(events, open, events.length, media);\n return events;\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeLabelEnd(effects, ok, nok) {\n const self = this;\n let index = self.events.length;\n /** @type {Token} */\n let labelStart;\n /** @type {boolean} */\n let defined;\n\n // Find an opening.\n while (index--) {\n if ((self.events[index][1].type === \"labelImage\" || self.events[index][1].type === \"labelLink\") && !self.events[index][1]._balanced) {\n labelStart = self.events[index][1];\n break;\n }\n }\n return start;\n\n /**\n * Start of label end.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * > | [a][b] c\n * ^\n * > | [a][] b\n * ^\n * > | [a] b\n * ```\n *\n * @type {State}\n */\n function start(code) {\n // If there is not an okay opening.\n if (!labelStart) {\n return nok(code);\n }\n\n // If the corresponding 
label (link) start is marked as inactive,\n // it means we’d be wrapping a link, like this:\n //\n // ```markdown\n // > | a [b [c](d) e](f) g.\n // ^\n // ```\n //\n // We can’t have that, so it’s just balanced brackets.\n if (labelStart._inactive) {\n return labelEndNok(code);\n }\n defined = self.parser.defined.includes(normalizeIdentifier(self.sliceSerialize({\n start: labelStart.end,\n end: self.now()\n })));\n effects.enter(\"labelEnd\");\n effects.enter(\"labelMarker\");\n effects.consume(code);\n effects.exit(\"labelMarker\");\n effects.exit(\"labelEnd\");\n return after;\n }\n\n /**\n * After `]`.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * > | [a][b] c\n * ^\n * > | [a][] b\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n // Note: `markdown-rs` also parses GFM footnotes here, which for us is in\n // an extension.\n\n // Resource (`[asd](fgh)`)?\n if (code === 40) {\n return effects.attempt(resourceConstruct, labelEndOk, defined ? labelEndOk : labelEndNok)(code);\n }\n\n // Full (`[asd][fgh]`) or collapsed (`[asd][]`) reference?\n if (code === 91) {\n return effects.attempt(referenceFullConstruct, labelEndOk, defined ? referenceNotFull : labelEndNok)(code);\n }\n\n // Shortcut (`[asd]`) reference?\n return defined ? 
labelEndOk(code) : labelEndNok(code);\n }\n\n /**\n * After `]`, at `[`, but not at a full reference.\n *\n * > 👉 **Note**: we only get here if the label is defined.\n *\n * ```markdown\n * > | [a][] b\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceNotFull(code) {\n return effects.attempt(referenceCollapsedConstruct, labelEndOk, labelEndNok)(code);\n }\n\n /**\n * Done, we found something.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * > | [a][b] c\n * ^\n * > | [a][] b\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelEndOk(code) {\n // Note: `markdown-rs` does a bunch of stuff here.\n return ok(code);\n }\n\n /**\n * Done, it’s nothing.\n *\n * There was an okay opening, but we didn’t match anything.\n *\n * ```markdown\n * > | [a](b c\n * ^\n * > | [a][b c\n * ^\n * > | [a] b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelEndNok(code) {\n labelStart._balanced = true;\n return nok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeResource(effects, ok, nok) {\n return resourceStart;\n\n /**\n * At a resource.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceStart(code) {\n effects.enter(\"resource\");\n effects.enter(\"resourceMarker\");\n effects.consume(code);\n effects.exit(\"resourceMarker\");\n return resourceBefore;\n }\n\n /**\n * In resource, after `(`, at optional whitespace.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceBefore(code) {\n return markdownLineEndingOrSpace(code) ? 
factoryWhitespace(effects, resourceOpen)(code) : resourceOpen(code);\n }\n\n /**\n * In resource, after optional whitespace, at `)` or a destination.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceOpen(code) {\n if (code === 41) {\n return resourceEnd(code);\n }\n return factoryDestination(effects, resourceDestinationAfter, resourceDestinationMissing, \"resourceDestination\", \"resourceDestinationLiteral\", \"resourceDestinationLiteralMarker\", \"resourceDestinationRaw\", \"resourceDestinationString\", 32)(code);\n }\n\n /**\n * In resource, after destination, at optional whitespace.\n *\n * ```markdown\n * > | [a](b) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceDestinationAfter(code) {\n return markdownLineEndingOrSpace(code) ? factoryWhitespace(effects, resourceBetween)(code) : resourceEnd(code);\n }\n\n /**\n * At invalid destination.\n *\n * ```markdown\n * > | [a](<<) b\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceDestinationMissing(code) {\n return nok(code);\n }\n\n /**\n * In resource, after destination and whitespace, at `(` or title.\n *\n * ```markdown\n * > | [a](b ) c\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceBetween(code) {\n if (code === 34 || code === 39 || code === 40) {\n return factoryTitle(effects, resourceTitleAfter, nok, \"resourceTitle\", \"resourceTitleMarker\", \"resourceTitleString\")(code);\n }\n return resourceEnd(code);\n }\n\n /**\n * In resource, after title, at optional whitespace.\n *\n * ```markdown\n * > | [a](b \"c\") d\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceTitleAfter(code) {\n return markdownLineEndingOrSpace(code) ? 
factoryWhitespace(effects, resourceEnd)(code) : resourceEnd(code);\n }\n\n /**\n * In resource, at `)`.\n *\n * ```markdown\n * > | [a](b) d\n * ^\n * ```\n *\n * @type {State}\n */\n function resourceEnd(code) {\n if (code === 41) {\n effects.enter(\"resourceMarker\");\n effects.consume(code);\n effects.exit(\"resourceMarker\");\n effects.exit(\"resource\");\n return ok;\n }\n return nok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeReferenceFull(effects, ok, nok) {\n const self = this;\n return referenceFull;\n\n /**\n * In a reference (full), at the `[`.\n *\n * ```markdown\n * > | [a][b] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceFull(code) {\n return factoryLabel.call(self, effects, referenceFullAfter, referenceFullMissing, \"reference\", \"referenceMarker\", \"referenceString\")(code);\n }\n\n /**\n * In a reference (full), after `]`.\n *\n * ```markdown\n * > | [a][b] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceFullAfter(code) {\n return self.parser.defined.includes(normalizeIdentifier(self.sliceSerialize(self.events[self.events.length - 1][1]).slice(1, -1))) ? 
ok(code) : nok(code);\n }\n\n /**\n * In reference (full) that was missing.\n *\n * ```markdown\n * > | [a][b d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceFullMissing(code) {\n return nok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeReferenceCollapsed(effects, ok, nok) {\n return referenceCollapsedStart;\n\n /**\n * In reference (collapsed), at `[`.\n *\n * > 👉 **Note**: we only get here if the label is defined.\n *\n * ```markdown\n * > | [a][] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceCollapsedStart(code) {\n // We only attempt a collapsed label if there’s a `[`.\n\n effects.enter(\"reference\");\n effects.enter(\"referenceMarker\");\n effects.consume(code);\n effects.exit(\"referenceMarker\");\n return referenceCollapsedOpen;\n }\n\n /**\n * In reference (collapsed), at `]`.\n *\n * > 👉 **Note**: we only get here if the label is defined.\n *\n * ```markdown\n * > | [a][] d\n * ^\n * ```\n *\n * @type {State}\n */\n function referenceCollapsedOpen(code) {\n if (code === 93) {\n effects.enter(\"referenceMarker\");\n effects.consume(code);\n effects.exit(\"referenceMarker\");\n effects.exit(\"reference\");\n return ok;\n }\n return nok(code);\n }\n}","/**\n * @import {\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { labelEnd } from './label-end.js';\n\n/** @type {Construct} */\nexport const labelStartImage = {\n name: 'labelStartImage',\n resolveAll: labelEnd.resolveAll,\n tokenize: tokenizeLabelStartImage\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeLabelStartImage(effects, ok, nok) {\n const self = this;\n return start;\n\n /**\n * Start of label (image) start.\n *\n * ```markdown\n * > | a ![b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"labelImage\");\n effects.enter(\"labelImageMarker\");\n 
effects.consume(code);\n effects.exit(\"labelImageMarker\");\n return open;\n }\n\n /**\n * After `!`, at `[`.\n *\n * ```markdown\n * > | a ![b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (code === 91) {\n effects.enter(\"labelMarker\");\n effects.consume(code);\n effects.exit(\"labelMarker\");\n effects.exit(\"labelImage\");\n return after;\n }\n return nok(code);\n }\n\n /**\n * After `![`.\n *\n * ```markdown\n * > | a ![b] c\n * ^\n * ```\n *\n * This is needed in because, when GFM footnotes are enabled, images never\n * form when started with a `^`.\n * Instead, links form:\n *\n * ```markdown\n * ![^a](b)\n *\n * ![^a][b]\n *\n * [b]: c\n * ```\n *\n * ```html\n * \n *

    !^a

    \n * ```\n *\n * @type {State}\n */\n function after(code) {\n // To do: use a new field to do this, this is still needed for\n // `micromark-extension-gfm-footnote`, but the `label-start-link`\n // behavior isn’t.\n // Hidden footnotes hook.\n /* c8 ignore next 3 */\n return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs ? nok(code) : ok(code);\n }\n}","/**\n * @import {Code} from 'micromark-util-types'\n */\n\nimport { markdownLineEndingOrSpace, unicodePunctuation, unicodeWhitespace } from 'micromark-util-character';\n/**\n * Classify whether a code represents whitespace, punctuation, or something\n * else.\n *\n * Used for attention (emphasis, strong), whose sequences can open or close\n * based on the class of surrounding characters.\n *\n * > 👉 **Note**: eof (`null`) is seen as whitespace.\n *\n * @param {Code} code\n * Code.\n * @returns {typeof constants.characterGroupWhitespace | typeof constants.characterGroupPunctuation | undefined}\n * Group.\n */\nexport function classifyCharacter(code) {\n if (code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code)) {\n return 1;\n }\n if (unicodePunctuation(code)) {\n return 2;\n }\n}","/**\n * @import {\n * Code,\n * Construct,\n * Event,\n * Point,\n * Resolver,\n * State,\n * TokenizeContext,\n * Tokenizer,\n * Token\n * } from 'micromark-util-types'\n */\n\nimport { push, splice } from 'micromark-util-chunked';\nimport { classifyCharacter } from 'micromark-util-classify-character';\nimport { resolveAll } from 'micromark-util-resolve-all';\n/** @type {Construct} */\nexport const attention = {\n name: 'attention',\n resolveAll: resolveAllAttention,\n tokenize: tokenizeAttention\n};\n\n/**\n * Take all events and resolve attention to emphasis or strong.\n *\n * @type {Resolver}\n */\n// eslint-disable-next-line complexity\nfunction resolveAllAttention(events, context) {\n let index = -1;\n /** @type {number} */\n let open;\n /** @type {Token} */\n let group;\n /** @type 
{Token} */\n let text;\n /** @type {Token} */\n let openingSequence;\n /** @type {Token} */\n let closingSequence;\n /** @type {number} */\n let use;\n /** @type {Array} */\n let nextEvents;\n /** @type {number} */\n let offset;\n\n // Walk through all events.\n //\n // Note: performance of this is fine on an mb of normal markdown, but it’s\n // a bottleneck for malicious stuff.\n while (++index < events.length) {\n // Find a token that can close.\n if (events[index][0] === 'enter' && events[index][1].type === 'attentionSequence' && events[index][1]._close) {\n open = index;\n\n // Now walk back to find an opener.\n while (open--) {\n // Find a token that can open the closer.\n if (events[open][0] === 'exit' && events[open][1].type === 'attentionSequence' && events[open][1]._open &&\n // If the markers are the same:\n context.sliceSerialize(events[open][1]).charCodeAt(0) === context.sliceSerialize(events[index][1]).charCodeAt(0)) {\n // If the opening can close or the closing can open,\n // and the close size *is not* a multiple of three,\n // but the sum of the opening and closing size *is* multiple of three,\n // then don’t match.\n if ((events[open][1]._close || events[index][1]._open) && (events[index][1].end.offset - events[index][1].start.offset) % 3 && !((events[open][1].end.offset - events[open][1].start.offset + events[index][1].end.offset - events[index][1].start.offset) % 3)) {\n continue;\n }\n\n // Number of markers to use from the sequence.\n use = events[open][1].end.offset - events[open][1].start.offset > 1 && events[index][1].end.offset - events[index][1].start.offset > 1 ? 2 : 1;\n const start = {\n ...events[open][1].end\n };\n const end = {\n ...events[index][1].start\n };\n movePoint(start, -use);\n movePoint(end, use);\n openingSequence = {\n type: use > 1 ? \"strongSequence\" : \"emphasisSequence\",\n start,\n end: {\n ...events[open][1].end\n }\n };\n closingSequence = {\n type: use > 1 ? 
\"strongSequence\" : \"emphasisSequence\",\n start: {\n ...events[index][1].start\n },\n end\n };\n text = {\n type: use > 1 ? \"strongText\" : \"emphasisText\",\n start: {\n ...events[open][1].end\n },\n end: {\n ...events[index][1].start\n }\n };\n group = {\n type: use > 1 ? \"strong\" : \"emphasis\",\n start: {\n ...openingSequence.start\n },\n end: {\n ...closingSequence.end\n }\n };\n events[open][1].end = {\n ...openingSequence.start\n };\n events[index][1].start = {\n ...closingSequence.end\n };\n nextEvents = [];\n\n // If there are more markers in the opening, add them before.\n if (events[open][1].end.offset - events[open][1].start.offset) {\n nextEvents = push(nextEvents, [['enter', events[open][1], context], ['exit', events[open][1], context]]);\n }\n\n // Opening.\n nextEvents = push(nextEvents, [['enter', group, context], ['enter', openingSequence, context], ['exit', openingSequence, context], ['enter', text, context]]);\n\n // Always populated by defaults.\n\n // Between.\n nextEvents = push(nextEvents, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + 1, index), context));\n\n // Closing.\n nextEvents = push(nextEvents, [['exit', text, context], ['enter', closingSequence, context], ['exit', closingSequence, context], ['exit', group, context]]);\n\n // If there are more markers in the closing, add them after.\n if (events[index][1].end.offset - events[index][1].start.offset) {\n offset = 2;\n nextEvents = push(nextEvents, [['enter', events[index][1], context], ['exit', events[index][1], context]]);\n } else {\n offset = 0;\n }\n splice(events, open - 1, index - open + 3, nextEvents);\n index = open + nextEvents.length - offset - 2;\n break;\n }\n }\n }\n }\n\n // Remove remaining sequences.\n index = -1;\n while (++index < events.length) {\n if (events[index][1].type === 'attentionSequence') {\n events[index][1].type = 'data';\n }\n }\n return events;\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n 
*/\nfunction tokenizeAttention(effects, ok) {\n const attentionMarkers = this.parser.constructs.attentionMarkers.null;\n const previous = this.previous;\n const before = classifyCharacter(previous);\n\n /** @type {NonNullable} */\n let marker;\n return start;\n\n /**\n * Before a sequence.\n *\n * ```markdown\n * > | **\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n marker = code;\n effects.enter('attentionSequence');\n return inside(code);\n }\n\n /**\n * In a sequence.\n *\n * ```markdown\n * > | **\n * ^^\n * ```\n *\n * @type {State}\n */\n function inside(code) {\n if (code === marker) {\n effects.consume(code);\n return inside;\n }\n const token = effects.exit('attentionSequence');\n\n // To do: next major: move this to resolver, just like `markdown-rs`.\n const after = classifyCharacter(code);\n\n // Always populated by defaults.\n\n const open = !after || after === 2 && before || attentionMarkers.includes(code);\n const close = !before || before === 2 && after || attentionMarkers.includes(previous);\n token._open = Boolean(marker === 42 ? open : open && (before || !close));\n token._close = Boolean(marker === 42 ? close : close && (after || !open));\n return ok(code);\n }\n}\n\n/**\n * Move a point a bit.\n *\n * Note: `move` only works inside lines! 
It’s not possible to move past other\n * chunks (replacement characters, tabs, or line endings).\n *\n * @param {Point} point\n * Point.\n * @param {number} offset\n * Amount to move.\n * @returns {undefined}\n * Nothing.\n */\nfunction movePoint(point, offset) {\n point.column += offset;\n point.offset += offset;\n point._bufferIndex += offset;\n}","/**\n * @import {\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { asciiAlphanumeric, asciiAlpha, asciiAtext, asciiControl } from 'micromark-util-character';\n/** @type {Construct} */\nexport const autolink = {\n name: 'autolink',\n tokenize: tokenizeAutolink\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeAutolink(effects, ok, nok) {\n let size = 0;\n return start;\n\n /**\n * Start of an autolink.\n *\n * ```markdown\n * > | ab\n * ^\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"autolink\");\n effects.enter(\"autolinkMarker\");\n effects.consume(code);\n effects.exit(\"autolinkMarker\");\n effects.enter(\"autolinkProtocol\");\n return open;\n }\n\n /**\n * After `<`, at protocol or atext.\n *\n * ```markdown\n * > | ab\n * ^\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (asciiAlpha(code)) {\n effects.consume(code);\n return schemeOrEmailAtext;\n }\n if (code === 64) {\n return nok(code);\n }\n return emailAtext(code);\n }\n\n /**\n * At second byte of protocol or atext.\n *\n * ```markdown\n * > | ab\n * ^\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function schemeOrEmailAtext(code) {\n // ASCII alphanumeric and `+`, `-`, and `.`.\n if (code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) {\n // Count the previous alphabetical from `open` too.\n size = 1;\n return schemeInsideOrEmailAtext(code);\n }\n return emailAtext(code);\n }\n\n /**\n * In ambiguous protocol or atext.\n *\n * ```markdown\n * > 
| ab\n * ^\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function schemeInsideOrEmailAtext(code) {\n if (code === 58) {\n effects.consume(code);\n size = 0;\n return urlInside;\n }\n\n // ASCII alphanumeric and `+`, `-`, and `.`.\n if ((code === 43 || code === 45 || code === 46 || asciiAlphanumeric(code)) && size++ < 32) {\n effects.consume(code);\n return schemeInsideOrEmailAtext;\n }\n size = 0;\n return emailAtext(code);\n }\n\n /**\n * After protocol, in URL.\n *\n * ```markdown\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function urlInside(code) {\n if (code === 62) {\n effects.exit(\"autolinkProtocol\");\n effects.enter(\"autolinkMarker\");\n effects.consume(code);\n effects.exit(\"autolinkMarker\");\n effects.exit(\"autolink\");\n return ok;\n }\n\n // ASCII control, space, or `<`.\n if (code === null || code === 32 || code === 60 || asciiControl(code)) {\n return nok(code);\n }\n effects.consume(code);\n return urlInside;\n }\n\n /**\n * In email atext.\n *\n * ```markdown\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function emailAtext(code) {\n if (code === 64) {\n effects.consume(code);\n return emailAtSignOrDot;\n }\n if (asciiAtext(code)) {\n effects.consume(code);\n return emailAtext;\n }\n return nok(code);\n }\n\n /**\n * In label, after at-sign or dot.\n *\n * ```markdown\n * > | ab\n * ^ ^\n * ```\n *\n * @type {State}\n */\n function emailAtSignOrDot(code) {\n return asciiAlphanumeric(code) ? 
emailLabel(code) : nok(code);\n }\n\n /**\n * In label, where `.` and `>` are allowed.\n *\n * ```markdown\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function emailLabel(code) {\n if (code === 46) {\n effects.consume(code);\n size = 0;\n return emailAtSignOrDot;\n }\n if (code === 62) {\n // Exit, then change the token type.\n effects.exit(\"autolinkProtocol\").type = \"autolinkEmail\";\n effects.enter(\"autolinkMarker\");\n effects.consume(code);\n effects.exit(\"autolinkMarker\");\n effects.exit(\"autolink\");\n return ok;\n }\n return emailValue(code);\n }\n\n /**\n * In label, where `.` and `>` are *not* allowed.\n *\n * Though, this is also used in `emailLabel` to parse other values.\n *\n * ```markdown\n * > | ab\n * ^\n * ```\n *\n * @type {State}\n */\n function emailValue(code) {\n // ASCII alphanumeric or `-`.\n if ((code === 45 || asciiAlphanumeric(code)) && size++ < 63) {\n const next = code === 45 ? emailValue : emailLabel;\n effects.consume(code);\n return next;\n }\n return nok(code);\n }\n}","/**\n * @import {\n * Code,\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { factorySpace } from 'micromark-factory-space';\nimport { asciiAlphanumeric, asciiAlpha, markdownLineEndingOrSpace, markdownLineEnding, markdownSpace } from 'micromark-util-character';\n/** @type {Construct} */\nexport const htmlText = {\n name: 'htmlText',\n tokenize: tokenizeHtmlText\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeHtmlText(effects, ok, nok) {\n const self = this;\n /** @type {NonNullable | undefined} */\n let marker;\n /** @type {number} */\n let index;\n /** @type {State} */\n let returnState;\n return start;\n\n /**\n * Start of HTML (text).\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"htmlText\");\n effects.enter(\"htmlTextData\");\n effects.consume(code);\n return open;\n 
}\n\n /**\n * After `<`, at tag name or other stuff.\n *\n * ```markdown\n * > | a c\n * ^\n * > | a c\n * ^\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (code === 33) {\n effects.consume(code);\n return declarationOpen;\n }\n if (code === 47) {\n effects.consume(code);\n return tagCloseStart;\n }\n if (code === 63) {\n effects.consume(code);\n return instruction;\n }\n\n // ASCII alphabetical.\n if (asciiAlpha(code)) {\n effects.consume(code);\n return tagOpen;\n }\n return nok(code);\n }\n\n /**\n * After ` | a c\n * ^\n * > | a c\n * ^\n * > | a &<]]> c\n * ^\n * ```\n *\n * @type {State}\n */\n function declarationOpen(code) {\n if (code === 45) {\n effects.consume(code);\n return commentOpenInside;\n }\n if (code === 91) {\n effects.consume(code);\n index = 0;\n return cdataOpenInside;\n }\n if (asciiAlpha(code)) {\n effects.consume(code);\n return declaration;\n }\n return nok(code);\n }\n\n /**\n * In a comment, after ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function commentOpenInside(code) {\n if (code === 45) {\n effects.consume(code);\n return commentEnd;\n }\n return nok(code);\n }\n\n /**\n * In comment.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function comment(code) {\n if (code === null) {\n return nok(code);\n }\n if (code === 45) {\n effects.consume(code);\n return commentClose;\n }\n if (markdownLineEnding(code)) {\n returnState = comment;\n return lineEndingBefore(code);\n }\n effects.consume(code);\n return comment;\n }\n\n /**\n * In comment, after `-`.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function commentClose(code) {\n if (code === 45) {\n effects.consume(code);\n return commentEnd;\n }\n return comment(code);\n }\n\n /**\n * In comment, after `--`.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function commentEnd(code) {\n return code === 62 ? end(code) : code === 45 ? 
commentClose(code) : comment(code);\n }\n\n /**\n * After ` | a &<]]> b\n * ^^^^^^\n * ```\n *\n * @type {State}\n */\n function cdataOpenInside(code) {\n const value = \"CDATA[\";\n if (code === value.charCodeAt(index++)) {\n effects.consume(code);\n return index === value.length ? cdata : cdataOpenInside;\n }\n return nok(code);\n }\n\n /**\n * In CDATA.\n *\n * ```markdown\n * > | a &<]]> b\n * ^^^\n * ```\n *\n * @type {State}\n */\n function cdata(code) {\n if (code === null) {\n return nok(code);\n }\n if (code === 93) {\n effects.consume(code);\n return cdataClose;\n }\n if (markdownLineEnding(code)) {\n returnState = cdata;\n return lineEndingBefore(code);\n }\n effects.consume(code);\n return cdata;\n }\n\n /**\n * In CDATA, after `]`, at another `]`.\n *\n * ```markdown\n * > | a &<]]> b\n * ^\n * ```\n *\n * @type {State}\n */\n function cdataClose(code) {\n if (code === 93) {\n effects.consume(code);\n return cdataEnd;\n }\n return cdata(code);\n }\n\n /**\n * In CDATA, after `]]`, at `>`.\n *\n * ```markdown\n * > | a &<]]> b\n * ^\n * ```\n *\n * @type {State}\n */\n function cdataEnd(code) {\n if (code === 62) {\n return end(code);\n }\n if (code === 93) {\n effects.consume(code);\n return cdataEnd;\n }\n return cdata(code);\n }\n\n /**\n * In declaration.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function declaration(code) {\n if (code === null || code === 62) {\n return end(code);\n }\n if (markdownLineEnding(code)) {\n returnState = declaration;\n return lineEndingBefore(code);\n }\n effects.consume(code);\n return declaration;\n }\n\n /**\n * In instruction.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function instruction(code) {\n if (code === null) {\n return nok(code);\n }\n if (code === 63) {\n effects.consume(code);\n return instructionClose;\n }\n if (markdownLineEnding(code)) {\n returnState = instruction;\n return lineEndingBefore(code);\n }\n effects.consume(code);\n return 
instruction;\n }\n\n /**\n * In instruction, after `?`, at `>`.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function instructionClose(code) {\n return code === 62 ? end(code) : instruction(code);\n }\n\n /**\n * After ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagCloseStart(code) {\n // ASCII alphabetical.\n if (asciiAlpha(code)) {\n effects.consume(code);\n return tagClose;\n }\n return nok(code);\n }\n\n /**\n * After ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagClose(code) {\n // ASCII alphanumerical and `-`.\n if (code === 45 || asciiAlphanumeric(code)) {\n effects.consume(code);\n return tagClose;\n }\n return tagCloseBetween(code);\n }\n\n /**\n * In closing tag, after tag name.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagCloseBetween(code) {\n if (markdownLineEnding(code)) {\n returnState = tagCloseBetween;\n return lineEndingBefore(code);\n }\n if (markdownSpace(code)) {\n effects.consume(code);\n return tagCloseBetween;\n }\n return end(code);\n }\n\n /**\n * After ` | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpen(code) {\n // ASCII alphanumerical and `-`.\n if (code === 45 || asciiAlphanumeric(code)) {\n effects.consume(code);\n return tagOpen;\n }\n if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {\n return tagOpenBetween(code);\n }\n return nok(code);\n }\n\n /**\n * In opening tag, after tag name.\n *\n * ```markdown\n * > | a c\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenBetween(code) {\n if (code === 47) {\n effects.consume(code);\n return end;\n }\n\n // ASCII alphabetical and `:` and `_`.\n if (code === 58 || code === 95 || asciiAlpha(code)) {\n effects.consume(code);\n return tagOpenAttributeName;\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenBetween;\n return lineEndingBefore(code);\n }\n if (markdownSpace(code)) {\n effects.consume(code);\n return tagOpenBetween;\n }\n 
return end(code);\n }\n\n /**\n * In attribute name.\n *\n * ```markdown\n * > | a d\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeName(code) {\n // ASCII alphabetical and `-`, `.`, `:`, and `_`.\n if (code === 45 || code === 46 || code === 58 || code === 95 || asciiAlphanumeric(code)) {\n effects.consume(code);\n return tagOpenAttributeName;\n }\n return tagOpenAttributeNameAfter(code);\n }\n\n /**\n * After attribute name, before initializer, the end of the tag, or\n * whitespace.\n *\n * ```markdown\n * > | a d\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeNameAfter(code) {\n if (code === 61) {\n effects.consume(code);\n return tagOpenAttributeValueBefore;\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenAttributeNameAfter;\n return lineEndingBefore(code);\n }\n if (markdownSpace(code)) {\n effects.consume(code);\n return tagOpenAttributeNameAfter;\n }\n return tagOpenBetween(code);\n }\n\n /**\n * Before unquoted, double quoted, or single quoted attribute value, allowing\n * whitespace.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueBefore(code) {\n if (code === null || code === 60 || code === 61 || code === 62 || code === 96) {\n return nok(code);\n }\n if (code === 34 || code === 39) {\n effects.consume(code);\n marker = code;\n return tagOpenAttributeValueQuoted;\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenAttributeValueBefore;\n return lineEndingBefore(code);\n }\n if (markdownSpace(code)) {\n effects.consume(code);\n return tagOpenAttributeValueBefore;\n }\n effects.consume(code);\n return tagOpenAttributeValueUnquoted;\n }\n\n /**\n * In double or single quoted attribute value.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueQuoted(code) {\n if (code === marker) {\n effects.consume(code);\n marker = undefined;\n return tagOpenAttributeValueQuotedAfter;\n }\n if (code === 
null) {\n return nok(code);\n }\n if (markdownLineEnding(code)) {\n returnState = tagOpenAttributeValueQuoted;\n return lineEndingBefore(code);\n }\n effects.consume(code);\n return tagOpenAttributeValueQuoted;\n }\n\n /**\n * In unquoted attribute value.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueUnquoted(code) {\n if (code === null || code === 34 || code === 39 || code === 60 || code === 61 || code === 96) {\n return nok(code);\n }\n if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {\n return tagOpenBetween(code);\n }\n effects.consume(code);\n return tagOpenAttributeValueUnquoted;\n }\n\n /**\n * After double or single quoted attribute value, before whitespace or the end\n * of the tag.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function tagOpenAttributeValueQuotedAfter(code) {\n if (code === 47 || code === 62 || markdownLineEndingOrSpace(code)) {\n return tagOpenBetween(code);\n }\n return nok(code);\n }\n\n /**\n * In certain circumstances of a tag where only an `>` is allowed.\n *\n * ```markdown\n * > | a e\n * ^\n * ```\n *\n * @type {State}\n */\n function end(code) {\n if (code === 62) {\n effects.consume(code);\n effects.exit(\"htmlTextData\");\n effects.exit(\"htmlText\");\n return ok;\n }\n return nok(code);\n }\n\n /**\n * At eol.\n *\n * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about\n * > empty tokens.\n *\n * ```markdown\n * > | a \n * ```\n *\n * @type {State}\n */\n function lineEndingBefore(code) {\n effects.exit(\"htmlTextData\");\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return lineEndingAfter;\n }\n\n /**\n * After eol, at optional whitespace.\n *\n * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about\n * > empty tokens.\n *\n * ```markdown\n * | a \n * ^\n * ```\n *\n * @type {State}\n */\n function lineEndingAfter(code) {\n // 
Always populated by defaults.\n\n return markdownSpace(code) ? factorySpace(effects, lineEndingAfterPrefix, \"linePrefix\", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code) : lineEndingAfterPrefix(code);\n }\n\n /**\n * After eol, after optional whitespace.\n *\n * > 👉 **Note**: we can’t have blank lines in text, so no need to worry about\n * > empty tokens.\n *\n * ```markdown\n * | a \n * ^\n * ```\n *\n * @type {State}\n */\n function lineEndingAfterPrefix(code) {\n effects.enter(\"htmlTextData\");\n return returnState(code);\n }\n}","/**\n * @import {\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { labelEnd } from './label-end.js';\n\n/** @type {Construct} */\nexport const labelStartLink = {\n name: 'labelStartLink',\n resolveAll: labelEnd.resolveAll,\n tokenize: tokenizeLabelStartLink\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeLabelStartLink(effects, ok, nok) {\n const self = this;\n return start;\n\n /**\n * Start of label (link) start.\n *\n * ```markdown\n * > | a [b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"labelLink\");\n effects.enter(\"labelMarker\");\n effects.consume(code);\n effects.exit(\"labelMarker\");\n effects.exit(\"labelLink\");\n return after;\n }\n\n /** @type {State} */\n function after(code) {\n // To do: this isn’t needed in `micromark-extension-gfm-footnote`,\n // remove.\n // Hidden footnotes hook.\n /* c8 ignore next 3 */\n return code === 94 && '_hiddenFootnoteSupport' in self.parser.constructs ? 
nok(code) : ok(code);\n }\n}","/**\n * @import {\n * Construct,\n * State,\n * TokenizeContext,\n * Tokenizer\n * } from 'micromark-util-types'\n */\n\nimport { markdownLineEnding } from 'micromark-util-character';\n/** @type {Construct} */\nexport const hardBreakEscape = {\n name: 'hardBreakEscape',\n tokenize: tokenizeHardBreakEscape\n};\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeHardBreakEscape(effects, ok, nok) {\n return start;\n\n /**\n * Start of a hard break (escape).\n *\n * ```markdown\n * > | a\\\n * ^\n * | b\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"hardBreakEscape\");\n effects.consume(code);\n return after;\n }\n\n /**\n * After `\\`, at eol.\n *\n * ```markdown\n * > | a\\\n * ^\n * | b\n * ```\n *\n * @type {State}\n */\n function after(code) {\n if (markdownLineEnding(code)) {\n effects.exit(\"hardBreakEscape\");\n return ok(code);\n }\n return nok(code);\n }\n}","/**\n * @import {\n * Construct,\n * Previous,\n * Resolver,\n * State,\n * TokenizeContext,\n * Tokenizer,\n * Token\n * } from 'micromark-util-types'\n */\n\nimport { markdownLineEnding } from 'micromark-util-character';\n/** @type {Construct} */\nexport const codeText = {\n name: 'codeText',\n previous,\n resolve: resolveCodeText,\n tokenize: tokenizeCodeText\n};\n\n// To do: next major: don’t resolve, like `markdown-rs`.\n/** @type {Resolver} */\nfunction resolveCodeText(events) {\n let tailExitIndex = events.length - 4;\n let headEnterIndex = 3;\n /** @type {number} */\n let index;\n /** @type {number | undefined} */\n let enter;\n\n // If we start and end with an EOL or a space.\n if ((events[headEnterIndex][1].type === \"lineEnding\" || events[headEnterIndex][1].type === 'space') && (events[tailExitIndex][1].type === \"lineEnding\" || events[tailExitIndex][1].type === 'space')) {\n index = headEnterIndex;\n\n // And we have data.\n while (++index < tailExitIndex) {\n if (events[index][1].type === 
\"codeTextData\") {\n // Then we have padding.\n events[headEnterIndex][1].type = \"codeTextPadding\";\n events[tailExitIndex][1].type = \"codeTextPadding\";\n headEnterIndex += 2;\n tailExitIndex -= 2;\n break;\n }\n }\n }\n\n // Merge adjacent spaces and data.\n index = headEnterIndex - 1;\n tailExitIndex++;\n while (++index <= tailExitIndex) {\n if (enter === undefined) {\n if (index !== tailExitIndex && events[index][1].type !== \"lineEnding\") {\n enter = index;\n }\n } else if (index === tailExitIndex || events[index][1].type === \"lineEnding\") {\n events[enter][1].type = \"codeTextData\";\n if (index !== enter + 2) {\n events[enter][1].end = events[index - 1][1].end;\n events.splice(enter + 2, index - enter - 2);\n tailExitIndex -= index - enter - 2;\n index = enter + 2;\n }\n enter = undefined;\n }\n }\n return events;\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Previous}\n */\nfunction previous(code) {\n // If there is a previous code, there will always be a tail.\n return code !== 96 || this.events[this.events.length - 1][1].type === \"characterEscape\";\n}\n\n/**\n * @this {TokenizeContext}\n * Context.\n * @type {Tokenizer}\n */\nfunction tokenizeCodeText(effects, ok, nok) {\n const self = this;\n let sizeOpen = 0;\n /** @type {number} */\n let size;\n /** @type {Token} */\n let token;\n return start;\n\n /**\n * Start of code (text).\n *\n * ```markdown\n * > | `a`\n * ^\n * > | \\`a`\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter(\"codeText\");\n effects.enter(\"codeTextSequence\");\n return sequenceOpen(code);\n }\n\n /**\n * In opening sequence.\n *\n * ```markdown\n * > | `a`\n * ^\n * ```\n *\n * @type {State}\n */\n function sequenceOpen(code) {\n if (code === 96) {\n effects.consume(code);\n sizeOpen++;\n return sequenceOpen;\n }\n effects.exit(\"codeTextSequence\");\n return between(code);\n }\n\n /**\n * Between something and something else.\n *\n * ```markdown\n * > | `a`\n * ^^\n * ```\n 
*\n * @type {State}\n */\n function between(code) {\n // EOF.\n if (code === null) {\n return nok(code);\n }\n\n // To do: next major: don’t do spaces in resolve, but when compiling,\n // like `markdown-rs`.\n // Tabs don’t work, and virtual spaces don’t make sense.\n if (code === 32) {\n effects.enter('space');\n effects.consume(code);\n effects.exit('space');\n return between;\n }\n\n // Closing fence? Could also be data.\n if (code === 96) {\n token = effects.enter(\"codeTextSequence\");\n size = 0;\n return sequenceClose(code);\n }\n if (markdownLineEnding(code)) {\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return between;\n }\n\n // Data.\n effects.enter(\"codeTextData\");\n return data(code);\n }\n\n /**\n * In data.\n *\n * ```markdown\n * > | `a`\n * ^\n * ```\n *\n * @type {State}\n */\n function data(code) {\n if (code === null || code === 32 || code === 96 || markdownLineEnding(code)) {\n effects.exit(\"codeTextData\");\n return between(code);\n }\n effects.consume(code);\n return data;\n }\n\n /**\n * In closing sequence.\n *\n * ```markdown\n * > | `a`\n * ^\n * ```\n *\n * @type {State}\n */\n function sequenceClose(code) {\n // More.\n if (code === 96) {\n effects.consume(code);\n size++;\n return sequenceClose;\n }\n\n // Done!\n if (size === sizeOpen) {\n effects.exit(\"codeTextSequence\");\n effects.exit(\"codeText\");\n return ok(code);\n }\n\n // More or less accents: mark as data.\n token.type = \"codeTextData\";\n return data(code);\n }\n}","/**\n * @import {Extension} from 'micromark-util-types'\n */\n\nimport { attention, autolink, blockQuote, characterEscape, characterReference, codeFenced, codeIndented, codeText, definition, hardBreakEscape, headingAtx, htmlFlow, htmlText, labelEnd, labelStartImage, labelStartLink, lineEnding, list, setextUnderline, thematicBreak } from 'micromark-core-commonmark';\nimport { resolver as resolveText } from './initialize/text.js';\n\n/** @satisfies 
{Extension['document']} */\nexport const document = {\n [42]: list,\n [43]: list,\n [45]: list,\n [48]: list,\n [49]: list,\n [50]: list,\n [51]: list,\n [52]: list,\n [53]: list,\n [54]: list,\n [55]: list,\n [56]: list,\n [57]: list,\n [62]: blockQuote\n};\n\n/** @satisfies {Extension['contentInitial']} */\nexport const contentInitial = {\n [91]: definition\n};\n\n/** @satisfies {Extension['flowInitial']} */\nexport const flowInitial = {\n [-2]: codeIndented,\n [-1]: codeIndented,\n [32]: codeIndented\n};\n\n/** @satisfies {Extension['flow']} */\nexport const flow = {\n [35]: headingAtx,\n [42]: thematicBreak,\n [45]: [setextUnderline, thematicBreak],\n [60]: htmlFlow,\n [61]: setextUnderline,\n [95]: thematicBreak,\n [96]: codeFenced,\n [126]: codeFenced\n};\n\n/** @satisfies {Extension['string']} */\nexport const string = {\n [38]: characterReference,\n [92]: characterEscape\n};\n\n/** @satisfies {Extension['text']} */\nexport const text = {\n [-5]: lineEnding,\n [-4]: lineEnding,\n [-3]: lineEnding,\n [33]: labelStartImage,\n [38]: characterReference,\n [42]: attention,\n [60]: [autolink, htmlText],\n [91]: labelStartLink,\n [92]: [hardBreakEscape, characterEscape],\n [93]: labelEnd,\n [95]: attention,\n [96]: codeText\n};\n\n/** @satisfies {Extension['insideSpan']} */\nexport const insideSpan = {\n null: [attention, resolveText]\n};\n\n/** @satisfies {Extension['attentionMarkers']} */\nexport const attentionMarkers = {\n null: [42, 95]\n};\n\n/** @satisfies {Extension['disable']} */\nexport const disable = {\n null: []\n};","/**\n * @import {\n * Chunk,\n * Code,\n * ConstructRecord,\n * Construct,\n * Effects,\n * InitialConstruct,\n * ParseContext,\n * Point,\n * State,\n * TokenizeContext,\n * Token\n * } from 'micromark-util-types'\n */\n\n/**\n * @callback Restore\n * Restore the state.\n * @returns {undefined}\n * Nothing.\n *\n * @typedef Info\n * Info.\n * @property {Restore} restore\n * Restore.\n * @property {number} from\n * From.\n *\n * @callback 
ReturnHandle\n * Handle a successful run.\n * @param {Construct} construct\n * Construct.\n * @param {Info} info\n * Info.\n * @returns {undefined}\n * Nothing.\n */\n\nimport { markdownLineEnding } from 'micromark-util-character';\nimport { push, splice } from 'micromark-util-chunked';\nimport { resolveAll } from 'micromark-util-resolve-all';\n/**\n * Create a tokenizer.\n * Tokenizers deal with one type of data (e.g., containers, flow, text).\n * The parser is the object dealing with it all.\n * `initialize` works like other constructs, except that only its `tokenize`\n * function is used, in which case it doesn’t receive an `ok` or `nok`.\n * `from` can be given to set the point before the first character, although\n * when further lines are indented, they must be set with `defineSkip`.\n *\n * @param {ParseContext} parser\n * Parser.\n * @param {InitialConstruct} initialize\n * Construct.\n * @param {Omit | undefined} [from]\n * Point (optional).\n * @returns {TokenizeContext}\n * Context.\n */\nexport function createTokenizer(parser, initialize, from) {\n /** @type {Point} */\n let point = {\n _bufferIndex: -1,\n _index: 0,\n line: from && from.line || 1,\n column: from && from.column || 1,\n offset: from && from.offset || 0\n };\n /** @type {Record} */\n const columnStart = {};\n /** @type {Array} */\n const resolveAllConstructs = [];\n /** @type {Array} */\n let chunks = [];\n /** @type {Array} */\n let stack = [];\n /** @type {boolean | undefined} */\n let consumed = true;\n\n /**\n * Tools used for tokenizing.\n *\n * @type {Effects}\n */\n const effects = {\n attempt: constructFactory(onsuccessfulconstruct),\n check: constructFactory(onsuccessfulcheck),\n consume,\n enter,\n exit,\n interrupt: constructFactory(onsuccessfulcheck, {\n interrupt: true\n })\n };\n\n /**\n * State and tools for resolving and serializing.\n *\n * @type {TokenizeContext}\n */\n const context = {\n code: null,\n containerState: {},\n defineSkip,\n events: [],\n now,\n parser,\n 
previous: null,\n sliceSerialize,\n sliceStream,\n write\n };\n\n /**\n * The state function.\n *\n * @type {State | undefined}\n */\n let state = initialize.tokenize.call(context, effects);\n\n /**\n * Track which character we expect to be consumed, to catch bugs.\n *\n * @type {Code}\n */\n let expectedCode;\n if (initialize.resolveAll) {\n resolveAllConstructs.push(initialize);\n }\n return context;\n\n /** @type {TokenizeContext['write']} */\n function write(slice) {\n chunks = push(chunks, slice);\n main();\n\n // Exit if we’re not done, resolve might change stuff.\n if (chunks[chunks.length - 1] !== null) {\n return [];\n }\n addResult(initialize, 0);\n\n // Otherwise, resolve, and exit.\n context.events = resolveAll(resolveAllConstructs, context.events, context);\n return context.events;\n }\n\n //\n // Tools.\n //\n\n /** @type {TokenizeContext['sliceSerialize']} */\n function sliceSerialize(token, expandTabs) {\n return serializeChunks(sliceStream(token), expandTabs);\n }\n\n /** @type {TokenizeContext['sliceStream']} */\n function sliceStream(token) {\n return sliceChunks(chunks, token);\n }\n\n /** @type {TokenizeContext['now']} */\n function now() {\n // This is a hot path, so we clone manually instead of `Object.assign({}, point)`\n const {\n _bufferIndex,\n _index,\n line,\n column,\n offset\n } = point;\n return {\n _bufferIndex,\n _index,\n line,\n column,\n offset\n };\n }\n\n /** @type {TokenizeContext['defineSkip']} */\n function defineSkip(value) {\n columnStart[value.line] = value.column;\n accountForPotentialSkip();\n }\n\n //\n // State management.\n //\n\n /**\n * Main loop (note that `_index` and `_bufferIndex` in `point` are modified by\n * `consume`).\n * Here is where we walk through the chunks, which either include strings of\n * several characters, or numerical character codes.\n * The reason to do this in a loop instead of a call is so the stack can\n * drain.\n *\n * @returns {undefined}\n * Nothing.\n */\n function main() {\n /** 
@type {number} */\n let chunkIndex;\n while (point._index < chunks.length) {\n const chunk = chunks[point._index];\n\n // If we’re in a buffer chunk, loop through it.\n if (typeof chunk === 'string') {\n chunkIndex = point._index;\n if (point._bufferIndex < 0) {\n point._bufferIndex = 0;\n }\n while (point._index === chunkIndex && point._bufferIndex < chunk.length) {\n go(chunk.charCodeAt(point._bufferIndex));\n }\n } else {\n go(chunk);\n }\n }\n }\n\n /**\n * Deal with one code.\n *\n * @param {Code} code\n * Code.\n * @returns {undefined}\n * Nothing.\n */\n function go(code) {\n consumed = undefined;\n expectedCode = code;\n state = state(code);\n }\n\n /** @type {Effects['consume']} */\n function consume(code) {\n if (markdownLineEnding(code)) {\n point.line++;\n point.column = 1;\n point.offset += code === -3 ? 2 : 1;\n accountForPotentialSkip();\n } else if (code !== -1) {\n point.column++;\n point.offset++;\n }\n\n // Not in a string chunk.\n if (point._bufferIndex < 0) {\n point._index++;\n } else {\n point._bufferIndex++;\n\n // At end of string chunk.\n if (point._bufferIndex ===\n // Points w/ non-negative `_bufferIndex` reference\n // strings.\n /** @type {string} */\n chunks[point._index].length) {\n point._bufferIndex = -1;\n point._index++;\n }\n }\n\n // Expose the previous character.\n context.previous = code;\n\n // Mark as consumed.\n consumed = true;\n }\n\n /** @type {Effects['enter']} */\n function enter(type, fields) {\n /** @type {Token} */\n // @ts-expect-error Patch instead of assign required fields to help GC.\n const token = fields || {};\n token.type = type;\n token.start = now();\n context.events.push(['enter', token, context]);\n stack.push(token);\n return token;\n }\n\n /** @type {Effects['exit']} */\n function exit(type) {\n const token = stack.pop();\n token.end = now();\n context.events.push(['exit', token, context]);\n return token;\n }\n\n /**\n * Use results.\n *\n * @type {ReturnHandle}\n */\n function 
onsuccessfulconstruct(construct, info) {\n addResult(construct, info.from);\n }\n\n /**\n * Discard results.\n *\n * @type {ReturnHandle}\n */\n function onsuccessfulcheck(_, info) {\n info.restore();\n }\n\n /**\n * Factory to attempt/check/interrupt.\n *\n * @param {ReturnHandle} onreturn\n * Callback.\n * @param {{interrupt?: boolean | undefined} | undefined} [fields]\n * Fields.\n */\n function constructFactory(onreturn, fields) {\n return hook;\n\n /**\n * Handle either an object mapping codes to constructs, a list of\n * constructs, or a single construct.\n *\n * @param {Array | ConstructRecord | Construct} constructs\n * Constructs.\n * @param {State} returnState\n * State.\n * @param {State | undefined} [bogusState]\n * State.\n * @returns {State}\n * State.\n */\n function hook(constructs, returnState, bogusState) {\n /** @type {ReadonlyArray} */\n let listOfConstructs;\n /** @type {number} */\n let constructIndex;\n /** @type {Construct} */\n let currentConstruct;\n /** @type {Info} */\n let info;\n return Array.isArray(constructs) ? /* c8 ignore next 1 */\n handleListOfConstructs(constructs) : 'tokenize' in constructs ?\n // Looks like a construct.\n handleListOfConstructs([(/** @type {Construct} */constructs)]) : handleMapOfConstructs(constructs);\n\n /**\n * Handle a list of construct.\n *\n * @param {ConstructRecord} map\n * Constructs.\n * @returns {State}\n * State.\n */\n function handleMapOfConstructs(map) {\n return start;\n\n /** @type {State} */\n function start(code) {\n const left = code !== null && map[code];\n const all = code !== null && map.null;\n const list = [\n // To do: add more extension tests.\n /* c8 ignore next 2 */\n ...(Array.isArray(left) ? left : left ? [left] : []), ...(Array.isArray(all) ? all : all ? 
[all] : [])];\n return handleListOfConstructs(list)(code);\n }\n }\n\n /**\n * Handle a list of construct.\n *\n * @param {ReadonlyArray} list\n * Constructs.\n * @returns {State}\n * State.\n */\n function handleListOfConstructs(list) {\n listOfConstructs = list;\n constructIndex = 0;\n if (list.length === 0) {\n return bogusState;\n }\n return handleConstruct(list[constructIndex]);\n }\n\n /**\n * Handle a single construct.\n *\n * @param {Construct} construct\n * Construct.\n * @returns {State}\n * State.\n */\n function handleConstruct(construct) {\n return start;\n\n /** @type {State} */\n function start(code) {\n // To do: not needed to store if there is no bogus state, probably?\n // Currently doesn’t work because `inspect` in document does a check\n // w/o a bogus, which doesn’t make sense. But it does seem to help perf\n // by not storing.\n info = store();\n currentConstruct = construct;\n if (!construct.partial) {\n context.currentConstruct = construct;\n }\n\n // Always populated by defaults.\n\n if (construct.name && context.parser.constructs.disable.null.includes(construct.name)) {\n return nok(code);\n }\n return construct.tokenize.call(\n // If we do have fields, create an object w/ `context` as its\n // prototype.\n // This allows a “live binding”, which is needed for `interrupt`.\n fields ? 
Object.assign(Object.create(context), fields) : context, effects, ok, nok)(code);\n }\n }\n\n /** @type {State} */\n function ok(code) {\n consumed = true;\n onreturn(currentConstruct, info);\n return returnState;\n }\n\n /** @type {State} */\n function nok(code) {\n consumed = true;\n info.restore();\n if (++constructIndex < listOfConstructs.length) {\n return handleConstruct(listOfConstructs[constructIndex]);\n }\n return bogusState;\n }\n }\n }\n\n /**\n * @param {Construct} construct\n * Construct.\n * @param {number} from\n * From.\n * @returns {undefined}\n * Nothing.\n */\n function addResult(construct, from) {\n if (construct.resolveAll && !resolveAllConstructs.includes(construct)) {\n resolveAllConstructs.push(construct);\n }\n if (construct.resolve) {\n splice(context.events, from, context.events.length - from, construct.resolve(context.events.slice(from), context));\n }\n if (construct.resolveTo) {\n context.events = construct.resolveTo(context.events, context);\n }\n }\n\n /**\n * Store state.\n *\n * @returns {Info}\n * Info.\n */\n function store() {\n const startPoint = now();\n const startPrevious = context.previous;\n const startCurrentConstruct = context.currentConstruct;\n const startEventsIndex = context.events.length;\n const startStack = Array.from(stack);\n return {\n from: startEventsIndex,\n restore\n };\n\n /**\n * Restore state.\n *\n * @returns {undefined}\n * Nothing.\n */\n function restore() {\n point = startPoint;\n context.previous = startPrevious;\n context.currentConstruct = startCurrentConstruct;\n context.events.length = startEventsIndex;\n stack = startStack;\n accountForPotentialSkip();\n }\n }\n\n /**\n * Move the current point a bit forward in the line when it’s on a column\n * skip.\n *\n * @returns {undefined}\n * Nothing.\n */\n function accountForPotentialSkip() {\n if (point.line in columnStart && point.column < 2) {\n point.column = columnStart[point.line];\n point.offset += columnStart[point.line] - 1;\n }\n 
}\n}\n\n/**\n * Get the chunks from a slice of chunks in the range of a token.\n *\n * @param {ReadonlyArray} chunks\n * Chunks.\n * @param {Pick} token\n * Token.\n * @returns {Array}\n * Chunks.\n */\nfunction sliceChunks(chunks, token) {\n const startIndex = token.start._index;\n const startBufferIndex = token.start._bufferIndex;\n const endIndex = token.end._index;\n const endBufferIndex = token.end._bufferIndex;\n /** @type {Array} */\n let view;\n if (startIndex === endIndex) {\n // @ts-expect-error `_bufferIndex` is used on string chunks.\n view = [chunks[startIndex].slice(startBufferIndex, endBufferIndex)];\n } else {\n view = chunks.slice(startIndex, endIndex);\n if (startBufferIndex > -1) {\n const head = view[0];\n if (typeof head === 'string') {\n view[0] = head.slice(startBufferIndex);\n /* c8 ignore next 4 -- used to be used, no longer */\n } else {\n view.shift();\n }\n }\n if (endBufferIndex > 0) {\n // @ts-expect-error `_bufferIndex` is used on string chunks.\n view.push(chunks[endIndex].slice(0, endBufferIndex));\n }\n }\n return view;\n}\n\n/**\n * Get the string value of a slice of chunks.\n *\n * @param {ReadonlyArray} chunks\n * Chunks.\n * @param {boolean | undefined} [expandTabs=false]\n * Whether to expand tabs (default: `false`).\n * @returns {string}\n * Result.\n */\nfunction serializeChunks(chunks, expandTabs) {\n let index = -1;\n /** @type {Array} */\n const result = [];\n /** @type {boolean | undefined} */\n let atTab;\n while (++index < chunks.length) {\n const chunk = chunks[index];\n /** @type {string} */\n let value;\n if (typeof chunk === 'string') {\n value = chunk;\n } else switch (chunk) {\n case -5:\n {\n value = \"\\r\";\n break;\n }\n case -4:\n {\n value = \"\\n\";\n break;\n }\n case -3:\n {\n value = \"\\r\" + \"\\n\";\n break;\n }\n case -2:\n {\n value = expandTabs ? 
\" \" : \"\\t\";\n break;\n }\n case -1:\n {\n if (!expandTabs && atTab) continue;\n value = \" \";\n break;\n }\n default:\n {\n // Currently only replacement character.\n value = String.fromCharCode(chunk);\n }\n }\n atTab = chunk === -2;\n result.push(value);\n }\n return result.join('');\n}","/**\n * @import {Chunk, Code, Encoding, Value} from 'micromark-util-types'\n */\n\n/**\n * @callback Preprocessor\n * Preprocess a value.\n * @param {Value} value\n * Value.\n * @param {Encoding | null | undefined} [encoding]\n * Encoding when `value` is a typed array (optional).\n * @param {boolean | null | undefined} [end=false]\n * Whether this is the last chunk (default: `false`).\n * @returns {Array}\n * Chunks.\n */\n\nconst search = /[\\0\\t\\n\\r]/g;\n\n/**\n * @returns {Preprocessor}\n * Preprocess a value.\n */\nexport function preprocess() {\n let column = 1;\n let buffer = '';\n /** @type {boolean | undefined} */\n let start = true;\n /** @type {boolean | undefined} */\n let atCarriageReturn;\n return preprocessor;\n\n /** @type {Preprocessor} */\n // eslint-disable-next-line complexity\n function preprocessor(value, encoding, end) {\n /** @type {Array} */\n const chunks = [];\n /** @type {RegExpMatchArray | null} */\n let match;\n /** @type {number} */\n let next;\n /** @type {number} */\n let startPosition;\n /** @type {number} */\n let endPosition;\n /** @type {Code} */\n let code;\n value = buffer + (typeof value === 'string' ? value.toString() : new TextDecoder(encoding || undefined).decode(value));\n startPosition = 0;\n buffer = '';\n if (start) {\n // To do: `markdown-rs` actually parses BOMs (byte order mark).\n if (value.charCodeAt(0) === 65279) {\n startPosition++;\n }\n start = undefined;\n }\n while (startPosition < value.length) {\n search.lastIndex = startPosition;\n match = search.exec(value);\n endPosition = match && match.index !== undefined ? 
match.index : value.length;\n code = value.charCodeAt(endPosition);\n if (!match) {\n buffer = value.slice(startPosition);\n break;\n }\n if (code === 10 && startPosition === endPosition && atCarriageReturn) {\n chunks.push(-3);\n atCarriageReturn = undefined;\n } else {\n if (atCarriageReturn) {\n chunks.push(-5);\n atCarriageReturn = undefined;\n }\n if (startPosition < endPosition) {\n chunks.push(value.slice(startPosition, endPosition));\n column += endPosition - startPosition;\n }\n switch (code) {\n case 0:\n {\n chunks.push(65533);\n column++;\n break;\n }\n case 9:\n {\n next = Math.ceil(column / 4) * 4;\n chunks.push(-2);\n while (column++ < next) chunks.push(-1);\n break;\n }\n case 10:\n {\n chunks.push(-4);\n column = 1;\n break;\n }\n default:\n {\n atCarriageReturn = true;\n column = 1;\n }\n }\n }\n startPosition = endPosition + 1;\n }\n if (end) {\n if (atCarriageReturn) chunks.push(-5);\n if (buffer) chunks.push(buffer);\n chunks.push(null);\n }\n return chunks;\n }\n}","/**\n * Turn the number (in string form as either hexa- or plain decimal) coming from\n * a numeric character reference into a character.\n *\n * Sort of like `String.fromCodePoint(Number.parseInt(value, base))`, but makes\n * non-characters and control characters safe.\n *\n * @param {string} value\n * Value to decode.\n * @param {number} base\n * Numeric base.\n * @returns {string}\n * Character.\n */\nexport function decodeNumericCharacterReference(value, base) {\n const code = Number.parseInt(value, base);\n if (\n // C0 except for HT, LF, FF, CR, space.\n code < 9 || code === 11 || code > 13 && code < 32 ||\n // Control character (DEL) of C0, and C1 controls.\n code > 126 && code < 160 ||\n // Lone high surrogates and low surrogates.\n code > 55_295 && code < 57_344 ||\n // Noncharacters.\n code > 64_975 && code < 65_008 || /* eslint-disable no-bitwise */\n (code & 65_535) === 65_535 || (code & 65_535) === 65_534 || /* eslint-enable no-bitwise */\n // Out of range\n code > 
1_114_111) {\n return \"\\uFFFD\";\n }\n return String.fromCodePoint(code);\n}","import { decodeNamedCharacterReference } from 'decode-named-character-reference';\nimport { decodeNumericCharacterReference } from 'micromark-util-decode-numeric-character-reference';\nconst characterEscapeOrReference = /\\\\([!-/:-@[-`{-~])|&(#(?:\\d{1,7}|x[\\da-f]{1,6})|[\\da-z]{1,31});/gi;\n\n/**\n * Decode markdown strings (which occur in places such as fenced code info\n * strings, destinations, labels, and titles).\n *\n * The “string” content type allows character escapes and -references.\n * This decodes those.\n *\n * @param {string} value\n * Value to decode.\n * @returns {string}\n * Decoded value.\n */\nexport function decodeString(value) {\n return value.replace(characterEscapeOrReference, decode);\n}\n\n/**\n * @param {string} $0\n * Match.\n * @param {string} $1\n * Character escape.\n * @param {string} $2\n * Character reference.\n * @returns {string}\n * Decoded value\n */\nfunction decode($0, $1, $2) {\n if ($1) {\n // Escape.\n return $1;\n }\n\n // Reference.\n const head = $2.charCodeAt(0);\n if (head === 35) {\n const head = $2.charCodeAt(1);\n const hex = head === 120 || head === 88;\n return decodeNumericCharacterReference($2.slice(hex ? 2 : 1), hex ? 
16 : 10);\n }\n return decodeNamedCharacterReference($2) || $0;\n}","/**\n * @import {\n * Break,\n * Blockquote,\n * Code,\n * Definition,\n * Emphasis,\n * Heading,\n * Html,\n * Image,\n * InlineCode,\n * Link,\n * ListItem,\n * List,\n * Nodes,\n * Paragraph,\n * PhrasingContent,\n * ReferenceType,\n * Root,\n * Strong,\n * Text,\n * ThematicBreak\n * } from 'mdast'\n * @import {\n * Encoding,\n * Event,\n * Token,\n * Value\n * } from 'micromark-util-types'\n * @import {Point} from 'unist'\n * @import {\n * CompileContext,\n * CompileData,\n * Config,\n * Extension,\n * Handle,\n * OnEnterError,\n * Options\n * } from './types.js'\n */\n\nimport { toString } from 'mdast-util-to-string';\nimport { parse, postprocess, preprocess } from 'micromark';\nimport { decodeNumericCharacterReference } from 'micromark-util-decode-numeric-character-reference';\nimport { decodeString } from 'micromark-util-decode-string';\nimport { normalizeIdentifier } from 'micromark-util-normalize-identifier';\nimport { decodeNamedCharacterReference } from 'decode-named-character-reference';\nimport { stringifyPosition } from 'unist-util-stringify-position';\nconst own = {}.hasOwnProperty;\n\n/**\n * Turn markdown into a syntax tree.\n *\n * @overload\n * @param {Value} value\n * @param {Encoding | null | undefined} [encoding]\n * @param {Options | null | undefined} [options]\n * @returns {Root}\n *\n * @overload\n * @param {Value} value\n * @param {Options | null | undefined} [options]\n * @returns {Root}\n *\n * @param {Value} value\n * Markdown to parse.\n * @param {Encoding | Options | null | undefined} [encoding]\n * Character encoding for when `value` is `Buffer`.\n * @param {Options | null | undefined} [options]\n * Configuration.\n * @returns {Root}\n * mdast tree.\n */\nexport function fromMarkdown(value, encoding, options) {\n if (typeof encoding !== 'string') {\n options = encoding;\n encoding = undefined;\n }\n return 
compiler(options)(postprocess(parse(options).document().write(preprocess()(value, encoding, true))));\n}\n\n/**\n * Note this compiler only understand complete buffering, not streaming.\n *\n * @param {Options | null | undefined} [options]\n */\nfunction compiler(options) {\n /** @type {Config} */\n const config = {\n transforms: [],\n canContainEols: ['emphasis', 'fragment', 'heading', 'paragraph', 'strong'],\n enter: {\n autolink: opener(link),\n autolinkProtocol: onenterdata,\n autolinkEmail: onenterdata,\n atxHeading: opener(heading),\n blockQuote: opener(blockQuote),\n characterEscape: onenterdata,\n characterReference: onenterdata,\n codeFenced: opener(codeFlow),\n codeFencedFenceInfo: buffer,\n codeFencedFenceMeta: buffer,\n codeIndented: opener(codeFlow, buffer),\n codeText: opener(codeText, buffer),\n codeTextData: onenterdata,\n data: onenterdata,\n codeFlowValue: onenterdata,\n definition: opener(definition),\n definitionDestinationString: buffer,\n definitionLabelString: buffer,\n definitionTitleString: buffer,\n emphasis: opener(emphasis),\n hardBreakEscape: opener(hardBreak),\n hardBreakTrailing: opener(hardBreak),\n htmlFlow: opener(html, buffer),\n htmlFlowData: onenterdata,\n htmlText: opener(html, buffer),\n htmlTextData: onenterdata,\n image: opener(image),\n label: buffer,\n link: opener(link),\n listItem: opener(listItem),\n listItemValue: onenterlistitemvalue,\n listOrdered: opener(list, onenterlistordered),\n listUnordered: opener(list),\n paragraph: opener(paragraph),\n reference: onenterreference,\n referenceString: buffer,\n resourceDestinationString: buffer,\n resourceTitleString: buffer,\n setextHeading: opener(heading),\n strong: opener(strong),\n thematicBreak: opener(thematicBreak)\n },\n exit: {\n atxHeading: closer(),\n atxHeadingSequence: onexitatxheadingsequence,\n autolink: closer(),\n autolinkEmail: onexitautolinkemail,\n autolinkProtocol: onexitautolinkprotocol,\n blockQuote: closer(),\n characterEscapeValue: onexitdata,\n 
characterReferenceMarkerHexadecimal: onexitcharacterreferencemarker,\n characterReferenceMarkerNumeric: onexitcharacterreferencemarker,\n characterReferenceValue: onexitcharacterreferencevalue,\n characterReference: onexitcharacterreference,\n codeFenced: closer(onexitcodefenced),\n codeFencedFence: onexitcodefencedfence,\n codeFencedFenceInfo: onexitcodefencedfenceinfo,\n codeFencedFenceMeta: onexitcodefencedfencemeta,\n codeFlowValue: onexitdata,\n codeIndented: closer(onexitcodeindented),\n codeText: closer(onexitcodetext),\n codeTextData: onexitdata,\n data: onexitdata,\n definition: closer(),\n definitionDestinationString: onexitdefinitiondestinationstring,\n definitionLabelString: onexitdefinitionlabelstring,\n definitionTitleString: onexitdefinitiontitlestring,\n emphasis: closer(),\n hardBreakEscape: closer(onexithardbreak),\n hardBreakTrailing: closer(onexithardbreak),\n htmlFlow: closer(onexithtmlflow),\n htmlFlowData: onexitdata,\n htmlText: closer(onexithtmltext),\n htmlTextData: onexitdata,\n image: closer(onexitimage),\n label: onexitlabel,\n labelText: onexitlabeltext,\n lineEnding: onexitlineending,\n link: closer(onexitlink),\n listItem: closer(),\n listOrdered: closer(),\n listUnordered: closer(),\n paragraph: closer(),\n referenceString: onexitreferencestring,\n resourceDestinationString: onexitresourcedestinationstring,\n resourceTitleString: onexitresourcetitlestring,\n resource: onexitresource,\n setextHeading: closer(onexitsetextheading),\n setextHeadingLineSequence: onexitsetextheadinglinesequence,\n setextHeadingText: onexitsetextheadingtext,\n strong: closer(),\n thematicBreak: closer()\n }\n };\n configure(config, (options || {}).mdastExtensions || []);\n\n /** @type {CompileData} */\n const data = {};\n return compile;\n\n /**\n * Turn micromark events into an mdast tree.\n *\n * @param {Array} events\n * Events.\n * @returns {Root}\n * mdast tree.\n */\n function compile(events) {\n /** @type {Root} */\n let tree = {\n type: 'root',\n 
children: []\n };\n /** @type {Omit} */\n const context = {\n stack: [tree],\n tokenStack: [],\n config,\n enter,\n exit,\n buffer,\n resume,\n data\n };\n /** @type {Array} */\n const listStack = [];\n let index = -1;\n while (++index < events.length) {\n // We preprocess lists to add `listItem` tokens, and to infer whether\n // items the list itself are spread out.\n if (events[index][1].type === \"listOrdered\" || events[index][1].type === \"listUnordered\") {\n if (events[index][0] === 'enter') {\n listStack.push(index);\n } else {\n const tail = listStack.pop();\n index = prepareList(events, tail, index);\n }\n }\n }\n index = -1;\n while (++index < events.length) {\n const handler = config[events[index][0]];\n if (own.call(handler, events[index][1].type)) {\n handler[events[index][1].type].call(Object.assign({\n sliceSerialize: events[index][2].sliceSerialize\n }, context), events[index][1]);\n }\n }\n\n // Handle tokens still being open.\n if (context.tokenStack.length > 0) {\n const tail = context.tokenStack[context.tokenStack.length - 1];\n const handler = tail[1] || defaultOnError;\n handler.call(context, undefined, tail[0]);\n }\n\n // Figure out `root` position.\n tree.position = {\n start: point(events.length > 0 ? events[0][1].start : {\n line: 1,\n column: 1,\n offset: 0\n }),\n end: point(events.length > 0 ? 
events[events.length - 2][1].end : {\n line: 1,\n column: 1,\n offset: 0\n })\n };\n\n // Call transforms.\n index = -1;\n while (++index < config.transforms.length) {\n tree = config.transforms[index](tree) || tree;\n }\n return tree;\n }\n\n /**\n * @param {Array} events\n * @param {number} start\n * @param {number} length\n * @returns {number}\n */\n function prepareList(events, start, length) {\n let index = start - 1;\n let containerBalance = -1;\n let listSpread = false;\n /** @type {Token | undefined} */\n let listItem;\n /** @type {number | undefined} */\n let lineIndex;\n /** @type {number | undefined} */\n let firstBlankLineIndex;\n /** @type {boolean | undefined} */\n let atMarker;\n while (++index <= length) {\n const event = events[index];\n switch (event[1].type) {\n case \"listUnordered\":\n case \"listOrdered\":\n case \"blockQuote\":\n {\n if (event[0] === 'enter') {\n containerBalance++;\n } else {\n containerBalance--;\n }\n atMarker = undefined;\n break;\n }\n case \"lineEndingBlank\":\n {\n if (event[0] === 'enter') {\n if (listItem && !atMarker && !containerBalance && !firstBlankLineIndex) {\n firstBlankLineIndex = index;\n }\n atMarker = undefined;\n }\n break;\n }\n case \"linePrefix\":\n case \"listItemValue\":\n case \"listItemMarker\":\n case \"listItemPrefix\":\n case \"listItemPrefixWhitespace\":\n {\n // Empty.\n\n break;\n }\n default:\n {\n atMarker = undefined;\n }\n }\n if (!containerBalance && event[0] === 'enter' && event[1].type === \"listItemPrefix\" || containerBalance === -1 && event[0] === 'exit' && (event[1].type === \"listUnordered\" || event[1].type === \"listOrdered\")) {\n if (listItem) {\n let tailIndex = index;\n lineIndex = undefined;\n while (tailIndex--) {\n const tailEvent = events[tailIndex];\n if (tailEvent[1].type === \"lineEnding\" || tailEvent[1].type === \"lineEndingBlank\") {\n if (tailEvent[0] === 'exit') continue;\n if (lineIndex) {\n events[lineIndex][1].type = \"lineEndingBlank\";\n listSpread = true;\n 
}\n tailEvent[1].type = \"lineEnding\";\n lineIndex = tailIndex;\n } else if (tailEvent[1].type === \"linePrefix\" || tailEvent[1].type === \"blockQuotePrefix\" || tailEvent[1].type === \"blockQuotePrefixWhitespace\" || tailEvent[1].type === \"blockQuoteMarker\" || tailEvent[1].type === \"listItemIndent\") {\n // Empty\n } else {\n break;\n }\n }\n if (firstBlankLineIndex && (!lineIndex || firstBlankLineIndex < lineIndex)) {\n listItem._spread = true;\n }\n\n // Fix position.\n listItem.end = Object.assign({}, lineIndex ? events[lineIndex][1].start : event[1].end);\n events.splice(lineIndex || index, 0, ['exit', listItem, event[2]]);\n index++;\n length++;\n }\n\n // Create a new list item.\n if (event[1].type === \"listItemPrefix\") {\n /** @type {Token} */\n const item = {\n type: 'listItem',\n _spread: false,\n start: Object.assign({}, event[1].start),\n // @ts-expect-error: we’ll add `end` in a second.\n end: undefined\n };\n listItem = item;\n events.splice(index, 0, ['enter', item, event[2]]);\n index++;\n length++;\n firstBlankLineIndex = undefined;\n atMarker = true;\n }\n }\n }\n events[start][1]._spread = listSpread;\n return length;\n }\n\n /**\n * Create an opener handle.\n *\n * @param {(token: Token) => Nodes} create\n * Create a node.\n * @param {Handle | undefined} [and]\n * Optional function to also run.\n * @returns {Handle}\n * Handle.\n */\n function opener(create, and) {\n return open;\n\n /**\n * @this {CompileContext}\n * @param {Token} token\n * @returns {undefined}\n */\n function open(token) {\n enter.call(this, create(token), token);\n if (and) and.call(this, token);\n }\n }\n\n /**\n * @type {CompileContext['buffer']}\n */\n function buffer() {\n this.stack.push({\n type: 'fragment',\n children: []\n });\n }\n\n /**\n * @type {CompileContext['enter']}\n */\n function enter(node, token, errorHandler) {\n const parent = this.stack[this.stack.length - 1];\n /** @type {Array} */\n const siblings = parent.children;\n siblings.push(node);\n 
this.stack.push(node);\n this.tokenStack.push([token, errorHandler || undefined]);\n node.position = {\n start: point(token.start),\n // @ts-expect-error: `end` will be patched later.\n end: undefined\n };\n }\n\n /**\n * Create a closer handle.\n *\n * @param {Handle | undefined} [and]\n * Optional function to also run.\n * @returns {Handle}\n * Handle.\n */\n function closer(and) {\n return close;\n\n /**\n * @this {CompileContext}\n * @param {Token} token\n * @returns {undefined}\n */\n function close(token) {\n if (and) and.call(this, token);\n exit.call(this, token);\n }\n }\n\n /**\n * @type {CompileContext['exit']}\n */\n function exit(token, onExitError) {\n const node = this.stack.pop();\n const open = this.tokenStack.pop();\n if (!open) {\n throw new Error('Cannot close `' + token.type + '` (' + stringifyPosition({\n start: token.start,\n end: token.end\n }) + '): it’s not open');\n } else if (open[0].type !== token.type) {\n if (onExitError) {\n onExitError.call(this, token, open[0]);\n } else {\n const handler = open[1] || defaultOnError;\n handler.call(this, token, open[0]);\n }\n }\n node.position.end = point(token.end);\n }\n\n /**\n * @type {CompileContext['resume']}\n */\n function resume() {\n return toString(this.stack.pop());\n }\n\n //\n // Handlers.\n //\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onenterlistordered() {\n this.data.expectingFirstListItemValue = true;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onenterlistitemvalue(token) {\n if (this.data.expectingFirstListItemValue) {\n const ancestor = this.stack[this.stack.length - 2];\n ancestor.start = Number.parseInt(this.sliceSerialize(token), 10);\n this.data.expectingFirstListItemValue = undefined;\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefencedfenceinfo() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.lang = data;\n }\n\n /**\n * @this 
{CompileContext}\n * @type {Handle}\n */\n function onexitcodefencedfencemeta() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.meta = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefencedfence() {\n // Exit if this is the closing fence.\n if (this.data.flowCodeInside) return;\n this.buffer();\n this.data.flowCodeInside = true;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodefenced() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.value = data.replace(/^(\\r?\\n|\\r)|(\\r?\\n|\\r)$/g, '');\n this.data.flowCodeInside = undefined;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcodeindented() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.value = data.replace(/(\\r?\\n|\\r)$/g, '');\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitdefinitionlabelstring(token) {\n const label = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.label = label;\n node.identifier = normalizeIdentifier(this.sliceSerialize(token)).toLowerCase();\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitdefinitiontitlestring() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.title = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitdefinitiondestinationstring() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.url = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitatxheadingsequence(token) {\n const node = this.stack[this.stack.length - 1];\n if (!node.depth) {\n const depth = this.sliceSerialize(token).length;\n node.depth = depth;\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitsetextheadingtext() {\n 
this.data.setextHeadingSlurpLineEnding = true;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitsetextheadinglinesequence(token) {\n const node = this.stack[this.stack.length - 1];\n node.depth = this.sliceSerialize(token).codePointAt(0) === 61 ? 1 : 2;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitsetextheading() {\n this.data.setextHeadingSlurpLineEnding = undefined;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onenterdata(token) {\n const node = this.stack[this.stack.length - 1];\n /** @type {Array} */\n const siblings = node.children;\n let tail = siblings[siblings.length - 1];\n if (!tail || tail.type !== 'text') {\n // Add a new text node.\n tail = text();\n tail.position = {\n start: point(token.start),\n // @ts-expect-error: we’ll add `end` later.\n end: undefined\n };\n siblings.push(tail);\n }\n this.stack.push(tail);\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitdata(token) {\n const tail = this.stack.pop();\n tail.value += this.sliceSerialize(token);\n tail.position.end = point(token.end);\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlineending(token) {\n const context = this.stack[this.stack.length - 1];\n // If we’re at a hard break, include the line ending in there.\n if (this.data.atHardBreak) {\n const tail = context.children[context.children.length - 1];\n tail.position.end = point(token.end);\n this.data.atHardBreak = undefined;\n return;\n }\n if (!this.data.setextHeadingSlurpLineEnding && config.canContainEols.includes(context.type)) {\n onenterdata.call(this, token);\n onexitdata.call(this, token);\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexithardbreak() {\n this.data.atHardBreak = true;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexithtmlflow() {\n const data = this.resume();\n const node = 
this.stack[this.stack.length - 1];\n node.value = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexithtmltext() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.value = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitcodetext() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.value = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlink() {\n const node = this.stack[this.stack.length - 1];\n // Note: there are also `identifier` and `label` fields on this link node!\n // These are used / cleaned here.\n\n // To do: clean.\n if (this.data.inReference) {\n /** @type {ReferenceType} */\n const referenceType = this.data.referenceType || 'shortcut';\n node.type += 'Reference';\n // @ts-expect-error: mutate.\n node.referenceType = referenceType;\n // @ts-expect-error: mutate.\n delete node.url;\n delete node.title;\n } else {\n // @ts-expect-error: mutate.\n delete node.identifier;\n // @ts-expect-error: mutate.\n delete node.label;\n }\n this.data.referenceType = undefined;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitimage() {\n const node = this.stack[this.stack.length - 1];\n // Note: there are also `identifier` and `label` fields on this link node!\n // These are used / cleaned here.\n\n // To do: clean.\n if (this.data.inReference) {\n /** @type {ReferenceType} */\n const referenceType = this.data.referenceType || 'shortcut';\n node.type += 'Reference';\n // @ts-expect-error: mutate.\n node.referenceType = referenceType;\n // @ts-expect-error: mutate.\n delete node.url;\n delete node.title;\n } else {\n // @ts-expect-error: mutate.\n delete node.identifier;\n // @ts-expect-error: mutate.\n delete node.label;\n }\n this.data.referenceType = undefined;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function 
onexitlabeltext(token) {\n const string = this.sliceSerialize(token);\n const ancestor = this.stack[this.stack.length - 2];\n // @ts-expect-error: stash this on the node, as it might become a reference\n // later.\n ancestor.label = decodeString(string);\n // @ts-expect-error: same as above.\n ancestor.identifier = normalizeIdentifier(string).toLowerCase();\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitlabel() {\n const fragment = this.stack[this.stack.length - 1];\n const value = this.resume();\n const node = this.stack[this.stack.length - 1];\n // Assume a reference.\n this.data.inReference = true;\n if (node.type === 'link') {\n /** @type {Array} */\n const children = fragment.children;\n node.children = children;\n } else {\n node.alt = value;\n }\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitresourcedestinationstring() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.url = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitresourcetitlestring() {\n const data = this.resume();\n const node = this.stack[this.stack.length - 1];\n node.title = data;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitresource() {\n this.data.inReference = undefined;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onenterreference() {\n this.data.referenceType = 'collapsed';\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n\n function onexitreferencestring(token) {\n const label = this.resume();\n const node = this.stack[this.stack.length - 1];\n // @ts-expect-error: stash this on the node, as it might become a reference\n // later.\n node.label = label;\n // @ts-expect-error: same as above.\n node.identifier = normalizeIdentifier(this.sliceSerialize(token)).toLowerCase();\n this.data.referenceType = 'full';\n }\n\n /**\n * @this {CompileContext}\n * @type 
{Handle}\n */\n\n function onexitcharacterreferencemarker(token) {\n this.data.characterReferenceType = token.type;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcharacterreferencevalue(token) {\n const data = this.sliceSerialize(token);\n const type = this.data.characterReferenceType;\n /** @type {string} */\n let value;\n if (type) {\n value = decodeNumericCharacterReference(data, type === \"characterReferenceMarkerNumeric\" ? 10 : 16);\n this.data.characterReferenceType = undefined;\n } else {\n const result = decodeNamedCharacterReference(data);\n value = result;\n }\n const tail = this.stack[this.stack.length - 1];\n tail.value += value;\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitcharacterreference(token) {\n const tail = this.stack.pop();\n tail.position.end = point(token.end);\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitautolinkprotocol(token) {\n onexitdata.call(this, token);\n const node = this.stack[this.stack.length - 1];\n node.url = this.sliceSerialize(token);\n }\n\n /**\n * @this {CompileContext}\n * @type {Handle}\n */\n function onexitautolinkemail(token) {\n onexitdata.call(this, token);\n const node = this.stack[this.stack.length - 1];\n node.url = 'mailto:' + this.sliceSerialize(token);\n }\n\n //\n // Creaters.\n //\n\n /** @returns {Blockquote} */\n function blockQuote() {\n return {\n type: 'blockquote',\n children: []\n };\n }\n\n /** @returns {Code} */\n function codeFlow() {\n return {\n type: 'code',\n lang: null,\n meta: null,\n value: ''\n };\n }\n\n /** @returns {InlineCode} */\n function codeText() {\n return {\n type: 'inlineCode',\n value: ''\n };\n }\n\n /** @returns {Definition} */\n function definition() {\n return {\n type: 'definition',\n identifier: '',\n label: null,\n title: null,\n url: ''\n };\n }\n\n /** @returns {Emphasis} */\n function emphasis() {\n return {\n type: 'emphasis',\n children: []\n };\n }\n\n /** 
@returns {Heading} */\n function heading() {\n return {\n type: 'heading',\n // @ts-expect-error `depth` will be set later.\n depth: 0,\n children: []\n };\n }\n\n /** @returns {Break} */\n function hardBreak() {\n return {\n type: 'break'\n };\n }\n\n /** @returns {Html} */\n function html() {\n return {\n type: 'html',\n value: ''\n };\n }\n\n /** @returns {Image} */\n function image() {\n return {\n type: 'image',\n title: null,\n url: '',\n alt: null\n };\n }\n\n /** @returns {Link} */\n function link() {\n return {\n type: 'link',\n title: null,\n url: '',\n children: []\n };\n }\n\n /**\n * @param {Token} token\n * @returns {List}\n */\n function list(token) {\n return {\n type: 'list',\n ordered: token.type === 'listOrdered',\n start: null,\n spread: token._spread,\n children: []\n };\n }\n\n /**\n * @param {Token} token\n * @returns {ListItem}\n */\n function listItem(token) {\n return {\n type: 'listItem',\n spread: token._spread,\n checked: null,\n children: []\n };\n }\n\n /** @returns {Paragraph} */\n function paragraph() {\n return {\n type: 'paragraph',\n children: []\n };\n }\n\n /** @returns {Strong} */\n function strong() {\n return {\n type: 'strong',\n children: []\n };\n }\n\n /** @returns {Text} */\n function text() {\n return {\n type: 'text',\n value: ''\n };\n }\n\n /** @returns {ThematicBreak} */\n function thematicBreak() {\n return {\n type: 'thematicBreak'\n };\n }\n}\n\n/**\n * Copy a point-like value.\n *\n * @param {Point} d\n * Point-like value.\n * @returns {Point}\n * unist point.\n */\nfunction point(d) {\n return {\n line: d.line,\n column: d.column,\n offset: d.offset\n };\n}\n\n/**\n * @param {Config} combined\n * @param {Array | Extension>} extensions\n * @returns {undefined}\n */\nfunction configure(combined, extensions) {\n let index = -1;\n while (++index < extensions.length) {\n const value = extensions[index];\n if (Array.isArray(value)) {\n configure(combined, value);\n } else {\n extension(combined, value);\n }\n 
}\n}\n\n/**\n * @param {Config} combined\n * @param {Extension} extension\n * @returns {undefined}\n */\nfunction extension(combined, extension) {\n /** @type {keyof Extension} */\n let key;\n for (key in extension) {\n if (own.call(extension, key)) {\n switch (key) {\n case 'canContainEols':\n {\n const right = extension[key];\n if (right) {\n combined[key].push(...right);\n }\n break;\n }\n case 'transforms':\n {\n const right = extension[key];\n if (right) {\n combined[key].push(...right);\n }\n break;\n }\n case 'enter':\n case 'exit':\n {\n const right = extension[key];\n if (right) {\n Object.assign(combined[key], right);\n }\n break;\n }\n // No default\n }\n }\n }\n}\n\n/** @type {OnEnterError} */\nfunction defaultOnError(left, right) {\n if (left) {\n throw new Error('Cannot close `' + left.type + '` (' + stringifyPosition({\n start: left.start,\n end: left.end\n }) + '): a different token (`' + right.type + '`, ' + stringifyPosition({\n start: right.start,\n end: right.end\n }) + ') is open');\n } else {\n throw new Error('Cannot close document, a token (`' + right.type + '`, ' + stringifyPosition({\n start: right.start,\n end: right.end\n }) + ') is still open');\n }\n}","/**\n * @import {Event} from 'micromark-util-types'\n */\n\nimport { subtokenize } from 'micromark-util-subtokenize';\n\n/**\n * @param {Array} events\n * Events.\n * @returns {Array}\n * Events.\n */\nexport function postprocess(events) {\n while (!subtokenize(events)) {\n // Empty\n }\n return events;\n}","/**\n * @import {\n * Create,\n * FullNormalizedExtension,\n * InitialConstruct,\n * ParseContext,\n * ParseOptions\n * } from 'micromark-util-types'\n */\n\nimport { combineExtensions } from 'micromark-util-combine-extensions';\nimport { content } from './initialize/content.js';\nimport { document } from './initialize/document.js';\nimport { flow } from './initialize/flow.js';\nimport { string, text } from './initialize/text.js';\nimport * as defaultConstructs from 
'./constructs.js';\nimport { createTokenizer } from './create-tokenizer.js';\n\n/**\n * @param {ParseOptions | null | undefined} [options]\n * Configuration (optional).\n * @returns {ParseContext}\n * Parser.\n */\nexport function parse(options) {\n const settings = options || {};\n const constructs = /** @type {FullNormalizedExtension} */\n combineExtensions([defaultConstructs, ...(settings.extensions || [])]);\n\n /** @type {ParseContext} */\n const parser = {\n constructs,\n content: create(content),\n defined: [],\n document: create(document),\n flow: create(flow),\n lazy: {},\n string: create(string),\n text: create(text)\n };\n return parser;\n\n /**\n * @param {InitialConstruct} initial\n * Construct to start with.\n * @returns {Create}\n * Create a tokenizer.\n */\n function create(initial) {\n return creator;\n /** @type {Create} */\n function creator(from) {\n return createTokenizer(parser, initial, from);\n }\n }\n}","/**\n * @typedef {import('mdast').Root} Root\n * @typedef {import('mdast-util-from-markdown').Options} FromMarkdownOptions\n * @typedef {import('unified').Parser} Parser\n * @typedef {import('unified').Processor} Processor\n */\n\n/**\n * @typedef {Omit} Options\n */\n\nimport {fromMarkdown} from 'mdast-util-from-markdown'\n\n/**\n * Aadd support for parsing from markdown.\n *\n * @param {Readonly | null | undefined} [options]\n * Configuration (optional).\n * @returns {undefined}\n * Nothing.\n */\nexport default function remarkParse(options) {\n /** @type {Processor} */\n // @ts-expect-error: TS in JSDoc generates wrong types if `this` is typed regularly.\n const self = this\n\n self.parser = parser\n\n /**\n * @type {Parser}\n */\n function parser(doc) {\n return fromMarkdown(doc, {\n ...self.data('settings'),\n ...options,\n // Note: these options are not in the readme.\n // The goal is for them to be set by plugins on `data` instead of being\n // passed by users.\n extensions: self.data('micromarkExtensions') || [],\n mdastExtensions: 
self.data('fromMarkdownExtensions') || []\n })\n }\n}\n","export const VOID = -1;\nexport const PRIMITIVE = 0;\nexport const ARRAY = 1;\nexport const OBJECT = 2;\nexport const DATE = 3;\nexport const REGEXP = 4;\nexport const MAP = 5;\nexport const SET = 6;\nexport const ERROR = 7;\nexport const BIGINT = 8;\n// export const SYMBOL = 9;\n","import {\n VOID, PRIMITIVE,\n ARRAY, OBJECT,\n DATE, REGEXP, MAP, SET,\n ERROR, BIGINT\n} from './types.js';\n\nconst env = typeof self === 'object' ? self : globalThis;\n\nconst deserializer = ($, _) => {\n const as = (out, index) => {\n $.set(index, out);\n return out;\n };\n\n const unpair = index => {\n if ($.has(index))\n return $.get(index);\n\n const [type, value] = _[index];\n switch (type) {\n case PRIMITIVE:\n case VOID:\n return as(value, index);\n case ARRAY: {\n const arr = as([], index);\n for (const index of value)\n arr.push(unpair(index));\n return arr;\n }\n case OBJECT: {\n const object = as({}, index);\n for (const [key, index] of value)\n object[unpair(key)] = unpair(index);\n return object;\n }\n case DATE:\n return as(new Date(value), index);\n case REGEXP: {\n const {source, flags} = value;\n return as(new RegExp(source, flags), index);\n }\n case MAP: {\n const map = as(new Map, index);\n for (const [key, index] of value)\n map.set(unpair(key), unpair(index));\n return map;\n }\n case SET: {\n const set = as(new Set, index);\n for (const index of value)\n set.add(unpair(index));\n return set;\n }\n case ERROR: {\n const {name, message} = value;\n return as(new env[name](message), index);\n }\n case BIGINT:\n return as(BigInt(value), index);\n case 'BigInt':\n return as(Object(BigInt(value)), index);\n case 'ArrayBuffer':\n return as(new Uint8Array(value).buffer, value);\n case 'DataView': {\n const { buffer } = new Uint8Array(value);\n return as(new DataView(buffer), value);\n }\n }\n return as(new env[type](value), index);\n };\n\n return unpair;\n};\n\n/**\n * @typedef {Array} Record a type 
representation\n */\n\n/**\n * Returns a deserialized value from a serialized array of Records.\n * @param {Record[]} serialized a previously serialized value.\n * @returns {any}\n */\nexport const deserialize = serialized => deserializer(new Map, serialized)(0);\n","import {\n VOID, PRIMITIVE,\n ARRAY, OBJECT,\n DATE, REGEXP, MAP, SET,\n ERROR, BIGINT\n} from './types.js';\n\nconst EMPTY = '';\n\nconst {toString} = {};\nconst {keys} = Object;\n\nconst typeOf = value => {\n const type = typeof value;\n if (type !== 'object' || !value)\n return [PRIMITIVE, type];\n\n const asString = toString.call(value).slice(8, -1);\n switch (asString) {\n case 'Array':\n return [ARRAY, EMPTY];\n case 'Object':\n return [OBJECT, EMPTY];\n case 'Date':\n return [DATE, EMPTY];\n case 'RegExp':\n return [REGEXP, EMPTY];\n case 'Map':\n return [MAP, EMPTY];\n case 'Set':\n return [SET, EMPTY];\n case 'DataView':\n return [ARRAY, asString];\n }\n\n if (asString.includes('Array'))\n return [ARRAY, asString];\n\n if (asString.includes('Error'))\n return [ERROR, asString];\n\n return [OBJECT, asString];\n};\n\nconst shouldSkip = ([TYPE, type]) => (\n TYPE === PRIMITIVE &&\n (type === 'function' || type === 'symbol')\n);\n\nconst serializer = (strict, json, $, _) => {\n\n const as = (out, value) => {\n const index = _.push(out) - 1;\n $.set(value, index);\n return index;\n };\n\n const pair = value => {\n if ($.has(value))\n return $.get(value);\n\n let [TYPE, type] = typeOf(value);\n switch (TYPE) {\n case PRIMITIVE: {\n let entry = value;\n switch (type) {\n case 'bigint':\n TYPE = BIGINT;\n entry = value.toString();\n break;\n case 'function':\n case 'symbol':\n if (strict)\n throw new TypeError('unable to serialize ' + type);\n entry = null;\n break;\n case 'undefined':\n return as([VOID], value);\n }\n return as([TYPE, entry], value);\n }\n case ARRAY: {\n if (type) {\n let spread = value;\n if (type === 'DataView') {\n spread = new Uint8Array(value.buffer);\n }\n else if (type === 
'ArrayBuffer') {\n spread = new Uint8Array(value);\n }\n return as([type, [...spread]], value);\n }\n\n const arr = [];\n const index = as([TYPE, arr], value);\n for (const entry of value)\n arr.push(pair(entry));\n return index;\n }\n case OBJECT: {\n if (type) {\n switch (type) {\n case 'BigInt':\n return as([type, value.toString()], value);\n case 'Boolean':\n case 'Number':\n case 'String':\n return as([type, value.valueOf()], value);\n }\n }\n\n if (json && ('toJSON' in value))\n return pair(value.toJSON());\n\n const entries = [];\n const index = as([TYPE, entries], value);\n for (const key of keys(value)) {\n if (strict || !shouldSkip(typeOf(value[key])))\n entries.push([pair(key), pair(value[key])]);\n }\n return index;\n }\n case DATE:\n return as([TYPE, value.toISOString()], value);\n case REGEXP: {\n const {source, flags} = value;\n return as([TYPE, {source, flags}], value);\n }\n case MAP: {\n const entries = [];\n const index = as([TYPE, entries], value);\n for (const [key, entry] of value) {\n if (strict || !(shouldSkip(typeOf(key)) || shouldSkip(typeOf(entry))))\n entries.push([pair(key), pair(entry)]);\n }\n return index;\n }\n case SET: {\n const entries = [];\n const index = as([TYPE, entries], value);\n for (const entry of value) {\n if (strict || !shouldSkip(typeOf(entry)))\n entries.push(pair(entry));\n }\n return index;\n }\n }\n\n const {message} = value;\n return as([TYPE, {name: type, message}], value);\n };\n\n return pair;\n};\n\n/**\n * @typedef {Array} Record a type representation\n */\n\n/**\n * Returns an array of serialized Records.\n * @param {any} value a serializable value.\n * @param {{json?: boolean, lossy?: boolean}?} options an object with a `lossy` or `json` property that,\n * if `true`, will not throw errors on incompatible types, and behave more\n * like JSON stringify would behave. 
Symbol and Function will be discarded.\n * @returns {Record[]}\n */\n export const serialize = (value, {json, lossy} = {}) => {\n const _ = [];\n return serializer(!(json || lossy), !!json, new Map, _)(value), _;\n};\n","import {deserialize} from './deserialize.js';\nimport {serialize} from './serialize.js';\n\n/**\n * @typedef {Array} Record a type representation\n */\n\n/**\n * Returns an array of serialized Records.\n * @param {any} any a serializable value.\n * @param {{transfer?: any[], json?: boolean, lossy?: boolean}?} options an object with\n * a transfer option (ignored when polyfilled) and/or non standard fields that\n * fallback to the polyfill if present.\n * @returns {Record[]}\n */\nexport default typeof structuredClone === \"function\" ?\n /* c8 ignore start */\n (any, options) => (\n options && ('json' in options || 'lossy' in options) ?\n deserialize(serialize(any, options)) : structuredClone(any)\n ) :\n (any, options) => deserialize(serialize(any, options));\n /* c8 ignore stop */\n\nexport {deserialize, serialize};\n","import { asciiAlphanumeric } from 'micromark-util-character';\nimport { encode } from 'micromark-util-encode';\n/**\n * Make a value safe for injection as a URL.\n *\n * This encodes unsafe characters with percent-encoding and skips already\n * encoded sequences (see `normalizeUri`).\n * Further unsafe characters are encoded as character references (see\n * `micromark-util-encode`).\n *\n * A regex of allowed protocols can be given, in which case the URL is\n * sanitized.\n * For example, `/^(https?|ircs?|mailto|xmpp)$/i` can be used for `a[href]`, or\n * `/^https?$/i` for `img[src]` (this is what `github.com` allows).\n * If the URL includes an unknown protocol (one not matched by `protocol`, such\n * as a dangerous example, `javascript:`), the value is ignored.\n *\n * @param {string | null | undefined} url\n * URI to sanitize.\n * @param {RegExp | null | undefined} [protocol]\n * Allowed protocols.\n * @returns {string}\n * 
Sanitized URI.\n */\nexport function sanitizeUri(url, protocol) {\n const value = encode(normalizeUri(url || ''));\n if (!protocol) {\n return value;\n }\n const colon = value.indexOf(':');\n const questionMark = value.indexOf('?');\n const numberSign = value.indexOf('#');\n const slash = value.indexOf('/');\n if (\n // If there is no protocol, it’s relative.\n colon < 0 ||\n // If the first colon is after a `?`, `#`, or `/`, it’s not a protocol.\n slash > -1 && colon > slash || questionMark > -1 && colon > questionMark || numberSign > -1 && colon > numberSign ||\n // It is a protocol, it should be allowed.\n protocol.test(value.slice(0, colon))) {\n return value;\n }\n return '';\n}\n\n/**\n * Normalize a URL.\n *\n * Encode unsafe characters with percent-encoding, skipping already encoded\n * sequences.\n *\n * @param {string} value\n * URI to normalize.\n * @returns {string}\n * Normalized URI.\n */\nexport function normalizeUri(value) {\n /** @type {Array} */\n const result = [];\n let index = -1;\n let start = 0;\n let skip = 0;\n while (++index < value.length) {\n const code = value.charCodeAt(index);\n /** @type {string} */\n let replace = '';\n\n // A correct percent encoded value.\n if (code === 37 && asciiAlphanumeric(value.charCodeAt(index + 1)) && asciiAlphanumeric(value.charCodeAt(index + 2))) {\n skip = 2;\n }\n // ASCII.\n else if (code < 128) {\n if (!/[!#$&-;=?-Z_a-z~]/.test(String.fromCharCode(code))) {\n replace = String.fromCharCode(code);\n }\n }\n // Astral.\n else if (code > 55_295 && code < 57_344) {\n const next = value.charCodeAt(index + 1);\n\n // A correct surrogate pair.\n if (code < 56_320 && next > 56_319 && next < 57_344) {\n replace = String.fromCharCode(code, next);\n skip = 1;\n }\n // Lone surrogate.\n else {\n replace = \"\\uFFFD\";\n }\n }\n // Unicode.\n else {\n replace = String.fromCharCode(code);\n }\n if (replace) {\n result.push(value.slice(start, index), encodeURIComponent(replace));\n start = index + skip + 1;\n replace 
= '';\n }\n if (skip) {\n index += skip;\n skip = 0;\n }\n }\n return result.join('') + value.slice(start);\n}","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').ElementContent} ElementContent\n *\n * @typedef {import('./state.js').State} State\n */\n\n/**\n * @callback FootnoteBackContentTemplate\n * Generate content for the backreference dynamically.\n *\n * For the following markdown:\n *\n * ```markdown\n * Alpha[^micromark], bravo[^micromark], and charlie[^remark].\n *\n * [^remark]: things about remark\n * [^micromark]: things about micromark\n * ```\n *\n * This function will be called with:\n *\n * * `0` and `0` for the backreference from `things about micromark` to\n * `alpha`, as it is the first used definition, and the first call to it\n * * `0` and `1` for the backreference from `things about micromark` to\n * `bravo`, as it is the first used definition, and the second call to it\n * * `1` and `0` for the backreference from `things about remark` to\n * `charlie`, as it is the second used definition\n * @param {number} referenceIndex\n * Index of the definition in the order that they are first referenced,\n * 0-indexed.\n * @param {number} rereferenceIndex\n * Index of calls to the same definition, 0-indexed.\n * @returns {Array | ElementContent | string}\n * Content for the backreference when linking back from definitions to their\n * reference.\n *\n * @callback FootnoteBackLabelTemplate\n * Generate a back label dynamically.\n *\n * For the following markdown:\n *\n * ```markdown\n * Alpha[^micromark], bravo[^micromark], and charlie[^remark].\n *\n * [^remark]: things about remark\n * [^micromark]: things about micromark\n * ```\n *\n * This function will be called with:\n *\n * * `0` and `0` for the backreference from `things about micromark` to\n * `alpha`, as it is the first used definition, and the first call to it\n * * `0` and `1` for the backreference from `things about micromark` to\n * `bravo`, as it is the first 
used definition, and the second call to it\n * * `1` and `0` for the backreference from `things about remark` to\n * `charlie`, as it is the second used definition\n * @param {number} referenceIndex\n * Index of the definition in the order that they are first referenced,\n * 0-indexed.\n * @param {number} rereferenceIndex\n * Index of calls to the same definition, 0-indexed.\n * @returns {string}\n * Back label to use when linking back from definitions to their reference.\n */\n\nimport structuredClone from '@ungap/structured-clone'\nimport {normalizeUri} from 'micromark-util-sanitize-uri'\n\n/**\n * Generate the default content that GitHub uses on backreferences.\n *\n * @param {number} _\n * Index of the definition in the order that they are first referenced,\n * 0-indexed.\n * @param {number} rereferenceIndex\n * Index of calls to the same definition, 0-indexed.\n * @returns {Array}\n * Content.\n */\nexport function defaultFootnoteBackContent(_, rereferenceIndex) {\n /** @type {Array} */\n const result = [{type: 'text', value: '↩'}]\n\n if (rereferenceIndex > 1) {\n result.push({\n type: 'element',\n tagName: 'sup',\n properties: {},\n children: [{type: 'text', value: String(rereferenceIndex)}]\n })\n }\n\n return result\n}\n\n/**\n * Generate the default label that GitHub uses on backreferences.\n *\n * @param {number} referenceIndex\n * Index of the definition in the order that they are first referenced,\n * 0-indexed.\n * @param {number} rereferenceIndex\n * Index of calls to the same definition, 0-indexed.\n * @returns {string}\n * Label.\n */\nexport function defaultFootnoteBackLabel(referenceIndex, rereferenceIndex) {\n return (\n 'Back to reference ' +\n (referenceIndex + 1) +\n (rereferenceIndex > 1 ? 
'-' + rereferenceIndex : '')\n )\n}\n\n/**\n * Generate a hast footer for called footnote definitions.\n *\n * @param {State} state\n * Info passed around.\n * @returns {Element | undefined}\n * `section` element or `undefined`.\n */\n// eslint-disable-next-line complexity\nexport function footer(state) {\n const clobberPrefix =\n typeof state.options.clobberPrefix === 'string'\n ? state.options.clobberPrefix\n : 'user-content-'\n const footnoteBackContent =\n state.options.footnoteBackContent || defaultFootnoteBackContent\n const footnoteBackLabel =\n state.options.footnoteBackLabel || defaultFootnoteBackLabel\n const footnoteLabel = state.options.footnoteLabel || 'Footnotes'\n const footnoteLabelTagName = state.options.footnoteLabelTagName || 'h2'\n const footnoteLabelProperties = state.options.footnoteLabelProperties || {\n className: ['sr-only']\n }\n /** @type {Array} */\n const listItems = []\n let referenceIndex = -1\n\n while (++referenceIndex < state.footnoteOrder.length) {\n const definition = state.footnoteById.get(\n state.footnoteOrder[referenceIndex]\n )\n\n if (!definition) {\n continue\n }\n\n const content = state.all(definition)\n const id = String(definition.identifier).toUpperCase()\n const safeId = normalizeUri(id.toLowerCase())\n let rereferenceIndex = 0\n /** @type {Array} */\n const backReferences = []\n const counts = state.footnoteCounts.get(id)\n\n // eslint-disable-next-line no-unmodified-loop-condition\n while (counts !== undefined && ++rereferenceIndex <= counts) {\n if (backReferences.length > 0) {\n backReferences.push({type: 'text', value: ' '})\n }\n\n let children =\n typeof footnoteBackContent === 'string'\n ? 
footnoteBackContent\n : footnoteBackContent(referenceIndex, rereferenceIndex)\n\n if (typeof children === 'string') {\n children = {type: 'text', value: children}\n }\n\n backReferences.push({\n type: 'element',\n tagName: 'a',\n properties: {\n href:\n '#' +\n clobberPrefix +\n 'fnref-' +\n safeId +\n (rereferenceIndex > 1 ? '-' + rereferenceIndex : ''),\n dataFootnoteBackref: '',\n ariaLabel:\n typeof footnoteBackLabel === 'string'\n ? footnoteBackLabel\n : footnoteBackLabel(referenceIndex, rereferenceIndex),\n className: ['data-footnote-backref']\n },\n children: Array.isArray(children) ? children : [children]\n })\n }\n\n const tail = content[content.length - 1]\n\n if (tail && tail.type === 'element' && tail.tagName === 'p') {\n const tailTail = tail.children[tail.children.length - 1]\n if (tailTail && tailTail.type === 'text') {\n tailTail.value += ' '\n } else {\n tail.children.push({type: 'text', value: ' '})\n }\n\n tail.children.push(...backReferences)\n } else {\n content.push(...backReferences)\n }\n\n /** @type {Element} */\n const listItem = {\n type: 'element',\n tagName: 'li',\n properties: {id: clobberPrefix + 'fn-' + safeId},\n children: state.wrap(content, true)\n }\n\n state.patch(definition, listItem)\n\n listItems.push(listItem)\n }\n\n if (listItems.length === 0) {\n return\n }\n\n return {\n type: 'element',\n tagName: 'section',\n properties: {dataFootnotes: true, className: ['footnotes']},\n children: [\n {\n type: 'element',\n tagName: footnoteLabelTagName,\n properties: {\n ...structuredClone(footnoteLabelProperties),\n id: 'footnote-label'\n },\n children: [{type: 'text', value: footnoteLabel}]\n },\n {type: 'text', value: '\\n'},\n {\n type: 'element',\n tagName: 'ol',\n properties: {},\n children: state.wrap(listItems, true)\n },\n {type: 'text', value: '\\n'}\n ]\n }\n}\n","/**\n * @typedef {import('unist').Node} Node\n * @typedef {import('unist').Parent} Parent\n */\n\n/**\n * @template Fn\n * @template Fallback\n * @typedef {Fn 
extends (value: any) => value is infer Thing ? Thing : Fallback} Predicate\n */\n\n/**\n * @callback Check\n * Check that an arbitrary value is a node.\n * @param {unknown} this\n * The given context.\n * @param {unknown} [node]\n * Anything (typically a node).\n * @param {number | null | undefined} [index]\n * The node’s position in its parent.\n * @param {Parent | null | undefined} [parent]\n * The node’s parent.\n * @returns {boolean}\n * Whether this is a node and passes a test.\n *\n * @typedef {Record | Node} Props\n * Object to check for equivalence.\n *\n * Note: `Node` is included as it is common but is not indexable.\n *\n * @typedef {Array | Props | TestFunction | string | null | undefined} Test\n * Check for an arbitrary node.\n *\n * @callback TestFunction\n * Check if a node passes a test.\n * @param {unknown} this\n * The given context.\n * @param {Node} node\n * A node.\n * @param {number | undefined} [index]\n * The node’s position in its parent.\n * @param {Parent | undefined} [parent]\n * The node’s parent.\n * @returns {boolean | undefined | void}\n * Whether this node passes the test.\n *\n * Note: `void` is included until TS sees no return as `undefined`.\n */\n\n/**\n * Check if `node` is a `Node` and whether it passes the given test.\n *\n * @param {unknown} node\n * Thing to check, typically `Node`.\n * @param {Test} test\n * A check for a specific node.\n * @param {number | null | undefined} index\n * The node’s position in its parent.\n * @param {Parent | null | undefined} parent\n * The node’s parent.\n * @param {unknown} context\n * Context object (`this`) to pass to `test` functions.\n * @returns {boolean}\n * Whether `node` is a node and passes a test.\n */\nexport const is =\n // Note: overloads in JSDoc can’t yet use different `@template`s.\n /**\n * @type {(\n * ((node: unknown, test: Condition, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & {type: Condition}) &\n * 
((node: unknown, test: Condition, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & Condition) &\n * ((node: unknown, test: Condition, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & Predicate) &\n * ((node?: null | undefined) => false) &\n * ((node: unknown, test?: null | undefined, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node) &\n * ((node: unknown, test?: Test, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => boolean)\n * )}\n */\n (\n /**\n * @param {unknown} [node]\n * @param {Test} [test]\n * @param {number | null | undefined} [index]\n * @param {Parent | null | undefined} [parent]\n * @param {unknown} [context]\n * @returns {boolean}\n */\n // eslint-disable-next-line max-params\n function (node, test, index, parent, context) {\n const check = convert(test)\n\n if (\n index !== undefined &&\n index !== null &&\n (typeof index !== 'number' ||\n index < 0 ||\n index === Number.POSITIVE_INFINITY)\n ) {\n throw new Error('Expected positive finite index')\n }\n\n if (\n parent !== undefined &&\n parent !== null &&\n (!is(parent) || !parent.children)\n ) {\n throw new Error('Expected parent node')\n }\n\n if (\n (parent === undefined || parent === null) !==\n (index === undefined || index === null)\n ) {\n throw new Error('Expected both parent and index')\n }\n\n return looksLikeANode(node)\n ? 
check.call(context, node, index, parent)\n : false\n }\n )\n\n/**\n * Generate an assertion from a test.\n *\n * Useful if you’re going to test many nodes, for example when creating a\n * utility where something else passes a compatible test.\n *\n * The created function is a bit faster because it expects valid input only:\n * a `node`, `index`, and `parent`.\n *\n * @param {Test} test\n * * when nullish, checks if `node` is a `Node`.\n * * when `string`, works like passing `(node) => node.type === test`.\n * * when `function` checks if function passed the node is true.\n * * when `object`, checks that all keys in test are in node, and that they have (strictly) equal values.\n * * when `array`, checks if any one of the subtests pass.\n * @returns {Check}\n * An assertion.\n */\nexport const convert =\n // Note: overloads in JSDoc can’t yet use different `@template`s.\n /**\n * @type {(\n * ((test: Condition) => (node: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & {type: Condition}) &\n * ((test: Condition) => (node: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & Condition) &\n * ((test: Condition) => (node: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node & Predicate) &\n * ((test?: null | undefined) => (node?: unknown, index?: number | null | undefined, parent?: Parent | null | undefined, context?: unknown) => node is Node) &\n * ((test?: Test) => Check)\n * )}\n */\n (\n /**\n * @param {Test} [test]\n * @returns {Check}\n */\n function (test) {\n if (test === null || test === undefined) {\n return ok\n }\n\n if (typeof test === 'function') {\n return castFactory(test)\n }\n\n if (typeof test === 'object') {\n return Array.isArray(test) ? 
anyFactory(test) : propsFactory(test)\n }\n\n if (typeof test === 'string') {\n return typeFactory(test)\n }\n\n throw new Error('Expected function, string, or object as test')\n }\n )\n\n/**\n * @param {Array} tests\n * @returns {Check}\n */\nfunction anyFactory(tests) {\n /** @type {Array} */\n const checks = []\n let index = -1\n\n while (++index < tests.length) {\n checks[index] = convert(tests[index])\n }\n\n return castFactory(any)\n\n /**\n * @this {unknown}\n * @type {TestFunction}\n */\n function any(...parameters) {\n let index = -1\n\n while (++index < checks.length) {\n if (checks[index].apply(this, parameters)) return true\n }\n\n return false\n }\n}\n\n/**\n * Turn an object into a test for a node with a certain fields.\n *\n * @param {Props} check\n * @returns {Check}\n */\nfunction propsFactory(check) {\n const checkAsRecord = /** @type {Record} */ (check)\n\n return castFactory(all)\n\n /**\n * @param {Node} node\n * @returns {boolean}\n */\n function all(node) {\n const nodeAsRecord = /** @type {Record} */ (\n /** @type {unknown} */ (node)\n )\n\n /** @type {string} */\n let key\n\n for (key in check) {\n if (nodeAsRecord[key] !== checkAsRecord[key]) return false\n }\n\n return true\n }\n}\n\n/**\n * Turn a string into a test for a node with a certain type.\n *\n * @param {string} check\n * @returns {Check}\n */\nfunction typeFactory(check) {\n return castFactory(type)\n\n /**\n * @param {Node} node\n */\n function type(node) {\n return node && node.type === check\n }\n}\n\n/**\n * Turn a custom test into a test for a node that passes that test.\n *\n * @param {TestFunction} testFunction\n * @returns {Check}\n */\nfunction castFactory(testFunction) {\n return check\n\n /**\n * @this {unknown}\n * @type {Check}\n */\n function check(value, index, parent) {\n return Boolean(\n looksLikeANode(value) &&\n testFunction.call(\n this,\n value,\n typeof index === 'number' ? 
index : undefined,\n parent || undefined\n )\n )\n }\n}\n\nfunction ok() {\n return true\n}\n\n/**\n * @param {unknown} value\n * @returns {value is Node}\n */\nfunction looksLikeANode(value) {\n return value !== null && typeof value === 'object' && 'type' in value\n}\n","/**\n * @typedef {import('unist').Node} UnistNode\n * @typedef {import('unist').Parent} UnistParent\n */\n\n/**\n * @typedef {Exclude | undefined} Test\n * Test from `unist-util-is`.\n *\n * Note: we have remove and add `undefined`, because otherwise when generating\n * automatic `.d.ts` files, TS tries to flatten paths from a local perspective,\n * which doesn’t work when publishing on npm.\n */\n\n/**\n * @typedef {(\n * Fn extends (value: any) => value is infer Thing\n * ? Thing\n * : Fallback\n * )} Predicate\n * Get the value of a type guard `Fn`.\n * @template Fn\n * Value; typically function that is a type guard (such as `(x): x is Y`).\n * @template Fallback\n * Value to yield if `Fn` is not a type guard.\n */\n\n/**\n * @typedef {(\n * Check extends null | undefined // No test.\n * ? Value\n * : Value extends {type: Check} // String (type) test.\n * ? Value\n * : Value extends Check // Partial test.\n * ? Value\n * : Check extends Function // Function test.\n * ? Predicate extends Value\n * ? Predicate\n * : never\n * : never // Some other test?\n * )} MatchesOne\n * Check whether a node matches a primitive check in the type system.\n * @template Value\n * Value; typically unist `Node`.\n * @template Check\n * Value; typically `unist-util-is`-compatible test, but not arrays.\n */\n\n/**\n * @typedef {(\n * Check extends Array\n * ? 
MatchesOne\n * : MatchesOne\n * )} Matches\n * Check whether a node matches a check in the type system.\n * @template Value\n * Value; typically unist `Node`.\n * @template Check\n * Value; typically `unist-util-is`-compatible test.\n */\n\n/**\n * @typedef {0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10} Uint\n * Number; capped reasonably.\n */\n\n/**\n * @typedef {I extends 0 ? 1 : I extends 1 ? 2 : I extends 2 ? 3 : I extends 3 ? 4 : I extends 4 ? 5 : I extends 5 ? 6 : I extends 6 ? 7 : I extends 7 ? 8 : I extends 8 ? 9 : 10} Increment\n * Increment a number in the type system.\n * @template {Uint} [I=0]\n * Index.\n */\n\n/**\n * @typedef {(\n * Node extends UnistParent\n * ? Node extends {children: Array}\n * ? Child extends Children ? Node : never\n * : never\n * : never\n * )} InternalParent\n * Collect nodes that can be parents of `Child`.\n * @template {UnistNode} Node\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n */\n\n/**\n * @typedef {InternalParent, Child>} Parent\n * Collect nodes in `Tree` that can be parents of `Child`.\n * @template {UnistNode} Tree\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n */\n\n/**\n * @typedef {(\n * Depth extends Max\n * ? never\n * :\n * | InternalParent\n * | InternalAncestor, Max, Increment>\n * )} InternalAncestor\n * Collect nodes in `Tree` that can be ancestors of `Child`.\n * @template {UnistNode} Node\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n * @template {Uint} [Max=10]\n * Max; searches up to this depth.\n * @template {Uint} [Depth=0]\n * Current depth.\n */\n\n/**\n * @typedef {InternalAncestor, Child>} Ancestor\n * Collect nodes in `Tree` that can be ancestors of `Child`.\n * @template {UnistNode} Tree\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n */\n\n/**\n * @typedef {(\n * Tree extends UnistParent\n * ? Depth extends Max\n * ? 
Tree\n * : Tree | InclusiveDescendant>\n * : Tree\n * )} InclusiveDescendant\n * Collect all (inclusive) descendants of `Tree`.\n *\n * > 👉 **Note**: for performance reasons, this seems to be the fastest way to\n * > recurse without actually running into an infinite loop, which the\n * > previous version did.\n * >\n * > Practically, a max of `2` is typically enough assuming a `Root` is\n * > passed, but it doesn’t improve performance.\n * > It gets higher with `List > ListItem > Table > TableRow > TableCell`.\n * > Using up to `10` doesn’t hurt or help either.\n * @template {UnistNode} Tree\n * Tree type.\n * @template {Uint} [Max=10]\n * Max; searches up to this depth.\n * @template {Uint} [Depth=0]\n * Current depth.\n */\n\n/**\n * @typedef {'skip' | boolean} Action\n * Union of the action types.\n *\n * @typedef {number} Index\n * Move to the sibling at `index` next (after node itself is completely\n * traversed).\n *\n * Useful if mutating the tree, such as removing the node the visitor is\n * currently on, or any of its previous siblings.\n * Results less than 0 or greater than or equal to `children.length` stop\n * traversing the parent.\n *\n * @typedef {[(Action | null | undefined | void)?, (Index | null | undefined)?]} ActionTuple\n * List with one or two values, the first an action, the second an index.\n *\n * @typedef {Action | ActionTuple | Index | null | undefined | void} VisitorResult\n * Any value that can be returned from a visitor.\n */\n\n/**\n * @callback Visitor\n * Handle a node (matching `test`, if given).\n *\n * Visitors are free to transform `node`.\n * They can also transform the parent of node (the last of `ancestors`).\n *\n * Replacing `node` itself, if `SKIP` is not returned, still causes its\n * descendants to be walked (which is a bug).\n *\n * When adding or removing previous siblings of `node` (or next siblings, in\n * case of reverse), the `Visitor` should return a new `Index` to specify the\n * sibling to traverse after `node` 
is traversed.\n * Adding or removing next siblings of `node` (or previous siblings, in case\n * of reverse) is handled as expected without needing to return a new `Index`.\n *\n * Removing the children property of an ancestor still results in them being\n * traversed.\n * @param {Visited} node\n * Found node.\n * @param {Array} ancestors\n * Ancestors of `node`.\n * @returns {VisitorResult}\n * What to do next.\n *\n * An `Index` is treated as a tuple of `[CONTINUE, Index]`.\n * An `Action` is treated as a tuple of `[Action]`.\n *\n * Passing a tuple back only makes sense if the `Action` is `SKIP`.\n * When the `Action` is `EXIT`, that action can be returned.\n * When the `Action` is `CONTINUE`, `Index` can be returned.\n * @template {UnistNode} [Visited=UnistNode]\n * Visited node type.\n * @template {UnistParent} [VisitedParents=UnistParent]\n * Ancestor type.\n */\n\n/**\n * @typedef {Visitor, Check>, Ancestor, Check>>>} BuildVisitor\n * Build a typed `Visitor` function from a tree and a test.\n *\n * It will infer which values are passed as `node` and which as `parents`.\n * @template {UnistNode} [Tree=UnistNode]\n * Tree type.\n * @template {Test} [Check=Test]\n * Test type.\n */\n\nimport {convert} from 'unist-util-is'\nimport {color} from 'unist-util-visit-parents/do-not-use-color'\n\n/** @type {Readonly} */\nconst empty = []\n\n/**\n * Continue traversing as normal.\n */\nexport const CONTINUE = true\n\n/**\n * Stop traversing immediately.\n */\nexport const EXIT = false\n\n/**\n * Do not traverse this node’s children.\n */\nexport const SKIP = 'skip'\n\n/**\n * Visit nodes, with ancestral information.\n *\n * This algorithm performs *depth-first* *tree traversal* in *preorder*\n * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**).\n *\n * You can choose for which nodes `visitor` is called by passing a `test`.\n * For complex tests, you should test yourself in `visitor`, as it will be\n * faster and will have improved type information.\n 
*\n * Walking the tree is an intensive task.\n * Make use of the return values of the visitor when possible.\n * Instead of walking a tree multiple times, walk it once, use `unist-util-is`\n * to check if a node matches, and then perform different operations.\n *\n * You can change the tree.\n * See `Visitor` for more info.\n *\n * @overload\n * @param {Tree} tree\n * @param {Check} check\n * @param {BuildVisitor} visitor\n * @param {boolean | null | undefined} [reverse]\n * @returns {undefined}\n *\n * @overload\n * @param {Tree} tree\n * @param {BuildVisitor} visitor\n * @param {boolean | null | undefined} [reverse]\n * @returns {undefined}\n *\n * @param {UnistNode} tree\n * Tree to traverse.\n * @param {Visitor | Test} test\n * `unist-util-is`-compatible test\n * @param {Visitor | boolean | null | undefined} [visitor]\n * Handle each node.\n * @param {boolean | null | undefined} [reverse]\n * Traverse in reverse preorder (NRL) instead of the default preorder (NLR).\n * @returns {undefined}\n * Nothing.\n *\n * @template {UnistNode} Tree\n * Node type.\n * @template {Test} Check\n * `unist-util-is`-compatible test.\n */\nexport function visitParents(tree, test, visitor, reverse) {\n /** @type {Test} */\n let check\n\n if (typeof test === 'function' && typeof visitor !== 'function') {\n reverse = visitor\n // @ts-expect-error no visitor given, so `visitor` is test.\n visitor = test\n } else {\n // @ts-expect-error visitor given, so `test` isn’t a visitor.\n check = test\n }\n\n const is = convert(check)\n const step = reverse ? -1 : 1\n\n factory(tree, undefined, [])()\n\n /**\n * @param {UnistNode} node\n * @param {number | undefined} index\n * @param {Array} parents\n */\n function factory(node, index, parents) {\n const value = /** @type {Record} */ (\n node && typeof node === 'object' ? node : {}\n )\n\n if (typeof value.type === 'string') {\n const name =\n // `hast`\n typeof value.tagName === 'string'\n ? 
value.tagName\n : // `xast`\n typeof value.name === 'string'\n ? value.name\n : undefined\n\n Object.defineProperty(visit, 'name', {\n value:\n 'node (' + color(node.type + (name ? '<' + name + '>' : '')) + ')'\n })\n }\n\n return visit\n\n function visit() {\n /** @type {Readonly} */\n let result = empty\n /** @type {Readonly} */\n let subresult\n /** @type {number} */\n let offset\n /** @type {Array} */\n let grandparents\n\n if (!test || is(node, index, parents[parents.length - 1] || undefined)) {\n // @ts-expect-error: `visitor` is now a visitor.\n result = toResult(visitor(node, parents))\n\n if (result[0] === EXIT) {\n return result\n }\n }\n\n if ('children' in node && node.children) {\n const nodeAsParent = /** @type {UnistParent} */ (node)\n\n if (nodeAsParent.children && result[0] !== SKIP) {\n offset = (reverse ? nodeAsParent.children.length : -1) + step\n grandparents = parents.concat(nodeAsParent)\n\n while (offset > -1 && offset < nodeAsParent.children.length) {\n const child = nodeAsParent.children[offset]\n\n subresult = factory(child, offset, grandparents)()\n\n if (subresult[0] === EXIT) {\n return subresult\n }\n\n offset =\n typeof subresult[1] === 'number' ? subresult[1] : offset + step\n }\n }\n }\n\n return result\n }\n }\n}\n\n/**\n * Turn a return value into a clean result.\n *\n * @param {VisitorResult} value\n * Valid return values from visitors.\n * @returns {Readonly}\n * Clean result.\n */\nfunction toResult(value) {\n if (Array.isArray(value)) {\n return value\n }\n\n if (typeof value === 'number') {\n return [CONTINUE, value]\n }\n\n return value === null || value === undefined ? 
empty : [value]\n}\n","/**\n * @typedef {import('unist').Node} UnistNode\n * @typedef {import('unist').Parent} UnistParent\n * @typedef {import('unist-util-visit-parents').VisitorResult} VisitorResult\n */\n\n/**\n * @typedef {Exclude | undefined} Test\n * Test from `unist-util-is`.\n *\n * Note: we have remove and add `undefined`, because otherwise when generating\n * automatic `.d.ts` files, TS tries to flatten paths from a local perspective,\n * which doesn’t work when publishing on npm.\n */\n\n// To do: use types from `unist-util-visit-parents` when it’s released.\n\n/**\n * @typedef {(\n * Fn extends (value: any) => value is infer Thing\n * ? Thing\n * : Fallback\n * )} Predicate\n * Get the value of a type guard `Fn`.\n * @template Fn\n * Value; typically function that is a type guard (such as `(x): x is Y`).\n * @template Fallback\n * Value to yield if `Fn` is not a type guard.\n */\n\n/**\n * @typedef {(\n * Check extends null | undefined // No test.\n * ? Value\n * : Value extends {type: Check} // String (type) test.\n * ? Value\n * : Value extends Check // Partial test.\n * ? Value\n * : Check extends Function // Function test.\n * ? Predicate extends Value\n * ? Predicate\n * : never\n * : never // Some other test?\n * )} MatchesOne\n * Check whether a node matches a primitive check in the type system.\n * @template Value\n * Value; typically unist `Node`.\n * @template Check\n * Value; typically `unist-util-is`-compatible test, but not arrays.\n */\n\n/**\n * @typedef {(\n * Check extends Array\n * ? MatchesOne\n * : MatchesOne\n * )} Matches\n * Check whether a node matches a check in the type system.\n * @template Value\n * Value; typically unist `Node`.\n * @template Check\n * Value; typically `unist-util-is`-compatible test.\n */\n\n/**\n * @typedef {0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10} Uint\n * Number; capped reasonably.\n */\n\n/**\n * @typedef {I extends 0 ? 1 : I extends 1 ? 2 : I extends 2 ? 3 : I extends 3 ? 4 : I extends 4 ? 
5 : I extends 5 ? 6 : I extends 6 ? 7 : I extends 7 ? 8 : I extends 8 ? 9 : 10} Increment\n * Increment a number in the type system.\n * @template {Uint} [I=0]\n * Index.\n */\n\n/**\n * @typedef {(\n * Node extends UnistParent\n * ? Node extends {children: Array}\n * ? Child extends Children ? Node : never\n * : never\n * : never\n * )} InternalParent\n * Collect nodes that can be parents of `Child`.\n * @template {UnistNode} Node\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n */\n\n/**\n * @typedef {InternalParent, Child>} Parent\n * Collect nodes in `Tree` that can be parents of `Child`.\n * @template {UnistNode} Tree\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n */\n\n/**\n * @typedef {(\n * Depth extends Max\n * ? never\n * :\n * | InternalParent\n * | InternalAncestor, Max, Increment>\n * )} InternalAncestor\n * Collect nodes in `Tree` that can be ancestors of `Child`.\n * @template {UnistNode} Node\n * All node types in a tree.\n * @template {UnistNode} Child\n * Node to search for.\n * @template {Uint} [Max=10]\n * Max; searches up to this depth.\n * @template {Uint} [Depth=0]\n * Current depth.\n */\n\n/**\n * @typedef {(\n * Tree extends UnistParent\n * ? Depth extends Max\n * ? 
Tree\n * : Tree | InclusiveDescendant>\n * : Tree\n * )} InclusiveDescendant\n * Collect all (inclusive) descendants of `Tree`.\n *\n * > 👉 **Note**: for performance reasons, this seems to be the fastest way to\n * > recurse without actually running into an infinite loop, which the\n * > previous version did.\n * >\n * > Practically, a max of `2` is typically enough assuming a `Root` is\n * > passed, but it doesn’t improve performance.\n * > It gets higher with `List > ListItem > Table > TableRow > TableCell`.\n * > Using up to `10` doesn’t hurt or help either.\n * @template {UnistNode} Tree\n * Tree type.\n * @template {Uint} [Max=10]\n * Max; searches up to this depth.\n * @template {Uint} [Depth=0]\n * Current depth.\n */\n\n/**\n * @callback Visitor\n * Handle a node (matching `test`, if given).\n *\n * Visitors are free to transform `node`.\n * They can also transform `parent`.\n *\n * Replacing `node` itself, if `SKIP` is not returned, still causes its\n * descendants to be walked (which is a bug).\n *\n * When adding or removing previous siblings of `node` (or next siblings, in\n * case of reverse), the `Visitor` should return a new `Index` to specify the\n * sibling to traverse after `node` is traversed.\n * Adding or removing next siblings of `node` (or previous siblings, in case\n * of reverse) is handled as expected without needing to return a new `Index`.\n *\n * Removing the children property of `parent` still results in them being\n * traversed.\n * @param {Visited} node\n * Found node.\n * @param {Visited extends UnistNode ? number | undefined : never} index\n * Index of `node` in `parent`.\n * @param {Ancestor extends UnistParent ? 
Ancestor | undefined : never} parent\n * Parent of `node`.\n * @returns {VisitorResult}\n * What to do next.\n *\n * An `Index` is treated as a tuple of `[CONTINUE, Index]`.\n * An `Action` is treated as a tuple of `[Action]`.\n *\n * Passing a tuple back only makes sense if the `Action` is `SKIP`.\n * When the `Action` is `EXIT`, that action can be returned.\n * When the `Action` is `CONTINUE`, `Index` can be returned.\n * @template {UnistNode} [Visited=UnistNode]\n * Visited node type.\n * @template {UnistParent} [Ancestor=UnistParent]\n * Ancestor type.\n */\n\n/**\n * @typedef {Visitor>} BuildVisitorFromMatch\n * Build a typed `Visitor` function from a node and all possible parents.\n *\n * It will infer which values are passed as `node` and which as `parent`.\n * @template {UnistNode} Visited\n * Node type.\n * @template {UnistParent} Ancestor\n * Parent type.\n */\n\n/**\n * @typedef {(\n * BuildVisitorFromMatch<\n * Matches,\n * Extract\n * >\n * )} BuildVisitorFromDescendants\n * Build a typed `Visitor` function from a list of descendants and a test.\n *\n * It will infer which values are passed as `node` and which as `parent`.\n * @template {UnistNode} Descendant\n * Node type.\n * @template {Test} Check\n * Test type.\n */\n\n/**\n * @typedef {(\n * BuildVisitorFromDescendants<\n * InclusiveDescendant,\n * Check\n * >\n * )} BuildVisitor\n * Build a typed `Visitor` function from a tree and a test.\n *\n * It will infer which values are passed as `node` and which as `parent`.\n * @template {UnistNode} [Tree=UnistNode]\n * Node type.\n * @template {Test} [Check=Test]\n * Test type.\n */\n\nimport {visitParents} from 'unist-util-visit-parents'\n\nexport {CONTINUE, EXIT, SKIP} from 'unist-util-visit-parents'\n\n/**\n * Visit nodes.\n *\n * This algorithm performs *depth-first* *tree traversal* in *preorder*\n * (**NLR**) or if `reverse` is given, in *reverse preorder* (**NRL**).\n *\n * You can choose for which nodes `visitor` is called by passing a `test`.\n 
* For complex tests, you should test yourself in `visitor`, as it will be\n * faster and will have improved type information.\n *\n * Walking the tree is an intensive task.\n * Make use of the return values of the visitor when possible.\n * Instead of walking a tree multiple times, walk it once, use `unist-util-is`\n * to check if a node matches, and then perform different operations.\n *\n * You can change the tree.\n * See `Visitor` for more info.\n *\n * @overload\n * @param {Tree} tree\n * @param {Check} check\n * @param {BuildVisitor} visitor\n * @param {boolean | null | undefined} [reverse]\n * @returns {undefined}\n *\n * @overload\n * @param {Tree} tree\n * @param {BuildVisitor} visitor\n * @param {boolean | null | undefined} [reverse]\n * @returns {undefined}\n *\n * @param {UnistNode} tree\n * Tree to traverse.\n * @param {Visitor | Test} testOrVisitor\n * `unist-util-is`-compatible test (optional, omit to pass a visitor).\n * @param {Visitor | boolean | null | undefined} [visitorOrReverse]\n * Handle each node (when test is omitted, pass `reverse`).\n * @param {boolean | null | undefined} [maybeReverse=false]\n * Traverse in reverse preorder (NRL) instead of the default preorder (NLR).\n * @returns {undefined}\n * Nothing.\n *\n * @template {UnistNode} Tree\n * Node type.\n * @template {Test} Check\n * `unist-util-is`-compatible test.\n */\nexport function visit(tree, testOrVisitor, visitorOrReverse, maybeReverse) {\n /** @type {boolean | null | undefined} */\n let reverse\n /** @type {Test} */\n let test\n /** @type {Visitor} */\n let visitor\n\n if (\n typeof testOrVisitor === 'function' &&\n typeof visitorOrReverse !== 'function'\n ) {\n test = undefined\n visitor = testOrVisitor\n reverse = visitorOrReverse\n } else {\n // @ts-expect-error: assume the overload with test was given.\n test = testOrVisitor\n // @ts-expect-error: assume the overload with test was given.\n visitor = visitorOrReverse\n reverse = maybeReverse\n }\n\n visitParents(tree, 
test, overload, reverse)\n\n /**\n * @param {UnistNode} node\n * @param {Array} parents\n */\n function overload(node, parents) {\n const parent = parents[parents.length - 1]\n const index = parent ? parent.children.indexOf(node) : undefined\n return visitor(node, index, parent)\n }\n}\n","/**\n * @typedef {import('hast').ElementContent} ElementContent\n *\n * @typedef {import('mdast').Nodes} Nodes\n * @typedef {import('mdast').Reference} Reference\n *\n * @typedef {import('./state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Return the content of a reference without definition as plain text.\n *\n * @param {State} state\n * Info passed around.\n * @param {Extract} node\n * Reference node (image, link).\n * @returns {Array}\n * hast content.\n */\nexport function revert(state, node) {\n const subtype = node.referenceType\n let suffix = ']'\n\n if (subtype === 'collapsed') {\n suffix += '[]'\n } else if (subtype === 'full') {\n suffix += '[' + (node.label || node.identifier) + ']'\n }\n\n if (node.type === 'imageReference') {\n return [{type: 'text', value: '![' + node.alt + suffix}]\n }\n\n const contents = state.all(node)\n const head = contents[0]\n\n if (head && head.type === 'text') {\n head.value = '[' + head.value\n } else {\n contents.unshift({type: 'text', value: '['})\n }\n\n const tail = contents[contents.length - 1]\n\n if (tail && tail.type === 'text') {\n tail.value += suffix\n } else {\n contents.push({type: 'text', value: suffix})\n }\n\n return contents\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').ElementContent} ElementContent\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').ListItem} ListItem\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `listItem` node into hast.\n *\n * @param {State} 
state\n * Info passed around.\n * @param {ListItem} node\n * mdast node.\n * @param {Parents | undefined} parent\n * Parent of `node`.\n * @returns {Element}\n * hast node.\n */\nexport function listItem(state, node, parent) {\n const results = state.all(node)\n const loose = parent ? listLoose(parent) : listItemLoose(node)\n /** @type {Properties} */\n const properties = {}\n /** @type {Array} */\n const children = []\n\n if (typeof node.checked === 'boolean') {\n const head = results[0]\n /** @type {Element} */\n let paragraph\n\n if (head && head.type === 'element' && head.tagName === 'p') {\n paragraph = head\n } else {\n paragraph = {type: 'element', tagName: 'p', properties: {}, children: []}\n results.unshift(paragraph)\n }\n\n if (paragraph.children.length > 0) {\n paragraph.children.unshift({type: 'text', value: ' '})\n }\n\n paragraph.children.unshift({\n type: 'element',\n tagName: 'input',\n properties: {type: 'checkbox', checked: node.checked, disabled: true},\n children: []\n })\n\n // According to github-markdown-css, this class hides bullet.\n // See: .\n properties.className = ['task-list-item']\n }\n\n let index = -1\n\n while (++index < results.length) {\n const child = results[index]\n\n // Add eols before nodes, except if this is a loose, first paragraph.\n if (\n loose ||\n index !== 0 ||\n child.type !== 'element' ||\n child.tagName !== 'p'\n ) {\n children.push({type: 'text', value: '\\n'})\n }\n\n if (child.type === 'element' && child.tagName === 'p' && !loose) {\n children.push(...child.children)\n } else {\n children.push(child)\n }\n }\n\n const tail = results[results.length - 1]\n\n // Add a final eol.\n if (tail && (loose || tail.type !== 'element' || tail.tagName !== 'p')) {\n children.push({type: 'text', value: '\\n'})\n }\n\n /** @type {Element} */\n const result = {type: 'element', tagName: 'li', properties, children}\n state.patch(node, result)\n return state.applyData(node, result)\n}\n\n/**\n * @param {Parents} node\n * @return 
{Boolean}\n */\nfunction listLoose(node) {\n let loose = false\n if (node.type === 'list') {\n loose = node.spread || false\n const children = node.children\n let index = -1\n\n while (!loose && ++index < children.length) {\n loose = listItemLoose(children[index])\n }\n }\n\n return loose\n}\n\n/**\n * @param {ListItem} node\n * @return {Boolean}\n */\nfunction listItemLoose(node) {\n const spread = node.spread\n\n return spread === null || spread === undefined\n ? node.children.length > 1\n : spread\n}\n","const tab = 9 /* `\\t` */\nconst space = 32 /* ` ` */\n\n/**\n * Remove initial and final spaces and tabs at the line breaks in `value`.\n * Does not trim initial and final spaces and tabs of the value itself.\n *\n * @param {string} value\n * Value to trim.\n * @returns {string}\n * Trimmed value.\n */\nexport function trimLines(value) {\n const source = String(value)\n const search = /\\r?\\n|\\r/g\n let match = search.exec(source)\n let last = 0\n /** @type {Array} */\n const lines = []\n\n while (match) {\n lines.push(\n trimLine(source.slice(last, match.index), last > 0, true),\n match[0]\n )\n\n last = match.index + match[0].length\n match = search.exec(source)\n }\n\n lines.push(trimLine(source.slice(last), last > 0, false))\n\n return lines.join('')\n}\n\n/**\n * @param {string} value\n * Line to trim.\n * @param {boolean} start\n * Whether to trim the start of the line.\n * @param {boolean} end\n * Whether to trim the end of the line.\n * @returns {string}\n * Trimmed line.\n */\nfunction trimLine(value, start, end) {\n let startIndex = 0\n let endIndex = value.length\n\n if (start) {\n let code = value.codePointAt(startIndex)\n\n while (code === tab || code === space) {\n startIndex++\n code = value.codePointAt(startIndex)\n }\n }\n\n if (end) {\n let code = value.codePointAt(endIndex - 1)\n\n while (code === tab || code === space) {\n endIndex--\n code = value.codePointAt(endIndex - 1)\n }\n }\n\n return endIndex > startIndex ? 
value.slice(startIndex, endIndex) : ''\n}\n","import {blockquote} from './blockquote.js'\nimport {hardBreak} from './break.js'\nimport {code} from './code.js'\nimport {strikethrough} from './delete.js'\nimport {emphasis} from './emphasis.js'\nimport {footnoteReference} from './footnote-reference.js'\nimport {heading} from './heading.js'\nimport {html} from './html.js'\nimport {imageReference} from './image-reference.js'\nimport {image} from './image.js'\nimport {inlineCode} from './inline-code.js'\nimport {linkReference} from './link-reference.js'\nimport {link} from './link.js'\nimport {listItem} from './list-item.js'\nimport {list} from './list.js'\nimport {paragraph} from './paragraph.js'\nimport {root} from './root.js'\nimport {strong} from './strong.js'\nimport {table} from './table.js'\nimport {tableRow} from './table-row.js'\nimport {tableCell} from './table-cell.js'\nimport {text} from './text.js'\nimport {thematicBreak} from './thematic-break.js'\n\n/**\n * Default handlers for nodes.\n *\n * @satisfies {import('../state.js').Handlers}\n */\nexport const handlers = {\n blockquote,\n break: hardBreak,\n code,\n delete: strikethrough,\n emphasis,\n footnoteReference,\n heading,\n html,\n imageReference,\n image,\n inlineCode,\n linkReference,\n link,\n listItem,\n list,\n paragraph,\n // @ts-expect-error: root is different, but hard to type.\n root,\n strong,\n table,\n tableCell,\n tableRow,\n text,\n thematicBreak,\n toml: ignore,\n yaml: ignore,\n definition: ignore,\n footnoteDefinition: ignore\n}\n\n// Return nothing for nodes that are ignored.\nfunction ignore() {\n return undefined\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Blockquote} Blockquote\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `blockquote` node into hast.\n *\n * @param {State} state\n * Info passed around.\n * @param {Blockquote} node\n * mdast node.\n 
* @returns {Element}\n * hast node.\n */\nexport function blockquote(state, node) {\n /** @type {Element} */\n const result = {\n type: 'element',\n tagName: 'blockquote',\n properties: {},\n children: state.wrap(state.all(node), true)\n }\n state.patch(node, result)\n return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').Text} Text\n * @typedef {import('mdast').Break} Break\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `break` node into hast.\n *\n * @param {State} state\n * Info passed around.\n * @param {Break} node\n * mdast node.\n * @returns {Array}\n * hast element content.\n */\nexport function hardBreak(state, node) {\n /** @type {Element} */\n const result = {type: 'element', tagName: 'br', properties: {}, children: []}\n state.patch(node, result)\n return [state.applyData(node, result), {type: 'text', value: '\\n'}]\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').Code} Code\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `code` node into hast.\n *\n * @param {State} state\n * Info passed around.\n * @param {Code} node\n * mdast node.\n * @returns {Element}\n * hast node.\n */\nexport function code(state, node) {\n const value = node.value ? node.value + '\\n' : ''\n /** @type {Properties} */\n const properties = {}\n\n if (node.lang) {\n properties.className = ['language-' + node.lang]\n }\n\n // Create ``.\n /** @type {Element} */\n let result = {\n type: 'element',\n tagName: 'code',\n properties,\n children: [{type: 'text', value}]\n }\n\n if (node.meta) {\n result.data = {meta: node.meta}\n }\n\n state.patch(node, result)\n result = state.applyData(node, result)\n\n // Create `
    `.\n  result = {type: 'element', tagName: 'pre', properties: {}, children: [result]}\n  state.patch(node, result)\n  return result\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Delete} Delete\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `delete` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Delete} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function strikethrough(state, node) {\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'del',\n    properties: {},\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Emphasis} Emphasis\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `emphasis` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Emphasis} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function emphasis(state, node) {\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'em',\n    properties: {},\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').FootnoteReference} FootnoteReference\n * @typedef {import('../state.js').State} State\n */\n\nimport {normalizeUri} from 'micromark-util-sanitize-uri'\n\n/**\n * Turn an mdast `footnoteReference` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {FootnoteReference} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function footnoteReference(state, node) {\n  const 
clobberPrefix =\n    typeof state.options.clobberPrefix === 'string'\n      ? state.options.clobberPrefix\n      : 'user-content-'\n  const id = String(node.identifier).toUpperCase()\n  const safeId = normalizeUri(id.toLowerCase())\n  const index = state.footnoteOrder.indexOf(id)\n  /** @type {number} */\n  let counter\n\n  let reuseCounter = state.footnoteCounts.get(id)\n\n  if (reuseCounter === undefined) {\n    reuseCounter = 0\n    state.footnoteOrder.push(id)\n    counter = state.footnoteOrder.length\n  } else {\n    counter = index + 1\n  }\n\n  reuseCounter += 1\n  state.footnoteCounts.set(id, reuseCounter)\n\n  /** @type {Element} */\n  const link = {\n    type: 'element',\n    tagName: 'a',\n    properties: {\n      href: '#' + clobberPrefix + 'fn-' + safeId,\n      id:\n        clobberPrefix +\n        'fnref-' +\n        safeId +\n        (reuseCounter > 1 ? '-' + reuseCounter : ''),\n      dataFootnoteRef: true,\n      ariaDescribedBy: ['footnote-label']\n    },\n    children: [{type: 'text', value: String(counter)}]\n  }\n  state.patch(node, link)\n\n  /** @type {Element} */\n  const sup = {\n    type: 'element',\n    tagName: 'sup',\n    properties: {},\n    children: [link]\n  }\n  state.patch(node, sup)\n  return state.applyData(node, sup)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Heading} Heading\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `heading` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Heading} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function heading(state, node) {\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'h' + node.depth,\n    properties: {},\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef 
{import('hast').Element} Element\n * @typedef {import('mdast').Html} Html\n * @typedef {import('../state.js').State} State\n * @typedef {import('../../index.js').Raw} Raw\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `html` node into hast (`raw` node in dangerous mode, otherwise\n * nothing).\n *\n * @param {State} state\n *   Info passed around.\n * @param {Html} node\n *   mdast node.\n * @returns {Element | Raw | undefined}\n *   hast node.\n */\nexport function html(state, node) {\n  if (state.options.allowDangerousHtml) {\n    /** @type {Raw} */\n    const result = {type: 'raw', value: node.value}\n    state.patch(node, result)\n    return state.applyData(node, result)\n  }\n\n  return undefined\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').ElementContent} ElementContent\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').ImageReference} ImageReference\n * @typedef {import('../state.js').State} State\n */\n\nimport {normalizeUri} from 'micromark-util-sanitize-uri'\nimport {revert} from '../revert.js'\n\n/**\n * Turn an mdast `imageReference` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {ImageReference} node\n *   mdast node.\n * @returns {Array | ElementContent}\n *   hast node.\n */\nexport function imageReference(state, node) {\n  const id = String(node.identifier).toUpperCase()\n  const definition = state.definitionById.get(id)\n\n  if (!definition) {\n    return revert(state, node)\n  }\n\n  /** @type {Properties} */\n  const properties = {src: normalizeUri(definition.url || ''), alt: node.alt}\n\n  if (definition.title !== null && definition.title !== undefined) {\n    properties.title = definition.title\n  }\n\n  /** @type {Element} */\n  const result = {type: 'element', tagName: 'img', properties, children: []}\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef 
{import('hast').Element} Element\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').Image} Image\n * @typedef {import('../state.js').State} State\n */\n\nimport {normalizeUri} from 'micromark-util-sanitize-uri'\n\n/**\n * Turn an mdast `image` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Image} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function image(state, node) {\n  /** @type {Properties} */\n  const properties = {src: normalizeUri(node.url)}\n\n  if (node.alt !== null && node.alt !== undefined) {\n    properties.alt = node.alt\n  }\n\n  if (node.title !== null && node.title !== undefined) {\n    properties.title = node.title\n  }\n\n  /** @type {Element} */\n  const result = {type: 'element', tagName: 'img', properties, children: []}\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').Text} Text\n * @typedef {import('mdast').InlineCode} InlineCode\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `inlineCode` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {InlineCode} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function inlineCode(state, node) {\n  /** @type {Text} */\n  const text = {type: 'text', value: node.value.replace(/\\r?\\n|\\r/g, ' ')}\n  state.patch(node, text)\n\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'code',\n    properties: {},\n    children: [text]\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').ElementContent} ElementContent\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').LinkReference} LinkReference\n * 
@typedef {import('../state.js').State} State\n */\n\nimport {normalizeUri} from 'micromark-util-sanitize-uri'\nimport {revert} from '../revert.js'\n\n/**\n * Turn an mdast `linkReference` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {LinkReference} node\n *   mdast node.\n * @returns {Array | ElementContent}\n *   hast node.\n */\nexport function linkReference(state, node) {\n  const id = String(node.identifier).toUpperCase()\n  const definition = state.definitionById.get(id)\n\n  if (!definition) {\n    return revert(state, node)\n  }\n\n  /** @type {Properties} */\n  const properties = {href: normalizeUri(definition.url || '')}\n\n  if (definition.title !== null && definition.title !== undefined) {\n    properties.title = definition.title\n  }\n\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'a',\n    properties,\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').Link} Link\n * @typedef {import('../state.js').State} State\n */\n\nimport {normalizeUri} from 'micromark-util-sanitize-uri'\n\n/**\n * Turn an mdast `link` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Link} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function link(state, node) {\n  /** @type {Properties} */\n  const properties = {href: normalizeUri(node.url)}\n\n  if (node.title !== null && node.title !== undefined) {\n    properties.title = node.title\n  }\n\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'a',\n    properties,\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').Properties} Properties\n * 
@typedef {import('mdast').List} List\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `list` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {List} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function list(state, node) {\n  /** @type {Properties} */\n  const properties = {}\n  const results = state.all(node)\n  let index = -1\n\n  if (typeof node.start === 'number' && node.start !== 1) {\n    properties.start = node.start\n  }\n\n  // Like GitHub, add a class for custom styling.\n  while (++index < results.length) {\n    const child = results[index]\n\n    if (\n      child.type === 'element' &&\n      child.tagName === 'li' &&\n      child.properties &&\n      Array.isArray(child.properties.className) &&\n      child.properties.className.includes('task-list-item')\n    ) {\n      properties.className = ['contains-task-list']\n      break\n    }\n  }\n\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: node.ordered ? 
'ol' : 'ul',\n    properties,\n    children: state.wrap(results, true)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Paragraph} Paragraph\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `paragraph` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Paragraph} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function paragraph(state, node) {\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'p',\n    properties: {},\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Parents} HastParents\n * @typedef {import('hast').Root} HastRoot\n * @typedef {import('mdast').Root} MdastRoot\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `root` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {MdastRoot} node\n *   mdast node.\n * @returns {HastParents}\n *   hast node.\n */\nexport function root(state, node) {\n  /** @type {HastRoot} */\n  const result = {type: 'root', children: state.wrap(state.all(node))}\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Strong} Strong\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `strong` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Strong} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function strong(state, node) {\n  /** @type {Element} */\n  const result = {\n    type: 
'element',\n    tagName: 'strong',\n    properties: {},\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').Table} Table\n * @typedef {import('../state.js').State} State\n */\n\nimport {pointEnd, pointStart} from 'unist-util-position'\n\n/**\n * Turn an mdast `table` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {Table} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function table(state, node) {\n  const rows = state.all(node)\n  const firstRow = rows.shift()\n  /** @type {Array} */\n  const tableContent = []\n\n  if (firstRow) {\n    /** @type {Element} */\n    const head = {\n      type: 'element',\n      tagName: 'thead',\n      properties: {},\n      children: state.wrap([firstRow], true)\n    }\n    state.patch(node.children[0], head)\n    tableContent.push(head)\n  }\n\n  if (rows.length > 0) {\n    /** @type {Element} */\n    const body = {\n      type: 'element',\n      tagName: 'tbody',\n      properties: {},\n      children: state.wrap(rows, true)\n    }\n\n    const start = pointStart(node.children[1])\n    const end = pointEnd(node.children[node.children.length - 1])\n    if (start && end) body.position = {start, end}\n    tableContent.push(body)\n  }\n\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'table',\n    properties: {},\n    children: state.wrap(tableContent, true)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').TableCell} TableCell\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `tableCell` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {TableCell} node\n *   mdast node.\n * 
@returns {Element}\n *   hast node.\n */\nexport function tableCell(state, node) {\n  // Note: this function is normally not called: see `table-row` for how rows\n  // and their cells are compiled.\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'td', // Assume body cell.\n    properties: {},\n    children: state.all(node)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('hast').ElementContent} ElementContent\n * @typedef {import('hast').Properties} Properties\n * @typedef {import('mdast').Parents} Parents\n * @typedef {import('mdast').TableRow} TableRow\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `tableRow` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {TableRow} node\n *   mdast node.\n * @param {Parents | undefined} parent\n *   Parent of `node`.\n * @returns {Element}\n *   hast node.\n */\nexport function tableRow(state, node, parent) {\n  const siblings = parent ? parent.children : undefined\n  // Generate a body row when without parent.\n  const rowIndex = siblings ? siblings.indexOf(node) : 1\n  const tagName = rowIndex === 0 ? 'th' : 'td'\n  // To do: option to use `style`?\n  const align = parent && parent.type === 'table' ? parent.align : undefined\n  const length = align ? align.length : node.children.length\n  let cellIndex = -1\n  /** @type {Array} */\n  const cells = []\n\n  while (++cellIndex < length) {\n    // Note: can also be undefined.\n    const cell = node.children[cellIndex]\n    /** @type {Properties} */\n    const properties = {}\n    const alignValue = align ? 
align[cellIndex] : undefined\n\n    if (alignValue) {\n      properties.align = alignValue\n    }\n\n    /** @type {Element} */\n    let result = {type: 'element', tagName, properties, children: []}\n\n    if (cell) {\n      result.children = state.all(cell)\n      state.patch(cell, result)\n      result = state.applyData(cell, result)\n    }\n\n    cells.push(result)\n  }\n\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'tr',\n    properties: {},\n    children: state.wrap(cells, true)\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} HastElement\n * @typedef {import('hast').Text} HastText\n * @typedef {import('mdast').Text} MdastText\n * @typedef {import('../state.js').State} State\n */\n\nimport {trimLines} from 'trim-lines'\n\n/**\n * Turn an mdast `text` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {MdastText} node\n *   mdast node.\n * @returns {HastElement | HastText}\n *   hast node.\n */\nexport function text(state, node) {\n  /** @type {HastText} */\n  const result = {type: 'text', value: trimLines(String(node.value))}\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} Element\n * @typedef {import('mdast').ThematicBreak} ThematicBreak\n * @typedef {import('../state.js').State} State\n */\n\n// Make VS Code show references to the above types.\n''\n\n/**\n * Turn an mdast `thematicBreak` node into hast.\n *\n * @param {State} state\n *   Info passed around.\n * @param {ThematicBreak} node\n *   mdast node.\n * @returns {Element}\n *   hast node.\n */\nexport function thematicBreak(state, node) {\n  /** @type {Element} */\n  const result = {\n    type: 'element',\n    tagName: 'hr',\n    properties: {},\n    children: []\n  }\n  state.patch(node, result)\n  return state.applyData(node, result)\n}\n","/**\n * @typedef {import('hast').Element} 
HastElement\n * @typedef {import('hast').ElementContent} HastElementContent\n * @typedef {import('hast').Nodes} HastNodes\n * @typedef {import('hast').Properties} HastProperties\n * @typedef {import('hast').RootContent} HastRootContent\n * @typedef {import('hast').Text} HastText\n *\n * @typedef {import('mdast').Definition} MdastDefinition\n * @typedef {import('mdast').FootnoteDefinition} MdastFootnoteDefinition\n * @typedef {import('mdast').Nodes} MdastNodes\n * @typedef {import('mdast').Parents} MdastParents\n *\n * @typedef {import('vfile').VFile} VFile\n *\n * @typedef {import('./footer.js').FootnoteBackContentTemplate} FootnoteBackContentTemplate\n * @typedef {import('./footer.js').FootnoteBackLabelTemplate} FootnoteBackLabelTemplate\n */\n\n/**\n * @callback Handler\n *   Handle a node.\n * @param {State} state\n *   Info passed around.\n * @param {any} node\n *   mdast node to handle.\n * @param {MdastParents | undefined} parent\n *   Parent of `node`.\n * @returns {Array | HastElementContent | undefined}\n *   hast node.\n *\n * @typedef {Partial>} Handlers\n *   Handle nodes.\n *\n * @typedef Options\n *   Configuration (optional).\n * @property {boolean | null | undefined} [allowDangerousHtml=false]\n *   Whether to persist raw HTML in markdown in the hast tree (default:\n *   `false`).\n * @property {string | null | undefined} [clobberPrefix='user-content-']\n *   Prefix to use before the `id` property on footnotes to prevent them from\n *   *clobbering* (default: `'user-content-'`).\n *\n *   Pass `''` for trusted markdown and when you are careful with\n *   polyfilling.\n *   You could pass a different prefix.\n *\n *   DOM clobbering is this:\n *\n *   ```html\n *   

    \n * \n * ```\n *\n * The above example shows that elements are made available by browsers, by\n * their ID, on the `window` object.\n * This is a security risk because you might be expecting some other variable\n * at that place.\n * It can also break polyfills.\n * Using a prefix solves these problems.\n * @property {VFile | null | undefined} [file]\n * Corresponding virtual file representing the input document (optional).\n * @property {FootnoteBackContentTemplate | string | null | undefined} [footnoteBackContent]\n * Content of the backreference back to references (default: `defaultFootnoteBackContent`).\n *\n * The default value is:\n *\n * ```js\n * function defaultFootnoteBackContent(_, rereferenceIndex) {\n * const result = [{type: 'text', value: '↩'}]\n *\n * if (rereferenceIndex > 1) {\n * result.push({\n * type: 'element',\n * tagName: 'sup',\n * properties: {},\n * children: [{type: 'text', value: String(rereferenceIndex)}]\n * })\n * }\n *\n * return result\n * }\n * ```\n *\n * This content is used in the `a` element of each backreference (the `↩`\n * links).\n * @property {FootnoteBackLabelTemplate | string | null | undefined} [footnoteBackLabel]\n * Label to describe the backreference back to references (default:\n * `defaultFootnoteBackLabel`).\n *\n * The default value is:\n *\n * ```js\n * function defaultFootnoteBackLabel(referenceIndex, rereferenceIndex) {\n * return (\n * 'Back to reference ' +\n * (referenceIndex + 1) +\n * (rereferenceIndex > 1 ? 
'-' + rereferenceIndex : '')\n * )\n * }\n * ```\n *\n * Change it when the markdown is not in English.\n *\n * This label is used in the `ariaLabel` property on each backreference\n * (the `↩` links).\n * It affects users of assistive technology.\n * @property {string | null | undefined} [footnoteLabel='Footnotes']\n * Textual label to use for the footnotes section (default: `'Footnotes'`).\n *\n * Change it when the markdown is not in English.\n *\n * This label is typically hidden visually (assuming a `sr-only` CSS class\n * is defined that does that) and so affects screen readers only.\n * If you do have such a class, but want to show this section to everyone,\n * pass different properties with the `footnoteLabelProperties` option.\n * @property {HastProperties | null | undefined} [footnoteLabelProperties={className: ['sr-only']}]\n * Properties to use on the footnote label (default: `{className:\n * ['sr-only']}`).\n *\n * Change it to show the label and add other properties.\n *\n * This label is typically hidden visually (assuming an `sr-only` CSS class\n * is defined that does that) and so affects screen readers only.\n * If you do have such a class, but want to show this section to everyone,\n * pass an empty string.\n * You can also add different properties.\n *\n * > **Note**: `id: 'footnote-label'` is always added, because footnote\n * > calls use it with `aria-describedby` to provide an accessible label.\n * @property {string | null | undefined} [footnoteLabelTagName='h2']\n * HTML tag name to use for the footnote label element (default: `'h2'`).\n *\n * Change it to match your document structure.\n *\n * This label is typically hidden visually (assuming a `sr-only` CSS class\n * is defined that does that) and so affects screen readers only.\n * If you do have such a class, but want to show this section to everyone,\n * pass different properties with the `footnoteLabelProperties` option.\n * @property {Handlers | null | undefined} [handlers]\n * Extra 
handlers for nodes (optional).\n * @property {Array | null | undefined} [passThrough]\n * List of custom mdast node types to pass through (keep) in hast (note that\n * the node itself is passed, but eventual children are transformed)\n * (optional).\n * @property {Handler | null | undefined} [unknownHandler]\n * Handler for all unknown nodes (optional).\n *\n * @typedef State\n * Info passed around.\n * @property {(node: MdastNodes) => Array} all\n * Transform the children of an mdast parent to hast.\n * @property {(from: MdastNodes, to: Type) => HastElement | Type} applyData\n * Honor the `data` of `from`, and generate an element instead of `node`.\n * @property {Map} definitionById\n * Definitions by their identifier.\n * @property {Map} footnoteById\n * Footnote definitions by their identifier.\n * @property {Map} footnoteCounts\n * Counts for how often the same footnote was called.\n * @property {Array} footnoteOrder\n * Identifiers of order when footnote calls first appear in tree order.\n * @property {Handlers} handlers\n * Applied handlers.\n * @property {(node: MdastNodes, parent: MdastParents | undefined) => Array | HastElementContent | undefined} one\n * Transform an mdast node to hast.\n * @property {Options} options\n * Configuration.\n * @property {(from: MdastNodes, node: HastNodes) => undefined} patch\n * Copy a node’s positional info.\n * @property {(nodes: Array, loose?: boolean | undefined) => Array} wrap\n * Wrap `nodes` with line endings between each node, adds initial/final line endings when `loose`.\n */\n\nimport structuredClone from '@ungap/structured-clone'\nimport {visit} from 'unist-util-visit'\nimport {position} from 'unist-util-position'\nimport {handlers as defaultHandlers} from './handlers/index.js'\n\nconst own = {}.hasOwnProperty\n\n/** @type {Options} */\nconst emptyOptions = {}\n\n/**\n * Create `state` from an mdast tree.\n *\n * @param {MdastNodes} tree\n * mdast node to transform.\n * @param {Options | null | undefined} 
[options]\n * Configuration (optional).\n * @returns {State}\n * `state` function.\n */\nexport function createState(tree, options) {\n const settings = options || emptyOptions\n /** @type {Map} */\n const definitionById = new Map()\n /** @type {Map} */\n const footnoteById = new Map()\n /** @type {Map} */\n const footnoteCounts = new Map()\n /** @type {Handlers} */\n // @ts-expect-error: the root handler returns a root.\n // Hard to type.\n const handlers = {...defaultHandlers, ...settings.handlers}\n\n /** @type {State} */\n const state = {\n all,\n applyData,\n definitionById,\n footnoteById,\n footnoteCounts,\n footnoteOrder: [],\n handlers,\n one,\n options: settings,\n patch,\n wrap\n }\n\n visit(tree, function (node) {\n if (node.type === 'definition' || node.type === 'footnoteDefinition') {\n const map = node.type === 'definition' ? definitionById : footnoteById\n const id = String(node.identifier).toUpperCase()\n\n // Mimick CM behavior of link definitions.\n // See: .\n if (!map.has(id)) {\n // @ts-expect-error: node type matches map.\n map.set(id, node)\n }\n }\n })\n\n return state\n\n /**\n * Transform an mdast node into a hast node.\n *\n * @param {MdastNodes} node\n * mdast node.\n * @param {MdastParents | undefined} [parent]\n * Parent of `node`.\n * @returns {Array | HastElementContent | undefined}\n * Resulting hast node.\n */\n function one(node, parent) {\n const type = node.type\n const handle = state.handlers[type]\n\n if (own.call(state.handlers, type) && handle) {\n return handle(state, node, parent)\n }\n\n if (state.options.passThrough && state.options.passThrough.includes(type)) {\n if ('children' in node) {\n const {children, ...shallow} = node\n const result = structuredClone(shallow)\n // @ts-expect-error: TS doesn’t understand…\n result.children = state.all(node)\n // @ts-expect-error: TS doesn’t understand…\n return result\n }\n\n // @ts-expect-error: it’s custom.\n return structuredClone(node)\n }\n\n const unknown = 
state.options.unknownHandler || defaultUnknownHandler\n\n return unknown(state, node, parent)\n }\n\n /**\n * Transform the children of an mdast node into hast nodes.\n *\n * @param {MdastNodes} parent\n * mdast node to compile\n * @returns {Array}\n * Resulting hast nodes.\n */\n function all(parent) {\n /** @type {Array} */\n const values = []\n\n if ('children' in parent) {\n const nodes = parent.children\n let index = -1\n while (++index < nodes.length) {\n const result = state.one(nodes[index], parent)\n\n // To do: see if we van clean this? Can we merge texts?\n if (result) {\n if (index && nodes[index - 1].type === 'break') {\n if (!Array.isArray(result) && result.type === 'text') {\n result.value = trimMarkdownSpaceStart(result.value)\n }\n\n if (!Array.isArray(result) && result.type === 'element') {\n const head = result.children[0]\n\n if (head && head.type === 'text') {\n head.value = trimMarkdownSpaceStart(head.value)\n }\n }\n }\n\n if (Array.isArray(result)) {\n values.push(...result)\n } else {\n values.push(result)\n }\n }\n }\n }\n\n return values\n }\n}\n\n/**\n * Copy a node’s positional info.\n *\n * @param {MdastNodes} from\n * mdast node to copy from.\n * @param {HastNodes} to\n * hast node to copy into.\n * @returns {undefined}\n * Nothing.\n */\nfunction patch(from, to) {\n if (from.position) to.position = position(from)\n}\n\n/**\n * Honor the `data` of `from` and maybe generate an element instead of `to`.\n *\n * @template {HastNodes} Type\n * Node type.\n * @param {MdastNodes} from\n * mdast node to use data from.\n * @param {Type} to\n * hast node to change.\n * @returns {HastElement | Type}\n * Nothing.\n */\nfunction applyData(from, to) {\n /** @type {HastElement | Type} */\n let result = to\n\n // Handle `data.hName`, `data.hProperties, `data.hChildren`.\n if (from && from.data) {\n const hName = from.data.hName\n const hChildren = from.data.hChildren\n const hProperties = from.data.hProperties\n\n if (typeof hName === 'string') {\n 
// Transforming the node resulted in an element with a different name\n // than wanted:\n if (result.type === 'element') {\n result.tagName = hName\n }\n // Transforming the node resulted in a non-element, which happens for\n // raw, text, and root nodes (unless custom handlers are passed).\n // The intent of `hName` is to create an element, but likely also to keep\n // the content around (otherwise: pass `hChildren`).\n else {\n /** @type {Array} */\n // @ts-expect-error: assume no doctypes in `root`.\n const children = 'children' in result ? result.children : [result]\n result = {type: 'element', tagName: hName, properties: {}, children}\n }\n }\n\n if (result.type === 'element' && hProperties) {\n Object.assign(result.properties, structuredClone(hProperties))\n }\n\n if (\n 'children' in result &&\n result.children &&\n hChildren !== null &&\n hChildren !== undefined\n ) {\n result.children = hChildren\n }\n }\n\n return result\n}\n\n/**\n * Transform an unknown node.\n *\n * @param {State} state\n * Info passed around.\n * @param {MdastNodes} node\n * Unknown mdast node.\n * @returns {HastElement | HastText}\n * Resulting hast node.\n */\nfunction defaultUnknownHandler(state, node) {\n const data = node.data || {}\n /** @type {HastElement | HastText} */\n const result =\n 'value' in node &&\n !(own.call(data, 'hProperties') || own.call(data, 'hChildren'))\n ? 
{type: 'text', value: node.value}\n : {\n type: 'element',\n tagName: 'div',\n properties: {},\n children: state.all(node)\n }\n\n state.patch(node, result)\n return state.applyData(node, result)\n}\n\n/**\n * Wrap `nodes` with line endings between each node.\n *\n * @template {HastRootContent} Type\n * Node type.\n * @param {Array} nodes\n * List of nodes to wrap.\n * @param {boolean | undefined} [loose=false]\n * Whether to add line endings at start and end (default: `false`).\n * @returns {Array}\n * Wrapped nodes.\n */\nexport function wrap(nodes, loose) {\n /** @type {Array} */\n const result = []\n let index = -1\n\n if (loose) {\n result.push({type: 'text', value: '\\n'})\n }\n\n while (++index < nodes.length) {\n if (index) result.push({type: 'text', value: '\\n'})\n result.push(nodes[index])\n }\n\n if (loose && nodes.length > 0) {\n result.push({type: 'text', value: '\\n'})\n }\n\n return result\n}\n\n/**\n * Trim spaces and tabs at the start of `value`.\n *\n * @param {string} value\n * Value to trim.\n * @returns {string}\n * Result.\n */\nfunction trimMarkdownSpaceStart(value) {\n let index = 0\n let code = value.charCodeAt(index)\n\n while (code === 9 || code === 32) {\n index++\n code = value.charCodeAt(index)\n }\n\n return value.slice(index)\n}\n","/**\n * @typedef {import('hast').Nodes} HastNodes\n * @typedef {import('mdast').Nodes} MdastNodes\n * @typedef {import('./state.js').Options} Options\n */\n\nimport {ok as assert} from 'devlop'\nimport {footer} from './footer.js'\nimport {createState} from './state.js'\n\n/**\n * Transform mdast to hast.\n *\n * ##### Notes\n *\n * ###### HTML\n *\n * Raw HTML is available in mdast as `html` nodes and can be embedded in hast\n * as semistandard `raw` nodes.\n * Most utilities ignore `raw` nodes but two notable ones don’t:\n *\n * * `hast-util-to-html` also has an option `allowDangerousHtml` which will\n * output the raw HTML.\n * This is typically discouraged as noted by the option name but is useful\n * 
if you completely trust authors\n * * `hast-util-raw` can handle the raw embedded HTML strings by parsing them\n * into standard hast nodes (`element`, `text`, etc).\n * This is a heavy task as it needs a full HTML parser, but it is the only\n * way to support untrusted content\n *\n * ###### Footnotes\n *\n * Many options supported here relate to footnotes.\n * Footnotes are not specified by CommonMark, which we follow by default.\n * They are supported by GitHub, so footnotes can be enabled in markdown with\n * `mdast-util-gfm`.\n *\n * The options `footnoteBackLabel` and `footnoteLabel` define natural language\n * that explains footnotes, which is hidden for sighted users but shown to\n * assistive technology.\n * When your page is not in English, you must define translated values.\n *\n * Back references use ARIA attributes, but the section label itself uses a\n * heading that is hidden with an `sr-only` class.\n * To show it to sighted users, define different attributes in\n * `footnoteLabelProperties`.\n *\n * ###### Clobbering\n *\n * Footnotes introduces a problem, as it links footnote calls to footnote\n * definitions on the page through `id` attributes generated from user content,\n * which results in DOM clobbering.\n *\n * DOM clobbering is this:\n *\n * ```html\n *

    \n * \n * ```\n *\n * Elements by their ID are made available by browsers on the `window` object,\n * which is a security risk.\n * Using a prefix solves this problem.\n *\n * More information on how to handle clobbering and the prefix is explained in\n * Example: headings (DOM clobbering) in `rehype-sanitize`.\n *\n * ###### Unknown nodes\n *\n * Unknown nodes are nodes with a type that isn’t in `handlers` or `passThrough`.\n * The default behavior for unknown nodes is:\n *\n * * when the node has a `value` (and doesn’t have `data.hName`,\n * `data.hProperties`, or `data.hChildren`, see later), create a hast `text`\n * node\n * * otherwise, create a `
    ` element (which could be changed with\n * `data.hName`), with its children mapped from mdast to hast as well\n *\n * This behavior can be changed by passing an `unknownHandler`.\n *\n * @param {MdastNodes} tree\n * mdast tree.\n * @param {Options | null | undefined} [options]\n * Configuration (optional).\n * @returns {HastNodes}\n * hast tree.\n */\nexport function toHast(tree, options) {\n const state = createState(tree, options)\n const node = state.one(tree, undefined)\n const foot = footer(state)\n /** @type {HastNodes} */\n const result = Array.isArray(node)\n ? {type: 'root', children: node}\n : node || {type: 'root', children: []}\n\n if (foot) {\n // If there’s a footer, there were definitions, meaning block\n // content.\n // So `result` is a parent node.\n assert('children' in result)\n result.children.push({type: 'text', value: '\\n'}, foot)\n }\n\n return result\n}\n","/**\n * @import {Root as HastRoot} from 'hast'\n * @import {Root as MdastRoot} from 'mdast'\n * @import {Options as ToHastOptions} from 'mdast-util-to-hast'\n * @import {Processor} from 'unified'\n * @import {VFile} from 'vfile'\n */\n\n/**\n * @typedef {Omit} Options\n *\n * @callback TransformBridge\n * Bridge-mode.\n *\n * Runs the destination with the new hast tree.\n * Discards result.\n * @param {MdastRoot} tree\n * Tree.\n * @param {VFile} file\n * File.\n * @returns {Promise}\n * Nothing.\n *\n * @callback TransformMutate\n * Mutate-mode.\n *\n * Further transformers run on the hast tree.\n * @param {MdastRoot} tree\n * Tree.\n * @param {VFile} file\n * File.\n * @returns {HastRoot}\n * Tree (hast).\n */\n\nimport {toHast} from 'mdast-util-to-hast'\n\n/**\n * Turn markdown into HTML.\n *\n * ##### Notes\n *\n * ###### Signature\n *\n * * if a processor is given,\n * runs the (rehype) plugins used on it with a hast tree,\n * then discards the result (*bridge mode*)\n * * otherwise,\n * returns a hast tree,\n * the plugins used after `remarkRehype` are rehype plugins (*mutate 
mode*)\n *\n * > 👉 **Note**:\n * > It’s highly unlikely that you want to pass a `processor`.\n *\n * ###### HTML\n *\n * Raw HTML is available in mdast as `html` nodes and can be embedded in hast\n * as semistandard `raw` nodes.\n * Most plugins ignore `raw` nodes but two notable ones don’t:\n *\n * * `rehype-stringify` also has an option `allowDangerousHtml` which will\n * output the raw HTML.\n * This is typically discouraged as noted by the option name but is useful if\n * you completely trust authors\n * * `rehype-raw` can handle the raw embedded HTML strings by parsing them\n * into standard hast nodes (`element`, `text`, etc);\n * this is a heavy task as it needs a full HTML parser,\n * but it is the only way to support untrusted content\n *\n * ###### Footnotes\n *\n * Many options supported here relate to footnotes.\n * Footnotes are not specified by CommonMark,\n * which we follow by default.\n * They are supported by GitHub,\n * so footnotes can be enabled in markdown with `remark-gfm`.\n *\n * The options `footnoteBackLabel` and `footnoteLabel` define natural language\n * that explains footnotes,\n * which is hidden for sighted users but shown to assistive technology.\n * When your page is not in English,\n * you must define translated values.\n *\n * Back references use ARIA attributes,\n * but the section label itself uses a heading that is hidden with an\n * `sr-only` class.\n * To show it to sighted users,\n * define different attributes in `footnoteLabelProperties`.\n *\n * ###### Clobbering\n *\n * Footnotes introduces a problem,\n * as it links footnote calls to footnote definitions on the page through `id`\n * attributes generated from user content,\n * which results in DOM clobbering.\n *\n * DOM clobbering is this:\n *\n * ```html\n *

    \n * \n * ```\n *\n * Elements by their ID are made available by browsers on the `window` object,\n * which is a security risk.\n * Using a prefix solves this problem.\n *\n * More information on how to handle clobbering and the prefix is explained in\n * *Example: headings (DOM clobbering)* in `rehype-sanitize`.\n *\n * ###### Unknown nodes\n *\n * Unknown nodes are nodes with a type that isn’t in `handlers` or `passThrough`.\n * The default behavior for unknown nodes is:\n *\n * * when the node has a `value`\n * (and doesn’t have `data.hName`, `data.hProperties`, or `data.hChildren`,\n * see later),\n * create a hast `text` node\n * * otherwise,\n * create a `
    ` element (which could be changed with `data.hName`),\n * with its children mapped from mdast to hast as well\n *\n * This behavior can be changed by passing an `unknownHandler`.\n *\n * @overload\n * @param {Processor} processor\n * @param {Readonly | null | undefined} [options]\n * @returns {TransformBridge}\n *\n * @overload\n * @param {Readonly | null | undefined} [options]\n * @returns {TransformMutate}\n *\n * @overload\n * @param {Readonly | Processor | null | undefined} [destination]\n * @param {Readonly | null | undefined} [options]\n * @returns {TransformBridge | TransformMutate}\n *\n * @param {Readonly | Processor | null | undefined} [destination]\n * Processor or configuration (optional).\n * @param {Readonly | null | undefined} [options]\n * When a processor was given,\n * configuration (optional).\n * @returns {TransformBridge | TransformMutate}\n * Transform.\n */\nexport default function remarkRehype(destination, options) {\n if (destination && 'run' in destination) {\n /**\n * @type {TransformBridge}\n */\n return async function (tree, file) {\n // Cast because root in -> root out.\n const hastTree = /** @type {HastRoot} */ (\n toHast(tree, {file, ...options})\n )\n await destination.run(hastTree, file)\n }\n }\n\n /**\n * @type {TransformMutate}\n */\n return function (tree, file) {\n // Cast because root in -> root out.\n // To do: in the future, disallow ` || options` fallback.\n // With `unified-engine`, `destination` can be `undefined` but\n // `options` will be the file set.\n // We should not pass that as `options`.\n return /** @type {HastRoot} */ (\n toHast(tree, {file, ...(destination || options)})\n )\n }\n}\n","/**\n * Throw a given error.\n *\n * @param {Error|null|undefined} [error]\n * Maybe error.\n * @returns {asserts error is null|undefined}\n */\nexport function bail(error) {\n if (error) {\n throw error\n }\n}\n","export default function isPlainObject(value) {\n\tif (typeof value !== 'object' || value === null) 
{\n\t\treturn false;\n\t}\n\n\tconst prototype = Object.getPrototypeOf(value);\n\treturn (prototype === null || prototype === Object.prototype || Object.getPrototypeOf(prototype) === null) && !(Symbol.toStringTag in value) && !(Symbol.iterator in value);\n}\n","// To do: remove `void`s\n// To do: remove `null` from output of our APIs, allow it as user APIs.\n\n/**\n * @typedef {(error?: Error | null | undefined, ...output: Array) => void} Callback\n * Callback.\n *\n * @typedef {(...input: Array) => any} Middleware\n * Ware.\n *\n * @typedef Pipeline\n * Pipeline.\n * @property {Run} run\n * Run the pipeline.\n * @property {Use} use\n * Add middleware.\n *\n * @typedef {(...input: Array) => void} Run\n * Call all middleware.\n *\n * Calls `done` on completion with either an error or the output of the\n * last middleware.\n *\n * > 👉 **Note**: as the length of input defines whether async functions get a\n * > `next` function,\n * > it’s recommended to keep `input` at one value normally.\n\n *\n * @typedef {(fn: Middleware) => Pipeline} Use\n * Add middleware.\n */\n\n/**\n * Create new middleware.\n *\n * @returns {Pipeline}\n * Pipeline.\n */\nexport function trough() {\n /** @type {Array} */\n const fns = []\n /** @type {Pipeline} */\n const pipeline = {run, use}\n\n return pipeline\n\n /** @type {Run} */\n function run(...values) {\n let middlewareIndex = -1\n /** @type {Callback} */\n const callback = values.pop()\n\n if (typeof callback !== 'function') {\n throw new TypeError('Expected function as last argument, not ' + callback)\n }\n\n next(null, ...values)\n\n /**\n * Run the next `fn`, or we’re done.\n *\n * @param {Error | null | undefined} error\n * @param {Array} output\n */\n function next(error, ...output) {\n const fn = fns[++middlewareIndex]\n let index = -1\n\n if (error) {\n callback(error)\n return\n }\n\n // Copy non-nullish input into values.\n while (++index < values.length) {\n if (output[index] === null || output[index] === undefined) {\n 
output[index] = values[index]\n }\n }\n\n // Save the newly created `output` for the next call.\n values = output\n\n // Next or done.\n if (fn) {\n wrap(fn, next)(...output)\n } else {\n callback(null, ...output)\n }\n }\n }\n\n /** @type {Use} */\n function use(middelware) {\n if (typeof middelware !== 'function') {\n throw new TypeError(\n 'Expected `middelware` to be a function, not ' + middelware\n )\n }\n\n fns.push(middelware)\n return pipeline\n }\n}\n\n/**\n * Wrap `middleware` into a uniform interface.\n *\n * You can pass all input to the resulting function.\n * `callback` is then called with the output of `middleware`.\n *\n * If `middleware` accepts more arguments than the later given in input,\n * an extra `done` function is passed to it after that input,\n * which must be called by `middleware`.\n *\n * The first value in `input` is the main input value.\n * All other input values are the rest input values.\n * The values given to `callback` are the input values,\n * merged with every non-nullish output value.\n *\n * * if `middleware` throws an error,\n * returns a promise that is rejected,\n * or calls the given `done` function with an error,\n * `callback` is called with that error\n * * if `middleware` returns a value or returns a promise that is resolved,\n * that value is the main output value\n * * if `middleware` calls `done`,\n * all non-nullish values except for the first one (the error) overwrite the\n * output values\n *\n * @param {Middleware} middleware\n * Function to wrap.\n * @param {Callback} callback\n * Callback called with the output of `middleware`.\n * @returns {Run}\n * Wrapped middleware.\n */\nexport function wrap(middleware, callback) {\n /** @type {boolean} */\n let called\n\n return wrapped\n\n /**\n * Call `middleware`.\n * @this {any}\n * @param {Array} parameters\n * @returns {void}\n */\n function wrapped(...parameters) {\n const fnExpectsCallback = middleware.length > parameters.length\n /** @type {any} */\n let 
result\n\n if (fnExpectsCallback) {\n parameters.push(done)\n }\n\n try {\n result = middleware.apply(this, parameters)\n } catch (error) {\n const exception = /** @type {Error} */ (error)\n\n // Well, this is quite the pickle.\n // `middleware` received a callback and called it synchronously, but that\n // threw an error.\n // The only thing left to do is to throw the thing instead.\n if (fnExpectsCallback && called) {\n throw exception\n }\n\n return done(exception)\n }\n\n if (!fnExpectsCallback) {\n if (result && result.then && typeof result.then === 'function') {\n result.then(then, done)\n } else if (result instanceof Error) {\n done(result)\n } else {\n then(result)\n }\n }\n }\n\n /**\n * Call `callback`, only once.\n *\n * @type {Callback}\n */\n function done(error, ...output) {\n if (!called) {\n called = true\n callback(error, ...output)\n }\n }\n\n /**\n * Call `done` with one value.\n *\n * @param {any} [value]\n */\n function then(value) {\n done(null, value)\n }\n}\n","// A derivative work based on:\n// .\n// Which is licensed:\n//\n// MIT License\n//\n// Copyright (c) 2013 James Halliday\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy of\n// this software and associated documentation files (the \"Software\"), to deal in\n// the Software without restriction, including without limitation the rights to\n// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n// the Software, and to permit persons to whom the Software is furnished to do so,\n// subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in all\n// copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR\n// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n// A derivative work based on:\n//\n// Parts of that are extracted from Node’s internal `path` module:\n// .\n// Which is licensed:\n//\n// Copyright Joyent, Inc. and other Node contributors.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a\n// copy of this software and associated documentation files (the\n// \"Software\"), to deal in the Software without restriction, including\n// without limitation the rights to use, copy, modify, merge, publish,\n// distribute, sublicense, and/or sell copies of the Software, and to permit\n// persons to whom the Software is furnished to do so, subject to the\n// following conditions:\n//\n// The above copyright notice and this permission notice shall be included\n// in all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN\n// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE\n// USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nexport const minpath = {basename, dirname, extname, join, sep: '/'}\n\n/* eslint-disable max-depth, complexity */\n\n/**\n * Get the basename from a path.\n *\n * @param {string} path\n * File path.\n * @param {string | null | undefined} [extname]\n * Extension to strip.\n * @returns {string}\n * Stem or basename.\n */\nfunction basename(path, extname) {\n if (extname !== undefined && typeof extname !== 'string') {\n throw new TypeError('\"ext\" argument must be a string')\n }\n\n assertPath(path)\n let start = 0\n let end = -1\n let index = path.length\n /** @type {boolean | undefined} */\n let seenNonSlash\n\n if (\n extname === undefined ||\n extname.length === 0 ||\n extname.length > path.length\n ) {\n while (index--) {\n if (path.codePointAt(index) === 47 /* `/` */) {\n // If we reached a path separator that was not part of a set of path\n // separators at the end of the string, stop now.\n if (seenNonSlash) {\n start = index + 1\n break\n }\n } else if (end < 0) {\n // We saw the first non-path separator, mark this as the end of our\n // path component.\n seenNonSlash = true\n end = index + 1\n }\n }\n\n return end < 0 ? 
'' : path.slice(start, end)\n }\n\n if (extname === path) {\n return ''\n }\n\n let firstNonSlashEnd = -1\n let extnameIndex = extname.length - 1\n\n while (index--) {\n if (path.codePointAt(index) === 47 /* `/` */) {\n // If we reached a path separator that was not part of a set of path\n // separators at the end of the string, stop now.\n if (seenNonSlash) {\n start = index + 1\n break\n }\n } else {\n if (firstNonSlashEnd < 0) {\n // We saw the first non-path separator, remember this index in case\n // we need it if the extension ends up not matching.\n seenNonSlash = true\n firstNonSlashEnd = index + 1\n }\n\n if (extnameIndex > -1) {\n // Try to match the explicit extension.\n if (path.codePointAt(index) === extname.codePointAt(extnameIndex--)) {\n if (extnameIndex < 0) {\n // We matched the extension, so mark this as the end of our path\n // component\n end = index\n }\n } else {\n // Extension does not match, so our result is the entire path\n // component\n extnameIndex = -1\n end = firstNonSlashEnd\n }\n }\n }\n }\n\n if (start === end) {\n end = firstNonSlashEnd\n } else if (end < 0) {\n end = path.length\n }\n\n return path.slice(start, end)\n}\n\n/**\n * Get the dirname from a path.\n *\n * @param {string} path\n * File path.\n * @returns {string}\n * File path.\n */\nfunction dirname(path) {\n assertPath(path)\n\n if (path.length === 0) {\n return '.'\n }\n\n let end = -1\n let index = path.length\n /** @type {boolean | undefined} */\n let unmatchedSlash\n\n // Prefix `--` is important to not run on `0`.\n while (--index) {\n if (path.codePointAt(index) === 47 /* `/` */) {\n if (unmatchedSlash) {\n end = index\n break\n }\n } else if (!unmatchedSlash) {\n // We saw the first non-path separator\n unmatchedSlash = true\n }\n }\n\n return end < 0\n ? path.codePointAt(0) === 47 /* `/` */\n ? '/'\n : '.'\n : end === 1 && path.codePointAt(0) === 47 /* `/` */\n ? 
'//'\n : path.slice(0, end)\n}\n\n/**\n * Get an extname from a path.\n *\n * @param {string} path\n * File path.\n * @returns {string}\n * Extname.\n */\nfunction extname(path) {\n assertPath(path)\n\n let index = path.length\n\n let end = -1\n let startPart = 0\n let startDot = -1\n // Track the state of characters (if any) we see before our first dot and\n // after any path separator we find.\n let preDotState = 0\n /** @type {boolean | undefined} */\n let unmatchedSlash\n\n while (index--) {\n const code = path.codePointAt(index)\n\n if (code === 47 /* `/` */) {\n // If we reached a path separator that was not part of a set of path\n // separators at the end of the string, stop now.\n if (unmatchedSlash) {\n startPart = index + 1\n break\n }\n\n continue\n }\n\n if (end < 0) {\n // We saw the first non-path separator, mark this as the end of our\n // extension.\n unmatchedSlash = true\n end = index + 1\n }\n\n if (code === 46 /* `.` */) {\n // If this is our first dot, mark it as the start of our extension.\n if (startDot < 0) {\n startDot = index\n } else if (preDotState !== 1) {\n preDotState = 1\n }\n } else if (startDot > -1) {\n // We saw a non-dot and non-path separator before our dot, so we should\n // have a good chance at having a non-empty extension.\n preDotState = -1\n }\n }\n\n if (\n startDot < 0 ||\n end < 0 ||\n // We saw a non-dot character immediately before the dot.\n preDotState === 0 ||\n // The (right-most) trimmed path component is exactly `..`.\n (preDotState === 1 && startDot === end - 1 && startDot === startPart + 1)\n ) {\n return ''\n }\n\n return path.slice(startDot, end)\n}\n\n/**\n * Join segments from a path.\n *\n * @param {Array} segments\n * Path segments.\n * @returns {string}\n * File path.\n */\nfunction join(...segments) {\n let index = -1\n /** @type {string | undefined} */\n let joined\n\n while (++index < segments.length) {\n assertPath(segments[index])\n\n if (segments[index]) {\n joined =\n joined === undefined ? 
segments[index] : joined + '/' + segments[index]\n }\n }\n\n return joined === undefined ? '.' : normalize(joined)\n}\n\n/**\n * Normalize a basic file path.\n *\n * @param {string} path\n * File path.\n * @returns {string}\n * File path.\n */\n// Note: `normalize` is not exposed as `path.normalize`, so some code is\n// manually removed from it.\nfunction normalize(path) {\n assertPath(path)\n\n const absolute = path.codePointAt(0) === 47 /* `/` */\n\n // Normalize the path according to POSIX rules.\n let value = normalizeString(path, !absolute)\n\n if (value.length === 0 && !absolute) {\n value = '.'\n }\n\n if (value.length > 0 && path.codePointAt(path.length - 1) === 47 /* / */) {\n value += '/'\n }\n\n return absolute ? '/' + value : value\n}\n\n/**\n * Resolve `.` and `..` elements in a path with directory names.\n *\n * @param {string} path\n * File path.\n * @param {boolean} allowAboveRoot\n * Whether `..` can move above root.\n * @returns {string}\n * File path.\n */\nfunction normalizeString(path, allowAboveRoot) {\n let result = ''\n let lastSegmentLength = 0\n let lastSlash = -1\n let dots = 0\n let index = -1\n /** @type {number | undefined} */\n let code\n /** @type {number} */\n let lastSlashIndex\n\n while (++index <= path.length) {\n if (index < path.length) {\n code = path.codePointAt(index)\n } else if (code === 47 /* `/` */) {\n break\n } else {\n code = 47 /* `/` */\n }\n\n if (code === 47 /* `/` */) {\n if (lastSlash === index - 1 || dots === 1) {\n // Empty.\n } else if (lastSlash !== index - 1 && dots === 2) {\n if (\n result.length < 2 ||\n lastSegmentLength !== 2 ||\n result.codePointAt(result.length - 1) !== 46 /* `.` */ ||\n result.codePointAt(result.length - 2) !== 46 /* `.` */\n ) {\n if (result.length > 2) {\n lastSlashIndex = result.lastIndexOf('/')\n\n if (lastSlashIndex !== result.length - 1) {\n if (lastSlashIndex < 0) {\n result = ''\n lastSegmentLength = 0\n } else {\n result = result.slice(0, lastSlashIndex)\n lastSegmentLength 
= result.length - 1 - result.lastIndexOf('/')\n }\n\n lastSlash = index\n dots = 0\n continue\n }\n } else if (result.length > 0) {\n result = ''\n lastSegmentLength = 0\n lastSlash = index\n dots = 0\n continue\n }\n }\n\n if (allowAboveRoot) {\n result = result.length > 0 ? result + '/..' : '..'\n lastSegmentLength = 2\n }\n } else {\n if (result.length > 0) {\n result += '/' + path.slice(lastSlash + 1, index)\n } else {\n result = path.slice(lastSlash + 1, index)\n }\n\n lastSegmentLength = index - lastSlash - 1\n }\n\n lastSlash = index\n dots = 0\n } else if (code === 46 /* `.` */ && dots > -1) {\n dots++\n } else {\n dots = -1\n }\n }\n\n return result\n}\n\n/**\n * Make sure `path` is a string.\n *\n * @param {string} path\n * File path.\n * @returns {asserts path is string}\n * Nothing.\n */\nfunction assertPath(path) {\n if (typeof path !== 'string') {\n throw new TypeError(\n 'Path must be a string. Received ' + JSON.stringify(path)\n )\n }\n}\n\n/* eslint-enable max-depth, complexity */\n","// Somewhat based on:\n// .\n// But I don’t think one tiny line of code can be copyrighted. 😅\nexport const minproc = {cwd}\n\nfunction cwd() {\n return '/'\n}\n","/**\n * Checks if a value has the shape of a WHATWG URL object.\n *\n * Using a symbol or instanceof would not be able to recognize URL objects\n * coming from other implementations (e.g. 
in Electron), so instead we are\n * checking some well known properties for a lack of a better test.\n *\n * We use `href` and `protocol` as they are the only properties that are\n * easy to retrieve and calculate due to the lazy nature of the getters.\n *\n * We check for auth attribute to distinguish legacy url instance with\n * WHATWG URL instance.\n *\n * @param {unknown} fileUrlOrPath\n * File path or URL.\n * @returns {fileUrlOrPath is URL}\n * Whether it’s a URL.\n */\n// From: \nexport function isUrl(fileUrlOrPath) {\n return Boolean(\n fileUrlOrPath !== null &&\n typeof fileUrlOrPath === 'object' &&\n 'href' in fileUrlOrPath &&\n fileUrlOrPath.href &&\n 'protocol' in fileUrlOrPath &&\n fileUrlOrPath.protocol &&\n // @ts-expect-error: indexing is fine.\n fileUrlOrPath.auth === undefined\n )\n}\n","import {isUrl} from './minurl.shared.js'\n\nexport {isUrl} from './minurl.shared.js'\n\n// See: \n\n/**\n * @param {URL | string} path\n * File URL.\n * @returns {string}\n * File URL.\n */\nexport function urlToPath(path) {\n if (typeof path === 'string') {\n path = new URL(path)\n } else if (!isUrl(path)) {\n /** @type {NodeJS.ErrnoException} */\n const error = new TypeError(\n 'The \"path\" argument must be of type string or an instance of URL. 
Received `' +\n path +\n '`'\n )\n error.code = 'ERR_INVALID_ARG_TYPE'\n throw error\n }\n\n if (path.protocol !== 'file:') {\n /** @type {NodeJS.ErrnoException} */\n const error = new TypeError('The URL must be of scheme file')\n error.code = 'ERR_INVALID_URL_SCHEME'\n throw error\n }\n\n return getPathFromURLPosix(path)\n}\n\n/**\n * Get a path from a POSIX URL.\n *\n * @param {URL} url\n * URL.\n * @returns {string}\n * File path.\n */\nfunction getPathFromURLPosix(url) {\n if (url.hostname !== '') {\n /** @type {NodeJS.ErrnoException} */\n const error = new TypeError(\n 'File URL host must be \"localhost\" or empty on darwin'\n )\n error.code = 'ERR_INVALID_FILE_URL_HOST'\n throw error\n }\n\n const pathname = url.pathname\n let index = -1\n\n while (++index < pathname.length) {\n if (\n pathname.codePointAt(index) === 37 /* `%` */ &&\n pathname.codePointAt(index + 1) === 50 /* `2` */\n ) {\n const third = pathname.codePointAt(index + 2)\n if (third === 70 /* `F` */ || third === 102 /* `f` */) {\n /** @type {NodeJS.ErrnoException} */\n const error = new TypeError(\n 'File URL path must not include encoded / characters'\n )\n error.code = 'ERR_INVALID_FILE_URL_PATH'\n throw error\n }\n }\n }\n\n return decodeURIComponent(pathname)\n}\n","/**\n * @import {Node, Point, Position} from 'unist'\n * @import {Options as MessageOptions} from 'vfile-message'\n * @import {Compatible, Data, Map, Options, Value} from 'vfile'\n */\n\n/**\n * @typedef {object & {type: string, position?: Position | undefined}} NodeLike\n */\n\nimport {VFileMessage} from 'vfile-message'\nimport {minpath} from '#minpath'\nimport {minproc} from '#minproc'\nimport {urlToPath, isUrl} from '#minurl'\n\n/**\n * Order of setting (least specific to most), we need this because otherwise\n * `{stem: 'a', path: '~/b.js'}` would throw, as a path is needed before a\n * stem can be set.\n */\nconst order = /** @type {const} */ ([\n 'history',\n 'path',\n 'basename',\n 'stem',\n 'extname',\n 
'dirname'\n])\n\nexport class VFile {\n /**\n * Create a new virtual file.\n *\n * `options` is treated as:\n *\n * * `string` or `Uint8Array` — `{value: options}`\n * * `URL` — `{path: options}`\n * * `VFile` — shallow copies its data over to the new file\n * * `object` — all fields are shallow copied over to the new file\n *\n * Path related fields are set in the following order (least specific to\n * most specific): `history`, `path`, `basename`, `stem`, `extname`,\n * `dirname`.\n *\n * You cannot set `dirname` or `extname` without setting either `history`,\n * `path`, `basename`, or `stem` too.\n *\n * @param {Compatible | null | undefined} [value]\n * File value.\n * @returns\n * New instance.\n */\n constructor(value) {\n /** @type {Options | VFile} */\n let options\n\n if (!value) {\n options = {}\n } else if (isUrl(value)) {\n options = {path: value}\n } else if (typeof value === 'string' || isUint8Array(value)) {\n options = {value}\n } else {\n options = value\n }\n\n /* eslint-disable no-unused-expressions */\n\n /**\n * Base of `path` (default: `process.cwd()` or `'/'` in browsers).\n *\n * @type {string}\n */\n // Prevent calling `cwd` (which could be expensive) if it’s not needed;\n // the empty string will be overridden in the next block.\n this.cwd = 'cwd' in options ? 
'' : minproc.cwd()\n\n /**\n * Place to store custom info (default: `{}`).\n *\n * It’s OK to store custom data directly on the file but moving it to\n * `data` is recommended.\n *\n * @type {Data}\n */\n this.data = {}\n\n /**\n * List of file paths the file moved between.\n *\n * The first is the original path and the last is the current path.\n *\n * @type {Array}\n */\n this.history = []\n\n /**\n * List of messages associated with the file.\n *\n * @type {Array}\n */\n this.messages = []\n\n /**\n * Raw value.\n *\n * @type {Value}\n */\n this.value\n\n // The below are non-standard, they are “well-known”.\n // As in, used in several tools.\n /**\n * Source map.\n *\n * This type is equivalent to the `RawSourceMap` type from the `source-map`\n * module.\n *\n * @type {Map | null | undefined}\n */\n this.map\n\n /**\n * Custom, non-string, compiled, representation.\n *\n * This is used by unified to store non-string results.\n * One example is when turning markdown into React nodes.\n *\n * @type {unknown}\n */\n this.result\n\n /**\n * Whether a file was saved to disk.\n *\n * This is used by vfile reporters.\n *\n * @type {boolean}\n */\n this.stored\n /* eslint-enable no-unused-expressions */\n\n // Set path related properties in the correct order.\n let index = -1\n\n while (++index < order.length) {\n const field = order[index]\n\n // Note: we specifically use `in` instead of `hasOwnProperty` to accept\n // `vfile`s too.\n if (\n field in options &&\n options[field] !== undefined &&\n options[field] !== null\n ) {\n // @ts-expect-error: TS doesn’t understand basic reality.\n this[field] = field === 'history' ? 
[...options[field]] : options[field]\n }\n }\n\n /** @type {string} */\n let field\n\n // Set non-path related properties.\n for (field in options) {\n // @ts-expect-error: fine to set other things.\n if (!order.includes(field)) {\n // @ts-expect-error: fine to set other things.\n this[field] = options[field]\n }\n }\n }\n\n /**\n * Get the basename (including extname) (example: `'index.min.js'`).\n *\n * @returns {string | undefined}\n * Basename.\n */\n get basename() {\n return typeof this.path === 'string'\n ? minpath.basename(this.path)\n : undefined\n }\n\n /**\n * Set basename (including extname) (`'index.min.js'`).\n *\n * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\\'`\n * on windows).\n * Cannot be nullified (use `file.path = file.dirname` instead).\n *\n * @param {string} basename\n * Basename.\n * @returns {undefined}\n * Nothing.\n */\n set basename(basename) {\n assertNonEmpty(basename, 'basename')\n assertPart(basename, 'basename')\n this.path = minpath.join(this.dirname || '', basename)\n }\n\n /**\n * Get the parent path (example: `'~'`).\n *\n * @returns {string | undefined}\n * Dirname.\n */\n get dirname() {\n return typeof this.path === 'string'\n ? minpath.dirname(this.path)\n : undefined\n }\n\n /**\n * Set the parent path (example: `'~'`).\n *\n * Cannot be set if there’s no `path` yet.\n *\n * @param {string | undefined} dirname\n * Dirname.\n * @returns {undefined}\n * Nothing.\n */\n set dirname(dirname) {\n assertPath(this.basename, 'dirname')\n this.path = minpath.join(dirname || '', this.basename)\n }\n\n /**\n * Get the extname (including dot) (example: `'.js'`).\n *\n * @returns {string | undefined}\n * Extname.\n */\n get extname() {\n return typeof this.path === 'string'\n ? 
minpath.extname(this.path)\n : undefined\n }\n\n /**\n * Set the extname (including dot) (example: `'.js'`).\n *\n * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\\'`\n * on windows).\n * Cannot be set if there’s no `path` yet.\n *\n * @param {string | undefined} extname\n * Extname.\n * @returns {undefined}\n * Nothing.\n */\n set extname(extname) {\n assertPart(extname, 'extname')\n assertPath(this.dirname, 'extname')\n\n if (extname) {\n if (extname.codePointAt(0) !== 46 /* `.` */) {\n throw new Error('`extname` must start with `.`')\n }\n\n if (extname.includes('.', 1)) {\n throw new Error('`extname` cannot contain multiple dots')\n }\n }\n\n this.path = minpath.join(this.dirname, this.stem + (extname || ''))\n }\n\n /**\n * Get the full path (example: `'~/index.min.js'`).\n *\n * @returns {string}\n * Path.\n */\n get path() {\n return this.history[this.history.length - 1]\n }\n\n /**\n * Set the full path (example: `'~/index.min.js'`).\n *\n * Cannot be nullified.\n * You can set a file URL (a `URL` object with a `file:` protocol) which will\n * be turned into a path with `url.fileURLToPath`.\n *\n * @param {URL | string} path\n * Path.\n * @returns {undefined}\n * Nothing.\n */\n set path(path) {\n if (isUrl(path)) {\n path = urlToPath(path)\n }\n\n assertNonEmpty(path, 'path')\n\n if (this.path !== path) {\n this.history.push(path)\n }\n }\n\n /**\n * Get the stem (basename w/o extname) (example: `'index.min'`).\n *\n * @returns {string | undefined}\n * Stem.\n */\n get stem() {\n return typeof this.path === 'string'\n ? 
minpath.basename(this.path, this.extname)\n : undefined\n }\n\n /**\n * Set the stem (basename w/o extname) (example: `'index.min'`).\n *\n * Cannot contain path separators (`'/'` on unix, macOS, and browsers, `'\\'`\n * on windows).\n * Cannot be nullified (use `file.path = file.dirname` instead).\n *\n * @param {string} stem\n * Stem.\n * @returns {undefined}\n * Nothing.\n */\n set stem(stem) {\n assertNonEmpty(stem, 'stem')\n assertPart(stem, 'stem')\n this.path = minpath.join(this.dirname || '', stem + (this.extname || ''))\n }\n\n // Normal prototypal methods.\n /**\n * Create a fatal message for `reason` associated with the file.\n *\n * The `fatal` field of the message is set to `true` (error; file not usable)\n * and the `file` field is set to the current file path.\n * The message is added to the `messages` field on `file`.\n *\n * > 🪦 **Note**: also has obsolete signatures.\n *\n * @overload\n * @param {string} reason\n * @param {MessageOptions | null | undefined} [options]\n * @returns {never}\n *\n * @overload\n * @param {string} reason\n * @param {Node | NodeLike | null | undefined} parent\n * @param {string | null | undefined} [origin]\n * @returns {never}\n *\n * @overload\n * @param {string} reason\n * @param {Point | Position | null | undefined} place\n * @param {string | null | undefined} [origin]\n * @returns {never}\n *\n * @overload\n * @param {string} reason\n * @param {string | null | undefined} [origin]\n * @returns {never}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {Node | NodeLike | null | undefined} parent\n * @param {string | null | undefined} [origin]\n * @returns {never}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {Point | Position | null | undefined} place\n * @param {string | null | undefined} [origin]\n * @returns {never}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {string | null | undefined} [origin]\n * @returns {never}\n *\n * @param {Error | 
VFileMessage | string} causeOrReason\n * Reason for message, should use markdown.\n * @param {Node | NodeLike | MessageOptions | Point | Position | string | null | undefined} [optionsOrParentOrPlace]\n * Configuration (optional).\n * @param {string | null | undefined} [origin]\n * Place in code where the message originates (example:\n * `'my-package:my-rule'` or `'my-rule'`).\n * @returns {never}\n * Never.\n * @throws {VFileMessage}\n * Message.\n */\n fail(causeOrReason, optionsOrParentOrPlace, origin) {\n // @ts-expect-error: the overloads are fine.\n const message = this.message(causeOrReason, optionsOrParentOrPlace, origin)\n\n message.fatal = true\n\n throw message\n }\n\n /**\n * Create an info message for `reason` associated with the file.\n *\n * The `fatal` field of the message is set to `undefined` (info; change\n * likely not needed) and the `file` field is set to the current file path.\n * The message is added to the `messages` field on `file`.\n *\n * > 🪦 **Note**: also has obsolete signatures.\n *\n * @overload\n * @param {string} reason\n * @param {MessageOptions | null | undefined} [options]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {string} reason\n * @param {Node | NodeLike | null | undefined} parent\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {string} reason\n * @param {Point | Position | null | undefined} place\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {string} reason\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {Node | NodeLike | null | undefined} parent\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {Point | Position | null | undefined} place\n * @param {string | null | undefined} [origin]\n * @returns 
{VFileMessage}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @param {Error | VFileMessage | string} causeOrReason\n * Reason for message, should use markdown.\n * @param {Node | NodeLike | MessageOptions | Point | Position | string | null | undefined} [optionsOrParentOrPlace]\n * Configuration (optional).\n * @param {string | null | undefined} [origin]\n * Place in code where the message originates (example:\n * `'my-package:my-rule'` or `'my-rule'`).\n * @returns {VFileMessage}\n * Message.\n */\n info(causeOrReason, optionsOrParentOrPlace, origin) {\n // @ts-expect-error: the overloads are fine.\n const message = this.message(causeOrReason, optionsOrParentOrPlace, origin)\n\n message.fatal = undefined\n\n return message\n }\n\n /**\n * Create a message for `reason` associated with the file.\n *\n * The `fatal` field of the message is set to `false` (warning; change may be\n * needed) and the `file` field is set to the current file path.\n * The message is added to the `messages` field on `file`.\n *\n * > 🪦 **Note**: also has obsolete signatures.\n *\n * @overload\n * @param {string} reason\n * @param {MessageOptions | null | undefined} [options]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {string} reason\n * @param {Node | NodeLike | null | undefined} parent\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {string} reason\n * @param {Point | Position | null | undefined} place\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {string} reason\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {Node | NodeLike | null | undefined} parent\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {Error | 
VFileMessage} cause\n * @param {Point | Position | null | undefined} place\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @overload\n * @param {Error | VFileMessage} cause\n * @param {string | null | undefined} [origin]\n * @returns {VFileMessage}\n *\n * @param {Error | VFileMessage | string} causeOrReason\n * Reason for message, should use markdown.\n * @param {Node | NodeLike | MessageOptions | Point | Position | string | null | undefined} [optionsOrParentOrPlace]\n * Configuration (optional).\n * @param {string | null | undefined} [origin]\n * Place in code where the message originates (example:\n * `'my-package:my-rule'` or `'my-rule'`).\n * @returns {VFileMessage}\n * Message.\n */\n message(causeOrReason, optionsOrParentOrPlace, origin) {\n const message = new VFileMessage(\n // @ts-expect-error: the overloads are fine.\n causeOrReason,\n optionsOrParentOrPlace,\n origin\n )\n\n if (this.path) {\n message.name = this.path + ':' + message.name\n message.file = this.path\n }\n\n message.fatal = false\n\n this.messages.push(message)\n\n return message\n }\n\n /**\n * Serialize the file.\n *\n * > **Note**: which encodings are supported depends on the engine.\n * > For info on Node.js, see:\n * > .\n *\n * @param {string | null | undefined} [encoding='utf8']\n * Character encoding to understand `value` as when it’s a `Uint8Array`\n * (default: `'utf-8'`).\n * @returns {string}\n * Serialized file.\n */\n toString(encoding) {\n if (this.value === undefined) {\n return ''\n }\n\n if (typeof this.value === 'string') {\n return this.value\n }\n\n const decoder = new TextDecoder(encoding || undefined)\n return decoder.decode(this.value)\n }\n}\n\n/**\n * Assert that `part` is not a path (as in, does not contain `path.sep`).\n *\n * @param {string | null | undefined} part\n * File path part.\n * @param {string} name\n * Part name.\n * @returns {undefined}\n * Nothing.\n */\nfunction assertPart(part, name) {\n if (part && 
part.includes(minpath.sep)) {\n throw new Error(\n '`' + name + '` cannot be a path: did not expect `' + minpath.sep + '`'\n )\n }\n}\n\n/**\n * Assert that `part` is not empty.\n *\n * @param {string | undefined} part\n * Thing.\n * @param {string} name\n * Part name.\n * @returns {asserts part is string}\n * Nothing.\n */\nfunction assertNonEmpty(part, name) {\n if (!part) {\n throw new Error('`' + name + '` cannot be empty')\n }\n}\n\n/**\n * Assert `path` exists.\n *\n * @param {string | undefined} path\n * Path.\n * @param {string} name\n * Dependency name.\n * @returns {asserts path is string}\n * Nothing.\n */\nfunction assertPath(path, name) {\n if (!path) {\n throw new Error('Setting `' + name + '` requires `path` to be set too')\n }\n}\n\n/**\n * Assert `value` is an `Uint8Array`.\n *\n * @param {unknown} value\n * thing.\n * @returns {value is Uint8Array}\n * Whether `value` is an `Uint8Array`.\n */\nfunction isUint8Array(value) {\n return Boolean(\n value &&\n typeof value === 'object' &&\n 'byteLength' in value &&\n 'byteOffset' in value\n )\n}\n","export const CallableInstance =\n /**\n * @type {new , Result>(property: string | symbol) => (...parameters: Parameters) => Result}\n */\n (\n /** @type {unknown} */\n (\n /**\n * @this {Function}\n * @param {string | symbol} property\n * @returns {(...parameters: Array) => unknown}\n */\n function (property) {\n const self = this\n const constr = self.constructor\n const proto = /** @type {Record} */ (\n // Prototypes do exist.\n // type-coverage:ignore-next-line\n constr.prototype\n )\n const value = proto[property]\n /** @type {(...parameters: Array) => unknown} */\n const apply = function () {\n return value.apply(apply, arguments)\n }\n\n Object.setPrototypeOf(apply, proto)\n\n // Not needed for us in `unified`: we only call this on the `copy`\n // function,\n // and we don't need to add its fields (`length`, `name`)\n // over.\n // See also: GH-246.\n // const names = 
Object.getOwnPropertyNames(value)\n //\n // for (const p of names) {\n // const descriptor = Object.getOwnPropertyDescriptor(value, p)\n // if (descriptor) Object.defineProperty(apply, p, descriptor)\n // }\n\n return apply\n }\n )\n )\n","/**\n * @typedef {import('trough').Pipeline} Pipeline\n *\n * @typedef {import('unist').Node} Node\n *\n * @typedef {import('vfile').Compatible} Compatible\n * @typedef {import('vfile').Value} Value\n *\n * @typedef {import('../index.js').CompileResultMap} CompileResultMap\n * @typedef {import('../index.js').Data} Data\n * @typedef {import('../index.js').Settings} Settings\n */\n\n/**\n * @typedef {CompileResultMap[keyof CompileResultMap]} CompileResults\n * Acceptable results from compilers.\n *\n * To register custom results, add them to\n * {@linkcode CompileResultMap}.\n */\n\n/**\n * @template {Node} [Tree=Node]\n * The node that the compiler receives (default: `Node`).\n * @template {CompileResults} [Result=CompileResults]\n * The thing that the compiler yields (default: `CompileResults`).\n * @callback Compiler\n * A **compiler** handles the compiling of a syntax tree to something else\n * (in most cases, text) (TypeScript type).\n *\n * It is used in the stringify phase and called with a {@linkcode Node}\n * and {@linkcode VFile} representation of the document to compile.\n * It should return the textual representation of the given tree (typically\n * `string`).\n *\n * > **Note**: unified typically compiles by serializing: most compilers\n * > return `string` (or `Uint8Array`).\n * > Some compilers, such as the one configured with\n * > [`rehype-react`][rehype-react], return other values (in this case, a\n * > React tree).\n * > If you’re using a compiler that doesn’t serialize, expect different\n * > result values.\n * >\n * > To register custom results in TypeScript, add them to\n * > {@linkcode CompileResultMap}.\n *\n * [rehype-react]: https://github.com/rehypejs/rehype-react\n * @param {Tree} tree\n * Tree to 
compile.\n * @param {VFile} file\n * File associated with `tree`.\n * @returns {Result}\n * New content: compiled text (`string` or `Uint8Array`, for `file.value`) or\n * something else (for `file.result`).\n */\n\n/**\n * @template {Node} [Tree=Node]\n * The node that the parser yields (default: `Node`)\n * @callback Parser\n * A **parser** handles the parsing of text to a syntax tree.\n *\n * It is used in the parse phase and is called with a `string` and\n * {@linkcode VFile} of the document to parse.\n * It must return the syntax tree representation of the given file\n * ({@linkcode Node}).\n * @param {string} document\n * Document to parse.\n * @param {VFile} file\n * File associated with `document`.\n * @returns {Tree}\n * Node representing the given file.\n */\n\n/**\n * @typedef {(\n * Plugin, any, any> |\n * PluginTuple, any, any> |\n * Preset\n * )} Pluggable\n * Union of the different ways to add plugins and settings.\n */\n\n/**\n * @typedef {Array} PluggableList\n * List of plugins and presets.\n */\n\n// Note: we can’t use `callback` yet as it messes up `this`:\n// .\n/**\n * @template {Array} [PluginParameters=[]]\n * Arguments passed to the plugin (default: `[]`, the empty tuple).\n * @template {Node | string | undefined} [Input=Node]\n * Value that is expected as input (default: `Node`).\n *\n * * If the plugin returns a {@linkcode Transformer}, this\n * should be the node it expects.\n * * If the plugin sets a {@linkcode Parser}, this should be\n * `string`.\n * * If the plugin sets a {@linkcode Compiler}, this should be the\n * node it expects.\n * @template [Output=Input]\n * Value that is yielded as output (default: `Input`).\n *\n * * If the plugin returns a {@linkcode Transformer}, this\n * should be the node that that yields.\n * * If the plugin sets a {@linkcode Parser}, this should be the\n * node that it yields.\n * * If the plugin sets a {@linkcode Compiler}, this should be\n * result it yields.\n * @typedef {(\n * (this: Processor, 
...parameters: PluginParameters) =>\n * Input extends string ? // Parser.\n * Output extends Node | undefined ? undefined | void : never :\n * Output extends CompileResults ? // Compiler.\n * Input extends Node | undefined ? undefined | void : never :\n * Transformer<\n * Input extends Node ? Input : Node,\n * Output extends Node ? Output : Node\n * > | undefined | void\n * )} Plugin\n * Single plugin.\n *\n * Plugins configure the processors they are applied on in the following\n * ways:\n *\n * * they change the processor, such as the parser, the compiler, or by\n * configuring data\n * * they specify how to handle trees and files\n *\n * In practice, they are functions that can receive options and configure the\n * processor (`this`).\n *\n * > **Note**: plugins are called when the processor is *frozen*, not when\n * > they are applied.\n */\n\n/**\n * Tuple of a plugin and its configuration.\n *\n * The first item is a plugin, the rest are its parameters.\n *\n * @template {Array} [TupleParameters=[]]\n * Arguments passed to the plugin (default: `[]`, the empty tuple).\n * @template {Node | string | undefined} [Input=undefined]\n * Value that is expected as input (optional).\n *\n * * If the plugin returns a {@linkcode Transformer}, this\n * should be the node it expects.\n * * If the plugin sets a {@linkcode Parser}, this should be\n * `string`.\n * * If the plugin sets a {@linkcode Compiler}, this should be the\n * node it expects.\n * @template [Output=undefined] (optional).\n * Value that is yielded as output.\n *\n * * If the plugin returns a {@linkcode Transformer}, this\n * should be the node that that yields.\n * * If the plugin sets a {@linkcode Parser}, this should be the\n * node that it yields.\n * * If the plugin sets a {@linkcode Compiler}, this should be\n * result it yields.\n * @typedef {(\n * [\n * plugin: Plugin,\n * ...parameters: TupleParameters\n * ]\n * )} PluginTuple\n */\n\n/**\n * @typedef Preset\n * Sharable configuration.\n *\n * 
They can contain plugins and settings.\n * @property {PluggableList | undefined} [plugins]\n * List of plugins and presets (optional).\n * @property {Settings | undefined} [settings]\n * Shared settings for parsers and compilers (optional).\n */\n\n/**\n * @template {VFile} [File=VFile]\n * The file that the callback receives (default: `VFile`).\n * @callback ProcessCallback\n * Callback called when the process is done.\n *\n * Called with either an error or a result.\n * @param {Error | undefined} [error]\n * Fatal error (optional).\n * @param {File | undefined} [file]\n * Processed file (optional).\n * @returns {undefined}\n * Nothing.\n */\n\n/**\n * @template {Node} [Tree=Node]\n * The tree that the callback receives (default: `Node`).\n * @callback RunCallback\n * Callback called when transformers are done.\n *\n * Called with either an error or results.\n * @param {Error | undefined} [error]\n * Fatal error (optional).\n * @param {Tree | undefined} [tree]\n * Transformed tree (optional).\n * @param {VFile | undefined} [file]\n * File (optional).\n * @returns {undefined}\n * Nothing.\n */\n\n/**\n * @template {Node} [Output=Node]\n * Node type that the transformer yields (default: `Node`).\n * @callback TransformCallback\n * Callback passed to transforms.\n *\n * If the signature of a `transformer` accepts a third argument, the\n * transformer may perform asynchronous operations, and must call it.\n * @param {Error | undefined} [error]\n * Fatal error to stop the process (optional).\n * @param {Output | undefined} [tree]\n * New, changed, tree (optional).\n * @param {VFile | undefined} [file]\n * New, changed, file (optional).\n * @returns {undefined}\n * Nothing.\n */\n\n/**\n * @template {Node} [Input=Node]\n * Node type that the transformer expects (default: `Node`).\n * @template {Node} [Output=Input]\n * Node type that the transformer yields (default: `Input`).\n * @callback Transformer\n * Transformers handle syntax trees and files.\n *\n * They are 
functions that are called each time a syntax tree and file are\n * passed through the run phase.\n * When an error occurs in them (either because it’s thrown, returned,\n * rejected, or passed to `next`), the process stops.\n *\n * The run phase is handled by [`trough`][trough], see its documentation for\n * the exact semantics of these functions.\n *\n * > **Note**: you should likely ignore `next`: don’t accept it.\n * > it supports callback-style async work.\n * > But promises are likely easier to reason about.\n *\n * [trough]: https://github.com/wooorm/trough#function-fninput-next\n * @param {Input} tree\n * Tree to handle.\n * @param {VFile} file\n * File to handle.\n * @param {TransformCallback} next\n * Callback.\n * @returns {(\n * Promise |\n * Promise | // For some reason this is needed separately.\n * Output |\n * Error |\n * undefined |\n * void\n * )}\n * If you accept `next`, nothing.\n * Otherwise:\n *\n * * `Error` — fatal error to stop the process\n * * `Promise` or `undefined` — the next transformer keeps using\n * same tree\n * * `Promise` or `Node` — new, changed, tree\n */\n\n/**\n * @template {Node | undefined} ParseTree\n * Output of `parse`.\n * @template {Node | undefined} HeadTree\n * Input for `run`.\n * @template {Node | undefined} TailTree\n * Output for `run`.\n * @template {Node | undefined} CompileTree\n * Input of `stringify`.\n * @template {CompileResults | undefined} CompileResult\n * Output of `stringify`.\n * @template {Node | string | undefined} Input\n * Input of plugin.\n * @template Output\n * Output of plugin (optional).\n * @typedef {(\n * Input extends string\n * ? Output extends Node | undefined\n * ? // Parser.\n * Processor<\n * Output extends undefined ? ParseTree : Output,\n * HeadTree,\n * TailTree,\n * CompileTree,\n * CompileResult\n * >\n * : // Unknown.\n * Processor\n * : Output extends CompileResults\n * ? Input extends Node | undefined\n * ? 
// Compiler.\n * Processor<\n * ParseTree,\n * HeadTree,\n * TailTree,\n * Input extends undefined ? CompileTree : Input,\n * Output extends undefined ? CompileResult : Output\n * >\n * : // Unknown.\n * Processor\n * : Input extends Node | undefined\n * ? Output extends Node | undefined\n * ? // Transform.\n * Processor<\n * ParseTree,\n * HeadTree extends undefined ? Input : HeadTree,\n * Output extends undefined ? TailTree : Output,\n * CompileTree,\n * CompileResult\n * >\n * : // Unknown.\n * Processor\n * : // Unknown.\n * Processor\n * )} UsePlugin\n * Create a processor based on the input/output of a {@link Plugin plugin}.\n */\n\n/**\n * @template {CompileResults | undefined} Result\n * Node type that the transformer yields.\n * @typedef {(\n * Result extends Value | undefined ?\n * VFile :\n * VFile & {result: Result}\n * )} VFileWithOutput\n * Type to generate a {@linkcode VFile} corresponding to a compiler result.\n *\n * If a result that is not acceptable on a `VFile` is used, that will\n * be stored on the `result` field of {@linkcode VFile}.\n */\n\nimport {bail} from 'bail'\nimport extend from 'extend'\nimport {ok as assert} from 'devlop'\nimport isPlainObj from 'is-plain-obj'\nimport {trough} from 'trough'\nimport {VFile} from 'vfile'\nimport {CallableInstance} from './callable-instance.js'\n\n// To do: next major: drop `Compiler`, `Parser`: prefer lowercase.\n\n// To do: we could start yielding `never` in TS when a parser is missing and\n// `parse` is called.\n// Currently, we allow directly setting `processor.parser`, which is untyped.\n\nconst own = {}.hasOwnProperty\n\n/**\n * @template {Node | undefined} [ParseTree=undefined]\n * Output of `parse` (optional).\n * @template {Node | undefined} [HeadTree=undefined]\n * Input for `run` (optional).\n * @template {Node | undefined} [TailTree=undefined]\n * Output for `run` (optional).\n * @template {Node | undefined} [CompileTree=undefined]\n * Input of `stringify` (optional).\n * @template 
{CompileResults | undefined} [CompileResult=undefined]\n * Output of `stringify` (optional).\n * @extends {CallableInstance<[], Processor>}\n */\nexport class Processor extends CallableInstance {\n /**\n * Create a processor.\n */\n constructor() {\n // If `Processor()` is called (w/o new), `copy` is called instead.\n super('copy')\n\n /**\n * Compiler to use (deprecated).\n *\n * @deprecated\n * Use `compiler` instead.\n * @type {(\n * Compiler<\n * CompileTree extends undefined ? Node : CompileTree,\n * CompileResult extends undefined ? CompileResults : CompileResult\n * > |\n * undefined\n * )}\n */\n this.Compiler = undefined\n\n /**\n * Parser to use (deprecated).\n *\n * @deprecated\n * Use `parser` instead.\n * @type {(\n * Parser |\n * undefined\n * )}\n */\n this.Parser = undefined\n\n // Note: the following fields are considered private.\n // However, they are needed for tests, and TSC generates an untyped\n // `private freezeIndex` field for, which trips `type-coverage` up.\n // Instead, we use `@deprecated` to visualize that they shouldn’t be used.\n /**\n * Internal list of configured plugins.\n *\n * @deprecated\n * This is a private internal property and should not be used.\n * @type {Array>>}\n */\n this.attachers = []\n\n /**\n * Compiler to use.\n *\n * @type {(\n * Compiler<\n * CompileTree extends undefined ? Node : CompileTree,\n * CompileResult extends undefined ? 
CompileResults : CompileResult\n * > |\n * undefined\n * )}\n */\n this.compiler = undefined\n\n /**\n * Internal state to track where we are while freezing.\n *\n * @deprecated\n * This is a private internal property and should not be used.\n * @type {number}\n */\n this.freezeIndex = -1\n\n /**\n * Internal state to track whether we’re frozen.\n *\n * @deprecated\n * This is a private internal property and should not be used.\n * @type {boolean | undefined}\n */\n this.frozen = undefined\n\n /**\n * Internal state.\n *\n * @deprecated\n * This is a private internal property and should not be used.\n * @type {Data}\n */\n this.namespace = {}\n\n /**\n * Parser to use.\n *\n * @type {(\n * Parser |\n * undefined\n * )}\n */\n this.parser = undefined\n\n /**\n * Internal list of configured transformers.\n *\n * @deprecated\n * This is a private internal property and should not be used.\n * @type {Pipeline}\n */\n this.transformers = trough()\n }\n\n /**\n * Copy a processor.\n *\n * @deprecated\n * This is a private internal method and should not be used.\n * @returns {Processor}\n * New *unfrozen* processor ({@linkcode Processor}) that is\n * configured to work the same as its ancestor.\n * When the descendant processor is configured in the future it does not\n * affect the ancestral processor.\n */\n copy() {\n // Cast as the type parameters will be the same after attaching.\n const destination =\n /** @type {Processor} */ (\n new Processor()\n )\n let index = -1\n\n while (++index < this.attachers.length) {\n const attacher = this.attachers[index]\n destination.use(...attacher)\n }\n\n destination.data(extend(true, {}, this.namespace))\n\n return destination\n }\n\n /**\n * Configure the processor with info available to all plugins.\n * Information is stored in an object.\n *\n * Typically, options can be given to a specific plugin, but sometimes it\n * makes sense to have information shared with several plugins.\n * For example, a list of HTML elements that are 
self-closing, which is\n * needed during all phases.\n *\n * > **Note**: setting information cannot occur on *frozen* processors.\n * > Call the processor first to create a new unfrozen processor.\n *\n * > **Note**: to register custom data in TypeScript, augment the\n * > {@linkcode Data} interface.\n *\n * @example\n * This example show how to get and set info:\n *\n * ```js\n * import {unified} from 'unified'\n *\n * const processor = unified().data('alpha', 'bravo')\n *\n * processor.data('alpha') // => 'bravo'\n *\n * processor.data() // => {alpha: 'bravo'}\n *\n * processor.data({charlie: 'delta'})\n *\n * processor.data() // => {charlie: 'delta'}\n * ```\n *\n * @template {keyof Data} Key\n *\n * @overload\n * @returns {Data}\n *\n * @overload\n * @param {Data} dataset\n * @returns {Processor}\n *\n * @overload\n * @param {Key} key\n * @returns {Data[Key]}\n *\n * @overload\n * @param {Key} key\n * @param {Data[Key]} value\n * @returns {Processor}\n *\n * @param {Data | Key} [key]\n * Key to get or set, or entire dataset to set, or nothing to get the\n * entire dataset (optional).\n * @param {Data[Key]} [value]\n * Value to set (optional).\n * @returns {unknown}\n * The current processor when setting, the value at `key` when getting, or\n * the entire dataset when getting without key.\n */\n data(key, value) {\n if (typeof key === 'string') {\n // Set `key`.\n if (arguments.length === 2) {\n assertUnfrozen('data', this.frozen)\n this.namespace[key] = value\n return this\n }\n\n // Get `key`.\n return (own.call(this.namespace, key) && this.namespace[key]) || undefined\n }\n\n // Set space.\n if (key) {\n assertUnfrozen('data', this.frozen)\n this.namespace = key\n return this\n }\n\n // Get space.\n return this.namespace\n }\n\n /**\n * Freeze a processor.\n *\n * Frozen processors are meant to be extended and not to be configured\n * directly.\n *\n * When a processor is frozen it cannot be unfrozen.\n * New processors working the same way can be created by 
calling the\n * processor.\n *\n * It’s possible to freeze processors explicitly by calling `.freeze()`.\n * Processors freeze automatically when `.parse()`, `.run()`, `.runSync()`,\n * `.stringify()`, `.process()`, or `.processSync()` are called.\n *\n * @returns {Processor}\n * The current processor.\n */\n freeze() {\n if (this.frozen) {\n return this\n }\n\n // Cast so that we can type plugins easier.\n // Plugins are supposed to be usable on different processors, not just on\n // this exact processor.\n const self = /** @type {Processor} */ (/** @type {unknown} */ (this))\n\n while (++this.freezeIndex < this.attachers.length) {\n const [attacher, ...options] = this.attachers[this.freezeIndex]\n\n if (options[0] === false) {\n continue\n }\n\n if (options[0] === true) {\n options[0] = undefined\n }\n\n const transformer = attacher.call(self, ...options)\n\n if (typeof transformer === 'function') {\n this.transformers.use(transformer)\n }\n }\n\n this.frozen = true\n this.freezeIndex = Number.POSITIVE_INFINITY\n\n return this\n }\n\n /**\n * Parse text to a syntax tree.\n *\n * > **Note**: `parse` freezes the processor if not already *frozen*.\n *\n * > **Note**: `parse` performs the parse phase, not the run phase or other\n * > phases.\n *\n * @param {Compatible | undefined} [file]\n * file to parse (optional); typically `string` or `VFile`; any value\n * accepted as `x` in `new VFile(x)`.\n * @returns {ParseTree extends undefined ? 
Node : ParseTree}\n * Syntax tree representing `file`.\n */\n parse(file) {\n this.freeze()\n const realFile = vfile(file)\n const parser = this.parser || this.Parser\n assertParser('parse', parser)\n return parser(String(realFile), realFile)\n }\n\n /**\n * Process the given file as configured on the processor.\n *\n * > **Note**: `process` freezes the processor if not already *frozen*.\n *\n * > **Note**: `process` performs the parse, run, and stringify phases.\n *\n * @overload\n * @param {Compatible | undefined} file\n * @param {ProcessCallback>} done\n * @returns {undefined}\n *\n * @overload\n * @param {Compatible | undefined} [file]\n * @returns {Promise>}\n *\n * @param {Compatible | undefined} [file]\n * File (optional); typically `string` or `VFile`]; any value accepted as\n * `x` in `new VFile(x)`.\n * @param {ProcessCallback> | undefined} [done]\n * Callback (optional).\n * @returns {Promise | undefined}\n * Nothing if `done` is given.\n * Otherwise a promise, rejected with a fatal error or resolved with the\n * processed file.\n *\n * The parsed, transformed, and compiled value is available at\n * `file.value` (see note).\n *\n * > **Note**: unified typically compiles by serializing: most\n * > compilers return `string` (or `Uint8Array`).\n * > Some compilers, such as the one configured with\n * > [`rehype-react`][rehype-react], return other values (in this case, a\n * > React tree).\n * > If you’re using a compiler that doesn’t serialize, expect different\n * > result values.\n * >\n * > To register custom results in TypeScript, add them to\n * > {@linkcode CompileResultMap}.\n *\n * [rehype-react]: https://github.com/rehypejs/rehype-react\n */\n process(file, done) {\n const self = this\n\n this.freeze()\n assertParser('process', this.parser || this.Parser)\n assertCompiler('process', this.compiler || this.Compiler)\n\n return done ? 
executor(undefined, done) : new Promise(executor)\n\n // Note: `void`s needed for TS.\n /**\n * @param {((file: VFileWithOutput) => undefined | void) | undefined} resolve\n * @param {(error: Error | undefined) => undefined | void} reject\n * @returns {undefined}\n */\n function executor(resolve, reject) {\n const realFile = vfile(file)\n // Assume `ParseTree` (the result of the parser) matches `HeadTree` (the\n // input of the first transform).\n const parseTree =\n /** @type {HeadTree extends undefined ? Node : HeadTree} */ (\n /** @type {unknown} */ (self.parse(realFile))\n )\n\n self.run(parseTree, realFile, function (error, tree, file) {\n if (error || !tree || !file) {\n return realDone(error)\n }\n\n // Assume `TailTree` (the output of the last transform) matches\n // `CompileTree` (the input of the compiler).\n const compileTree =\n /** @type {CompileTree extends undefined ? Node : CompileTree} */ (\n /** @type {unknown} */ (tree)\n )\n\n const compileResult = self.stringify(compileTree, file)\n\n if (looksLikeAValue(compileResult)) {\n file.value = compileResult\n } else {\n file.result = compileResult\n }\n\n realDone(error, /** @type {VFileWithOutput} */ (file))\n })\n\n /**\n * @param {Error | undefined} error\n * @param {VFileWithOutput | undefined} [file]\n * @returns {undefined}\n */\n function realDone(error, file) {\n if (error || !file) {\n reject(error)\n } else if (resolve) {\n resolve(file)\n } else {\n assert(done, '`done` is defined if `resolve` is not')\n done(undefined, file)\n }\n }\n }\n }\n\n /**\n * Process the given file as configured on the processor.\n *\n * An error is thrown if asynchronous transforms are configured.\n *\n * > **Note**: `processSync` freezes the processor if not already *frozen*.\n *\n * > **Note**: `processSync` performs the parse, run, and stringify phases.\n *\n * @param {Compatible | undefined} [file]\n * File (optional); typically `string` or `VFile`; any value accepted as\n * `x` in `new VFile(x)`.\n * 
@returns {VFileWithOutput}\n * The processed file.\n *\n * The parsed, transformed, and compiled value is available at\n * `file.value` (see note).\n *\n * > **Note**: unified typically compiles by serializing: most\n * > compilers return `string` (or `Uint8Array`).\n * > Some compilers, such as the one configured with\n * > [`rehype-react`][rehype-react], return other values (in this case, a\n * > React tree).\n * > If you’re using a compiler that doesn’t serialize, expect different\n * > result values.\n * >\n * > To register custom results in TypeScript, add them to\n * > {@linkcode CompileResultMap}.\n *\n * [rehype-react]: https://github.com/rehypejs/rehype-react\n */\n processSync(file) {\n /** @type {boolean} */\n let complete = false\n /** @type {VFileWithOutput | undefined} */\n let result\n\n this.freeze()\n assertParser('processSync', this.parser || this.Parser)\n assertCompiler('processSync', this.compiler || this.Compiler)\n\n this.process(file, realDone)\n assertDone('processSync', 'process', complete)\n assert(result, 'we either bailed on an error or have a tree')\n\n return result\n\n /**\n * @type {ProcessCallback>}\n */\n function realDone(error, file) {\n complete = true\n bail(error)\n result = file\n }\n }\n\n /**\n * Run *transformers* on a syntax tree.\n *\n * > **Note**: `run` freezes the processor if not already *frozen*.\n *\n * > **Note**: `run` performs the run phase, not other phases.\n *\n * @overload\n * @param {HeadTree extends undefined ? Node : HeadTree} tree\n * @param {RunCallback} done\n * @returns {undefined}\n *\n * @overload\n * @param {HeadTree extends undefined ? Node : HeadTree} tree\n * @param {Compatible | undefined} file\n * @param {RunCallback} done\n * @returns {undefined}\n *\n * @overload\n * @param {HeadTree extends undefined ? Node : HeadTree} tree\n * @param {Compatible | undefined} [file]\n * @returns {Promise}\n *\n * @param {HeadTree extends undefined ? 
Node : HeadTree} tree\n * Tree to transform and inspect.\n * @param {(\n * RunCallback |\n * Compatible\n * )} [file]\n * File associated with `node` (optional); any value accepted as `x` in\n * `new VFile(x)`.\n * @param {RunCallback} [done]\n * Callback (optional).\n * @returns {Promise | undefined}\n * Nothing if `done` is given.\n * Otherwise, a promise rejected with a fatal error or resolved with the\n * transformed tree.\n */\n run(tree, file, done) {\n assertNode(tree)\n this.freeze()\n\n const transformers = this.transformers\n\n if (!done && typeof file === 'function') {\n done = file\n file = undefined\n }\n\n return done ? executor(undefined, done) : new Promise(executor)\n\n // Note: `void`s needed for TS.\n /**\n * @param {(\n * ((tree: TailTree extends undefined ? Node : TailTree) => undefined | void) |\n * undefined\n * )} resolve\n * @param {(error: Error) => undefined | void} reject\n * @returns {undefined}\n */\n function executor(resolve, reject) {\n assert(\n typeof file !== 'function',\n '`file` can’t be a `done` anymore, we checked'\n )\n const realFile = vfile(file)\n transformers.run(tree, realFile, realDone)\n\n /**\n * @param {Error | undefined} error\n * @param {Node} outputTree\n * @param {VFile} file\n * @returns {undefined}\n */\n function realDone(error, outputTree, file) {\n const resultingTree =\n /** @type {TailTree extends undefined ? Node : TailTree} */ (\n outputTree || tree\n )\n\n if (error) {\n reject(error)\n } else if (resolve) {\n resolve(resultingTree)\n } else {\n assert(done, '`done` is defined if `resolve` is not')\n done(undefined, resultingTree, file)\n }\n }\n }\n }\n\n /**\n * Run *transformers* on a syntax tree.\n *\n * An error is thrown if asynchronous transforms are configured.\n *\n * > **Note**: `runSync` freezes the processor if not already *frozen*.\n *\n * > **Note**: `runSync` performs the run phase, not other phases.\n *\n * @param {HeadTree extends undefined ? 
Node : HeadTree} tree\n * Tree to transform and inspect.\n * @param {Compatible | undefined} [file]\n * File associated with `node` (optional); any value accepted as `x` in\n * `new VFile(x)`.\n * @returns {TailTree extends undefined ? Node : TailTree}\n * Transformed tree.\n */\n runSync(tree, file) {\n /** @type {boolean} */\n let complete = false\n /** @type {(TailTree extends undefined ? Node : TailTree) | undefined} */\n let result\n\n this.run(tree, file, realDone)\n\n assertDone('runSync', 'run', complete)\n assert(result, 'we either bailed on an error or have a tree')\n return result\n\n /**\n * @type {RunCallback}\n */\n function realDone(error, tree) {\n bail(error)\n result = tree\n complete = true\n }\n }\n\n /**\n * Compile a syntax tree.\n *\n * > **Note**: `stringify` freezes the processor if not already *frozen*.\n *\n * > **Note**: `stringify` performs the stringify phase, not the run phase\n * > or other phases.\n *\n * @param {CompileTree extends undefined ? Node : CompileTree} tree\n * Tree to compile.\n * @param {Compatible | undefined} [file]\n * File associated with `node` (optional); any value accepted as `x` in\n * `new VFile(x)`.\n * @returns {CompileResult extends undefined ? 
Value : CompileResult}\n * Textual representation of the tree (see note).\n *\n * > **Note**: unified typically compiles by serializing: most compilers\n * > return `string` (or `Uint8Array`).\n * > Some compilers, such as the one configured with\n * > [`rehype-react`][rehype-react], return other values (in this case, a\n * > React tree).\n * > If you’re using a compiler that doesn’t serialize, expect different\n * > result values.\n * >\n * > To register custom results in TypeScript, add them to\n * > {@linkcode CompileResultMap}.\n *\n * [rehype-react]: https://github.com/rehypejs/rehype-react\n */\n stringify(tree, file) {\n this.freeze()\n const realFile = vfile(file)\n const compiler = this.compiler || this.Compiler\n assertCompiler('stringify', compiler)\n assertNode(tree)\n\n return compiler(tree, realFile)\n }\n\n /**\n * Configure the processor to use a plugin, a list of usable values, or a\n * preset.\n *\n * If the processor is already using a plugin, the previous plugin\n * configuration is changed based on the options that are passed in.\n * In other words, the plugin is not added a second time.\n *\n * > **Note**: `use` cannot be called on *frozen* processors.\n * > Call the processor first to create a new unfrozen processor.\n *\n * @example\n * There are many ways to pass plugins to `.use()`.\n * This example gives an overview:\n *\n * ```js\n * import {unified} from 'unified'\n *\n * unified()\n * // Plugin with options:\n * .use(pluginA, {x: true, y: true})\n * // Passing the same plugin again merges configuration (to `{x: true, y: false, z: true}`):\n * .use(pluginA, {y: false, z: true})\n * // Plugins:\n * .use([pluginB, pluginC])\n * // Two plugins, the second with options:\n * .use([pluginD, [pluginE, {}]])\n * // Preset with plugins and settings:\n * .use({plugins: [pluginF, [pluginG, {}]], settings: {position: false}})\n * // Settings only:\n * .use({settings: {position: false}})\n * ```\n *\n * @template {Array} [Parameters=[]]\n * 
@template {Node | string | undefined} [Input=undefined]\n * @template [Output=Input]\n *\n * @overload\n * @param {Preset | null | undefined} [preset]\n * @returns {Processor}\n *\n * @overload\n * @param {PluggableList} list\n * @returns {Processor}\n *\n * @overload\n * @param {Plugin} plugin\n * @param {...(Parameters | [boolean])} parameters\n * @returns {UsePlugin}\n *\n * @param {PluggableList | Plugin | Preset | null | undefined} value\n * Usable value.\n * @param {...unknown} parameters\n * Parameters, when a plugin is given as a usable value.\n * @returns {Processor}\n * Current processor.\n */\n use(value, ...parameters) {\n const attachers = this.attachers\n const namespace = this.namespace\n\n assertUnfrozen('use', this.frozen)\n\n if (value === null || value === undefined) {\n // Empty.\n } else if (typeof value === 'function') {\n addPlugin(value, parameters)\n } else if (typeof value === 'object') {\n if (Array.isArray(value)) {\n addList(value)\n } else {\n addPreset(value)\n }\n } else {\n throw new TypeError('Expected usable value, not `' + value + '`')\n }\n\n return this\n\n /**\n * @param {Pluggable} value\n * @returns {undefined}\n */\n function add(value) {\n if (typeof value === 'function') {\n addPlugin(value, [])\n } else if (typeof value === 'object') {\n if (Array.isArray(value)) {\n const [plugin, ...parameters] =\n /** @type {PluginTuple>} */ (value)\n addPlugin(plugin, parameters)\n } else {\n addPreset(value)\n }\n } else {\n throw new TypeError('Expected usable value, not `' + value + '`')\n }\n }\n\n /**\n * @param {Preset} result\n * @returns {undefined}\n */\n function addPreset(result) {\n if (!('plugins' in result) && !('settings' in result)) {\n throw new Error(\n 'Expected usable value but received an empty preset, which is probably a mistake: presets typically come with `plugins` and sometimes with `settings`, but this has neither'\n )\n }\n\n addList(result.plugins)\n\n if (result.settings) {\n namespace.settings = 
extend(true, namespace.settings, result.settings)\n }\n }\n\n /**\n * @param {PluggableList | null | undefined} plugins\n * @returns {undefined}\n */\n function addList(plugins) {\n let index = -1\n\n if (plugins === null || plugins === undefined) {\n // Empty.\n } else if (Array.isArray(plugins)) {\n while (++index < plugins.length) {\n const thing = plugins[index]\n add(thing)\n }\n } else {\n throw new TypeError('Expected a list of plugins, not `' + plugins + '`')\n }\n }\n\n /**\n * @param {Plugin} plugin\n * @param {Array} parameters\n * @returns {undefined}\n */\n function addPlugin(plugin, parameters) {\n let index = -1\n let entryIndex = -1\n\n while (++index < attachers.length) {\n if (attachers[index][0] === plugin) {\n entryIndex = index\n break\n }\n }\n\n if (entryIndex === -1) {\n attachers.push([plugin, ...parameters])\n }\n // Only set if there was at least a `primary` value, otherwise we’d change\n // `arguments.length`.\n else if (parameters.length > 0) {\n let [primary, ...rest] = parameters\n const currentPrimary = attachers[entryIndex][1]\n if (isPlainObj(currentPrimary) && isPlainObj(primary)) {\n primary = extend(true, currentPrimary, primary)\n }\n\n attachers[entryIndex] = [plugin, primary, ...rest]\n }\n }\n }\n}\n\n// Note: this returns a *callable* instance.\n// That’s why it’s documented as a function.\n/**\n * Create a new processor.\n *\n * @example\n * This example shows how a new processor can be created (from `remark`) and linked\n * to **stdin**(4) and **stdout**(4).\n *\n * ```js\n * import process from 'node:process'\n * import concatStream from 'concat-stream'\n * import {remark} from 'remark'\n *\n * process.stdin.pipe(\n * concatStream(function (buf) {\n * process.stdout.write(String(remark().processSync(buf)))\n * })\n * )\n * ```\n *\n * @returns\n * New *unfrozen* processor (`processor`).\n *\n * This processor is configured to work the same as its ancestor.\n * When the descendant processor is configured in the future it 
does not\n * affect the ancestral processor.\n */\nexport const unified = new Processor().freeze()\n\n/**\n * Assert a parser is available.\n *\n * @param {string} name\n * @param {unknown} value\n * @returns {asserts value is Parser}\n */\nfunction assertParser(name, value) {\n if (typeof value !== 'function') {\n throw new TypeError('Cannot `' + name + '` without `parser`')\n }\n}\n\n/**\n * Assert a compiler is available.\n *\n * @param {string} name\n * @param {unknown} value\n * @returns {asserts value is Compiler}\n */\nfunction assertCompiler(name, value) {\n if (typeof value !== 'function') {\n throw new TypeError('Cannot `' + name + '` without `compiler`')\n }\n}\n\n/**\n * Assert the processor is not frozen.\n *\n * @param {string} name\n * @param {unknown} frozen\n * @returns {asserts frozen is false}\n */\nfunction assertUnfrozen(name, frozen) {\n if (frozen) {\n throw new Error(\n 'Cannot call `' +\n name +\n '` on a frozen processor.\\nCreate a new processor first, by calling it: use `processor()` instead of `processor`.'\n )\n }\n}\n\n/**\n * Assert `node` is a unist node.\n *\n * @param {unknown} node\n * @returns {asserts node is Node}\n */\nfunction assertNode(node) {\n // `isPlainObj` unfortunately uses `any` instead of `unknown`.\n // type-coverage:ignore-next-line\n if (!isPlainObj(node) || typeof node.type !== 'string') {\n throw new TypeError('Expected node, got `' + node + '`')\n // Fine.\n }\n}\n\n/**\n * Assert that `complete` is `true`.\n *\n * @param {string} name\n * @param {string} asyncName\n * @param {unknown} complete\n * @returns {asserts complete is true}\n */\nfunction assertDone(name, asyncName, complete) {\n if (!complete) {\n throw new Error(\n '`' + name + '` finished async. Use `' + asyncName + '` instead'\n )\n }\n}\n\n/**\n * @param {Compatible | undefined} [value]\n * @returns {VFile}\n */\nfunction vfile(value) {\n return looksLikeAVFile(value) ? 
value : new VFile(value)\n}\n\n/**\n * @param {Compatible | undefined} [value]\n * @returns {value is VFile}\n */\nfunction looksLikeAVFile(value) {\n return Boolean(\n value &&\n typeof value === 'object' &&\n 'message' in value &&\n 'messages' in value\n )\n}\n\n/**\n * @param {unknown} [value]\n * @returns {value is Value}\n */\nfunction looksLikeAValue(value) {\n return typeof value === 'string' || isUint8Array(value)\n}\n\n/**\n * Assert `value` is an `Uint8Array`.\n *\n * @param {unknown} value\n * thing.\n * @returns {value is Uint8Array}\n * Whether `value` is an `Uint8Array`.\n */\nfunction isUint8Array(value) {\n return Boolean(\n value &&\n typeof value === 'object' &&\n 'byteLength' in value &&\n 'byteOffset' in value\n )\n}\n","/**\n * @import {Element, ElementContent, Nodes, Parents, Root} from 'hast'\n * @import {Root as MdastRoot} from 'mdast'\n * @import {ComponentProps, ElementType, ReactElement} from 'react'\n * @import {Options as RemarkRehypeOptions} from 'remark-rehype'\n * @import {BuildVisitor} from 'unist-util-visit'\n * @import {PluggableList, Processor} from 'unified'\n */\n\n/**\n * @callback AllowElement\n * Filter elements.\n * @param {Readonly} element\n * Element to check.\n * @param {number} index\n * Index of `element` in `parent`.\n * @param {Readonly | undefined} parent\n * Parent of `element`.\n * @returns {boolean | null | undefined}\n * Whether to allow `element` (default: `false`).\n */\n\n/**\n * @typedef ExtraProps\n * Extra fields we pass.\n * @property {Element | undefined} [node]\n * passed when `passNode` is on.\n */\n\n/**\n * @typedef {{\n * [Key in Extract]?: ElementType & ExtraProps>\n * }} Components\n * Map tag names to components.\n */\n\n/**\n * @typedef Deprecation\n * Deprecation.\n * @property {string} from\n * Old field.\n * @property {string} id\n * ID in readme.\n * @property {keyof Options} [to]\n * New field.\n */\n\n/**\n * @typedef Options\n * Configuration.\n * @property {AllowElement | null | 
undefined} [allowElement]\n * Filter elements (optional);\n * `allowedElements` / `disallowedElements` is used first.\n * @property {ReadonlyArray | null | undefined} [allowedElements]\n * Tag names to allow (default: all tag names);\n * cannot combine w/ `disallowedElements`.\n * @property {string | null | undefined} [children]\n * Markdown.\n * @property {string | null | undefined} [className]\n * Wrap in a `div` with this class name.\n * @property {Components | null | undefined} [components]\n * Map tag names to components.\n * @property {ReadonlyArray | null | undefined} [disallowedElements]\n * Tag names to disallow (default: `[]`);\n * cannot combine w/ `allowedElements`.\n * @property {PluggableList | null | undefined} [rehypePlugins]\n * List of rehype plugins to use.\n * @property {PluggableList | null | undefined} [remarkPlugins]\n * List of remark plugins to use.\n * @property {Readonly | null | undefined} [remarkRehypeOptions]\n * Options to pass through to `remark-rehype`.\n * @property {boolean | null | undefined} [skipHtml=false]\n * Ignore HTML in markdown completely (default: `false`).\n * @property {boolean | null | undefined} [unwrapDisallowed=false]\n * Extract (unwrap) what’s in disallowed elements (default: `false`);\n * normally when say `strong` is not allowed, it and it’s children are dropped,\n * with `unwrapDisallowed` the element itself is replaced by its children.\n * @property {UrlTransform | null | undefined} [urlTransform]\n * Change URLs (default: `defaultUrlTransform`)\n */\n\n/**\n * @callback UrlTransform\n * Transform all URLs.\n * @param {string} url\n * URL.\n * @param {string} key\n * Property name (example: `'href'`).\n * @param {Readonly} node\n * Node.\n * @returns {string | null | undefined}\n * Transformed URL (optional).\n */\n\nimport {unreachable} from 'devlop'\nimport {toJsxRuntime} from 'hast-util-to-jsx-runtime'\nimport {urlAttributes} from 'html-url-attributes'\nimport {Fragment, jsx, jsxs} from 
'react/jsx-runtime'\nimport {createElement, useEffect, useState} from 'react'\nimport remarkParse from 'remark-parse'\nimport remarkRehype from 'remark-rehype'\nimport {unified} from 'unified'\nimport {visit} from 'unist-util-visit'\nimport {VFile} from 'vfile'\n\nconst changelog =\n 'https://github.com/remarkjs/react-markdown/blob/main/changelog.md'\n\n/** @type {PluggableList} */\nconst emptyPlugins = []\n/** @type {Readonly} */\nconst emptyRemarkRehypeOptions = {allowDangerousHtml: true}\nconst safeProtocol = /^(https?|ircs?|mailto|xmpp)$/i\n\n// Mutable because we `delete` any time it’s used and a message is sent.\n/** @type {ReadonlyArray>} */\nconst deprecations = [\n {from: 'astPlugins', id: 'remove-buggy-html-in-markdown-parser'},\n {from: 'allowDangerousHtml', id: 'remove-buggy-html-in-markdown-parser'},\n {\n from: 'allowNode',\n id: 'replace-allownode-allowedtypes-and-disallowedtypes',\n to: 'allowElement'\n },\n {\n from: 'allowedTypes',\n id: 'replace-allownode-allowedtypes-and-disallowedtypes',\n to: 'allowedElements'\n },\n {\n from: 'disallowedTypes',\n id: 'replace-allownode-allowedtypes-and-disallowedtypes',\n to: 'disallowedElements'\n },\n {from: 'escapeHtml', id: 'remove-buggy-html-in-markdown-parser'},\n {from: 'includeElementIndex', id: '#remove-includeelementindex'},\n {\n from: 'includeNodeIndex',\n id: 'change-includenodeindex-to-includeelementindex'\n },\n {from: 'linkTarget', id: 'remove-linktarget'},\n {from: 'plugins', id: 'change-plugins-to-remarkplugins', to: 'remarkPlugins'},\n {from: 'rawSourcePos', id: '#remove-rawsourcepos'},\n {from: 'renderers', id: 'change-renderers-to-components', to: 'components'},\n {from: 'source', id: 'change-source-to-children', to: 'children'},\n {from: 'sourcePos', id: '#remove-sourcepos'},\n {from: 'transformImageUri', id: '#add-urltransform', to: 'urlTransform'},\n {from: 'transformLinkUri', id: '#add-urltransform', to: 'urlTransform'}\n]\n\n/**\n * Component to render markdown.\n *\n * This is a 
synchronous component.\n * When using async plugins,\n * see {@linkcode MarkdownAsync} or {@linkcode MarkdownHooks}.\n *\n * @param {Readonly} options\n * Props.\n * @returns {ReactElement}\n * React element.\n */\nexport function Markdown(options) {\n const processor = createProcessor(options)\n const file = createFile(options)\n return post(processor.runSync(processor.parse(file), file), options)\n}\n\n/**\n * Component to render markdown with support for async plugins\n * through async/await.\n *\n * Components returning promises are supported on the server.\n * For async support on the client,\n * see {@linkcode MarkdownHooks}.\n *\n * @param {Readonly} options\n * Props.\n * @returns {Promise}\n * Promise to a React element.\n */\nexport async function MarkdownAsync(options) {\n const processor = createProcessor(options)\n const file = createFile(options)\n const tree = await processor.run(processor.parse(file), file)\n return post(tree, options)\n}\n\n/**\n * Component to render markdown with support for async plugins through hooks.\n *\n * This uses `useEffect` and `useState` hooks.\n * Hooks run on the client and do not immediately render something.\n * For async support on the server,\n * see {@linkcode MarkdownAsync}.\n *\n * @param {Readonly} options\n * Props.\n * @returns {ReactElement}\n * React element.\n */\nexport function MarkdownHooks(options) {\n const processor = createProcessor(options)\n const [error, setError] = useState(\n /** @type {Error | undefined} */ (undefined)\n )\n const [tree, setTree] = useState(/** @type {Root | undefined} */ (undefined))\n\n useEffect(\n /* c8 ignore next 7 -- hooks are client-only. */\n function () {\n const file = createFile(options)\n processor.run(processor.parse(file), file, function (error, tree) {\n setError(error)\n setTree(tree)\n })\n },\n [\n options.children,\n options.rehypePlugins,\n options.remarkPlugins,\n options.remarkRehypeOptions\n ]\n )\n\n /* c8 ignore next -- hooks are client-only. 
*/\n if (error) throw error\n\n /* c8 ignore next -- hooks are client-only. */\n return tree ? post(tree, options) : createElement(Fragment)\n}\n\n/**\n * Set up the `unified` processor.\n *\n * @param {Readonly} options\n * Props.\n * @returns {Processor}\n * Result.\n */\nfunction createProcessor(options) {\n const rehypePlugins = options.rehypePlugins || emptyPlugins\n const remarkPlugins = options.remarkPlugins || emptyPlugins\n const remarkRehypeOptions = options.remarkRehypeOptions\n ? {...options.remarkRehypeOptions, ...emptyRemarkRehypeOptions}\n : emptyRemarkRehypeOptions\n\n const processor = unified()\n .use(remarkParse)\n .use(remarkPlugins)\n .use(remarkRehype, remarkRehypeOptions)\n .use(rehypePlugins)\n\n return processor\n}\n\n/**\n * Set up the virtual file.\n *\n * @param {Readonly} options\n * Props.\n * @returns {VFile}\n * Result.\n */\nfunction createFile(options) {\n const children = options.children || ''\n const file = new VFile()\n\n if (typeof children === 'string') {\n file.value = children\n } else {\n unreachable(\n 'Unexpected value `' +\n children +\n '` for `children` prop, expected `string`'\n )\n }\n\n return file\n}\n\n/**\n * Process the result from unified some more.\n *\n * @param {Nodes} tree\n * Tree.\n * @param {Readonly} options\n * Props.\n * @returns {ReactElement}\n * React element.\n */\nfunction post(tree, options) {\n const allowedElements = options.allowedElements\n const allowElement = options.allowElement\n const components = options.components\n const disallowedElements = options.disallowedElements\n const skipHtml = options.skipHtml\n const unwrapDisallowed = options.unwrapDisallowed\n const urlTransform = options.urlTransform || defaultUrlTransform\n\n for (const deprecation of deprecations) {\n if (Object.hasOwn(options, deprecation.from)) {\n unreachable(\n 'Unexpected `' +\n deprecation.from +\n '` prop, ' +\n (deprecation.to\n ? 
'use `' + deprecation.to + '` instead'\n : 'remove it') +\n ' (see <' +\n changelog +\n '#' +\n deprecation.id +\n '> for more info)'\n )\n }\n }\n\n if (allowedElements && disallowedElements) {\n unreachable(\n 'Unexpected combined `allowedElements` and `disallowedElements`, expected one or the other'\n )\n }\n\n // Wrap in `div` if there’s a class name.\n if (options.className) {\n tree = {\n type: 'element',\n tagName: 'div',\n properties: {className: options.className},\n // Assume no doctypes.\n children: /** @type {Array} */ (\n tree.type === 'root' ? tree.children : [tree]\n )\n }\n }\n\n visit(tree, transform)\n\n return toJsxRuntime(tree, {\n Fragment,\n // @ts-expect-error\n // React components are allowed to return numbers,\n // but not according to the types in hast-util-to-jsx-runtime\n components,\n ignoreInvalidStyle: true,\n jsx,\n jsxs,\n passKeys: true,\n passNode: true\n })\n\n /** @type {BuildVisitor} */\n function transform(node, index, parent) {\n if (node.type === 'raw' && parent && typeof index === 'number') {\n if (skipHtml) {\n parent.children.splice(index, 1)\n } else {\n parent.children[index] = {type: 'text', value: node.value}\n }\n\n return index\n }\n\n if (node.type === 'element') {\n /** @type {string} */\n let key\n\n for (key in urlAttributes) {\n if (\n Object.hasOwn(urlAttributes, key) &&\n Object.hasOwn(node.properties, key)\n ) {\n const value = node.properties[key]\n const test = urlAttributes[key]\n if (test === null || test.includes(node.tagName)) {\n node.properties[key] = urlTransform(String(value || ''), key, node)\n }\n }\n }\n }\n\n if (node.type === 'element') {\n let remove = allowedElements\n ? !allowedElements.includes(node.tagName)\n : disallowedElements\n ? 
disallowedElements.includes(node.tagName)\n : false\n\n if (!remove && allowElement && typeof index === 'number') {\n remove = !allowElement(node, index, parent)\n }\n\n if (remove && parent && typeof index === 'number') {\n if (unwrapDisallowed && node.children) {\n parent.children.splice(index, 1, ...node.children)\n } else {\n parent.children.splice(index, 1)\n }\n\n return index\n }\n }\n }\n}\n\n/**\n * Make a URL safe.\n *\n * @satisfies {UrlTransform}\n * @param {string} value\n * URL.\n * @returns {string}\n * Safe URL.\n */\nexport function defaultUrlTransform(value) {\n // Same as:\n // \n // But without the `encode` part.\n const colon = value.indexOf(':')\n const questionMark = value.indexOf('?')\n const numberSign = value.indexOf('#')\n const slash = value.indexOf('/')\n\n if (\n // If there is no protocol, it’s relative.\n colon === -1 ||\n // If the first colon is after a `?`, `#`, or `/`, it’s not a protocol.\n (slash !== -1 && colon > slash) ||\n (questionMark !== -1 && colon > questionMark) ||\n (numberSign !== -1 && colon > numberSign) ||\n // It is a protocol, it should be allowed.\n safeProtocol.test(value.slice(0, colon))\n ) {\n return value\n }\n\n return ''\n}\n","/**\n * Count how often a character (or substring) is used in a string.\n *\n * @param {string} value\n * Value to search in.\n * @param {string} character\n * Character (or substring) to look for.\n * @return {number}\n * Number of times `character` occurred in `value`.\n */\nexport function ccount(value, character) {\n const source = String(value)\n\n if (typeof character !== 'string') {\n throw new TypeError('Expected character')\n }\n\n let count = 0\n let index = source.indexOf(character)\n\n while (index !== -1) {\n count++\n index = source.indexOf(character, index + character.length)\n }\n\n return count\n}\n","/**\n * @import {Nodes, Parents, PhrasingContent, Root, Text} from 'mdast'\n * @import {BuildVisitor, Test, VisitorResult} from 'unist-util-visit-parents'\n 
*/\n\n/**\n * @typedef RegExpMatchObject\n * Info on the match.\n * @property {number} index\n * The index of the search at which the result was found.\n * @property {string} input\n * A copy of the search string in the text node.\n * @property {[...Array, Text]} stack\n * All ancestors of the text node, where the last node is the text itself.\n *\n * @typedef {RegExp | string} Find\n * Pattern to find.\n *\n * Strings are escaped and then turned into global expressions.\n *\n * @typedef {Array} FindAndReplaceList\n * Several find and replaces, in array form.\n *\n * @typedef {[Find, Replace?]} FindAndReplaceTuple\n * Find and replace in tuple form.\n *\n * @typedef {ReplaceFunction | string | null | undefined} Replace\n * Thing to replace with.\n *\n * @callback ReplaceFunction\n * Callback called when a search matches.\n * @param {...any} parameters\n * The parameters are the result of corresponding search expression:\n *\n * * `value` (`string`) — whole match\n * * `...capture` (`Array`) — matches from regex capture groups\n * * `match` (`RegExpMatchObject`) — info on the match\n * @returns {Array | PhrasingContent | string | false | null | undefined}\n * Thing to replace with.\n *\n * * when `null`, `undefined`, `''`, remove the match\n * * …or when `false`, do not replace at all\n * * …or when `string`, replace with a text node of that value\n * * …or when `Node` or `Array`, replace with those nodes\n *\n * @typedef {[RegExp, ReplaceFunction]} Pair\n * Normalized find and replace.\n *\n * @typedef {Array} Pairs\n * All find and replaced.\n *\n * @typedef Options\n * Configuration.\n * @property {Test | null | undefined} [ignore]\n * Test for which nodes to ignore (optional).\n */\n\nimport escape from 'escape-string-regexp'\nimport {visitParents} from 'unist-util-visit-parents'\nimport {convert} from 'unist-util-is'\n\n/**\n * Find patterns in a tree and replace them.\n *\n * The algorithm searches the tree in *preorder* for complete values in `Text`\n * 
nodes.\n * Partial matches are not supported.\n *\n * @param {Nodes} tree\n * Tree to change.\n * @param {FindAndReplaceList | FindAndReplaceTuple} list\n * Patterns to find.\n * @param {Options | null | undefined} [options]\n * Configuration (when `find` is not `Find`).\n * @returns {undefined}\n * Nothing.\n */\nexport function findAndReplace(tree, list, options) {\n const settings = options || {}\n const ignored = convert(settings.ignore || [])\n const pairs = toPairs(list)\n let pairIndex = -1\n\n while (++pairIndex < pairs.length) {\n visitParents(tree, 'text', visitor)\n }\n\n /** @type {BuildVisitor} */\n function visitor(node, parents) {\n let index = -1\n /** @type {Parents | undefined} */\n let grandparent\n\n while (++index < parents.length) {\n const parent = parents[index]\n /** @type {Array | undefined} */\n const siblings = grandparent ? grandparent.children : undefined\n\n if (\n ignored(\n parent,\n siblings ? siblings.indexOf(parent) : undefined,\n grandparent\n )\n ) {\n return\n }\n\n grandparent = parent\n }\n\n if (grandparent) {\n return handler(node, parents)\n }\n }\n\n /**\n * Handle a text node which is not in an ignored parent.\n *\n * @param {Text} node\n * Text node.\n * @param {Array} parents\n * Parents.\n * @returns {VisitorResult}\n * Result.\n */\n function handler(node, parents) {\n const parent = parents[parents.length - 1]\n const find = pairs[pairIndex][0]\n const replace = pairs[pairIndex][1]\n let start = 0\n /** @type {Array} */\n const siblings = parent.children\n const index = siblings.indexOf(node)\n let change = false\n /** @type {Array} */\n let nodes = []\n\n find.lastIndex = 0\n\n let match = find.exec(node.value)\n\n while (match) {\n const position = match.index\n /** @type {RegExpMatchObject} */\n const matchObject = {\n index: match.index,\n input: match.input,\n stack: [...parents, node]\n }\n let value = replace(...match, matchObject)\n\n if (typeof value === 'string') {\n value = value.length > 0 ? 
{type: 'text', value} : undefined\n }\n\n // It wasn’t a match after all.\n if (value === false) {\n // False acts as if there was no match.\n // So we need to reset `lastIndex`, which currently being at the end of\n // the current match, to the beginning.\n find.lastIndex = position + 1\n } else {\n if (start !== position) {\n nodes.push({\n type: 'text',\n value: node.value.slice(start, position)\n })\n }\n\n if (Array.isArray(value)) {\n nodes.push(...value)\n } else if (value) {\n nodes.push(value)\n }\n\n start = position + match[0].length\n change = true\n }\n\n if (!find.global) {\n break\n }\n\n match = find.exec(node.value)\n }\n\n if (change) {\n if (start < node.value.length) {\n nodes.push({type: 'text', value: node.value.slice(start)})\n }\n\n parent.children.splice(index, 1, ...nodes)\n } else {\n nodes = [node]\n }\n\n return index + nodes.length\n }\n}\n\n/**\n * Turn a tuple or a list of tuples into pairs.\n *\n * @param {FindAndReplaceList | FindAndReplaceTuple} tupleOrList\n * Schema.\n * @returns {Pairs}\n * Clean pairs.\n */\nfunction toPairs(tupleOrList) {\n /** @type {Pairs} */\n const result = []\n\n if (!Array.isArray(tupleOrList)) {\n throw new TypeError('Expected find and replace tuple or list of tuples')\n }\n\n /** @type {FindAndReplaceList} */\n // @ts-expect-error: correct.\n const list =\n !tupleOrList[0] || Array.isArray(tupleOrList[0])\n ? tupleOrList\n : [tupleOrList]\n\n let index = -1\n\n while (++index < list.length) {\n const tuple = list[index]\n result.push([toExpression(tuple[0]), toFunction(tuple[1])])\n }\n\n return result\n}\n\n/**\n * Turn a find into an expression.\n *\n * @param {Find} find\n * Find.\n * @returns {RegExp}\n * Expression.\n */\nfunction toExpression(find) {\n return typeof find === 'string' ? 
new RegExp(escape(find), 'g') : find\n}\n\n/**\n * Turn a replace into a function.\n *\n * @param {Replace} replace\n * Replace.\n * @returns {ReplaceFunction}\n * Function.\n */\nfunction toFunction(replace) {\n return typeof replace === 'function'\n ? replace\n : function () {\n return replace\n }\n}\n","export default function escapeStringRegexp(string) {\n\tif (typeof string !== 'string') {\n\t\tthrow new TypeError('Expected a string');\n\t}\n\n\t// Escape characters with special meaning either inside or outside character sets.\n\t// Use a simple backslash escape when it’s always valid, and a `\\xnn` escape when the simpler form would be disallowed by Unicode patterns’ stricter grammar.\n\treturn string\n\t\t.replace(/[|\\\\{}()[\\]^$+*?.]/g, '\\\\$&')\n\t\t.replace(/-/g, '\\\\x2d');\n}\n","/**\n * @import {RegExpMatchObject, ReplaceFunction} from 'mdast-util-find-and-replace'\n * @import {CompileContext, Extension as FromMarkdownExtension, Handle as FromMarkdownHandle, Transform as FromMarkdownTransform} from 'mdast-util-from-markdown'\n * @import {ConstructName, Options as ToMarkdownExtension} from 'mdast-util-to-markdown'\n * @import {Link, PhrasingContent} from 'mdast'\n */\n\nimport {ccount} from 'ccount'\nimport {ok as assert} from 'devlop'\nimport {unicodePunctuation, unicodeWhitespace} from 'micromark-util-character'\nimport {findAndReplace} from 'mdast-util-find-and-replace'\n\n/** @type {ConstructName} */\nconst inConstruct = 'phrasing'\n/** @type {Array} */\nconst notInConstruct = ['autolink', 'link', 'image', 'label']\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM autolink\n * literals in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM autolink literals.\n */\nexport function gfmAutolinkLiteralFromMarkdown() {\n return {\n transforms: [transformGfmAutolinkLiterals],\n enter: {\n literalAutolink: enterLiteralAutolink,\n literalAutolinkEmail: 
enterLiteralAutolinkValue,\n literalAutolinkHttp: enterLiteralAutolinkValue,\n literalAutolinkWww: enterLiteralAutolinkValue\n },\n exit: {\n literalAutolink: exitLiteralAutolink,\n literalAutolinkEmail: exitLiteralAutolinkEmail,\n literalAutolinkHttp: exitLiteralAutolinkHttp,\n literalAutolinkWww: exitLiteralAutolinkWww\n }\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM autolink\n * literals in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM autolink literals.\n */\nexport function gfmAutolinkLiteralToMarkdown() {\n return {\n unsafe: [\n {\n character: '@',\n before: '[+\\\\-.\\\\w]',\n after: '[\\\\-.\\\\w]',\n inConstruct,\n notInConstruct\n },\n {\n character: '.',\n before: '[Ww]',\n after: '[\\\\-.\\\\w]',\n inConstruct,\n notInConstruct\n },\n {\n character: ':',\n before: '[ps]',\n after: '\\\\/',\n inConstruct,\n notInConstruct\n }\n ]\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterLiteralAutolink(token) {\n this.enter({type: 'link', title: null, url: '', children: []}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterLiteralAutolinkValue(token) {\n this.config.enter.autolinkProtocol.call(this, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolinkHttp(token) {\n this.config.exit.autolinkProtocol.call(this, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolinkWww(token) {\n this.config.exit.data.call(this, token)\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'link')\n node.url = 'http://' + this.sliceSerialize(token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitLiteralAutolinkEmail(token) {\n this.config.exit.autolinkEmail.call(this, token)\n}\n\n/**\n * @this {CompileContext}\n * @type 
{FromMarkdownHandle}\n */\nfunction exitLiteralAutolink(token) {\n this.exit(token)\n}\n\n/** @type {FromMarkdownTransform} */\nfunction transformGfmAutolinkLiterals(tree) {\n findAndReplace(\n tree,\n [\n [/(https?:\\/\\/|www(?=\\.))([-.\\w]+)([^ \\t\\r\\n]*)/gi, findUrl],\n [/(?<=^|\\s|\\p{P}|\\p{S})([-.\\w+]+)@([-\\w]+(?:\\.[-\\w]+)+)/gu, findEmail]\n ],\n {ignore: ['link', 'linkReference']}\n )\n}\n\n/**\n * @type {ReplaceFunction}\n * @param {string} _\n * @param {string} protocol\n * @param {string} domain\n * @param {string} path\n * @param {RegExpMatchObject} match\n * @returns {Array | Link | false}\n */\n// eslint-disable-next-line max-params\nfunction findUrl(_, protocol, domain, path, match) {\n let prefix = ''\n\n // Not an expected previous character.\n if (!previous(match)) {\n return false\n }\n\n // Treat `www` as part of the domain.\n if (/^w/i.test(protocol)) {\n domain = protocol + domain\n protocol = ''\n prefix = 'http://'\n }\n\n if (!isCorrectDomain(domain)) {\n return false\n }\n\n const parts = splitUrl(domain + path)\n\n if (!parts[0]) return false\n\n /** @type {Link} */\n const result = {\n type: 'link',\n title: null,\n url: prefix + protocol + parts[0],\n children: [{type: 'text', value: protocol + parts[0]}]\n }\n\n if (parts[1]) {\n return [result, {type: 'text', value: parts[1]}]\n }\n\n return result\n}\n\n/**\n * @type {ReplaceFunction}\n * @param {string} _\n * @param {string} atext\n * @param {string} label\n * @param {RegExpMatchObject} match\n * @returns {Link | false}\n */\nfunction findEmail(_, atext, label, match) {\n if (\n // Not an expected previous character.\n !previous(match, true) ||\n // Label ends in not allowed character.\n /[-\\d_]$/.test(label)\n ) {\n return false\n }\n\n return {\n type: 'link',\n title: null,\n url: 'mailto:' + atext + '@' + label,\n children: [{type: 'text', value: atext + '@' + label}]\n }\n}\n\n/**\n * @param {string} domain\n * @returns {boolean}\n */\nfunction isCorrectDomain(domain) 
{\n const parts = domain.split('.')\n\n if (\n parts.length < 2 ||\n (parts[parts.length - 1] &&\n (/_/.test(parts[parts.length - 1]) ||\n !/[a-zA-Z\\d]/.test(parts[parts.length - 1]))) ||\n (parts[parts.length - 2] &&\n (/_/.test(parts[parts.length - 2]) ||\n !/[a-zA-Z\\d]/.test(parts[parts.length - 2])))\n ) {\n return false\n }\n\n return true\n}\n\n/**\n * @param {string} url\n * @returns {[string, string | undefined]}\n */\nfunction splitUrl(url) {\n const trailExec = /[!\"&'),.:;<>?\\]}]+$/.exec(url)\n\n if (!trailExec) {\n return [url, undefined]\n }\n\n url = url.slice(0, trailExec.index)\n\n let trail = trailExec[0]\n let closingParenIndex = trail.indexOf(')')\n const openingParens = ccount(url, '(')\n let closingParens = ccount(url, ')')\n\n while (closingParenIndex !== -1 && openingParens > closingParens) {\n url += trail.slice(0, closingParenIndex + 1)\n trail = trail.slice(closingParenIndex + 1)\n closingParenIndex = trail.indexOf(')')\n closingParens++\n }\n\n return [url, trail]\n}\n\n/**\n * @param {RegExpMatchObject} match\n * @param {boolean | null | undefined} [email=false]\n * @returns {boolean}\n */\nfunction previous(match, email) {\n const code = match.input.charCodeAt(match.index - 1)\n\n return (\n (match.index === 0 ||\n unicodeWhitespace(code) ||\n unicodePunctuation(code)) &&\n // If it’s an email, the previous character should not be a slash.\n (!email || code !== 47)\n )\n}\n","/**\n * @import {\n * CompileContext,\n * Extension as FromMarkdownExtension,\n * Handle as FromMarkdownHandle\n * } from 'mdast-util-from-markdown'\n * @import {ToMarkdownOptions} from 'mdast-util-gfm-footnote'\n * @import {\n * Handle as ToMarkdownHandle,\n * Map,\n * Options as ToMarkdownExtension\n * } from 'mdast-util-to-markdown'\n * @import {FootnoteDefinition, FootnoteReference} from 'mdast'\n */\n\nimport {ok as assert} from 'devlop'\nimport {normalizeIdentifier} from 'micromark-util-normalize-identifier'\n\nfootnoteReference.peek = 
footnoteReferencePeek\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteCallString() {\n this.buffer()\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteCall(token) {\n this.enter({type: 'footnoteReference', identifier: '', label: ''}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteDefinitionLabelString() {\n this.buffer()\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterFootnoteDefinition(token) {\n this.enter(\n {type: 'footnoteDefinition', identifier: '', label: '', children: []},\n token\n )\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteCallString(token) {\n const label = this.resume()\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'footnoteReference')\n node.identifier = normalizeIdentifier(\n this.sliceSerialize(token)\n ).toLowerCase()\n node.label = label\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteCall(token) {\n this.exit(token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteDefinitionLabelString(token) {\n const label = this.resume()\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'footnoteDefinition')\n node.identifier = normalizeIdentifier(\n this.sliceSerialize(token)\n ).toLowerCase()\n node.label = label\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitFootnoteDefinition(token) {\n this.exit(token)\n}\n\n/** @type {ToMarkdownHandle} */\nfunction footnoteReferencePeek() {\n return '['\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {FootnoteReference} node\n */\nfunction footnoteReference(node, _, state, info) {\n const tracker = state.createTracker(info)\n let value = tracker.move('[^')\n const exit = state.enter('footnoteReference')\n 
const subexit = state.enter('reference')\n value += tracker.move(\n state.safe(state.associationId(node), {after: ']', before: value})\n )\n subexit()\n exit()\n value += tracker.move(']')\n return value\n}\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM footnotes\n * in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown`.\n */\nexport function gfmFootnoteFromMarkdown() {\n return {\n enter: {\n gfmFootnoteCallString: enterFootnoteCallString,\n gfmFootnoteCall: enterFootnoteCall,\n gfmFootnoteDefinitionLabelString: enterFootnoteDefinitionLabelString,\n gfmFootnoteDefinition: enterFootnoteDefinition\n },\n exit: {\n gfmFootnoteCallString: exitFootnoteCallString,\n gfmFootnoteCall: exitFootnoteCall,\n gfmFootnoteDefinitionLabelString: exitFootnoteDefinitionLabelString,\n gfmFootnoteDefinition: exitFootnoteDefinition\n }\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM footnotes\n * in markdown.\n *\n * @param {ToMarkdownOptions | null | undefined} [options]\n * Configuration (optional).\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown`.\n */\nexport function gfmFootnoteToMarkdown(options) {\n // To do: next major: change default.\n let firstLineBlank = false\n\n if (options && options.firstLineBlank) {\n firstLineBlank = true\n }\n\n return {\n handlers: {footnoteDefinition, footnoteReference},\n // This is on by default already.\n unsafe: [{character: '[', inConstruct: ['label', 'phrasing', 'reference']}]\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {FootnoteDefinition} node\n */\n function footnoteDefinition(node, _, state, info) {\n const tracker = state.createTracker(info)\n let value = tracker.move('[^')\n const exit = state.enter('footnoteDefinition')\n const subexit = state.enter('label')\n value += tracker.move(\n state.safe(state.associationId(node), {before: value, after: ']'})\n )\n subexit()\n\n value += 
tracker.move(']:')\n\n if (node.children && node.children.length > 0) {\n tracker.shift(4)\n\n value += tracker.move(\n (firstLineBlank ? '\\n' : ' ') +\n state.indentLines(\n state.containerFlow(node, tracker.current()),\n firstLineBlank ? mapAll : mapExceptFirst\n )\n )\n }\n\n exit()\n\n return value\n }\n}\n\n/** @type {Map} */\nfunction mapExceptFirst(line, index, blank) {\n return index === 0 ? line : mapAll(line, index, blank)\n}\n\n/** @type {Map} */\nfunction mapAll(line, index, blank) {\n return (blank ? '' : ' ') + line\n}\n","/**\n * @typedef {import('mdast').Delete} Delete\n *\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n *\n * @typedef {import('mdast-util-to-markdown').ConstructName} ConstructName\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n */\n\n/**\n * List of constructs that occur in phrasing (paragraphs, headings), but cannot\n * contain strikethrough.\n * So they sort of cancel each other out.\n * Note: could use a better name.\n *\n * Note: keep in sync with: \n *\n * @type {Array}\n */\nconst constructsWithoutStrikethrough = [\n 'autolink',\n 'destinationLiteral',\n 'destinationRaw',\n 'reference',\n 'titleQuote',\n 'titleApostrophe'\n]\n\nhandleDelete.peek = peekDelete\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM\n * strikethrough in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown` to enable GFM strikethrough.\n */\nexport function gfmStrikethroughFromMarkdown() {\n return {\n canContainEols: ['delete'],\n enter: {strikethrough: enterStrikethrough},\n exit: {strikethrough: exitStrikethrough}\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM\n 
* strikethrough in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM strikethrough.\n */\nexport function gfmStrikethroughToMarkdown() {\n return {\n unsafe: [\n {\n character: '~',\n inConstruct: 'phrasing',\n notInConstruct: constructsWithoutStrikethrough\n }\n ],\n handlers: {delete: handleDelete}\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterStrikethrough(token) {\n this.enter({type: 'delete', children: []}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitStrikethrough(token) {\n this.exit(token)\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {Delete} node\n */\nfunction handleDelete(node, _, state, info) {\n const tracker = state.createTracker(info)\n const exit = state.enter('strikethrough')\n let value = tracker.move('~~')\n value += state.containerPhrasing(node, {\n ...tracker.current(),\n before: value,\n after: '~'\n })\n value += tracker.move('~~')\n exit()\n return value\n}\n\n/** @type {ToMarkdownHandle} */\nfunction peekDelete() {\n return '~'\n}\n","// To do: next major: remove.\n/**\n * @typedef {Options} MarkdownTableOptions\n * Configuration.\n */\n\n/**\n * @typedef Options\n * Configuration.\n * @property {boolean | null | undefined} [alignDelimiters=true]\n * Whether to align the delimiters (default: `true`);\n * they are aligned by default:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * Pass `false` to make them staggered:\n *\n * ```markdown\n * | Alpha | B |\n * | - | - |\n * | C | Delta |\n * ```\n * @property {ReadonlyArray | string | null | undefined} [align]\n * How to align columns (default: `''`);\n * one style for all columns or styles for their respective columns;\n * each style is either `'l'` (left), `'r'` (right), or `'c'` (center);\n * other values are treated as `''`, which doesn’t place the colon in the\n * alignment row but does 
align left;\n * *only the lowercased first character is used, so `Right` is fine.*\n * @property {boolean | null | undefined} [delimiterEnd=true]\n * Whether to end each row with the delimiter (default: `true`).\n *\n * > 👉 **Note**: please don’t use this: it could create fragile structures\n * > that aren’t understandable to some markdown parsers.\n *\n * When `true`, there are ending delimiters:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * When `false`, there are no ending delimiters:\n *\n * ```markdown\n * | Alpha | B\n * | ----- | -----\n * | C | Delta\n * ```\n * @property {boolean | null | undefined} [delimiterStart=true]\n * Whether to begin each row with the delimiter (default: `true`).\n *\n * > 👉 **Note**: please don’t use this: it could create fragile structures\n * > that aren’t understandable to some markdown parsers.\n *\n * When `true`, there are starting delimiters:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * When `false`, there are no starting delimiters:\n *\n * ```markdown\n * Alpha | B |\n * ----- | ----- |\n * C | Delta |\n * ```\n * @property {boolean | null | undefined} [padding=true]\n * Whether to add a space of padding between delimiters and cells\n * (default: `true`).\n *\n * When `true`, there is padding:\n *\n * ```markdown\n * | Alpha | B |\n * | ----- | ----- |\n * | C | Delta |\n * ```\n *\n * When `false`, there is no padding:\n *\n * ```markdown\n * |Alpha|B |\n * |-----|-----|\n * |C |Delta|\n * ```\n * @property {((value: string) => number) | null | undefined} [stringLength]\n * Function to detect the length of table cell content (optional);\n * this is used when aligning the delimiters (`|`) between table cells;\n * full-width characters and emoji mess up delimiter alignment when viewing\n * the markdown source;\n * to fix this, you can pass this function,\n * which receives the cell content and returns its “visible” size;\n * 
note that what is and isn’t visible depends on where the text is displayed.\n *\n * Without such a function, the following:\n *\n * ```js\n * markdownTable([\n * ['Alpha', 'Bravo'],\n * ['中文', 'Charlie'],\n * ['👩‍❤️‍👩', 'Delta']\n * ])\n * ```\n *\n * Yields:\n *\n * ```markdown\n * | Alpha | Bravo |\n * | - | - |\n * | 中文 | Charlie |\n * | 👩‍❤️‍👩 | Delta |\n * ```\n *\n * With [`string-width`](https://github.com/sindresorhus/string-width):\n *\n * ```js\n * import stringWidth from 'string-width'\n *\n * markdownTable(\n * [\n * ['Alpha', 'Bravo'],\n * ['中文', 'Charlie'],\n * ['👩‍❤️‍👩', 'Delta']\n * ],\n * {stringLength: stringWidth}\n * )\n * ```\n *\n * Yields:\n *\n * ```markdown\n * | Alpha | Bravo |\n * | ----- | ------- |\n * | 中文 | Charlie |\n * | 👩‍❤️‍👩 | Delta |\n * ```\n */\n\n/**\n * @param {string} value\n * Cell value.\n * @returns {number}\n * Cell size.\n */\nfunction defaultStringLength(value) {\n return value.length\n}\n\n/**\n * Generate a markdown\n * ([GFM](https://docs.github.com/en/github/writing-on-github/working-with-advanced-formatting/organizing-information-with-tables))\n * table.\n *\n * @param {ReadonlyArray>} table\n * Table data (matrix of strings).\n * @param {Readonly | null | undefined} [options]\n * Configuration (optional).\n * @returns {string}\n * Result.\n */\nexport function markdownTable(table, options) {\n const settings = options || {}\n // To do: next major: change to spread.\n const align = (settings.align || []).concat()\n const stringLength = settings.stringLength || defaultStringLength\n /** @type {Array} Character codes as symbols for alignment per column. */\n const alignments = []\n /** @type {Array>} Cells per row. */\n const cellMatrix = []\n /** @type {Array>} Sizes of each cell per row. 
*/\n const sizeMatrix = []\n /** @type {Array} */\n const longestCellByColumn = []\n let mostCellsPerRow = 0\n let rowIndex = -1\n\n // This is a superfluous loop if we don’t align delimiters, but otherwise we’d\n // do superfluous work when aligning, so optimize for aligning.\n while (++rowIndex < table.length) {\n /** @type {Array} */\n const row = []\n /** @type {Array} */\n const sizes = []\n let columnIndex = -1\n\n if (table[rowIndex].length > mostCellsPerRow) {\n mostCellsPerRow = table[rowIndex].length\n }\n\n while (++columnIndex < table[rowIndex].length) {\n const cell = serialize(table[rowIndex][columnIndex])\n\n if (settings.alignDelimiters !== false) {\n const size = stringLength(cell)\n sizes[columnIndex] = size\n\n if (\n longestCellByColumn[columnIndex] === undefined ||\n size > longestCellByColumn[columnIndex]\n ) {\n longestCellByColumn[columnIndex] = size\n }\n }\n\n row.push(cell)\n }\n\n cellMatrix[rowIndex] = row\n sizeMatrix[rowIndex] = sizes\n }\n\n // Figure out which alignments to use.\n let columnIndex = -1\n\n if (typeof align === 'object' && 'length' in align) {\n while (++columnIndex < mostCellsPerRow) {\n alignments[columnIndex] = toAlignment(align[columnIndex])\n }\n } else {\n const code = toAlignment(align)\n\n while (++columnIndex < mostCellsPerRow) {\n alignments[columnIndex] = code\n }\n }\n\n // Inject the alignment row.\n columnIndex = -1\n /** @type {Array} */\n const row = []\n /** @type {Array} */\n const sizes = []\n\n while (++columnIndex < mostCellsPerRow) {\n const code = alignments[columnIndex]\n let before = ''\n let after = ''\n\n if (code === 99 /* `c` */) {\n before = ':'\n after = ':'\n } else if (code === 108 /* `l` */) {\n before = ':'\n } else if (code === 114 /* `r` */) {\n after = ':'\n }\n\n // There *must* be at least one hyphen-minus in each alignment cell.\n let size =\n settings.alignDelimiters === false\n ? 
1\n : Math.max(\n 1,\n longestCellByColumn[columnIndex] - before.length - after.length\n )\n\n const cell = before + '-'.repeat(size) + after\n\n if (settings.alignDelimiters !== false) {\n size = before.length + size + after.length\n\n if (size > longestCellByColumn[columnIndex]) {\n longestCellByColumn[columnIndex] = size\n }\n\n sizes[columnIndex] = size\n }\n\n row[columnIndex] = cell\n }\n\n // Inject the alignment row.\n cellMatrix.splice(1, 0, row)\n sizeMatrix.splice(1, 0, sizes)\n\n rowIndex = -1\n /** @type {Array} */\n const lines = []\n\n while (++rowIndex < cellMatrix.length) {\n const row = cellMatrix[rowIndex]\n const sizes = sizeMatrix[rowIndex]\n columnIndex = -1\n /** @type {Array} */\n const line = []\n\n while (++columnIndex < mostCellsPerRow) {\n const cell = row[columnIndex] || ''\n let before = ''\n let after = ''\n\n if (settings.alignDelimiters !== false) {\n const size =\n longestCellByColumn[columnIndex] - (sizes[columnIndex] || 0)\n const code = alignments[columnIndex]\n\n if (code === 114 /* `r` */) {\n before = ' '.repeat(size)\n } else if (code === 99 /* `c` */) {\n if (size % 2) {\n before = ' '.repeat(size / 2 + 0.5)\n after = ' '.repeat(size / 2 - 0.5)\n } else {\n before = ' '.repeat(size / 2)\n after = before\n }\n } else {\n after = ' '.repeat(size)\n }\n }\n\n if (settings.delimiterStart !== false && !columnIndex) {\n line.push('|')\n }\n\n if (\n settings.padding !== false &&\n // Don’t add the opening space if we’re not aligning and the cell is\n // empty: there will be a closing space.\n !(settings.alignDelimiters === false && cell === '') &&\n (settings.delimiterStart !== false || columnIndex)\n ) {\n line.push(' ')\n }\n\n if (settings.alignDelimiters !== false) {\n line.push(before)\n }\n\n line.push(cell)\n\n if (settings.alignDelimiters !== false) {\n line.push(after)\n }\n\n if (settings.padding !== false) {\n line.push(' ')\n }\n\n if (\n settings.delimiterEnd !== false ||\n columnIndex !== mostCellsPerRow - 1\n ) {\n 
line.push('|')\n }\n }\n\n lines.push(\n settings.delimiterEnd === false\n ? line.join('').replace(/ +$/, '')\n : line.join('')\n )\n }\n\n return lines.join('\\n')\n}\n\n/**\n * @param {string | null | undefined} [value]\n * Value to serialize.\n * @returns {string}\n * Result.\n */\nfunction serialize(value) {\n return value === null || value === undefined ? '' : String(value)\n}\n\n/**\n * @param {string | null | undefined} value\n * Value.\n * @returns {number}\n * Alignment.\n */\nfunction toAlignment(value) {\n const code = typeof value === 'string' ? value.codePointAt(0) : 0\n\n return code === 67 /* `C` */ || code === 99 /* `c` */\n ? 99 /* `c` */\n : code === 76 /* `L` */ || code === 108 /* `l` */\n ? 108 /* `l` */\n : code === 82 /* `R` */ || code === 114 /* `r` */\n ? 114 /* `r` */\n : 0\n}\n","/**\n * @import {Blockquote, Parents} from 'mdast'\n * @import {Info, Map, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {Blockquote} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function blockquote(node, _, state, info) {\n const exit = state.enter('blockquote')\n const tracker = state.createTracker(info)\n tracker.move('> ')\n tracker.shift(2)\n const value = state.indentLines(\n state.containerFlow(node, tracker.current()),\n map\n )\n exit()\n return value\n}\n\n/** @type {Map} */\nfunction map(line, _, blank) {\n return '>' + (blank ? 
'' : ' ') + line\n}\n","/**\n * @import {ConstructName, Unsafe} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {Array} stack\n * @param {Unsafe} pattern\n * @returns {boolean}\n */\nexport function patternInScope(stack, pattern) {\n return (\n listInScope(stack, pattern.inConstruct, true) &&\n !listInScope(stack, pattern.notInConstruct, false)\n )\n}\n\n/**\n * @param {Array} stack\n * @param {Unsafe['inConstruct']} list\n * @param {boolean} none\n * @returns {boolean}\n */\nfunction listInScope(stack, list, none) {\n if (typeof list === 'string') {\n list = [list]\n }\n\n if (!list || list.length === 0) {\n return none\n }\n\n let index = -1\n\n while (++index < list.length) {\n if (stack.includes(list[index])) {\n return true\n }\n }\n\n return false\n}\n","/**\n * @import {Break, Parents} from 'mdast'\n * @import {Info, State} from 'mdast-util-to-markdown'\n */\n\nimport {patternInScope} from '../util/pattern-in-scope.js'\n\n/**\n * @param {Break} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function hardBreak(_, _1, state, info) {\n let index = -1\n\n while (++index < state.unsafe.length) {\n // If we can’t put eols in this construct (setext headings, tables), use a\n // space instead.\n if (\n state.unsafe[index].character === '\\n' &&\n patternInScope(state.stack, state.unsafe[index])\n ) {\n return /[ \\t]/.test(info.before) ? 
'' : ' '\n }\n }\n\n return '\\\\\\n'\n}\n","/**\n * @import {Info, Map, State} from 'mdast-util-to-markdown'\n * @import {Code, Parents} from 'mdast'\n */\n\nimport {longestStreak} from 'longest-streak'\nimport {formatCodeAsIndented} from '../util/format-code-as-indented.js'\nimport {checkFence} from '../util/check-fence.js'\n\n/**\n * @param {Code} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function code(node, _, state, info) {\n const marker = checkFence(state)\n const raw = node.value || ''\n const suffix = marker === '`' ? 'GraveAccent' : 'Tilde'\n\n if (formatCodeAsIndented(node, state)) {\n const exit = state.enter('codeIndented')\n const value = state.indentLines(raw, map)\n exit()\n return value\n }\n\n const tracker = state.createTracker(info)\n const sequence = marker.repeat(Math.max(longestStreak(raw, marker) + 1, 3))\n const exit = state.enter('codeFenced')\n let value = tracker.move(sequence)\n\n if (node.lang) {\n const subexit = state.enter(`codeFencedLang${suffix}`)\n value += tracker.move(\n state.safe(node.lang, {\n before: value,\n after: ' ',\n encode: ['`'],\n ...tracker.current()\n })\n )\n subexit()\n }\n\n if (node.lang && node.meta) {\n const subexit = state.enter(`codeFencedMeta${suffix}`)\n value += tracker.move(' ')\n value += tracker.move(\n state.safe(node.meta, {\n before: value,\n after: '\\n',\n encode: ['`'],\n ...tracker.current()\n })\n )\n subexit()\n }\n\n value += tracker.move('\\n')\n\n if (raw) {\n value += tracker.move(raw + '\\n')\n }\n\n value += tracker.move(sequence)\n exit()\n return value\n}\n\n/** @type {Map} */\nfunction map(line, _, blank) {\n return (blank ? 
'' : ' ') + line\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkQuote(state) {\n const marker = state.options.quote || '\"'\n\n if (marker !== '\"' && marker !== \"'\") {\n throw new Error(\n 'Cannot serialize title with `' +\n marker +\n '` for `options.quote`, expected `\"`, or `\\'`'\n )\n }\n\n return marker\n}\n","/**\n * Encode a code point as a character reference.\n *\n * @param {number} code\n * Code point to encode.\n * @returns {string}\n * Encoded character reference.\n */\nexport function encodeCharacterReference(code) {\n return '&#x' + code.toString(16).toUpperCase() + ';'\n}\n","/**\n * @import {EncodeSides} from '../types.js'\n */\n\nimport {classifyCharacter} from 'micromark-util-classify-character'\n\n/**\n * Check whether to encode (as a character reference) the characters\n * surrounding an attention run.\n *\n * Which characters are around an attention run influence whether it works or\n * not.\n *\n * See for more info.\n * See this markdown in a particular renderer to see what works:\n *\n * ```markdown\n * | | A (letter inside) | B (punctuation inside) | C (whitespace inside) | D (nothing inside) |\n * | ----------------------- | ----------------- | ---------------------- | --------------------- | ------------------ |\n * | 1 (letter outside) | x*y*z | x*.*z | x* *z | x**z |\n * | 2 (punctuation outside) | .*y*. | .*.*. | .* *. | .**. 
|\n * | 3 (whitespace outside) | x *y* z | x *.* z | x * * z | x ** z |\n * | 4 (nothing outside) | *x* | *.* | * * | ** |\n * ```\n *\n * @param {number} outside\n * Code point on the outer side of the run.\n * @param {number} inside\n * Code point on the inner side of the run.\n * @param {'*' | '_'} marker\n * Marker of the run.\n * Underscores are handled more strictly (they form less often) than\n * asterisks.\n * @returns {EncodeSides}\n * Whether to encode characters.\n */\n// Important: punctuation must never be encoded.\n// Punctuation is solely used by markdown constructs.\n// And by encoding itself.\n// Encoding them will break constructs or double encode things.\nexport function encodeInfo(outside, inside, marker) {\n const outsideKind = classifyCharacter(outside)\n const insideKind = classifyCharacter(inside)\n\n // Letter outside:\n if (outsideKind === undefined) {\n return insideKind === undefined\n ? // Letter inside:\n // we have to encode *both* letters for `_` as it is looser.\n // it already forms for `*` (and GFMs `~`).\n marker === '_'\n ? {inside: true, outside: true}\n : {inside: false, outside: false}\n : insideKind === 1\n ? // Whitespace inside: encode both (letter, whitespace).\n {inside: true, outside: true}\n : // Punctuation inside: encode outer (letter)\n {inside: false, outside: true}\n }\n\n // Whitespace outside:\n if (outsideKind === 1) {\n return insideKind === undefined\n ? // Letter inside: already forms.\n {inside: false, outside: false}\n : insideKind === 1\n ? // Whitespace inside: encode both (whitespace).\n {inside: true, outside: true}\n : // Punctuation inside: already forms.\n {inside: false, outside: false}\n }\n\n // Punctuation outside:\n return insideKind === undefined\n ? // Letter inside: already forms.\n {inside: false, outside: false}\n : insideKind === 1\n ? 
// Whitespace inside: encode inner (whitespace).\n {inside: true, outside: false}\n : // Punctuation inside: already forms.\n {inside: false, outside: false}\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Emphasis, Parents} from 'mdast'\n */\n\nimport {checkEmphasis} from '../util/check-emphasis.js'\nimport {encodeCharacterReference} from '../util/encode-character-reference.js'\nimport {encodeInfo} from '../util/encode-info.js'\n\nemphasis.peek = emphasisPeek\n\n/**\n * @param {Emphasis} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function emphasis(node, _, state, info) {\n const marker = checkEmphasis(state)\n const exit = state.enter('emphasis')\n const tracker = state.createTracker(info)\n const before = tracker.move(marker)\n\n let between = tracker.move(\n state.containerPhrasing(node, {\n after: marker,\n before,\n ...tracker.current()\n })\n )\n const betweenHead = between.charCodeAt(0)\n const open = encodeInfo(\n info.before.charCodeAt(info.before.length - 1),\n betweenHead,\n marker\n )\n\n if (open.inside) {\n between = encodeCharacterReference(betweenHead) + between.slice(1)\n }\n\n const betweenTail = between.charCodeAt(between.length - 1)\n const close = encodeInfo(info.after.charCodeAt(0), betweenTail, marker)\n\n if (close.inside) {\n between = between.slice(0, -1) + encodeCharacterReference(betweenTail)\n }\n\n const after = tracker.move(marker)\n\n exit()\n\n state.attentionEncodeSurroundingInfo = {\n after: close.outside,\n before: open.outside\n }\n return before + between + after\n}\n\n/**\n * @param {Emphasis} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @returns {string}\n */\nfunction emphasisPeek(_, _1, state) {\n return state.options.emphasis || '*'\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function 
checkEmphasis(state) {\n const marker = state.options.emphasis || '*'\n\n if (marker !== '*' && marker !== '_') {\n throw new Error(\n 'Cannot serialize emphasis with `' +\n marker +\n '` for `options.emphasis`, expected `*`, or `_`'\n )\n }\n\n return marker\n}\n","/**\n * @import {Html} from 'mdast'\n */\n\nhtml.peek = htmlPeek\n\n/**\n * @param {Html} node\n * @returns {string}\n */\nexport function html(node) {\n return node.value || ''\n}\n\n/**\n * @returns {string}\n */\nfunction htmlPeek() {\n return '<'\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Image, Parents} from 'mdast'\n */\n\nimport {checkQuote} from '../util/check-quote.js'\n\nimage.peek = imagePeek\n\n/**\n * @param {Image} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function image(node, _, state, info) {\n const quote = checkQuote(state)\n const suffix = quote === '\"' ? 'Quote' : 'Apostrophe'\n const exit = state.enter('image')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('![')\n value += tracker.move(\n state.safe(node.alt, {before: value, after: ']', ...tracker.current()})\n )\n value += tracker.move('](')\n\n subexit()\n\n if (\n // If there’s no url but there is a title…\n (!node.url && node.title) ||\n // If there are control characters or whitespace.\n /[\\0- \\u007F]/.test(node.url)\n ) {\n subexit = state.enter('destinationLiteral')\n value += tracker.move('<')\n value += tracker.move(\n state.safe(node.url, {before: value, after: '>', ...tracker.current()})\n )\n value += tracker.move('>')\n } else {\n // No whitespace, raw is prettier.\n subexit = state.enter('destinationRaw')\n value += tracker.move(\n state.safe(node.url, {\n before: value,\n after: node.title ? 
' ' : ')',\n ...tracker.current()\n })\n )\n }\n\n subexit()\n\n if (node.title) {\n subexit = state.enter(`title${suffix}`)\n value += tracker.move(' ' + quote)\n value += tracker.move(\n state.safe(node.title, {\n before: value,\n after: quote,\n ...tracker.current()\n })\n )\n value += tracker.move(quote)\n subexit()\n }\n\n value += tracker.move(')')\n exit()\n\n return value\n}\n\n/**\n * @returns {string}\n */\nfunction imagePeek() {\n return '!'\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {ImageReference, Parents} from 'mdast'\n */\n\nimageReference.peek = imageReferencePeek\n\n/**\n * @param {ImageReference} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function imageReference(node, _, state, info) {\n const type = node.referenceType\n const exit = state.enter('imageReference')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('![')\n const alt = state.safe(node.alt, {\n before: value,\n after: ']',\n ...tracker.current()\n })\n value += tracker.move(alt + '][')\n\n subexit()\n // Hide the fact that we’re in phrasing, because escapes don’t work.\n const stack = state.stack\n state.stack = []\n subexit = state.enter('reference')\n // Note: for proper tracking, we should reset the output positions when we end\n // up making a `shortcut` reference, because then there is no brace output.\n // Practically, in that case, there is no content, so it doesn’t matter that\n // we’ve tracked one too many characters.\n const reference = state.safe(state.associationId(node), {\n before: value,\n after: ']',\n ...tracker.current()\n })\n subexit()\n state.stack = stack\n exit()\n\n if (type === 'full' || !alt || alt !== reference) {\n value += tracker.move(reference + ']')\n } else if (type === 'shortcut') {\n // Remove the unwanted `[`.\n value = value.slice(0, -1)\n } else {\n value += 
tracker.move(']')\n }\n\n return value\n}\n\n/**\n * @returns {string}\n */\nfunction imageReferencePeek() {\n return '!'\n}\n","/**\n * @import {State} from 'mdast-util-to-markdown'\n * @import {InlineCode, Parents} from 'mdast'\n */\n\ninlineCode.peek = inlineCodePeek\n\n/**\n * @param {InlineCode} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @returns {string}\n */\nexport function inlineCode(node, _, state) {\n let value = node.value || ''\n let sequence = '`'\n let index = -1\n\n // If there is a single grave accent on its own in the code, use a fence of\n // two.\n // If there are two in a row, use one.\n while (new RegExp('(^|[^`])' + sequence + '([^`]|$)').test(value)) {\n sequence += '`'\n }\n\n // If this is not just spaces or eols (tabs don’t count), and either the\n // first or last character are a space, eol, or tick, then pad with spaces.\n if (\n /[^ \\r\\n]/.test(value) &&\n ((/^[ \\r\\n]/.test(value) && /[ \\r\\n]$/.test(value)) || /^`|`$/.test(value))\n ) {\n value = ' ' + value + ' '\n }\n\n // We have a potential problem: certain characters after eols could result in\n // blocks being seen.\n // For example, if someone injected the string `'\\n# b'`, then that would\n // result in an ATX heading.\n // We can’t escape characters in `inlineCode`, but because eols are\n // transformed to spaces when going from markdown to HTML anyway, we can swap\n // them out.\n while (++index < state.unsafe.length) {\n const pattern = state.unsafe[index]\n const expression = state.compilePattern(pattern)\n /** @type {RegExpExecArray | null} */\n let match\n\n // Only look for `atBreak`s.\n // Btw: note that `atBreak` patterns will always start the regex at LF or\n // CR.\n if (!pattern.atBreak) continue\n\n while ((match = expression.exec(value))) {\n let position = match.index\n\n // Support CRLF (patterns only look for one of the characters).\n if (\n value.charCodeAt(position) === 10 /* `\\n` */ &&\n value.charCodeAt(position - 1) === 13 
/* `\\r` */\n ) {\n position--\n }\n\n value = value.slice(0, position) + ' ' + value.slice(match.index + 1)\n }\n }\n\n return sequence + value + sequence\n}\n\n/**\n * @returns {string}\n */\nfunction inlineCodePeek() {\n return '`'\n}\n","/**\n * @import {State} from 'mdast-util-to-markdown'\n * @import {Link} from 'mdast'\n */\n\nimport {toString} from 'mdast-util-to-string'\n\n/**\n * @param {Link} node\n * @param {State} state\n * @returns {boolean}\n */\nexport function formatLinkAsAutolink(node, state) {\n const raw = toString(node)\n\n return Boolean(\n !state.options.resourceLink &&\n // If there’s a url…\n node.url &&\n // And there’s a no title…\n !node.title &&\n // And the content of `node` is a single text node…\n node.children &&\n node.children.length === 1 &&\n node.children[0].type === 'text' &&\n // And if the url is the same as the content…\n (raw === node.url || 'mailto:' + raw === node.url) &&\n // And that starts w/ a protocol…\n /^[a-z][a-z+.-]+:/i.test(node.url) &&\n // And that doesn’t contain ASCII control codes (character escapes and\n // references don’t work), space, or angle brackets…\n !/[\\0- <>\\u007F]/.test(node.url)\n )\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Link, Parents} from 'mdast'\n * @import {Exit} from '../types.js'\n */\n\nimport {checkQuote} from '../util/check-quote.js'\nimport {formatLinkAsAutolink} from '../util/format-link-as-autolink.js'\n\nlink.peek = linkPeek\n\n/**\n * @param {Link} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function link(node, _, state, info) {\n const quote = checkQuote(state)\n const suffix = quote === '\"' ? 
'Quote' : 'Apostrophe'\n const tracker = state.createTracker(info)\n /** @type {Exit} */\n let exit\n /** @type {Exit} */\n let subexit\n\n if (formatLinkAsAutolink(node, state)) {\n // Hide the fact that we’re in phrasing, because escapes don’t work.\n const stack = state.stack\n state.stack = []\n exit = state.enter('autolink')\n let value = tracker.move('<')\n value += tracker.move(\n state.containerPhrasing(node, {\n before: value,\n after: '>',\n ...tracker.current()\n })\n )\n value += tracker.move('>')\n exit()\n state.stack = stack\n return value\n }\n\n exit = state.enter('link')\n subexit = state.enter('label')\n let value = tracker.move('[')\n value += tracker.move(\n state.containerPhrasing(node, {\n before: value,\n after: '](',\n ...tracker.current()\n })\n )\n value += tracker.move('](')\n subexit()\n\n if (\n // If there’s no url but there is a title…\n (!node.url && node.title) ||\n // If there are control characters or whitespace.\n /[\\0- \\u007F]/.test(node.url)\n ) {\n subexit = state.enter('destinationLiteral')\n value += tracker.move('<')\n value += tracker.move(\n state.safe(node.url, {before: value, after: '>', ...tracker.current()})\n )\n value += tracker.move('>')\n } else {\n // No whitespace, raw is prettier.\n subexit = state.enter('destinationRaw')\n value += tracker.move(\n state.safe(node.url, {\n before: value,\n after: node.title ? ' ' : ')',\n ...tracker.current()\n })\n )\n }\n\n subexit()\n\n if (node.title) {\n subexit = state.enter(`title${suffix}`)\n value += tracker.move(' ' + quote)\n value += tracker.move(\n state.safe(node.title, {\n before: value,\n after: quote,\n ...tracker.current()\n })\n )\n value += tracker.move(quote)\n subexit()\n }\n\n value += tracker.move(')')\n\n exit()\n return value\n}\n\n/**\n * @param {Link} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @returns {string}\n */\nfunction linkPeek(node, _, state) {\n return formatLinkAsAutolink(node, state) ? 
'<' : '['\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {LinkReference, Parents} from 'mdast'\n */\n\nlinkReference.peek = linkReferencePeek\n\n/**\n * @param {LinkReference} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function linkReference(node, _, state, info) {\n const type = node.referenceType\n const exit = state.enter('linkReference')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('[')\n const text = state.containerPhrasing(node, {\n before: value,\n after: ']',\n ...tracker.current()\n })\n value += tracker.move(text + '][')\n\n subexit()\n // Hide the fact that we’re in phrasing, because escapes don’t work.\n const stack = state.stack\n state.stack = []\n subexit = state.enter('reference')\n // Note: for proper tracking, we should reset the output positions when we end\n // up making a `shortcut` reference, because then there is no brace output.\n // Practically, in that case, there is no content, so it doesn’t matter that\n // we’ve tracked one too many characters.\n const reference = state.safe(state.associationId(node), {\n before: value,\n after: ']',\n ...tracker.current()\n })\n subexit()\n state.stack = stack\n exit()\n\n if (type === 'full' || !text || text !== reference) {\n value += tracker.move(reference + ']')\n } else if (type === 'shortcut') {\n // Remove the unwanted `[`.\n value = value.slice(0, -1)\n } else {\n value += tracker.move(']')\n }\n\n return value\n}\n\n/**\n * @returns {string}\n */\nfunction linkReferencePeek() {\n return '['\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkBullet(state) {\n const marker = state.options.bullet || '*'\n\n if (marker !== '*' && marker !== '+' && marker !== '-') {\n throw new Error(\n 'Cannot serialize items with `' +\n 
marker +\n '` for `options.bullet`, expected `*`, `+`, or `-`'\n )\n }\n\n return marker\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkRule(state) {\n const marker = state.options.rule || '*'\n\n if (marker !== '*' && marker !== '-' && marker !== '_') {\n throw new Error(\n 'Cannot serialize rules with `' +\n marker +\n '` for `options.rule`, expected `*`, `-`, or `_`'\n )\n }\n\n return marker\n}\n","/**\n * @typedef {import('mdast').Html} Html\n * @typedef {import('mdast').PhrasingContent} PhrasingContent\n */\n\nimport {convert} from 'unist-util-is'\n\n/**\n * Check if the given value is *phrasing content*.\n *\n * > 👉 **Note**: Excludes `html`, which can be both phrasing or flow.\n *\n * @param node\n * Thing to check, typically `Node`.\n * @returns\n * Whether `value` is phrasing content.\n */\n\nexport const phrasing =\n /** @type {(node?: unknown) => node is Exclude} */\n (\n convert([\n 'break',\n 'delete',\n 'emphasis',\n // To do: next major: removed since footnotes were added to GFM.\n 'footnote',\n 'footnoteReference',\n 'image',\n 'imageReference',\n 'inlineCode',\n // Enabled by `mdast-util-math`:\n 'inlineMath',\n 'link',\n 'linkReference',\n // Enabled by `mdast-util-mdx`:\n 'mdxJsxTextElement',\n // Enabled by `mdast-util-mdx`:\n 'mdxTextExpression',\n 'strong',\n 'text',\n // Enabled by `mdast-util-directive`:\n 'textDirective'\n ])\n )\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Parents, Strong} from 'mdast'\n */\n\nimport {checkStrong} from '../util/check-strong.js'\nimport {encodeCharacterReference} from '../util/encode-character-reference.js'\nimport {encodeInfo} from '../util/encode-info.js'\n\nstrong.peek = strongPeek\n\n/**\n * @param {Strong} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function strong(node, _, state, 
info) {\n const marker = checkStrong(state)\n const exit = state.enter('strong')\n const tracker = state.createTracker(info)\n const before = tracker.move(marker + marker)\n\n let between = tracker.move(\n state.containerPhrasing(node, {\n after: marker,\n before,\n ...tracker.current()\n })\n )\n const betweenHead = between.charCodeAt(0)\n const open = encodeInfo(\n info.before.charCodeAt(info.before.length - 1),\n betweenHead,\n marker\n )\n\n if (open.inside) {\n between = encodeCharacterReference(betweenHead) + between.slice(1)\n }\n\n const betweenTail = between.charCodeAt(between.length - 1)\n const close = encodeInfo(info.after.charCodeAt(0), betweenTail, marker)\n\n if (close.inside) {\n between = between.slice(0, -1) + encodeCharacterReference(betweenTail)\n }\n\n const after = tracker.move(marker + marker)\n\n exit()\n\n state.attentionEncodeSurroundingInfo = {\n after: close.outside,\n before: open.outside\n }\n return before + between + after\n}\n\n/**\n * @param {Strong} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @returns {string}\n */\nfunction strongPeek(_, _1, state) {\n return state.options.strong || '*'\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkStrong(state) {\n const marker = state.options.strong || '*'\n\n if (marker !== '*' && marker !== '_') {\n throw new Error(\n 'Cannot serialize strong with `' +\n marker +\n '` for `options.strong`, expected `*`, or `_`'\n )\n }\n\n return marker\n}\n","import {blockquote} from './blockquote.js'\nimport {hardBreak} from './break.js'\nimport {code} from './code.js'\nimport {definition} from './definition.js'\nimport {emphasis} from './emphasis.js'\nimport {heading} from './heading.js'\nimport {html} from './html.js'\nimport {image} from './image.js'\nimport {imageReference} from './image-reference.js'\nimport {inlineCode} from './inline-code.js'\nimport {link} from 
'./link.js'\nimport {linkReference} from './link-reference.js'\nimport {list} from './list.js'\nimport {listItem} from './list-item.js'\nimport {paragraph} from './paragraph.js'\nimport {root} from './root.js'\nimport {strong} from './strong.js'\nimport {text} from './text.js'\nimport {thematicBreak} from './thematic-break.js'\n\n/**\n * Default (CommonMark) handlers.\n */\nexport const handle = {\n blockquote,\n break: hardBreak,\n code,\n definition,\n emphasis,\n hardBreak,\n heading,\n html,\n image,\n imageReference,\n inlineCode,\n link,\n linkReference,\n list,\n listItem,\n paragraph,\n root,\n strong,\n text,\n thematicBreak\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkFence(state) {\n const marker = state.options.fence || '`'\n\n if (marker !== '`' && marker !== '~') {\n throw new Error(\n 'Cannot serialize code with `' +\n marker +\n '` for `options.fence`, expected `` ` `` or `~`'\n )\n }\n\n return marker\n}\n","/**\n * @import {State} from 'mdast-util-to-markdown'\n * @import {Code} from 'mdast'\n */\n\n/**\n * @param {Code} node\n * @param {State} state\n * @returns {boolean}\n */\nexport function formatCodeAsIndented(node, state) {\n return Boolean(\n state.options.fences === false &&\n node.value &&\n // If there’s no info…\n !node.lang &&\n // And there’s a non-whitespace character…\n /[^ \\r\\n]/.test(node.value) &&\n // And the value doesn’t start or end in a blank…\n !/^[\\t ]*(?:[\\r\\n]|$)|(?:^|[\\r\\n])[\\t ]*$/.test(node.value)\n )\n}\n","/**\n * Get the count of the longest repeating streak of `substring` in `value`.\n *\n * @param {string} value\n * Content to search in.\n * @param {string} substring\n * Substring to look for, typically one character.\n * @returns {number}\n * Count of most frequent adjacent `substring`s in `value`.\n */\nexport function longestStreak(value, substring) {\n const source = String(value)\n let 
index = source.indexOf(substring)\n let expected = index\n let count = 0\n let max = 0\n\n if (typeof substring !== 'string') {\n throw new TypeError('Expected substring')\n }\n\n while (index !== -1) {\n if (index === expected) {\n if (++count > max) {\n max = count\n }\n } else {\n count = 1\n }\n\n expected = index + substring.length\n index = source.indexOf(substring, expected)\n }\n\n return max\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Definition, Parents} from 'mdast'\n */\n\nimport {checkQuote} from '../util/check-quote.js'\n\n/**\n * @param {Definition} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function definition(node, _, state, info) {\n const quote = checkQuote(state)\n const suffix = quote === '\"' ? 'Quote' : 'Apostrophe'\n const exit = state.enter('definition')\n let subexit = state.enter('label')\n const tracker = state.createTracker(info)\n let value = tracker.move('[')\n value += tracker.move(\n state.safe(state.associationId(node), {\n before: value,\n after: ']',\n ...tracker.current()\n })\n )\n value += tracker.move(']: ')\n\n subexit()\n\n if (\n // If there’s no url, or…\n !node.url ||\n // If there are control characters or whitespace.\n /[\\0- \\u007F]/.test(node.url)\n ) {\n subexit = state.enter('destinationLiteral')\n value += tracker.move('<')\n value += tracker.move(\n state.safe(node.url, {before: value, after: '>', ...tracker.current()})\n )\n value += tracker.move('>')\n } else {\n // No whitespace, raw is prettier.\n subexit = state.enter('destinationRaw')\n value += tracker.move(\n state.safe(node.url, {\n before: value,\n after: node.title ? 
' ' : '\\n',\n ...tracker.current()\n })\n )\n }\n\n subexit()\n\n if (node.title) {\n subexit = state.enter(`title${suffix}`)\n value += tracker.move(' ' + quote)\n value += tracker.move(\n state.safe(node.title, {\n before: value,\n after: quote,\n ...tracker.current()\n })\n )\n value += tracker.move(quote)\n subexit()\n }\n\n exit()\n\n return value\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Heading, Parents} from 'mdast'\n */\n\nimport {encodeCharacterReference} from '../util/encode-character-reference.js'\nimport {formatHeadingAsSetext} from '../util/format-heading-as-setext.js'\n\n/**\n * @param {Heading} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function heading(node, _, state, info) {\n const rank = Math.max(Math.min(6, node.depth || 1), 1)\n const tracker = state.createTracker(info)\n\n if (formatHeadingAsSetext(node, state)) {\n const exit = state.enter('headingSetext')\n const subexit = state.enter('phrasing')\n const value = state.containerPhrasing(node, {\n ...tracker.current(),\n before: '\\n',\n after: '\\n'\n })\n subexit()\n exit()\n\n return (\n value +\n '\\n' +\n (rank === 1 ? 
'=' : '-').repeat(\n // The whole size…\n value.length -\n // Minus the position of the character after the last EOL (or\n // 0 if there is none)…\n (Math.max(value.lastIndexOf('\\r'), value.lastIndexOf('\\n')) + 1)\n )\n )\n }\n\n const sequence = '#'.repeat(rank)\n const exit = state.enter('headingAtx')\n const subexit = state.enter('phrasing')\n\n // Note: for proper tracking, we should reset the output positions when there\n // is no content returned, because then the space is not output.\n // Practically, in that case, there is no content, so it doesn’t matter that\n // we’ve tracked one too many characters.\n tracker.move(sequence + ' ')\n\n let value = state.containerPhrasing(node, {\n before: '# ',\n after: '\\n',\n ...tracker.current()\n })\n\n if (/^[\\t ]/.test(value)) {\n // To do: what effect has the character reference on tracking?\n value = encodeCharacterReference(value.charCodeAt(0)) + value.slice(1)\n }\n\n value = value ? sequence + ' ' + value : sequence\n\n if (state.options.closeAtx) {\n value += ' ' + sequence\n }\n\n subexit()\n exit()\n\n return value\n}\n","/**\n * @import {State} from 'mdast-util-to-markdown'\n * @import {Heading} from 'mdast'\n */\n\nimport {EXIT, visit} from 'unist-util-visit'\nimport {toString} from 'mdast-util-to-string'\n\n/**\n * @param {Heading} node\n * @param {State} state\n * @returns {boolean}\n */\nexport function formatHeadingAsSetext(node, state) {\n let literalWithBreak = false\n\n // Look for literals with a line break.\n // Note that this also\n visit(node, function (node) {\n if (\n ('value' in node && /\\r?\\n|\\r/.test(node.value)) ||\n node.type === 'break'\n ) {\n literalWithBreak = true\n return EXIT\n }\n })\n\n return Boolean(\n (!node.depth || node.depth < 3) &&\n toString(node) &&\n (state.options.setext || literalWithBreak)\n )\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {List, Parents} from 'mdast'\n */\n\nimport {checkBullet} from 
'../util/check-bullet.js'\nimport {checkBulletOther} from '../util/check-bullet-other.js'\nimport {checkBulletOrdered} from '../util/check-bullet-ordered.js'\nimport {checkRule} from '../util/check-rule.js'\n\n/**\n * @param {List} node\n * @param {Parents | undefined} parent\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function list(node, parent, state, info) {\n const exit = state.enter('list')\n const bulletCurrent = state.bulletCurrent\n /** @type {string} */\n let bullet = node.ordered ? checkBulletOrdered(state) : checkBullet(state)\n /** @type {string} */\n const bulletOther = node.ordered\n ? bullet === '.'\n ? ')'\n : '.'\n : checkBulletOther(state)\n let useDifferentMarker =\n parent && state.bulletLastUsed ? bullet === state.bulletLastUsed : false\n\n if (!node.ordered) {\n const firstListItem = node.children ? node.children[0] : undefined\n\n // If there’s an empty first list item directly in two list items,\n // we have to use a different bullet:\n //\n // ```markdown\n // * - *\n // ```\n //\n // …because otherwise it would become one big thematic break.\n if (\n // Bullet could be used as a thematic break marker:\n (bullet === '*' || bullet === '-') &&\n // Empty first list item:\n firstListItem &&\n (!firstListItem.children || !firstListItem.children[0]) &&\n // Directly in two other list items:\n state.stack[state.stack.length - 1] === 'list' &&\n state.stack[state.stack.length - 2] === 'listItem' &&\n state.stack[state.stack.length - 3] === 'list' &&\n state.stack[state.stack.length - 4] === 'listItem' &&\n // That are each the first child.\n state.indexStack[state.indexStack.length - 1] === 0 &&\n state.indexStack[state.indexStack.length - 2] === 0 &&\n state.indexStack[state.indexStack.length - 3] === 0\n ) {\n useDifferentMarker = true\n }\n\n // If there’s a thematic break at the start of the first list item,\n // we have to use a different bullet:\n //\n // ```markdown\n // * ---\n // ```\n //\n // 
…because otherwise it would become one big thematic break.\n if (checkRule(state) === bullet && firstListItem) {\n let index = -1\n\n while (++index < node.children.length) {\n const item = node.children[index]\n\n if (\n item &&\n item.type === 'listItem' &&\n item.children &&\n item.children[0] &&\n item.children[0].type === 'thematicBreak'\n ) {\n useDifferentMarker = true\n break\n }\n }\n }\n }\n\n if (useDifferentMarker) {\n bullet = bulletOther\n }\n\n state.bulletCurrent = bullet\n const value = state.containerFlow(node, info)\n state.bulletLastUsed = bullet\n state.bulletCurrent = bulletCurrent\n exit()\n return value\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkBulletOrdered(state) {\n const marker = state.options.bulletOrdered || '.'\n\n if (marker !== '.' && marker !== ')') {\n throw new Error(\n 'Cannot serialize items with `' +\n marker +\n '` for `options.bulletOrdered`, expected `.` or `)`'\n )\n }\n\n return marker\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\nimport {checkBullet} from './check-bullet.js'\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkBulletOther(state) {\n const bullet = checkBullet(state)\n const bulletOther = state.options.bulletOther\n\n if (!bulletOther) {\n return bullet === '*' ? 
'-' : '*'\n }\n\n if (bulletOther !== '*' && bulletOther !== '+' && bulletOther !== '-') {\n throw new Error(\n 'Cannot serialize items with `' +\n bulletOther +\n '` for `options.bulletOther`, expected `*`, `+`, or `-`'\n )\n }\n\n if (bulletOther === bullet) {\n throw new Error(\n 'Expected `bullet` (`' +\n bullet +\n '`) and `bulletOther` (`' +\n bulletOther +\n '`) to be different'\n )\n }\n\n return bulletOther\n}\n","/**\n * @import {Info, Map, State} from 'mdast-util-to-markdown'\n * @import {ListItem, Parents} from 'mdast'\n */\n\nimport {checkBullet} from '../util/check-bullet.js'\nimport {checkListItemIndent} from '../util/check-list-item-indent.js'\n\n/**\n * @param {ListItem} node\n * @param {Parents | undefined} parent\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function listItem(node, parent, state, info) {\n const listItemIndent = checkListItemIndent(state)\n let bullet = state.bulletCurrent || checkBullet(state)\n\n // Add the marker value for ordered lists.\n if (parent && parent.type === 'list' && parent.ordered) {\n bullet =\n (typeof parent.start === 'number' && parent.start > -1\n ? parent.start\n : 1) +\n (state.options.incrementListMarker === false\n ? 0\n : parent.children.indexOf(node)) +\n bullet\n }\n\n let size = bullet.length + 1\n\n if (\n listItemIndent === 'tab' ||\n (listItemIndent === 'mixed' &&\n ((parent && parent.type === 'list' && parent.spread) || node.spread))\n ) {\n size = Math.ceil(size / 4) * 4\n }\n\n const tracker = state.createTracker(info)\n tracker.move(bullet + ' '.repeat(size - bullet.length))\n tracker.shift(size)\n const exit = state.enter('listItem')\n const value = state.indentLines(\n state.containerFlow(node, tracker.current()),\n map\n )\n exit()\n\n return value\n\n /** @type {Map} */\n function map(line, index, blank) {\n if (index) {\n return (blank ? '' : ' '.repeat(size)) + line\n }\n\n return (blank ? 
bullet : bullet + ' '.repeat(size - bullet.length)) + line\n }\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkListItemIndent(state) {\n const style = state.options.listItemIndent || 'one'\n\n if (style !== 'tab' && style !== 'one' && style !== 'mixed') {\n throw new Error(\n 'Cannot serialize items with `' +\n style +\n '` for `options.listItemIndent`, expected `tab`, `one`, or `mixed`'\n )\n }\n\n return style\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Paragraph, Parents} from 'mdast'\n */\n\n/**\n * @param {Paragraph} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function paragraph(node, _, state, info) {\n const exit = state.enter('paragraph')\n const subexit = state.enter('phrasing')\n const value = state.containerPhrasing(node, info)\n subexit()\n exit()\n return value\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Parents, Root} from 'mdast'\n */\n\nimport {phrasing} from 'mdast-util-phrasing'\n\n/**\n * @param {Root} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function root(node, _, state, info) {\n // Note: `html` nodes are ambiguous.\n const hasPhrasing = node.children.some(function (d) {\n return phrasing(d)\n })\n\n const container = hasPhrasing ? 
state.containerPhrasing : state.containerFlow\n return container.call(state, node, info)\n}\n","/**\n * @import {Info, State} from 'mdast-util-to-markdown'\n * @import {Parents, Text} from 'mdast'\n */\n\n/**\n * @param {Text} node\n * @param {Parents | undefined} _\n * @param {State} state\n * @param {Info} info\n * @returns {string}\n */\nexport function text(node, _, state, info) {\n return state.safe(node.value, info)\n}\n","/**\n * @import {State} from 'mdast-util-to-markdown'\n * @import {Parents, ThematicBreak} from 'mdast'\n */\n\nimport {checkRuleRepetition} from '../util/check-rule-repetition.js'\nimport {checkRule} from '../util/check-rule.js'\n\n/**\n * @param {ThematicBreak} _\n * @param {Parents | undefined} _1\n * @param {State} state\n * @returns {string}\n */\nexport function thematicBreak(_, _1, state) {\n const value = (\n checkRule(state) + (state.options.ruleSpaces ? ' ' : '')\n ).repeat(checkRuleRepetition(state))\n\n return state.options.ruleSpaces ? value.slice(0, -1) : value\n}\n","/**\n * @import {Options, State} from 'mdast-util-to-markdown'\n */\n\n/**\n * @param {State} state\n * @returns {Exclude}\n */\nexport function checkRuleRepetition(state) {\n const repetition = state.options.ruleRepetition || 3\n\n if (repetition < 3) {\n throw new Error(\n 'Cannot serialize rules with repetition `' +\n repetition +\n '` for `options.ruleRepetition`, expected `3` or more'\n )\n }\n\n return repetition\n}\n","/**\n * @typedef {import('mdast').InlineCode} InlineCode\n * @typedef {import('mdast').Table} Table\n * @typedef {import('mdast').TableCell} TableCell\n * @typedef {import('mdast').TableRow} TableRow\n *\n * @typedef {import('markdown-table').Options} MarkdownTableOptions\n *\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef {import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n *\n * @typedef 
{import('mdast-util-to-markdown').Options} ToMarkdownExtension\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').State} State\n * @typedef {import('mdast-util-to-markdown').Info} Info\n */\n\n/**\n * @typedef Options\n * Configuration.\n * @property {boolean | null | undefined} [tableCellPadding=true]\n * Whether to add a space of padding between delimiters and cells (default:\n * `true`).\n * @property {boolean | null | undefined} [tablePipeAlign=true]\n * Whether to align the delimiters (default: `true`).\n * @property {MarkdownTableOptions['stringLength'] | null | undefined} [stringLength]\n * Function to detect the length of table cell content, used when aligning\n * the delimiters between cells (optional).\n */\n\nimport {ok as assert} from 'devlop'\nimport {markdownTable} from 'markdown-table'\nimport {defaultHandlers} from 'mdast-util-to-markdown'\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM tables in\n * markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown` to enable GFM tables.\n */\nexport function gfmTableFromMarkdown() {\n return {\n enter: {\n table: enterTable,\n tableData: enterCell,\n tableHeader: enterCell,\n tableRow: enterRow\n },\n exit: {\n codeText: exitCodeText,\n table: exitTable,\n tableData: exit,\n tableHeader: exit,\n tableRow: exit\n }\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterTable(token) {\n const align = token._align\n assert(align, 'expected `_align` on table')\n this.enter(\n {\n type: 'table',\n align: align.map(function (d) {\n return d === 'none' ? 
null : d\n }),\n children: []\n },\n token\n )\n this.data.inTable = true\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitTable(token) {\n this.exit(token)\n this.data.inTable = undefined\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterRow(token) {\n this.enter({type: 'tableRow', children: []}, token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exit(token) {\n this.exit(token)\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction enterCell(token) {\n this.enter({type: 'tableCell', children: []}, token)\n}\n\n// Overwrite the default code text data handler to unescape escaped pipes when\n// they are in tables.\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitCodeText(token) {\n let value = this.resume()\n\n if (this.data.inTable) {\n value = value.replace(/\\\\([\\\\|])/g, replace)\n }\n\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'inlineCode')\n node.value = value\n this.exit(token)\n}\n\n/**\n * @param {string} $0\n * @param {string} $1\n * @returns {string}\n */\nfunction replace($0, $1) {\n // Pipes work, backslashes don’t (but can’t escape pipes).\n return $1 === '|' ? $1 : $0\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM tables in\n * markdown.\n *\n * @param {Options | null | undefined} [options]\n * Configuration.\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM tables.\n */\nexport function gfmTableToMarkdown(options) {\n const settings = options || {}\n const padding = settings.tableCellPadding\n const alignDelimiters = settings.tablePipeAlign\n const stringLength = settings.stringLength\n const around = padding ? 
' ' : '|'\n\n return {\n unsafe: [\n {character: '\\r', inConstruct: 'tableCell'},\n {character: '\\n', inConstruct: 'tableCell'},\n // A pipe, when followed by a tab or space (padding), or a dash or colon\n // (unpadded delimiter row), could result in a table.\n {atBreak: true, character: '|', after: '[\\t :-]'},\n // A pipe in a cell must be encoded.\n {character: '|', inConstruct: 'tableCell'},\n // A colon must be followed by a dash, in which case it could start a\n // delimiter row.\n {atBreak: true, character: ':', after: '-'},\n // A delimiter row can also start with a dash, when followed by more\n // dashes, a colon, or a pipe.\n // This is a stricter version than the built in check for lists, thematic\n // breaks, and setex heading underlines though:\n // \n {atBreak: true, character: '-', after: '[:|-]'}\n ],\n handlers: {\n inlineCode: inlineCodeWithTable,\n table: handleTable,\n tableCell: handleTableCell,\n tableRow: handleTableRow\n }\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {Table} node\n */\n function handleTable(node, _, state, info) {\n return serializeData(handleTableAsData(node, state, info), node.align)\n }\n\n /**\n * This function isn’t really used normally, because we handle rows at the\n * table level.\n * But, if someone passes in a table row, this ensures we make somewhat sense.\n *\n * @type {ToMarkdownHandle}\n * @param {TableRow} node\n */\n function handleTableRow(node, _, state, info) {\n const row = handleTableRowAsData(node, state, info)\n const value = serializeData([row])\n // `markdown-table` will always add an align row\n return value.slice(0, value.indexOf('\\n'))\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {TableCell} node\n */\n function handleTableCell(node, _, state, info) {\n const exit = state.enter('tableCell')\n const subexit = state.enter('phrasing')\n const value = state.containerPhrasing(node, {\n ...info,\n before: around,\n after: around\n })\n subexit()\n exit()\n return value\n }\n\n /**\n * 
@param {Array>} matrix\n * @param {Array | null | undefined} [align]\n */\n function serializeData(matrix, align) {\n return markdownTable(matrix, {\n align,\n // @ts-expect-error: `markdown-table` types should support `null`.\n alignDelimiters,\n // @ts-expect-error: `markdown-table` types should support `null`.\n padding,\n // @ts-expect-error: `markdown-table` types should support `null`.\n stringLength\n })\n }\n\n /**\n * @param {Table} node\n * @param {State} state\n * @param {Info} info\n */\n function handleTableAsData(node, state, info) {\n const children = node.children\n let index = -1\n /** @type {Array>} */\n const result = []\n const subexit = state.enter('table')\n\n while (++index < children.length) {\n result[index] = handleTableRowAsData(children[index], state, info)\n }\n\n subexit()\n\n return result\n }\n\n /**\n * @param {TableRow} node\n * @param {State} state\n * @param {Info} info\n */\n function handleTableRowAsData(node, state, info) {\n const children = node.children\n let index = -1\n /** @type {Array} */\n const result = []\n const subexit = state.enter('tableRow')\n\n while (++index < children.length) {\n // Note: the positional info as used here is incorrect.\n // Making it correct would be impossible due to aligning cells?\n // And it would need copy/pasting `markdown-table` into this project.\n result[index] = handleTableCell(children[index], node, state, info)\n }\n\n subexit()\n\n return result\n }\n\n /**\n * @type {ToMarkdownHandle}\n * @param {InlineCode} node\n */\n function inlineCodeWithTable(node, parent, state) {\n let value = defaultHandlers.inlineCode(node, parent, state)\n\n if (state.stack.includes('tableCell')) {\n value = value.replace(/\\|/g, '\\\\$&')\n }\n\n return value\n }\n}\n","/**\n * @typedef {import('mdast').ListItem} ListItem\n * @typedef {import('mdast').Paragraph} Paragraph\n * @typedef {import('mdast-util-from-markdown').CompileContext} CompileContext\n * @typedef 
{import('mdast-util-from-markdown').Extension} FromMarkdownExtension\n * @typedef {import('mdast-util-from-markdown').Handle} FromMarkdownHandle\n * @typedef {import('mdast-util-to-markdown').Options} ToMarkdownExtension\n * @typedef {import('mdast-util-to-markdown').Handle} ToMarkdownHandle\n */\n\nimport {ok as assert} from 'devlop'\nimport {defaultHandlers} from 'mdast-util-to-markdown'\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM task\n * list items in markdown.\n *\n * @returns {FromMarkdownExtension}\n * Extension for `mdast-util-from-markdown` to enable GFM task list items.\n */\nexport function gfmTaskListItemFromMarkdown() {\n return {\n exit: {\n taskListCheckValueChecked: exitCheck,\n taskListCheckValueUnchecked: exitCheck,\n paragraph: exitParagraphWithTaskListItem\n }\n }\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM task list\n * items in markdown.\n *\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM task list items.\n */\nexport function gfmTaskListItemToMarkdown() {\n return {\n unsafe: [{atBreak: true, character: '-', after: '[:|-]'}],\n handlers: {listItem: listItemWithTaskListItem}\n }\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitCheck(token) {\n // We’re always in a paragraph, in a list item.\n const node = this.stack[this.stack.length - 2]\n assert(node.type === 'listItem')\n node.checked = token.type === 'taskListCheckValueChecked'\n}\n\n/**\n * @this {CompileContext}\n * @type {FromMarkdownHandle}\n */\nfunction exitParagraphWithTaskListItem(token) {\n const parent = this.stack[this.stack.length - 2]\n\n if (\n parent &&\n parent.type === 'listItem' &&\n typeof parent.checked === 'boolean'\n ) {\n const node = this.stack[this.stack.length - 1]\n assert(node.type === 'paragraph')\n const head = node.children[0]\n\n if (head && head.type === 'text') {\n const siblings = parent.children\n let index = 
-1\n /** @type {Paragraph | undefined} */\n let firstParaghraph\n\n while (++index < siblings.length) {\n const sibling = siblings[index]\n if (sibling.type === 'paragraph') {\n firstParaghraph = sibling\n break\n }\n }\n\n if (firstParaghraph === node) {\n // Must start with a space or a tab.\n head.value = head.value.slice(1)\n\n if (head.value.length === 0) {\n node.children.shift()\n } else if (\n node.position &&\n head.position &&\n typeof head.position.start.offset === 'number'\n ) {\n head.position.start.column++\n head.position.start.offset++\n node.position.start = Object.assign({}, head.position.start)\n }\n }\n }\n }\n\n this.exit(token)\n}\n\n/**\n * @type {ToMarkdownHandle}\n * @param {ListItem} node\n */\nfunction listItemWithTaskListItem(node, parent, state, info) {\n const head = node.children[0]\n const checkable =\n typeof node.checked === 'boolean' && head && head.type === 'paragraph'\n const checkbox = '[' + (node.checked ? 'x' : ' ') + '] '\n const tracker = state.createTracker(info)\n\n if (checkable) {\n tracker.move(checkbox)\n }\n\n let value = defaultHandlers.listItem(node, parent, state, {\n ...info,\n ...tracker.current()\n })\n\n if (checkable) {\n value = value.replace(/^(?:[*+-]|\\d+\\.)([\\r\\n]| {1,3})/, check)\n }\n\n return value\n\n /**\n * @param {string} $0\n * @returns {string}\n */\n function check($0) {\n return $0 + checkbox\n }\n}\n","/**\n * @import {Code, ConstructRecord, Event, Extension, Previous, State, TokenizeContext, Tokenizer} from 'micromark-util-types'\n */\n\nimport { asciiAlpha, asciiAlphanumeric, asciiControl, markdownLineEndingOrSpace, unicodePunctuation, unicodeWhitespace } from 'micromark-util-character';\nconst wwwPrefix = {\n tokenize: tokenizeWwwPrefix,\n partial: true\n};\nconst domain = {\n tokenize: tokenizeDomain,\n partial: true\n};\nconst path = {\n tokenize: tokenizePath,\n partial: true\n};\nconst trail = {\n tokenize: tokenizeTrail,\n partial: true\n};\nconst emailDomainDotTrail = {\n 
tokenize: tokenizeEmailDomainDotTrail,\n partial: true\n};\nconst wwwAutolink = {\n name: 'wwwAutolink',\n tokenize: tokenizeWwwAutolink,\n previous: previousWww\n};\nconst protocolAutolink = {\n name: 'protocolAutolink',\n tokenize: tokenizeProtocolAutolink,\n previous: previousProtocol\n};\nconst emailAutolink = {\n name: 'emailAutolink',\n tokenize: tokenizeEmailAutolink,\n previous: previousEmail\n};\n\n/** @type {ConstructRecord} */\nconst text = {};\n\n/**\n * Create an extension for `micromark` to support GitHub autolink literal\n * syntax.\n *\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions` to enable GFM\n * autolink literal syntax.\n */\nexport function gfmAutolinkLiteral() {\n return {\n text\n };\n}\n\n/** @type {Code} */\nlet code = 48;\n\n// Add alphanumerics.\nwhile (code < 123) {\n text[code] = emailAutolink;\n code++;\n if (code === 58) code = 65;else if (code === 91) code = 97;\n}\ntext[43] = emailAutolink;\ntext[45] = emailAutolink;\ntext[46] = emailAutolink;\ntext[95] = emailAutolink;\ntext[72] = [emailAutolink, protocolAutolink];\ntext[104] = [emailAutolink, protocolAutolink];\ntext[87] = [emailAutolink, wwwAutolink];\ntext[119] = [emailAutolink, wwwAutolink];\n\n// To do: perform email autolink literals on events, afterwards.\n// That’s where `markdown-rs` and `cmark-gfm` perform it.\n// It should look for `@`, then for atext backwards, and then for a label\n// forwards.\n// To do: `mailto:`, `xmpp:` protocol as prefix.\n\n/**\n * Email autolink literal.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^^^^^^^^^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeEmailAutolink(effects, ok, nok) {\n const self = this;\n /** @type {boolean | undefined} */\n let dot;\n /** @type {boolean} */\n let data;\n return start;\n\n /**\n * Start of email autolink literal.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n 
function start(code) {\n if (!gfmAtext(code) || !previousEmail.call(self, self.previous) || previousUnbalanced(self.events)) {\n return nok(code);\n }\n effects.enter('literalAutolink');\n effects.enter('literalAutolinkEmail');\n return atext(code);\n }\n\n /**\n * In email atext.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function atext(code) {\n if (gfmAtext(code)) {\n effects.consume(code);\n return atext;\n }\n if (code === 64) {\n effects.consume(code);\n return emailDomain;\n }\n return nok(code);\n }\n\n /**\n * In email domain.\n *\n * The reference code is a bit overly complex as it handles the `@`, of which\n * there may be just one.\n * Source: \n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function emailDomain(code) {\n // Dot followed by alphanumerical (not `-` or `_`).\n if (code === 46) {\n return effects.check(emailDomainDotTrail, emailDomainAfter, emailDomainDot)(code);\n }\n\n // Alphanumerical, `-`, and `_`.\n if (code === 45 || code === 95 || asciiAlphanumeric(code)) {\n data = true;\n effects.consume(code);\n return emailDomain;\n }\n\n // To do: `/` if xmpp.\n\n // Note: normally we’d truncate trailing punctuation from the link.\n // However, email autolink literals cannot contain any of those markers,\n // except for `.`, but that can only occur if it isn’t trailing.\n // So we can ignore truncating!\n return emailDomainAfter(code);\n }\n\n /**\n * In email domain, on dot that is not a trail.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function emailDomainDot(code) {\n effects.consume(code);\n dot = true;\n return emailDomain;\n }\n\n /**\n * After email domain.\n *\n * ```markdown\n * > | a contact@example.org b\n * ^\n * ```\n *\n * @type {State}\n */\n function emailDomainAfter(code) {\n // Domain must not be empty, must include a dot, and must end in alphabetical.\n // Source: .\n if (data 
&& dot && asciiAlpha(self.previous)) {\n effects.exit('literalAutolinkEmail');\n effects.exit('literalAutolink');\n return ok(code);\n }\n return nok(code);\n }\n}\n\n/**\n * `www` autolink literal.\n *\n * ```markdown\n * > | a www.example.org b\n * ^^^^^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeWwwAutolink(effects, ok, nok) {\n const self = this;\n return wwwStart;\n\n /**\n * Start of www autolink literal.\n *\n * ```markdown\n * > | www.example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function wwwStart(code) {\n if (code !== 87 && code !== 119 || !previousWww.call(self, self.previous) || previousUnbalanced(self.events)) {\n return nok(code);\n }\n effects.enter('literalAutolink');\n effects.enter('literalAutolinkWww');\n // Note: we *check*, so we can discard the `www.` we parsed.\n // If it worked, we consider it as a part of the domain.\n return effects.check(wwwPrefix, effects.attempt(domain, effects.attempt(path, wwwAfter), nok), nok)(code);\n }\n\n /**\n * After a www autolink literal.\n *\n * ```markdown\n * > | www.example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function wwwAfter(code) {\n effects.exit('literalAutolinkWww');\n effects.exit('literalAutolink');\n return ok(code);\n }\n}\n\n/**\n * Protocol autolink literal.\n *\n * ```markdown\n * > | a https://example.org b\n * ^^^^^^^^^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeProtocolAutolink(effects, ok, nok) {\n const self = this;\n let buffer = '';\n let seen = false;\n return protocolStart;\n\n /**\n * Start of protocol autolink literal.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function protocolStart(code) {\n if ((code === 72 || code === 104) && previousProtocol.call(self, self.previous) && !previousUnbalanced(self.events)) {\n effects.enter('literalAutolink');\n effects.enter('literalAutolinkHttp');\n buffer += 
String.fromCodePoint(code);\n effects.consume(code);\n return protocolPrefixInside;\n }\n return nok(code);\n }\n\n /**\n * In protocol.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^^^^^\n * ```\n *\n * @type {State}\n */\n function protocolPrefixInside(code) {\n // `5` is size of `https`\n if (asciiAlpha(code) && buffer.length < 5) {\n // @ts-expect-error: definitely number.\n buffer += String.fromCodePoint(code);\n effects.consume(code);\n return protocolPrefixInside;\n }\n if (code === 58) {\n const protocol = buffer.toLowerCase();\n if (protocol === 'http' || protocol === 'https') {\n effects.consume(code);\n return protocolSlashesInside;\n }\n }\n return nok(code);\n }\n\n /**\n * In slashes.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^^\n * ```\n *\n * @type {State}\n */\n function protocolSlashesInside(code) {\n if (code === 47) {\n effects.consume(code);\n if (seen) {\n return afterProtocol;\n }\n seen = true;\n return protocolSlashesInside;\n }\n return nok(code);\n }\n\n /**\n * After protocol, before domain.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function afterProtocol(code) {\n // To do: this is different from `markdown-rs`:\n // https://github.com/wooorm/markdown-rs/blob/b3a921c761309ae00a51fe348d8a43adbc54b518/src/construct/gfm_autolink_literal.rs#L172-L182\n return code === null || asciiControl(code) || markdownLineEndingOrSpace(code) || unicodeWhitespace(code) || unicodePunctuation(code) ? 
nok(code) : effects.attempt(domain, effects.attempt(path, protocolAfter), nok)(code);\n }\n\n /**\n * After a protocol autolink literal.\n *\n * ```markdown\n * > | https://example.com/a?b#c\n * ^\n * ```\n *\n * @type {State}\n */\n function protocolAfter(code) {\n effects.exit('literalAutolinkHttp');\n effects.exit('literalAutolink');\n return ok(code);\n }\n}\n\n/**\n * `www` prefix.\n *\n * ```markdown\n * > | a www.example.org b\n * ^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeWwwPrefix(effects, ok, nok) {\n let size = 0;\n return wwwPrefixInside;\n\n /**\n * In www prefix.\n *\n * ```markdown\n * > | www.example.com\n * ^^^^\n * ```\n *\n * @type {State}\n */\n function wwwPrefixInside(code) {\n if ((code === 87 || code === 119) && size < 3) {\n size++;\n effects.consume(code);\n return wwwPrefixInside;\n }\n if (code === 46 && size === 3) {\n effects.consume(code);\n return wwwPrefixAfter;\n }\n return nok(code);\n }\n\n /**\n * After www prefix.\n *\n * ```markdown\n * > | www.example.com\n * ^\n * ```\n *\n * @type {State}\n */\n function wwwPrefixAfter(code) {\n // If there is *anything*, we can link.\n return code === null ? 
nok(code) : ok(code);\n }\n}\n\n/**\n * Domain.\n *\n * ```markdown\n * > | a https://example.org b\n * ^^^^^^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeDomain(effects, ok, nok) {\n /** @type {boolean | undefined} */\n let underscoreInLastSegment;\n /** @type {boolean | undefined} */\n let underscoreInLastLastSegment;\n /** @type {boolean | undefined} */\n let seen;\n return domainInside;\n\n /**\n * In domain.\n *\n * ```markdown\n * > | https://example.com/a\n * ^^^^^^^^^^^\n * ```\n *\n * @type {State}\n */\n function domainInside(code) {\n // Check whether this marker, which is a trailing punctuation\n // marker, optionally followed by more trailing markers, and then\n // followed by an end.\n if (code === 46 || code === 95) {\n return effects.check(trail, domainAfter, domainAtPunctuation)(code);\n }\n\n // GH documents that only alphanumerics (other than `-`, `.`, and `_`) can\n // occur, which sounds like ASCII only, but they also support `www.點看.com`,\n // so that’s Unicode.\n // Instead of some new production for Unicode alphanumerics, markdown\n // already has that for Unicode punctuation and whitespace, so use those.\n // Source: .\n if (code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code) || code !== 45 && unicodePunctuation(code)) {\n return domainAfter(code);\n }\n seen = true;\n effects.consume(code);\n return domainInside;\n }\n\n /**\n * In domain, at potential trailing punctuation, that was not trailing.\n *\n * ```markdown\n * > | https://example.com\n * ^\n * ```\n *\n * @type {State}\n */\n function domainAtPunctuation(code) {\n // There is an underscore in the last segment of the domain\n if (code === 95) {\n underscoreInLastSegment = true;\n }\n // Otherwise, it’s a `.`: save the last segment underscore in the\n // penultimate segment slot.\n else {\n underscoreInLastLastSegment = underscoreInLastSegment;\n underscoreInLastSegment = undefined;\n }\n effects.consume(code);\n 
return domainInside;\n }\n\n /**\n * After domain.\n *\n * ```markdown\n * > | https://example.com/a\n * ^\n * ```\n *\n * @type {State} */\n function domainAfter(code) {\n // Note: that’s GH says a dot is needed, but it’s not true:\n // \n if (underscoreInLastLastSegment || underscoreInLastSegment || !seen) {\n return nok(code);\n }\n return ok(code);\n }\n}\n\n/**\n * Path.\n *\n * ```markdown\n * > | a https://example.org/stuff b\n * ^^^^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizePath(effects, ok) {\n let sizeOpen = 0;\n let sizeClose = 0;\n return pathInside;\n\n /**\n * In path.\n *\n * ```markdown\n * > | https://example.com/a\n * ^^\n * ```\n *\n * @type {State}\n */\n function pathInside(code) {\n if (code === 40) {\n sizeOpen++;\n effects.consume(code);\n return pathInside;\n }\n\n // To do: `markdown-rs` also needs this.\n // If this is a paren, and there are less closings than openings,\n // we don’t check for a trail.\n if (code === 41 && sizeClose < sizeOpen) {\n return pathAtPunctuation(code);\n }\n\n // Check whether this trailing punctuation marker is optionally\n // followed by more trailing markers, and then followed\n // by an end.\n if (code === 33 || code === 34 || code === 38 || code === 39 || code === 41 || code === 42 || code === 44 || code === 46 || code === 58 || code === 59 || code === 60 || code === 63 || code === 93 || code === 95 || code === 126) {\n return effects.check(trail, ok, pathAtPunctuation)(code);\n }\n if (code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code)) {\n return ok(code);\n }\n effects.consume(code);\n return pathInside;\n }\n\n /**\n * In path, at potential trailing punctuation, that was not trailing.\n *\n * ```markdown\n * > | https://example.com/a\"b\n * ^\n * ```\n *\n * @type {State}\n */\n function pathAtPunctuation(code) {\n // Count closing parens.\n if (code === 41) {\n sizeClose++;\n }\n effects.consume(code);\n return pathInside;\n 
}\n}\n\n/**\n * Trail.\n *\n * This calls `ok` if this *is* the trail, followed by an end, which means\n * the entire trail is not part of the link.\n * It calls `nok` if this *is* part of the link.\n *\n * ```markdown\n * > | https://example.com\").\n * ^^^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeTrail(effects, ok, nok) {\n return trail;\n\n /**\n * In trail of domain or path.\n *\n * ```markdown\n * > | https://example.com\").\n * ^\n * ```\n *\n * @type {State}\n */\n function trail(code) {\n // Regular trailing punctuation.\n if (code === 33 || code === 34 || code === 39 || code === 41 || code === 42 || code === 44 || code === 46 || code === 58 || code === 59 || code === 63 || code === 95 || code === 126) {\n effects.consume(code);\n return trail;\n }\n\n // `&` followed by one or more alphabeticals and then a `;`, is\n // as a whole considered as trailing punctuation.\n // In all other cases, it is considered as continuation of the URL.\n if (code === 38) {\n effects.consume(code);\n return trailCharacterReferenceStart;\n }\n\n // Needed because we allow literals after `[`, as we fix:\n // .\n // Check that it is not followed by `(` or `[`.\n if (code === 93) {\n effects.consume(code);\n return trailBracketAfter;\n }\n if (\n // `<` is an end.\n code === 60 ||\n // So is whitespace.\n code === null || markdownLineEndingOrSpace(code) || unicodeWhitespace(code)) {\n return ok(code);\n }\n return nok(code);\n }\n\n /**\n * In trail, after `]`.\n *\n * > 👉 **Note**: this deviates from `cmark-gfm` to fix a bug.\n * > See end of for more.\n *\n * ```markdown\n * > | https://example.com](\n * ^\n * ```\n *\n * @type {State}\n */\n function trailBracketAfter(code) {\n // Whitespace or something that could start a resource or reference is the end.\n // Switch back to trail otherwise.\n if (code === null || code === 40 || code === 91 || markdownLineEndingOrSpace(code) || unicodeWhitespace(code)) {\n return ok(code);\n }\n 
return trail(code);\n }\n\n /**\n * In character-reference like trail, after `&`.\n *\n * ```markdown\n * > | https://example.com&).\n * ^\n * ```\n *\n * @type {State}\n */\n function trailCharacterReferenceStart(code) {\n // When non-alpha, it’s not a trail.\n return asciiAlpha(code) ? trailCharacterReferenceInside(code) : nok(code);\n }\n\n /**\n * In character-reference like trail.\n *\n * ```markdown\n * > | https://example.com&).\n * ^\n * ```\n *\n * @type {State}\n */\n function trailCharacterReferenceInside(code) {\n // Switch back to trail if this is well-formed.\n if (code === 59) {\n effects.consume(code);\n return trail;\n }\n if (asciiAlpha(code)) {\n effects.consume(code);\n return trailCharacterReferenceInside;\n }\n\n // It’s not a trail.\n return nok(code);\n }\n}\n\n/**\n * Dot in email domain trail.\n *\n * This calls `ok` if this *is* the trail, followed by an end, which means\n * the trail is not part of the link.\n * It calls `nok` if this *is* part of the link.\n *\n * ```markdown\n * > | contact@example.org.\n * ^\n * ```\n *\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeEmailDomainDotTrail(effects, ok, nok) {\n return start;\n\n /**\n * Dot.\n *\n * ```markdown\n * > | contact@example.org.\n * ^ ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n // Must be dot.\n effects.consume(code);\n return after;\n }\n\n /**\n * After dot.\n *\n * ```markdown\n * > | contact@example.org.\n * ^ ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n // Not a trail if alphanumeric.\n return asciiAlphanumeric(code) ? 
nok(code) : ok(code);\n }\n}\n\n/**\n * See:\n * .\n *\n * @type {Previous}\n */\nfunction previousWww(code) {\n return code === null || code === 40 || code === 42 || code === 95 || code === 91 || code === 93 || code === 126 || markdownLineEndingOrSpace(code);\n}\n\n/**\n * See:\n * .\n *\n * @type {Previous}\n */\nfunction previousProtocol(code) {\n return !asciiAlpha(code);\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Previous}\n */\nfunction previousEmail(code) {\n // Do not allow a slash “inside” atext.\n // The reference code is a bit weird, but that’s what it results in.\n // Source: .\n // Other than slash, every preceding character is allowed.\n return !(code === 47 || gfmAtext(code));\n}\n\n/**\n * @param {Code} code\n * @returns {boolean}\n */\nfunction gfmAtext(code) {\n return code === 43 || code === 45 || code === 46 || code === 95 || asciiAlphanumeric(code);\n}\n\n/**\n * @param {Array} events\n * @returns {boolean}\n */\nfunction previousUnbalanced(events) {\n let index = events.length;\n let result = false;\n while (index--) {\n const token = events[index][1];\n if ((token.type === 'labelLink' || token.type === 'labelImage') && !token._balanced) {\n result = true;\n break;\n }\n\n // If we’ve seen this token, and it was marked as not having any unbalanced\n // bracket before it, we can exit.\n if (token._gfmAutolinkLiteralWalkedInto) {\n result = false;\n break;\n }\n }\n if (events.length > 0 && !result) {\n // Mark the last token as “walked into” w/o finding\n // anything.\n events[events.length - 1][1]._gfmAutolinkLiteralWalkedInto = true;\n }\n return result;\n}","/**\n * @import {Event, Exiter, Extension, Resolver, State, Token, TokenizeContext, Tokenizer} from 'micromark-util-types'\n */\n\nimport { blankLine } from 'micromark-core-commonmark';\nimport { factorySpace } from 'micromark-factory-space';\nimport { markdownLineEndingOrSpace } from 'micromark-util-character';\nimport { normalizeIdentifier } from 
'micromark-util-normalize-identifier';\nconst indent = {\n tokenize: tokenizeIndent,\n partial: true\n};\n\n// To do: micromark should support a `_hiddenGfmFootnoteSupport`, which only\n// affects label start (image).\n// That will let us drop `tokenizePotentialGfmFootnote*`.\n// It currently has a `_hiddenFootnoteSupport`, which affects that and more.\n// That can be removed when `micromark-extension-footnote` is archived.\n\n/**\n * Create an extension for `micromark` to enable GFM footnote syntax.\n *\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions` to\n * enable GFM footnote syntax.\n */\nexport function gfmFootnote() {\n /** @type {Extension} */\n return {\n document: {\n [91]: {\n name: 'gfmFootnoteDefinition',\n tokenize: tokenizeDefinitionStart,\n continuation: {\n tokenize: tokenizeDefinitionContinuation\n },\n exit: gfmFootnoteDefinitionEnd\n }\n },\n text: {\n [91]: {\n name: 'gfmFootnoteCall',\n tokenize: tokenizeGfmFootnoteCall\n },\n [93]: {\n name: 'gfmPotentialFootnoteCall',\n add: 'after',\n tokenize: tokenizePotentialGfmFootnoteCall,\n resolveTo: resolveToPotentialGfmFootnoteCall\n }\n }\n };\n}\n\n// To do: remove after micromark update.\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizePotentialGfmFootnoteCall(effects, ok, nok) {\n const self = this;\n let index = self.events.length;\n const defined = self.parser.gfmFootnotes || (self.parser.gfmFootnotes = []);\n /** @type {Token} */\n let labelStart;\n\n // Find an opening.\n while (index--) {\n const token = self.events[index][1];\n if (token.type === \"labelImage\") {\n labelStart = token;\n break;\n }\n\n // Exit if we’ve walked far enough.\n if (token.type === 'gfmFootnoteCall' || token.type === \"labelLink\" || token.type === \"label\" || token.type === \"image\" || token.type === \"link\") {\n break;\n }\n }\n return start;\n\n /**\n * @type {State}\n */\n function start(code) {\n if (!labelStart || 
!labelStart._balanced) {\n return nok(code);\n }\n const id = normalizeIdentifier(self.sliceSerialize({\n start: labelStart.end,\n end: self.now()\n }));\n if (id.codePointAt(0) !== 94 || !defined.includes(id.slice(1))) {\n return nok(code);\n }\n effects.enter('gfmFootnoteCallLabelMarker');\n effects.consume(code);\n effects.exit('gfmFootnoteCallLabelMarker');\n return ok(code);\n }\n}\n\n// To do: remove after micromark update.\n/** @type {Resolver} */\nfunction resolveToPotentialGfmFootnoteCall(events, context) {\n let index = events.length;\n /** @type {Token | undefined} */\n let labelStart;\n\n // Find an opening.\n while (index--) {\n if (events[index][1].type === \"labelImage\" && events[index][0] === 'enter') {\n labelStart = events[index][1];\n break;\n }\n }\n // Change the `labelImageMarker` to a `data`.\n events[index + 1][1].type = \"data\";\n events[index + 3][1].type = 'gfmFootnoteCallLabelMarker';\n\n // The whole (without `!`):\n /** @type {Token} */\n const call = {\n type: 'gfmFootnoteCall',\n start: Object.assign({}, events[index + 3][1].start),\n end: Object.assign({}, events[events.length - 1][1].end)\n };\n // The `^` marker\n /** @type {Token} */\n const marker = {\n type: 'gfmFootnoteCallMarker',\n start: Object.assign({}, events[index + 3][1].end),\n end: Object.assign({}, events[index + 3][1].end)\n };\n // Increment the end 1 character.\n marker.end.column++;\n marker.end.offset++;\n marker.end._bufferIndex++;\n /** @type {Token} */\n const string = {\n type: 'gfmFootnoteCallString',\n start: Object.assign({}, marker.end),\n end: Object.assign({}, events[events.length - 1][1].start)\n };\n /** @type {Token} */\n const chunk = {\n type: \"chunkString\",\n contentType: 'string',\n start: Object.assign({}, string.start),\n end: Object.assign({}, string.end)\n };\n\n /** @type {Array} */\n const replacement = [\n // Take the `labelImageMarker` (now `data`, the `!`)\n events[index + 1], events[index + 2], ['enter', call, context],\n // The 
`[`\n events[index + 3], events[index + 4],\n // The `^`.\n ['enter', marker, context], ['exit', marker, context],\n // Everything in between.\n ['enter', string, context], ['enter', chunk, context], ['exit', chunk, context], ['exit', string, context],\n // The ending (`]`, properly parsed and labelled).\n events[events.length - 2], events[events.length - 1], ['exit', call, context]];\n events.splice(index, events.length - index + 1, ...replacement);\n return events;\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeGfmFootnoteCall(effects, ok, nok) {\n const self = this;\n const defined = self.parser.gfmFootnotes || (self.parser.gfmFootnotes = []);\n let size = 0;\n /** @type {boolean} */\n let data;\n\n // Note: the implementation of `markdown-rs` is different, because it houses\n // core *and* extensions in one project.\n // Therefore, it can include footnote logic inside `label-end`.\n // We can’t do that, but luckily, we can parse footnotes in a simpler way than\n // needed for labels.\n return start;\n\n /**\n * Start of footnote label.\n *\n * ```markdown\n * > | a [^b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('gfmFootnoteCall');\n effects.enter('gfmFootnoteCallLabelMarker');\n effects.consume(code);\n effects.exit('gfmFootnoteCallLabelMarker');\n return callStart;\n }\n\n /**\n * After `[`, at `^`.\n *\n * ```markdown\n * > | a [^b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function callStart(code) {\n if (code !== 94) return nok(code);\n effects.enter('gfmFootnoteCallMarker');\n effects.consume(code);\n effects.exit('gfmFootnoteCallMarker');\n effects.enter('gfmFootnoteCallString');\n effects.enter('chunkString').contentType = 'string';\n return callData;\n }\n\n /**\n * In label.\n *\n * ```markdown\n * > | a [^b] c\n * ^\n * ```\n *\n * @type {State}\n */\n function callData(code) {\n if (\n // Too long.\n size > 999 ||\n // Closing brace with nothing.\n code === 93 && !data 
||\n // Space or tab is not supported by GFM for some reason.\n // `\\n` and `[` not being supported makes sense.\n code === null || code === 91 || markdownLineEndingOrSpace(code)) {\n return nok(code);\n }\n if (code === 93) {\n effects.exit('chunkString');\n const token = effects.exit('gfmFootnoteCallString');\n if (!defined.includes(normalizeIdentifier(self.sliceSerialize(token)))) {\n return nok(code);\n }\n effects.enter('gfmFootnoteCallLabelMarker');\n effects.consume(code);\n effects.exit('gfmFootnoteCallLabelMarker');\n effects.exit('gfmFootnoteCall');\n return ok;\n }\n if (!markdownLineEndingOrSpace(code)) {\n data = true;\n }\n size++;\n effects.consume(code);\n return code === 92 ? callEscape : callData;\n }\n\n /**\n * On character after escape.\n *\n * ```markdown\n * > | a [^b\\c] d\n * ^\n * ```\n *\n * @type {State}\n */\n function callEscape(code) {\n if (code === 91 || code === 92 || code === 93) {\n effects.consume(code);\n size++;\n return callData;\n }\n return callData(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeDefinitionStart(effects, ok, nok) {\n const self = this;\n const defined = self.parser.gfmFootnotes || (self.parser.gfmFootnotes = []);\n /** @type {string} */\n let identifier;\n let size = 0;\n /** @type {boolean | undefined} */\n let data;\n return start;\n\n /**\n * Start of GFM footnote definition.\n *\n * ```markdown\n * > | [^a]: b\n * ^\n * ```\n *\n * @type {State}\n */\n function start(code) {\n effects.enter('gfmFootnoteDefinition')._container = true;\n effects.enter('gfmFootnoteDefinitionLabel');\n effects.enter('gfmFootnoteDefinitionLabelMarker');\n effects.consume(code);\n effects.exit('gfmFootnoteDefinitionLabelMarker');\n return labelAtMarker;\n }\n\n /**\n * In label, at caret.\n *\n * ```markdown\n * > | [^a]: b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelAtMarker(code) {\n if (code === 94) {\n effects.enter('gfmFootnoteDefinitionMarker');\n 
effects.consume(code);\n effects.exit('gfmFootnoteDefinitionMarker');\n effects.enter('gfmFootnoteDefinitionLabelString');\n effects.enter('chunkString').contentType = 'string';\n return labelInside;\n }\n return nok(code);\n }\n\n /**\n * In label.\n *\n * > 👉 **Note**: `cmark-gfm` prevents whitespace from occurring in footnote\n * > definition labels.\n *\n * ```markdown\n * > | [^a]: b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelInside(code) {\n if (\n // Too long.\n size > 999 ||\n // Closing brace with nothing.\n code === 93 && !data ||\n // Space or tab is not supported by GFM for some reason.\n // `\\n` and `[` not being supported makes sense.\n code === null || code === 91 || markdownLineEndingOrSpace(code)) {\n return nok(code);\n }\n if (code === 93) {\n effects.exit('chunkString');\n const token = effects.exit('gfmFootnoteDefinitionLabelString');\n identifier = normalizeIdentifier(self.sliceSerialize(token));\n effects.enter('gfmFootnoteDefinitionLabelMarker');\n effects.consume(code);\n effects.exit('gfmFootnoteDefinitionLabelMarker');\n effects.exit('gfmFootnoteDefinitionLabel');\n return labelAfter;\n }\n if (!markdownLineEndingOrSpace(code)) {\n data = true;\n }\n size++;\n effects.consume(code);\n return code === 92 ? 
labelEscape : labelInside;\n }\n\n /**\n * After `\\`, at a special character.\n *\n * > 👉 **Note**: `cmark-gfm` currently does not support escaped brackets:\n * > \n *\n * ```markdown\n * > | [^a\\*b]: c\n * ^\n * ```\n *\n * @type {State}\n */\n function labelEscape(code) {\n if (code === 91 || code === 92 || code === 93) {\n effects.consume(code);\n size++;\n return labelInside;\n }\n return labelInside(code);\n }\n\n /**\n * After definition label.\n *\n * ```markdown\n * > | [^a]: b\n * ^\n * ```\n *\n * @type {State}\n */\n function labelAfter(code) {\n if (code === 58) {\n effects.enter('definitionMarker');\n effects.consume(code);\n effects.exit('definitionMarker');\n if (!defined.includes(identifier)) {\n defined.push(identifier);\n }\n\n // Any whitespace after the marker is eaten, forming indented code\n // is not possible.\n // No space is also fine, just like a block quote marker.\n return factorySpace(effects, whitespaceAfter, 'gfmFootnoteDefinitionWhitespace');\n }\n return nok(code);\n }\n\n /**\n * After definition prefix.\n *\n * ```markdown\n * > | [^a]: b\n * ^\n * ```\n *\n * @type {State}\n */\n function whitespaceAfter(code) {\n // `markdown-rs` has a wrapping token for the prefix that is closed here.\n return ok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeDefinitionContinuation(effects, ok, nok) {\n /// Start of footnote definition continuation.\n ///\n /// ```markdown\n /// | [^a]: b\n /// > | c\n /// ^\n /// ```\n //\n // Either a blank line, which is okay, or an indented thing.\n return effects.check(blankLine, ok, effects.attempt(indent, ok, nok));\n}\n\n/** @type {Exiter} */\nfunction gfmFootnoteDefinitionEnd(effects) {\n effects.exit('gfmFootnoteDefinition');\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeIndent(effects, ok, nok) {\n const self = this;\n return factorySpace(effects, afterPrefix, 'gfmFootnoteDefinitionIndent', 4 + 1);\n\n /**\n * @type 
{State}\n */\n function afterPrefix(code) {\n const tail = self.events[self.events.length - 1];\n return tail && tail[1].type === 'gfmFootnoteDefinitionIndent' && tail[2].sliceSerialize(tail[1], true).length === 4 ? ok(code) : nok(code);\n }\n}","/**\n * @import {Options} from 'micromark-extension-gfm-strikethrough'\n * @import {Event, Extension, Resolver, State, Token, TokenizeContext, Tokenizer} from 'micromark-util-types'\n */\n\nimport { splice } from 'micromark-util-chunked';\nimport { classifyCharacter } from 'micromark-util-classify-character';\nimport { resolveAll } from 'micromark-util-resolve-all';\n/**\n * Create an extension for `micromark` to enable GFM strikethrough syntax.\n *\n * @param {Options | null | undefined} [options={}]\n * Configuration.\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions`, to\n * enable GFM strikethrough syntax.\n */\nexport function gfmStrikethrough(options) {\n const options_ = options || {};\n let single = options_.singleTilde;\n const tokenizer = {\n name: 'strikethrough',\n tokenize: tokenizeStrikethrough,\n resolveAll: resolveAllStrikethrough\n };\n if (single === null || single === undefined) {\n single = true;\n }\n return {\n text: {\n [126]: tokenizer\n },\n insideSpan: {\n null: [tokenizer]\n },\n attentionMarkers: {\n null: [126]\n }\n };\n\n /**\n * Take events and resolve strikethrough.\n *\n * @type {Resolver}\n */\n function resolveAllStrikethrough(events, context) {\n let index = -1;\n\n // Walk through all events.\n while (++index < events.length) {\n // Find a token that can close.\n if (events[index][0] === 'enter' && events[index][1].type === 'strikethroughSequenceTemporary' && events[index][1]._close) {\n let open = index;\n\n // Now walk back to find an opener.\n while (open--) {\n // Find a token that can open the closer.\n if (events[open][0] === 'exit' && events[open][1].type === 'strikethroughSequenceTemporary' && events[open][1]._open &&\n // If the sizes are 
the same:\n events[index][1].end.offset - events[index][1].start.offset === events[open][1].end.offset - events[open][1].start.offset) {\n events[index][1].type = 'strikethroughSequence';\n events[open][1].type = 'strikethroughSequence';\n\n /** @type {Token} */\n const strikethrough = {\n type: 'strikethrough',\n start: Object.assign({}, events[open][1].start),\n end: Object.assign({}, events[index][1].end)\n };\n\n /** @type {Token} */\n const text = {\n type: 'strikethroughText',\n start: Object.assign({}, events[open][1].end),\n end: Object.assign({}, events[index][1].start)\n };\n\n // Opening.\n /** @type {Array} */\n const nextEvents = [['enter', strikethrough, context], ['enter', events[open][1], context], ['exit', events[open][1], context], ['enter', text, context]];\n const insideSpan = context.parser.constructs.insideSpan.null;\n if (insideSpan) {\n // Between.\n splice(nextEvents, nextEvents.length, 0, resolveAll(insideSpan, events.slice(open + 1, index), context));\n }\n\n // Closing.\n splice(nextEvents, nextEvents.length, 0, [['exit', text, context], ['enter', events[index][1], context], ['exit', events[index][1], context], ['exit', strikethrough, context]]);\n splice(events, open - 1, index - open + 3, nextEvents);\n index = open + nextEvents.length - 2;\n break;\n }\n }\n }\n }\n index = -1;\n while (++index < events.length) {\n if (events[index][1].type === 'strikethroughSequenceTemporary') {\n events[index][1].type = \"data\";\n }\n }\n return events;\n }\n\n /**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\n function tokenizeStrikethrough(effects, ok, nok) {\n const previous = this.previous;\n const events = this.events;\n let size = 0;\n return start;\n\n /** @type {State} */\n function start(code) {\n if (previous === 126 && events[events.length - 1][1].type !== \"characterEscape\") {\n return nok(code);\n }\n effects.enter('strikethroughSequenceTemporary');\n return more(code);\n }\n\n /** @type {State} */\n function more(code) {\n 
const before = classifyCharacter(previous);\n if (code === 126) {\n // If this is the third marker, exit.\n if (size > 1) return nok(code);\n effects.consume(code);\n size++;\n return more;\n }\n if (size < 2 && !single) return nok(code);\n const token = effects.exit('strikethroughSequenceTemporary');\n const after = classifyCharacter(code);\n token._open = !after || after === 2 && Boolean(before);\n token._close = !before || before === 2 && Boolean(after);\n return ok(code);\n }\n }\n}","/**\n * @import {Event} from 'micromark-util-types'\n */\n\n// Port of `edit_map.rs` from `markdown-rs`.\n// This should move to `markdown-js` later.\n\n// Deal with several changes in events, batching them together.\n//\n// Preferably, changes should be kept to a minimum.\n// Sometimes, it’s needed to change the list of events, because parsing can be\n// messy, and it helps to expose a cleaner interface of events to the compiler\n// and other users.\n// It can also help to merge many adjacent similar events.\n// And, in other cases, it’s needed to parse subcontent: pass some events\n// through another tokenizer and inject the result.\n\n/**\n * @typedef {[number, number, Array]} Change\n * @typedef {[number, number, number]} Jump\n */\n\n/**\n * Tracks a bunch of edits.\n */\nexport class EditMap {\n /**\n * Create a new edit map.\n */\n constructor() {\n /**\n * Record of changes.\n *\n * @type {Array}\n */\n this.map = [];\n }\n\n /**\n * Create an edit: a remove and/or add at a certain place.\n *\n * @param {number} index\n * @param {number} remove\n * @param {Array} add\n * @returns {undefined}\n */\n add(index, remove, add) {\n addImplementation(this, index, remove, add);\n }\n\n // To do: add this when moving to `micromark`.\n // /**\n // * Create an edit: but insert `add` before existing additions.\n // *\n // * @param {number} index\n // * @param {number} remove\n // * @param {Array} add\n // * @returns {undefined}\n // */\n // addBefore(index, remove, add) {\n // 
addImplementation(this, index, remove, add, true)\n // }\n\n /**\n * Done, change the events.\n *\n * @param {Array} events\n * @returns {undefined}\n */\n consume(events) {\n this.map.sort(function (a, b) {\n return a[0] - b[0];\n });\n\n /* c8 ignore next 3 -- `resolve` is never called without tables, so without edits. */\n if (this.map.length === 0) {\n return;\n }\n\n // To do: if links are added in events, like they are in `markdown-rs`,\n // this is needed.\n // // Calculate jumps: where items in the current list move to.\n // /** @type {Array} */\n // const jumps = []\n // let index = 0\n // let addAcc = 0\n // let removeAcc = 0\n // while (index < this.map.length) {\n // const [at, remove, add] = this.map[index]\n // removeAcc += remove\n // addAcc += add.length\n // jumps.push([at, removeAcc, addAcc])\n // index += 1\n // }\n //\n // . shiftLinks(events, jumps)\n\n let index = this.map.length;\n /** @type {Array>} */\n const vecs = [];\n while (index > 0) {\n index -= 1;\n vecs.push(events.slice(this.map[index][0] + this.map[index][1]), this.map[index][2]);\n\n // Truncate rest.\n events.length = this.map[index][0];\n }\n vecs.push(events.slice());\n events.length = 0;\n let slice = vecs.pop();\n while (slice) {\n for (const element of slice) {\n events.push(element);\n }\n slice = vecs.pop();\n }\n\n // Truncate everything.\n this.map.length = 0;\n }\n}\n\n/**\n * Create an edit.\n *\n * @param {EditMap} editMap\n * @param {number} at\n * @param {number} remove\n * @param {Array} add\n * @returns {undefined}\n */\nfunction addImplementation(editMap, at, remove, add) {\n let index = 0;\n\n /* c8 ignore next 3 -- `resolve` is never called without tables, so without edits. 
*/\n if (remove === 0 && add.length === 0) {\n return;\n }\n while (index < editMap.map.length) {\n if (editMap.map[index][0] === at) {\n editMap.map[index][1] += remove;\n\n // To do: before not used by tables, use when moving to micromark.\n // if (before) {\n // add.push(...editMap.map[index][2])\n // editMap.map[index][2] = add\n // } else {\n editMap.map[index][2].push(...add);\n // }\n\n return;\n }\n index += 1;\n }\n editMap.map.push([at, remove, add]);\n}\n\n// /**\n// * Shift `previous` and `next` links according to `jumps`.\n// *\n// * This fixes links in case there are events removed or added between them.\n// *\n// * @param {Array} events\n// * @param {Array} jumps\n// */\n// function shiftLinks(events, jumps) {\n// let jumpIndex = 0\n// let index = 0\n// let add = 0\n// let rm = 0\n\n// while (index < events.length) {\n// const rmCurr = rm\n\n// while (jumpIndex < jumps.length && jumps[jumpIndex][0] <= index) {\n// add = jumps[jumpIndex][2]\n// rm = jumps[jumpIndex][1]\n// jumpIndex += 1\n// }\n\n// // Ignore items that will be removed.\n// if (rm > rmCurr) {\n// index += rm - rmCurr\n// } else {\n// // ?\n// // if let Some(link) = &events[index].link {\n// // if let Some(next) = link.next {\n// // events[next].link.as_mut().unwrap().previous = Some(index + add - rm);\n// // while jumpIndex < jumps.len() && jumps[jumpIndex].0 <= next {\n// // add = jumps[jumpIndex].2;\n// // rm = jumps[jumpIndex].1;\n// // jumpIndex += 1;\n// // }\n// // events[index].link.as_mut().unwrap().next = Some(next + add - rm);\n// // index = next;\n// // continue;\n// // }\n// // }\n// index += 1\n// }\n// }\n// }","/**\n * @import {Event} from 'micromark-util-types'\n */\n\n/**\n * @typedef {'center' | 'left' | 'none' | 'right'} Align\n */\n\n/**\n * Figure out the alignment of a GFM table.\n *\n * @param {Readonly>} events\n * List of events.\n * @param {number} index\n * Table enter event.\n * @returns {Array}\n * List of aligns.\n */\nexport function 
gfmTableAlign(events, index) {\n let inDelimiterRow = false;\n /** @type {Array} */\n const align = [];\n while (index < events.length) {\n const event = events[index];\n if (inDelimiterRow) {\n if (event[0] === 'enter') {\n // Start of alignment value: set a new column.\n // To do: `markdown-rs` uses `tableDelimiterCellValue`.\n if (event[1].type === 'tableContent') {\n align.push(events[index + 1][1].type === 'tableDelimiterMarker' ? 'left' : 'none');\n }\n }\n // Exits:\n // End of alignment value: change the column.\n // To do: `markdown-rs` uses `tableDelimiterCellValue`.\n else if (event[1].type === 'tableContent') {\n if (events[index - 1][1].type === 'tableDelimiterMarker') {\n const alignIndex = align.length - 1;\n align[alignIndex] = align[alignIndex] === 'left' ? 'center' : 'right';\n }\n }\n // Done!\n else if (event[1].type === 'tableDelimiterRow') {\n break;\n }\n } else if (event[0] === 'enter' && event[1].type === 'tableDelimiterRow') {\n inDelimiterRow = true;\n }\n index += 1;\n }\n return align;\n}","/**\n * @import {Event, Extension, Point, Resolver, State, Token, TokenizeContext, Tokenizer} from 'micromark-util-types'\n */\n\n/**\n * @typedef {[number, number, number, number]} Range\n * Cell info.\n *\n * @typedef {0 | 1 | 2 | 3} RowKind\n * Where we are: `1` for head row, `2` for delimiter row, `3` for body row.\n */\n\nimport { factorySpace } from 'micromark-factory-space';\nimport { markdownLineEnding, markdownLineEndingOrSpace, markdownSpace } from 'micromark-util-character';\nimport { EditMap } from './edit-map.js';\nimport { gfmTableAlign } from './infer.js';\n\n/**\n * Create an HTML extension for `micromark` to support GitHub tables syntax.\n *\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions` to enable GFM\n * table syntax.\n */\nexport function gfmTable() {\n return {\n flow: {\n null: {\n name: 'table',\n tokenize: tokenizeTable,\n resolveAll: resolveTable\n }\n }\n };\n}\n\n/**\n * @this 
{TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeTable(effects, ok, nok) {\n const self = this;\n let size = 0;\n let sizeB = 0;\n /** @type {boolean | undefined} */\n let seen;\n return start;\n\n /**\n * Start of a GFM table.\n *\n * If there is a valid table row or table head before, then we try to parse\n * another row.\n * Otherwise, we try to parse a head.\n *\n * ```markdown\n * > | | a |\n * ^\n * | | - |\n * > | | b |\n * ^\n * ```\n * @type {State}\n */\n function start(code) {\n let index = self.events.length - 1;\n while (index > -1) {\n const type = self.events[index][1].type;\n if (type === \"lineEnding\" ||\n // Note: markdown-rs uses `whitespace` instead of `linePrefix`\n type === \"linePrefix\") index--;else break;\n }\n const tail = index > -1 ? self.events[index][1].type : null;\n const next = tail === 'tableHead' || tail === 'tableRow' ? bodyRowStart : headRowBefore;\n\n // Don’t allow lazy body rows.\n if (next === bodyRowStart && self.parser.lazy[self.now().line]) {\n return nok(code);\n }\n return next(code);\n }\n\n /**\n * Before table head row.\n *\n * ```markdown\n * > | | a |\n * ^\n * | | - |\n * | | b |\n * ```\n *\n * @type {State}\n */\n function headRowBefore(code) {\n effects.enter('tableHead');\n effects.enter('tableRow');\n return headRowStart(code);\n }\n\n /**\n * Before table head row, after whitespace.\n *\n * ```markdown\n * > | | a |\n * ^\n * | | - |\n * | | b |\n * ```\n *\n * @type {State}\n */\n function headRowStart(code) {\n if (code === 124) {\n return headRowBreak(code);\n }\n\n // To do: micromark-js should let us parse our own whitespace in extensions,\n // like `markdown-rs`:\n //\n // ```js\n // // 4+ spaces.\n // if (markdownSpace(code)) {\n // return nok(code)\n // }\n // ```\n\n seen = true;\n // Count the first character, that isn’t a pipe, double.\n sizeB += 1;\n return headRowBreak(code);\n }\n\n /**\n * At break in table head row.\n *\n * ```markdown\n * > | | a |\n * ^\n * ^\n * ^\n * | | - 
|\n * | | b |\n * ```\n *\n * @type {State}\n */\n function headRowBreak(code) {\n if (code === null) {\n // Note: in `markdown-rs`, we need to reset, in `micromark-js` we don‘t.\n return nok(code);\n }\n if (markdownLineEnding(code)) {\n // If anything other than one pipe (ignoring whitespace) was used, it’s fine.\n if (sizeB > 1) {\n sizeB = 0;\n // To do: check if this works.\n // Feel free to interrupt:\n self.interrupt = true;\n effects.exit('tableRow');\n effects.enter(\"lineEnding\");\n effects.consume(code);\n effects.exit(\"lineEnding\");\n return headDelimiterStart;\n }\n\n // Note: in `markdown-rs`, we need to reset, in `micromark-js` we don‘t.\n return nok(code);\n }\n if (markdownSpace(code)) {\n // To do: check if this is fine.\n // effects.attempt(State::Next(StateName::GfmTableHeadRowBreak), State::Nok)\n // State::Retry(space_or_tab(tokenizer))\n return factorySpace(effects, headRowBreak, \"whitespace\")(code);\n }\n sizeB += 1;\n if (seen) {\n seen = false;\n // Header cell count.\n size += 1;\n }\n if (code === 124) {\n effects.enter('tableCellDivider');\n effects.consume(code);\n effects.exit('tableCellDivider');\n // Whether a delimiter was seen.\n seen = true;\n return headRowBreak;\n }\n\n // Anything else is cell data.\n effects.enter(\"data\");\n return headRowData(code);\n }\n\n /**\n * In table head row data.\n *\n * ```markdown\n * > | | a |\n * ^\n * | | - |\n * | | b |\n * ```\n *\n * @type {State}\n */\n function headRowData(code) {\n if (code === null || code === 124 || markdownLineEndingOrSpace(code)) {\n effects.exit(\"data\");\n return headRowBreak(code);\n }\n effects.consume(code);\n return code === 92 ? 
headRowEscape : headRowData;\n }\n\n /**\n * In table head row escape.\n *\n * ```markdown\n * > | | a\\-b |\n * ^\n * | | ---- |\n * | | c |\n * ```\n *\n * @type {State}\n */\n function headRowEscape(code) {\n if (code === 92 || code === 124) {\n effects.consume(code);\n return headRowData;\n }\n return headRowData(code);\n }\n\n /**\n * Before delimiter row.\n *\n * ```markdown\n * | | a |\n * > | | - |\n * ^\n * | | b |\n * ```\n *\n * @type {State}\n */\n function headDelimiterStart(code) {\n // Reset `interrupt`.\n self.interrupt = false;\n\n // Note: in `markdown-rs`, we need to handle piercing here too.\n if (self.parser.lazy[self.now().line]) {\n return nok(code);\n }\n effects.enter('tableDelimiterRow');\n // Track if we’ve seen a `:` or `|`.\n seen = false;\n if (markdownSpace(code)) {\n return factorySpace(effects, headDelimiterBefore, \"linePrefix\", self.parser.constructs.disable.null.includes('codeIndented') ? undefined : 4)(code);\n }\n return headDelimiterBefore(code);\n }\n\n /**\n * Before delimiter row, after optional whitespace.\n *\n * Reused when a `|` is found later, to parse another cell.\n *\n * ```markdown\n * | | a |\n * > | | - |\n * ^\n * | | b |\n * ```\n *\n * @type {State}\n */\n function headDelimiterBefore(code) {\n if (code === 45 || code === 58) {\n return headDelimiterValueBefore(code);\n }\n if (code === 124) {\n seen = true;\n // If we start with a pipe, we open a cell marker.\n effects.enter('tableCellDivider');\n effects.consume(code);\n effects.exit('tableCellDivider');\n return headDelimiterCellBefore;\n }\n\n // More whitespace / empty row not allowed at start.\n return headDelimiterNok(code);\n }\n\n /**\n * After `|`, before delimiter cell.\n *\n * ```markdown\n * | | a |\n * > | | - |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterCellBefore(code) {\n if (markdownSpace(code)) {\n return factorySpace(effects, headDelimiterValueBefore, \"whitespace\")(code);\n }\n return 
headDelimiterValueBefore(code);\n }\n\n /**\n * Before delimiter cell value.\n *\n * ```markdown\n * | | a |\n * > | | - |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterValueBefore(code) {\n // Align: left.\n if (code === 58) {\n sizeB += 1;\n seen = true;\n effects.enter('tableDelimiterMarker');\n effects.consume(code);\n effects.exit('tableDelimiterMarker');\n return headDelimiterLeftAlignmentAfter;\n }\n\n // Align: none.\n if (code === 45) {\n sizeB += 1;\n // To do: seems weird that this *isn’t* left aligned, but that state is used?\n return headDelimiterLeftAlignmentAfter(code);\n }\n if (code === null || markdownLineEnding(code)) {\n return headDelimiterCellAfter(code);\n }\n return headDelimiterNok(code);\n }\n\n /**\n * After delimiter cell left alignment marker.\n *\n * ```markdown\n * | | a |\n * > | | :- |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterLeftAlignmentAfter(code) {\n if (code === 45) {\n effects.enter('tableDelimiterFiller');\n return headDelimiterFiller(code);\n }\n\n // Anything else is not ok after the left-align colon.\n return headDelimiterNok(code);\n }\n\n /**\n * In delimiter cell filler.\n *\n * ```markdown\n * | | a |\n * > | | - |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterFiller(code) {\n if (code === 45) {\n effects.consume(code);\n return headDelimiterFiller;\n }\n\n // Align is `center` if it was `left`, `right` otherwise.\n if (code === 58) {\n seen = true;\n effects.exit('tableDelimiterFiller');\n effects.enter('tableDelimiterMarker');\n effects.consume(code);\n effects.exit('tableDelimiterMarker');\n return headDelimiterRightAlignmentAfter;\n }\n effects.exit('tableDelimiterFiller');\n return headDelimiterRightAlignmentAfter(code);\n }\n\n /**\n * After delimiter cell right alignment marker.\n *\n * ```markdown\n * | | a |\n * > | | -: |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterRightAlignmentAfter(code) {\n if (markdownSpace(code)) {\n 
return factorySpace(effects, headDelimiterCellAfter, \"whitespace\")(code);\n }\n return headDelimiterCellAfter(code);\n }\n\n /**\n * After delimiter cell.\n *\n * ```markdown\n * | | a |\n * > | | -: |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterCellAfter(code) {\n if (code === 124) {\n return headDelimiterBefore(code);\n }\n if (code === null || markdownLineEnding(code)) {\n // Exit when:\n // * there was no `:` or `|` at all (it’s a thematic break or setext\n // underline instead)\n // * the header cell count is not the delimiter cell count\n if (!seen || size !== sizeB) {\n return headDelimiterNok(code);\n }\n\n // Note: in markdown-rs`, a reset is needed here.\n effects.exit('tableDelimiterRow');\n effects.exit('tableHead');\n // To do: in `markdown-rs`, resolvers need to be registered manually.\n // effects.register_resolver(ResolveName::GfmTable)\n return ok(code);\n }\n return headDelimiterNok(code);\n }\n\n /**\n * In delimiter row, at a disallowed byte.\n *\n * ```markdown\n * | | a |\n * > | | x |\n * ^\n * ```\n *\n * @type {State}\n */\n function headDelimiterNok(code) {\n // Note: in `markdown-rs`, we need to reset, in `micromark-js` we don‘t.\n return nok(code);\n }\n\n /**\n * Before table body row.\n *\n * ```markdown\n * | | a |\n * | | - |\n * > | | b |\n * ^\n * ```\n *\n * @type {State}\n */\n function bodyRowStart(code) {\n // Note: in `markdown-rs` we need to manually take care of a prefix,\n // but in `micromark-js` that is done for us, so if we’re here, we’re\n // never at whitespace.\n effects.enter('tableRow');\n return bodyRowBreak(code);\n }\n\n /**\n * At break in table body row.\n *\n * ```markdown\n * | | a |\n * | | - |\n * > | | b |\n * ^\n * ^\n * ^\n * ```\n *\n * @type {State}\n */\n function bodyRowBreak(code) {\n if (code === 124) {\n effects.enter('tableCellDivider');\n effects.consume(code);\n effects.exit('tableCellDivider');\n return bodyRowBreak;\n }\n if (code === null || markdownLineEnding(code)) 
{\n effects.exit('tableRow');\n return ok(code);\n }\n if (markdownSpace(code)) {\n return factorySpace(effects, bodyRowBreak, \"whitespace\")(code);\n }\n\n // Anything else is cell content.\n effects.enter(\"data\");\n return bodyRowData(code);\n }\n\n /**\n * In table body row data.\n *\n * ```markdown\n * | | a |\n * | | - |\n * > | | b |\n * ^\n * ```\n *\n * @type {State}\n */\n function bodyRowData(code) {\n if (code === null || code === 124 || markdownLineEndingOrSpace(code)) {\n effects.exit(\"data\");\n return bodyRowBreak(code);\n }\n effects.consume(code);\n return code === 92 ? bodyRowEscape : bodyRowData;\n }\n\n /**\n * In table body row escape.\n *\n * ```markdown\n * | | a |\n * | | ---- |\n * > | | b\\-c |\n * ^\n * ```\n *\n * @type {State}\n */\n function bodyRowEscape(code) {\n if (code === 92 || code === 124) {\n effects.consume(code);\n return bodyRowData;\n }\n return bodyRowData(code);\n }\n}\n\n/** @type {Resolver} */\n\nfunction resolveTable(events, context) {\n let index = -1;\n let inFirstCellAwaitingPipe = true;\n /** @type {RowKind} */\n let rowKind = 0;\n /** @type {Range} */\n let lastCell = [0, 0, 0, 0];\n /** @type {Range} */\n let cell = [0, 0, 0, 0];\n let afterHeadAwaitingFirstBodyRow = false;\n let lastTableEnd = 0;\n /** @type {Token | undefined} */\n let currentTable;\n /** @type {Token | undefined} */\n let currentBody;\n /** @type {Token | undefined} */\n let currentCell;\n const map = new EditMap();\n while (++index < events.length) {\n const event = events[index];\n const token = event[1];\n if (event[0] === 'enter') {\n // Start of head.\n if (token.type === 'tableHead') {\n afterHeadAwaitingFirstBodyRow = false;\n\n // Inject previous (body end and) table end.\n if (lastTableEnd !== 0) {\n flushTableEnd(map, context, lastTableEnd, currentTable, currentBody);\n currentBody = undefined;\n lastTableEnd = 0;\n }\n\n // Inject table start.\n currentTable = {\n type: 'table',\n start: Object.assign({}, token.start),\n // 
Note: correct end is set later.\n end: Object.assign({}, token.end)\n };\n map.add(index, 0, [['enter', currentTable, context]]);\n } else if (token.type === 'tableRow' || token.type === 'tableDelimiterRow') {\n inFirstCellAwaitingPipe = true;\n currentCell = undefined;\n lastCell = [0, 0, 0, 0];\n cell = [0, index + 1, 0, 0];\n\n // Inject table body start.\n if (afterHeadAwaitingFirstBodyRow) {\n afterHeadAwaitingFirstBodyRow = false;\n currentBody = {\n type: 'tableBody',\n start: Object.assign({}, token.start),\n // Note: correct end is set later.\n end: Object.assign({}, token.end)\n };\n map.add(index, 0, [['enter', currentBody, context]]);\n }\n rowKind = token.type === 'tableDelimiterRow' ? 2 : currentBody ? 3 : 1;\n }\n // Cell data.\n else if (rowKind && (token.type === \"data\" || token.type === 'tableDelimiterMarker' || token.type === 'tableDelimiterFiller')) {\n inFirstCellAwaitingPipe = false;\n\n // First value in cell.\n if (cell[2] === 0) {\n if (lastCell[1] !== 0) {\n cell[0] = cell[1];\n currentCell = flushCell(map, context, lastCell, rowKind, undefined, currentCell);\n lastCell = [0, 0, 0, 0];\n }\n cell[2] = index;\n }\n } else if (token.type === 'tableCellDivider') {\n if (inFirstCellAwaitingPipe) {\n inFirstCellAwaitingPipe = false;\n } else {\n if (lastCell[1] !== 0) {\n cell[0] = cell[1];\n currentCell = flushCell(map, context, lastCell, rowKind, undefined, currentCell);\n }\n lastCell = cell;\n cell = [lastCell[1], index, 0, 0];\n }\n }\n }\n // Exit events.\n else if (token.type === 'tableHead') {\n afterHeadAwaitingFirstBodyRow = true;\n lastTableEnd = index;\n } else if (token.type === 'tableRow' || token.type === 'tableDelimiterRow') {\n lastTableEnd = index;\n if (lastCell[1] !== 0) {\n cell[0] = cell[1];\n currentCell = flushCell(map, context, lastCell, rowKind, index, currentCell);\n } else if (cell[1] !== 0) {\n currentCell = flushCell(map, context, cell, rowKind, index, currentCell);\n }\n rowKind = 0;\n } else if (rowKind && 
(token.type === \"data\" || token.type === 'tableDelimiterMarker' || token.type === 'tableDelimiterFiller')) {\n cell[3] = index;\n }\n }\n if (lastTableEnd !== 0) {\n flushTableEnd(map, context, lastTableEnd, currentTable, currentBody);\n }\n map.consume(context.events);\n\n // To do: move this into `html`, when events are exposed there.\n // That’s what `markdown-rs` does.\n // That needs updates to `mdast-util-gfm-table`.\n index = -1;\n while (++index < context.events.length) {\n const event = context.events[index];\n if (event[0] === 'enter' && event[1].type === 'table') {\n event[1]._align = gfmTableAlign(context.events, index);\n }\n }\n return events;\n}\n\n/**\n * Generate a cell.\n *\n * @param {EditMap} map\n * @param {Readonly} context\n * @param {Readonly} range\n * @param {RowKind} rowKind\n * @param {number | undefined} rowEnd\n * @param {Token | undefined} previousCell\n * @returns {Token | undefined}\n */\n// eslint-disable-next-line max-params\nfunction flushCell(map, context, range, rowKind, rowEnd, previousCell) {\n // `markdown-rs` uses:\n // rowKind === 2 ? 'tableDelimiterCell' : 'tableCell'\n const groupName = rowKind === 1 ? 'tableHeader' : rowKind === 2 ? 'tableDelimiter' : 'tableData';\n // `markdown-rs` uses:\n // rowKind === 2 ? 
'tableDelimiterCellValue' : 'tableCellText'\n const valueName = 'tableContent';\n\n // Insert an exit for the previous cell, if there is one.\n //\n // ```markdown\n // > | | aa | bb | cc |\n // ^-- exit\n // ^^^^-- this cell\n // ```\n if (range[0] !== 0) {\n previousCell.end = Object.assign({}, getPoint(context.events, range[0]));\n map.add(range[0], 0, [['exit', previousCell, context]]);\n }\n\n // Insert enter of this cell.\n //\n // ```markdown\n // > | | aa | bb | cc |\n // ^-- enter\n // ^^^^-- this cell\n // ```\n const now = getPoint(context.events, range[1]);\n previousCell = {\n type: groupName,\n start: Object.assign({}, now),\n // Note: correct end is set later.\n end: Object.assign({}, now)\n };\n map.add(range[1], 0, [['enter', previousCell, context]]);\n\n // Insert text start at first data start and end at last data end, and\n // remove events between.\n //\n // ```markdown\n // > | | aa | bb | cc |\n // ^-- enter\n // ^-- exit\n // ^^^^-- this cell\n // ```\n if (range[2] !== 0) {\n const relatedStart = getPoint(context.events, range[2]);\n const relatedEnd = getPoint(context.events, range[3]);\n /** @type {Token} */\n const valueToken = {\n type: valueName,\n start: Object.assign({}, relatedStart),\n end: Object.assign({}, relatedEnd)\n };\n map.add(range[2], 0, [['enter', valueToken, context]]);\n if (rowKind !== 2) {\n // Fix positional info on remaining events\n const start = context.events[range[2]];\n const end = context.events[range[3]];\n start[1].end = Object.assign({}, end[1].end);\n start[1].type = \"chunkText\";\n start[1].contentType = \"text\";\n\n // Remove if needed.\n if (range[3] > range[2] + 1) {\n const a = range[2] + 1;\n const b = range[3] - range[2] - 1;\n map.add(a, b, []);\n }\n }\n map.add(range[3] + 1, 0, [['exit', valueToken, context]]);\n }\n\n // Insert an exit for the last cell, if at the row end.\n //\n // ```markdown\n // > | | aa | bb | cc |\n // ^-- exit\n // ^^^^^^-- this cell (the last one contains two 
“between” parts)\n // ```\n if (rowEnd !== undefined) {\n previousCell.end = Object.assign({}, getPoint(context.events, rowEnd));\n map.add(rowEnd, 0, [['exit', previousCell, context]]);\n previousCell = undefined;\n }\n return previousCell;\n}\n\n/**\n * Generate table end (and table body end).\n *\n * @param {Readonly} map\n * @param {Readonly} context\n * @param {number} index\n * @param {Token} table\n * @param {Token | undefined} tableBody\n */\n// eslint-disable-next-line max-params\nfunction flushTableEnd(map, context, index, table, tableBody) {\n /** @type {Array} */\n const exits = [];\n const related = getPoint(context.events, index);\n if (tableBody) {\n tableBody.end = Object.assign({}, related);\n exits.push(['exit', tableBody, context]);\n }\n table.end = Object.assign({}, related);\n exits.push(['exit', table, context]);\n map.add(index + 1, 0, exits);\n}\n\n/**\n * @param {Readonly>} events\n * @param {number} index\n * @returns {Readonly}\n */\nfunction getPoint(events, index) {\n const event = events[index];\n const side = event[0] === 'enter' ? 
'start' : 'end';\n return event[1][side];\n}","/**\n * @import {Extension, State, TokenizeContext, Tokenizer} from 'micromark-util-types'\n */\n\nimport { factorySpace } from 'micromark-factory-space';\nimport { markdownLineEnding, markdownLineEndingOrSpace, markdownSpace } from 'micromark-util-character';\nconst tasklistCheck = {\n name: 'tasklistCheck',\n tokenize: tokenizeTasklistCheck\n};\n\n/**\n * Create an HTML extension for `micromark` to support GFM task list items\n * syntax.\n *\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `htmlExtensions` to\n * support GFM task list items when serializing to HTML.\n */\nexport function gfmTaskListItem() {\n return {\n text: {\n [91]: tasklistCheck\n }\n };\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction tokenizeTasklistCheck(effects, ok, nok) {\n const self = this;\n return open;\n\n /**\n * At start of task list item check.\n *\n * ```markdown\n * > | * [x] y.\n * ^\n * ```\n *\n * @type {State}\n */\n function open(code) {\n if (\n // Exit if there’s stuff before.\n self.previous !== null ||\n // Exit if not in the first content that is the first child of a list\n // item.\n !self._gfmTasklistFirstContentOfListItem) {\n return nok(code);\n }\n effects.enter('taskListCheck');\n effects.enter('taskListCheckMarker');\n effects.consume(code);\n effects.exit('taskListCheckMarker');\n return inside;\n }\n\n /**\n * In task list item check.\n *\n * ```markdown\n * > | * [x] y.\n * ^\n * ```\n *\n * @type {State}\n */\n function inside(code) {\n // Currently we match how GH works in files.\n // To match how GH works in comments, use `markdownSpace` (`[\\t ]`) instead\n // of `markdownLineEndingOrSpace` (`[\\t\\n\\r ]`).\n if (markdownLineEndingOrSpace(code)) {\n effects.enter('taskListCheckValueUnchecked');\n effects.consume(code);\n effects.exit('taskListCheckValueUnchecked');\n return close;\n }\n if (code === 88 || code === 120) {\n 
effects.enter('taskListCheckValueChecked');\n effects.consume(code);\n effects.exit('taskListCheckValueChecked');\n return close;\n }\n return nok(code);\n }\n\n /**\n * At close of task list item check.\n *\n * ```markdown\n * > | * [x] y.\n * ^\n * ```\n *\n * @type {State}\n */\n function close(code) {\n if (code === 93) {\n effects.enter('taskListCheckMarker');\n effects.consume(code);\n effects.exit('taskListCheckMarker');\n effects.exit('taskListCheck');\n return after;\n }\n return nok(code);\n }\n\n /**\n * @type {State}\n */\n function after(code) {\n // EOL in paragraph means there must be something else after it.\n if (markdownLineEnding(code)) {\n return ok(code);\n }\n\n // Space or tab?\n // Check what comes after.\n if (markdownSpace(code)) {\n return effects.check({\n tokenize: spaceThenNonSpace\n }, ok, nok)(code);\n }\n\n // EOF, or non-whitespace, both wrong.\n return nok(code);\n }\n}\n\n/**\n * @this {TokenizeContext}\n * @type {Tokenizer}\n */\nfunction spaceThenNonSpace(effects, ok, nok) {\n return factorySpace(effects, after, \"whitespace\");\n\n /**\n * After whitespace, after task list item check.\n *\n * ```markdown\n * > | * [x] y.\n * ^\n * ```\n *\n * @type {State}\n */\n function after(code) {\n // EOF means there was nothing, so bad.\n // EOL means there’s content after it, so good.\n // Impossible to have more spaces.\n // Anything else is good.\n return code === null ? 
nok(code) : ok(code);\n }\n}","/**\n * @import {Root} from 'mdast'\n * @import {Options} from 'remark-gfm'\n * @import {} from 'remark-parse'\n * @import {} from 'remark-stringify'\n * @import {Processor} from 'unified'\n */\n\nimport {gfmFromMarkdown, gfmToMarkdown} from 'mdast-util-gfm'\nimport {gfm} from 'micromark-extension-gfm'\n\n/** @type {Options} */\nconst emptyOptions = {}\n\n/**\n * Add support GFM (autolink literals, footnotes, strikethrough, tables,\n * tasklists).\n *\n * @param {Options | null | undefined} [options]\n * Configuration (optional).\n * @returns {undefined}\n * Nothing.\n */\nexport default function remarkGfm(options) {\n // @ts-expect-error: TS is wrong about `this`.\n // eslint-disable-next-line unicorn/no-this-assignment\n const self = /** @type {Processor} */ (this)\n const settings = options || emptyOptions\n const data = self.data()\n\n const micromarkExtensions =\n data.micromarkExtensions || (data.micromarkExtensions = [])\n const fromMarkdownExtensions =\n data.fromMarkdownExtensions || (data.fromMarkdownExtensions = [])\n const toMarkdownExtensions =\n data.toMarkdownExtensions || (data.toMarkdownExtensions = [])\n\n micromarkExtensions.push(gfm(settings))\n fromMarkdownExtensions.push(gfmFromMarkdown())\n toMarkdownExtensions.push(gfmToMarkdown(settings))\n}\n","/**\n * @typedef {import('micromark-extension-gfm-footnote').HtmlOptions} HtmlOptions\n * @typedef {import('micromark-extension-gfm-strikethrough').Options} Options\n * @typedef {import('micromark-util-types').Extension} Extension\n * @typedef {import('micromark-util-types').HtmlExtension} HtmlExtension\n */\n\nimport {\n combineExtensions,\n combineHtmlExtensions\n} from 'micromark-util-combine-extensions'\nimport {\n gfmAutolinkLiteral,\n gfmAutolinkLiteralHtml\n} from 'micromark-extension-gfm-autolink-literal'\nimport {gfmFootnote, gfmFootnoteHtml} from 'micromark-extension-gfm-footnote'\nimport {\n gfmStrikethrough,\n gfmStrikethroughHtml\n} from 
'micromark-extension-gfm-strikethrough'\nimport {gfmTable, gfmTableHtml} from 'micromark-extension-gfm-table'\nimport {gfmTagfilterHtml} from 'micromark-extension-gfm-tagfilter'\nimport {\n gfmTaskListItem,\n gfmTaskListItemHtml\n} from 'micromark-extension-gfm-task-list-item'\n\n/**\n * Create an extension for `micromark` to enable GFM syntax.\n *\n * @param {Options | null | undefined} [options]\n * Configuration (optional).\n *\n * Passed to `micromark-extens-gfm-strikethrough`.\n * @returns {Extension}\n * Extension for `micromark` that can be passed in `extensions` to enable GFM\n * syntax.\n */\nexport function gfm(options) {\n return combineExtensions([\n gfmAutolinkLiteral(),\n gfmFootnote(),\n gfmStrikethrough(options),\n gfmTable(),\n gfmTaskListItem()\n ])\n}\n\n/**\n * Create an extension for `micromark` to support GFM when serializing to HTML.\n *\n * @param {HtmlOptions | null | undefined} [options]\n * Configuration (optional).\n *\n * Passed to `micromark-extens-gfm-footnote`.\n * @returns {HtmlExtension}\n * Extension for `micromark` that can be passed in `htmlExtensions` to\n * support GFM when serializing to HTML.\n */\nexport function gfmHtml(options) {\n return combineHtmlExtensions([\n gfmAutolinkLiteralHtml(),\n gfmFootnoteHtml(options),\n gfmStrikethroughHtml(),\n gfmTableHtml(),\n gfmTagfilterHtml(),\n gfmTaskListItemHtml()\n ])\n}\n","/**\n * @import {Extension as FromMarkdownExtension} from 'mdast-util-from-markdown'\n * @import {Options} from 'mdast-util-gfm'\n * @import {Options as ToMarkdownExtension} from 'mdast-util-to-markdown'\n */\n\nimport {\n gfmAutolinkLiteralFromMarkdown,\n gfmAutolinkLiteralToMarkdown\n} from 'mdast-util-gfm-autolink-literal'\nimport {\n gfmFootnoteFromMarkdown,\n gfmFootnoteToMarkdown\n} from 'mdast-util-gfm-footnote'\nimport {\n gfmStrikethroughFromMarkdown,\n gfmStrikethroughToMarkdown\n} from 'mdast-util-gfm-strikethrough'\nimport {gfmTableFromMarkdown, gfmTableToMarkdown} from 
'mdast-util-gfm-table'\nimport {\n gfmTaskListItemFromMarkdown,\n gfmTaskListItemToMarkdown\n} from 'mdast-util-gfm-task-list-item'\n\n/**\n * Create an extension for `mdast-util-from-markdown` to enable GFM (autolink\n * literals, footnotes, strikethrough, tables, tasklists).\n *\n * @returns {Array}\n * Extension for `mdast-util-from-markdown` to enable GFM (autolink literals,\n * footnotes, strikethrough, tables, tasklists).\n */\nexport function gfmFromMarkdown() {\n return [\n gfmAutolinkLiteralFromMarkdown(),\n gfmFootnoteFromMarkdown(),\n gfmStrikethroughFromMarkdown(),\n gfmTableFromMarkdown(),\n gfmTaskListItemFromMarkdown()\n ]\n}\n\n/**\n * Create an extension for `mdast-util-to-markdown` to enable GFM (autolink\n * literals, footnotes, strikethrough, tables, tasklists).\n *\n * @param {Options | null | undefined} [options]\n * Configuration (optional).\n * @returns {ToMarkdownExtension}\n * Extension for `mdast-util-to-markdown` to enable GFM (autolink literals,\n * footnotes, strikethrough, tables, tasklists).\n */\nexport function gfmToMarkdown(options) {\n return {\n extensions: [\n gfmAutolinkLiteralToMarkdown(),\n gfmFootnoteToMarkdown(options),\n gfmStrikethroughToMarkdown(),\n gfmTableToMarkdown(options),\n gfmTaskListItemToMarkdown()\n ]\n }\n}\n","import React, {memo, useCallback, useState} from 'react';\r\nimport styled from 'styled-components';\r\nimport {useSelector} from 'react-redux';\r\nimport {RootState} from '../store';\r\nimport ReactMarkdown from 'react-markdown';\r\nimport remarkGfm from 'remark-gfm';\r\nimport Prism from 'prismjs';\r\nimport FormatBoldIcon from '@mui/icons-material/FormatBold';\r\nimport FormatItalicIcon from '@mui/icons-material/FormatItalic';\r\nimport CodeIcon from '@mui/icons-material/Code';\r\nimport FormatListBulletedIcon from '@mui/icons-material/FormatListBulleted';\r\nimport FormatQuoteIcon from '@mui/icons-material/FormatQuote';\r\nimport LinkIcon from '@mui/icons-material/Link';\r\nimport TitleIcon from 
'@mui/icons-material/Title';\r\nimport TableChartIcon from '@mui/icons-material/TableChart';\r\nimport CheckBoxIcon from '@mui/icons-material/CheckBox';\r\nimport ImageIcon from '@mui/icons-material/Image';\r\nimport VisibilityIcon from '@mui/icons-material/Visibility';\r\nimport KeyboardArrowUpIcon from '@mui/icons-material/KeyboardArrowUp';\r\nimport KeyboardArrowDownIcon from '@mui/icons-material/KeyboardArrowDown';\r\nimport EditIcon from '@mui/icons-material/Edit';\r\nimport {debounce} from '../utils/tabHandling';\r\n\r\nconst CollapseButton = styled.button`\r\n position: absolute;\r\n top: -12px;\r\n right: 24px;\r\n width: 24px;\r\n height: 24px;\r\n border-radius: 50%;\r\n background: ${({theme}) => theme.colors.surface};\r\n border: 1px solid ${({theme}) => theme.colors.border};\r\n display: flex;\r\n align-items: center;\r\n justify-content: center;\r\n cursor: pointer;\r\n color: ${({theme}) => theme.colors.text};\r\n transition: all 0.2s ease;\r\n &:hover {\r\n background: ${({theme}) => theme.colors.hover};\r\n transform: translateY(-1px);\r\n }\r\n`;\r\nconst CollapsedPlaceholder = styled.div`\r\n padding: 0.75rem;\r\n background: ${({theme}) => theme.colors.surface}dd;\r\n border-top: 1px solid ${({theme}) => theme.colors.border};\r\n display: flex;\r\n align-items: center;\r\n justify-content: center;\r\n cursor: pointer;\r\n position: sticky;\r\n bottom: 0;\r\n backdrop-filter: blur(16px);\r\n &:hover {\r\n background: ${({theme}) => theme.colors.hover};\r\n }\r\n`;\r\n\r\nconst PreviewContainer = styled.div`\r\n padding: 0.5rem;\r\n border: 1px solid ${props => props.theme.colors.border};\r\n border-radius: 0 0 ${props => props.theme.sizing.borderRadius.md} ${props => props.theme.sizing.borderRadius.md};\r\n background: ${props => props.theme.colors.background};\r\n min-height: 120px;\r\n max-height: ${({theme}) => theme.sizing.console.maxHeight};\r\n overflow-y: auto;\r\n pre {\r\n background: ${props => props.theme.colors.surface};\r\n padding: 
1rem;\r\n border-radius: ${props => props.theme.sizing.borderRadius.sm};\r\n overflow-x: auto;\r\n }\r\n code {\r\n font-family: monospace;\r\n }\r\n`;\r\n\r\nconst DEBUG = process.env.NODE_ENV === 'development';\r\nconst log = (message: string, data?: unknown) => {\r\n if (DEBUG) {\r\n if (data) {\r\n console.debug(`[InputArea] ${message}`, data);\r\n } else {\r\n console.debug(`[InputArea] ${message}`);\r\n }\r\n }\r\n};\r\n\r\nconst logError = (message: string, error?: unknown) => {\r\n console.error(`[InputArea] ${message}`, error);\r\n};\r\n\r\ninterface InputContainerProps {\r\n $hide?: boolean;\r\n}\r\n\r\nconst InputContainer = styled.div`\r\n padding: 1.5rem;\r\n background-color: ${(props) => props.theme.colors.surface};\r\n /* Add test id */\r\n &[data-testid] {\r\n outline: none;\r\n\r\n }\r\n border-top: 1px solid ${(props) => props.theme.colors.border};\r\n display: ${({theme, $hide}) => $hide ? 'none' : 'block'};\r\n position: sticky;\r\n bottom: 0;\r\n z-index: 10;\r\n backdrop-filter: blur(16px) saturate(180%);\r\n box-shadow: 0 -4px 16px rgba(0, 0, 0, 0.15);\r\n background: ${({theme}) => `linear-gradient(to top,\r\n\r\n ${theme.colors.surface}dd,\r\n ${theme.colors.background}aa\r\n )`};\r\n`;\r\nconst StyledForm = styled.form`\r\n display: flex;\r\n gap: 1rem;\r\n align-items: flex-start;\r\n`;\r\nconst EditorToolbar = styled.div`\r\n display: flex;\r\n gap: 0.25rem;\r\n padding: 0.5rem;\r\n flex-wrap: wrap;\r\n background: ${({theme}) => theme.colors.surface};\r\n border: 1px solid ${({theme}) => theme.colors.border};\r\n border-bottom: none;\r\n border-radius: ${({theme}) => theme.sizing.borderRadius.md}\r\n\r\n ${({theme}) => theme.sizing.borderRadius.md} 0 0;\r\n /* Toolbar sections */\r\n .toolbar-section {\r\n display: flex;\r\n gap: 0.25rem;\r\n padding: 0 0.5rem;\r\n border-right: 1px solid ${({theme}) => theme.colors.border};\r\n &:last-child {\r\n border-right: none;\r\n }\r\n }\r\n`;\r\nconst ToolbarButton = styled.button`\r\n 
padding: 0.5rem;\r\n background: transparent;\r\n border: none;\r\n border-radius: ${({theme}) => theme.sizing.borderRadius.sm};\r\n cursor: pointer;\r\n color: ${({theme}) => theme.colors.text};\r\n &:hover {\r\n background: ${({theme}) => theme.colors.hover};\r\n }\r\n &.active {\r\n color: ${({theme}) => theme.colors.primary};\r\n }\r\n`;\r\n\r\nconst TextArea = styled.textarea`\r\n width: 100%;\r\n padding: 0.5rem;\r\n border-radius: ${(props) => props.theme.sizing.borderRadius.md};\r\n border: 1px solid ${(props) => props.theme.colors.border};\r\n font-family: inherit;\r\n resize: vertical;\r\n min-height: 40px;\r\n max-height: ${({theme}) => theme.sizing.console.maxHeight};\r\n border-radius: 0 0 ${(props) => props.theme.sizing.borderRadius.md} ${(props) => props.theme.sizing.borderRadius.md};\r\n transition: all 0.3s ease;\r\n background: ${({theme}) => theme.colors.background};\r\n\r\n &:focus {\r\n outline: none;\r\n border-color: ${(props) => props.theme.colors.primary};\r\n box-shadow: 0 0 0 2px ${({theme}) => `${theme.colors.primary}40`};\r\n transform: translateY(-1px);\r\n }\r\n &:disabled {\r\n background-color: ${(props) => props.theme.colors.disabled};\r\n cursor: not-allowed;\r\n }\r\n`;\r\nconst SendButton = styled.button`\r\n padding: 0.75rem 1.5rem;\r\n background: ${({theme}) => `linear-gradient(135deg,\r\n\r\n ${theme.colors.primary},\r\n\r\n ${theme.colors.primaryDark}\r\n )`};\r\n color: white;\r\n border: none;\r\n border-radius: ${(props) => props.theme.sizing.borderRadius.md};\r\n cursor: pointer;\r\n transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1);\r\n font-weight: ${({theme}) => theme.typography.fontWeight.medium};\r\n text-transform: uppercase;\r\n letter-spacing: 0.5px;\r\n position: relative;\r\n overflow: hidden;\r\n min-width: 120px;\r\n\r\n &:disabled {\r\n opacity: 0.5;\r\n cursor: not-allowed;\r\n }\r\n &:hover:not(:disabled) {\r\n background: ${({theme}) => `linear-gradient(135deg,\r\n ${theme.colors.primaryDark},\r\n 
${theme.colors.primary}\r\n )`};\r\n transform: translateY(-2px);\r\n box-shadow: 0 8px 16px ${({theme}) => theme.colors.primary + '40'};\r\n }\r\n\r\n &:active:not(:disabled) {\r\n transform: translateY(0);\r\n }\r\n\r\n &:after {\r\n content: '';\r\n position: absolute;\r\n top: 0;\r\n left: 0;\r\n width: 100%;\r\n height: 100%;\r\n background: linear-gradient(rgba(255, 255, 255, 0.2), transparent);\r\n pointer-events: none;\r\n }\r\n`;\r\n\r\ninterface InputAreaProps {\r\n onSendMessage: (message: string) => void;\r\n isWebSocketConnected?: boolean;\r\n}\r\n\r\nconst InputArea = memo(function InputArea({onSendMessage, isWebSocketConnected = true}: InputAreaProps) {\r\n\r\n const [message, setMessage] = useState('');\r\n\r\n const [isPreviewMode, setIsPreviewMode] = useState(false);\r\n const [isCollapsed, setIsCollapsed] = useState(false);\r\n const config = useSelector((state: RootState) => state.config);\r\n const messages = useSelector((state: RootState) => state.messages.messages);\r\n const [isSubmitting, setIsSubmitting] = useState(false);\r\n const handleToggleCollapse = useCallback(() => {\r\n setIsCollapsed(prev => {\r\n const newVal = !prev;\r\n\r\n if (!newVal) {\r\n setTimeout(() => textAreaRef.current?.focus(), 0);\r\n }\r\n return newVal;\r\n });\r\n }, []);\r\n const textAreaRef = React.useRef(null);\r\n const shouldHideInput = config.inputCnt > 0 && messages.length > config.inputCnt;\r\n\r\n React.useEffect(() => {\r\n if (isPreviewMode) {\r\n Prism.highlightAll();\r\n }\r\n }, [isPreviewMode, message]);\r\n const insertMarkdown = useCallback((syntax: string) => {\r\n const textarea = textAreaRef.current;\r\n if (textarea) {\r\n const start = textarea.selectionStart;\r\n const end = textarea.selectionEnd;\r\n const selectedText = textarea.value.substring(start, end);\r\n const newText = syntax.replace('$1', selectedText || 'text');\r\n setMessage(prev => prev.substring(0, start) + newText + prev.substring(end));\r\n\r\n setTimeout(() => {\r\n 
const newCursorPos = start + newText.indexOf(selectedText || 'text');\r\n textarea.focus();\r\n textarea.setSelectionRange(newCursorPos, newCursorPos + (selectedText || 'text').length);\r\n }, 0);\r\n }\r\n }, []);\r\n const insertTable = useCallback(() => {\r\n const tableTemplate = `\r\n| Header 1 | Header 2 | Header 3 |\r\n|----------|----------|----------|\r\n| Cell 1 | Cell 2 | Cell 3 |\r\n| Cell 4 | Cell 5 | Cell 6 |\r\n`.trim() + '\\n';\r\n insertMarkdown(tableTemplate);\r\n }, [insertMarkdown]);\r\n\r\n const handleSubmit = useCallback((e: React.FormEvent) => {\r\n e.preventDefault();\r\n if (isSubmitting || !isWebSocketConnected) return;\r\n\r\n if (message.trim()) {\r\n setIsSubmitting(true);\r\n if (DEBUG) {\r\n log('Sending message', {\r\n messageLength: message.length,\r\n message: message.substring(0, 100) + (message.length > 100 ? '...' : '')\r\n });\r\n }\r\n Promise.resolve(onSendMessage(message)).finally(() => {\r\n setMessage('');\r\n setIsSubmitting(false);\r\n }).catch(error => {\r\n logError('Failed to send message', error);\r\n });\r\n } else {\r\n log('Empty message submission prevented');\r\n }\r\n }, [message, onSendMessage, isSubmitting, isWebSocketConnected, DEBUG]);\r\n\r\n const handleMessageChange = useCallback((e: React.ChangeEvent) => {\r\n const newMessage = e.target.value;\r\n setMessage(newMessage);\r\n }, []);\r\n\r\n const handleKeyPress = useCallback((e: React.KeyboardEvent) => {\r\n if (e.key === 'Enter' && !e.shiftKey && isWebSocketConnected) {\r\n e.preventDefault();\r\n handleSubmit(e);\r\n }\r\n }, [handleSubmit, isWebSocketConnected]);\r\n\r\n React.useEffect(() => {\r\n try {\r\n textAreaRef.current?.focus();\r\n } catch (error) {\r\n logError('Failed to focus input on mount', error);\r\n }\r\n return () => {\r\n\r\n };\r\n }, [config]);\r\n\r\n const connectionStatusMessage = !isWebSocketConnected ? (\r\n
    \r\n ⚠️ Connection lost. Reconnecting... (Your message will be preserved)\r\n
    \r\n ) : null;\r\n\r\n if (isCollapsed) {\r\n return (\r\n \r\n \r\n \r\n \r\n \r\n Click to expand input\r\n {connectionStatusMessage}\r\n \r\n \r\n );\r\n }\r\n return (\r\n \r\n \r\n \r\n\r\n \r\n
    \r\n \r\n
    \r\n \r\n
    \r\n {\r\n const newValue = !isPreviewMode;\r\n debounce(() => setIsPreviewMode(newValue), 150)();\r\n }}\r\n title={isPreviewMode ? \"Edit\" : \"Preview\"}\r\n className={isPreviewMode ? 'active' : ''}\r\n >\r\n {isPreviewMode ? : }\r\n \r\n
    \r\n
    \r\n insertMarkdown('# $1')}\r\n title=\"Heading\"\r\n >\r\n \r\n \r\n insertMarkdown('**$1**')}\r\n title=\"Bold\"\r\n >\r\n \r\n \r\n insertMarkdown('*$1*')}\r\n title=\"Italic\"\r\n >\r\n \r\n \r\n
    \r\n
    \r\n insertMarkdown('`$1`')}\r\n title=\"Inline Code\"\r\n >\r\n \r\n \r\n insertMarkdown('```\\n$1\\n```')}\r\n title=\"Code Block\"\r\n >\r\n
    \r\n \r\n \r\n
    \r\n \r\n
    \r\n
    \r\n insertMarkdown('- $1')}\r\n title=\"Bullet List\"\r\n >\r\n \r\n \r\n insertMarkdown('> $1')}\r\n title=\"Quote\"\r\n >\r\n \r\n \r\n insertMarkdown('- [ ] $1')}\r\n title=\"Task List\"\r\n >\r\n \r\n \r\n
    \r\n
    \r\n insertMarkdown('[$1](url)')}\r\n title=\"Link\"\r\n >\r\n \r\n \r\n insertMarkdown('![$1](image-url)')}\r\n title=\"Image\"\r\n >\r\n \r\n \r\n \r\n \r\n \r\n
    \r\n
    \r\n
    \r\n {isPreviewMode ? (\r\n
    \r\n \r\n \r\n {children}\r\n
    \r\n );\r\n }\r\n }}\r\n >\r\n {message}\r\n \r\n \r\n

    !^a