diff --git a/src/DocumentLoaders/Word/src/ExcelLoader.cs b/src/DocumentLoaders/Word/src/ExcelLoader.cs
index 86d646e1..66acf806 100644
--- a/src/DocumentLoaders/Word/src/ExcelLoader.cs
+++ b/src/DocumentLoaders/Word/src/ExcelLoader.cs
@@ -14,11 +14,11 @@ public async Task<IReadOnlyCollection<Document>> LoadAsync(
         dataSource = dataSource ?? throw new ArgumentNullException(nameof(dataSource));
 
         using var stream = await dataSource.GetStreamAsync(cancellationToken).ConfigureAwait(false);
-        
+
         var markdowns = ExcelToMarkdown.Convert(stream, firstRowIsHeader);
         var metadata = settings.CollectMetadataIfRequired(dataSource);
-        
+
         return markdowns
             .Select(x => new Document(x.Value, metadata: metadata?.With("Worksheet", x.Key)))
             .ToArray();
diff --git a/src/DocumentLoaders/Word/src/Helpers.ExcelToMarkdown.cs b/src/DocumentLoaders/Word/src/Helpers.ExcelToMarkdown.cs
index 950043e5..26224ebf 100644
--- a/src/DocumentLoaders/Word/src/Helpers.ExcelToMarkdown.cs
+++ b/src/DocumentLoaders/Word/src/Helpers.ExcelToMarkdown.cs
@@ -34,10 +34,10 @@ public static IList<KeyValuePair<string, string>> Convert(
             {
                 continue;
             }
-            
+
             var isFirstRow = true;
             var builder = new StringBuilder();
-            
+
             foreach (var row in (document.WorkbookPart.GetPartById(sheet.Id.Value) as WorksheetPart)?.Worksheet
                 .GetFirstChild<SheetData>()?
                 .Descendants<Row>() ?? [])
@@ -60,16 +60,16 @@ public static IList<KeyValuePair<string, string>> Convert(
                     .ToList()) + " |");
                 }
             }
-            
+
             markdowns.Add(new KeyValuePair<string, string>(
-                sheet.Name?.Value ?? $"Sheet{markdowns.Count}", 
+                sheet.Name?.Value ?? $"Sheet{markdowns.Count}",
                 builder.ToString()));
         }
-        
-        
+
+
         return markdowns;
     }
-    
+
     private static string GetCellValue(SharedStringTable? table, Cell cell)
     {
         var value = cell.CellValue?.InnerText ?? string.Empty;
@@ -80,7 +80,7 @@ private static string GetCellValue(SharedStringTable? table, Cell cell)
         {
             return table.ChildElements[index].InnerText;
         }
-        
+
         return value;
     }
 }
\ No newline at end of file
diff --git a/src/Helpers/GenerateDocs/Program.cs b/src/Helpers/GenerateDocs/Program.cs
index 604675dc..2ec64d95 100644
--- a/src/Helpers/GenerateDocs/Program.cs
+++ b/src/Helpers/GenerateDocs/Program.cs
@@ -74,11 +74,11 @@ static async Task ConvertTestToMarkdown(string path, string outputFolder)
     {
         return;
     }
-    
+
     var usings = string.Join('\n', lines
         .Where(x => x.StartsWith("using"))
         .ToArray());
-    
+
    var start = lines.IndexOf("    {");
    var end = lines.IndexOf("    }");
    lines = lines
@@ -86,7 +86,7 @@ static async Task ConvertTestToMarkdown(string path, string outputFolder)
        .Where(x => !x.Contains(".Should()"))
        .Select(x => x.StartsWith("        ") ? x[8..] : x)
        .ToList();
-    
+
    const string commentPrefix = "//// ";
    var markdown = string.Empty;
    var completeCode = string.Join('\n', lines.Where(x => !x.StartsWith(commentPrefix)));
@@ -101,7 +101,7 @@ static async Task ConvertTestToMarkdown(string path, string outputFolder)
        {
            i++;
        }
-        
+
        var comment = string.Join('\n', lines
            .GetRange(startGroup, i - startGroup)
            .Select(x => x[commentPrefix.Length..]));
@@ -120,14 +120,14 @@ static async Task ConvertTestToMarkdown(string path, string outputFolder)
            isFirstCode = false;
            markdown += Environment.NewLine + usings + Environment.NewLine;
        }
-        
+
        markdown += $@"
{string.Join('\n', lines
    .GetRange(startGroup, i - startGroup)).Trim()}
```" + '\n';
        }
    }
-    
+
    markdown = anyComment ? @"`Scroll till the end of the page if you just want code` " + markdown : markdown;
    markdown += anyComment ?
        @$"
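The `GetCellValue` helper touched above deals with OpenXML shared strings: a cell whose data type is `SharedString` stores only an index into the workbook's `SharedStringTable`, so the visible text has to be looked up there. The hunk omits the surrounding condition, so the following is only a sketch of the usual OpenXML pattern; the method name and exact checks are assumptions, not the file's actual code.

```csharp
using DocumentFormat.OpenXml.Spreadsheet;

// Sketch of shared-string resolution as commonly written with the OpenXML SDK;
// the helper changed in this PR may differ in details.
static string GetCellText(SharedStringTable? table, Cell cell)
{
    // For inline values this is already the text; for shared strings it is an index.
    var value = cell.CellValue?.InnerText ?? string.Empty;

    if (table is not null &&
        cell.DataType?.Value == CellValues.SharedString &&
        int.TryParse(value, out var index) &&
        index < table.ChildElements.Count)
    {
        // Shared-string cells point into the workbook-level string table.
        return table.ChildElements[index].InnerText;
    }

    return value;
}
```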
@$" diff --git a/src/Meta/test/WikiTests.AgentWithOllamaReact.cs b/src/Meta/test/WikiTests.AgentWithOllamaReact.cs index 707a4d88..7d6a2dea 100644 --- a/src/Meta/test/WikiTests.AgentWithOllamaReact.cs +++ b/src/Meta/test/WikiTests.AgentWithOllamaReact.cs @@ -27,7 +27,7 @@ public async Task AgentWithOllamaReact() //// ## Using ReAct with Google search //// //// Now you should have all necessary to connect your LLM to Google search - + // var provider = new OllamaProvider( // options: new RequestOptions // { @@ -55,7 +55,7 @@ public async Task AgentWithOllamaReact() .UseTool(searchTool); // add the google search tool await chain.RunAsync(); - + //// Lets run it and see the output: //// As you can see, instead of giving answer right away, the model starts to think on it //// ``` diff --git a/src/Meta/test/WikiTests.BuildingChatWithOpenAi.cs b/src/Meta/test/WikiTests.BuildingChatWithOpenAi.cs index 1f52155b..f56810f4 100644 --- a/src/Meta/test/WikiTests.BuildingChatWithOpenAi.cs +++ b/src/Meta/test/WikiTests.BuildingChatWithOpenAi.cs @@ -60,7 +60,7 @@ public async Task BuildingChatWithOpenAi() Console.Write("AI: "); Console.WriteLine(res); } - + //// Now you can run the program and try to chat with it. //// //// The final output will look like this: diff --git a/src/Meta/test/WikiTests.GettingStarted.cs b/src/Meta/test/WikiTests.GettingStarted.cs index 9d1f5d04..96a6e0d2 100644 --- a/src/Meta/test/WikiTests.GettingStarted.cs +++ b/src/Meta/test/WikiTests.GettingStarted.cs @@ -48,13 +48,13 @@ public async Task GettingStarted() //// *** //// //// So, finally, let's write some code! - + // get model path var modelPath = await HuggingFaceModelDownloader.GetModelAsync( repository: "TheBloke/Thespis-13B-v0.5-GGUF", fileName: "thespis-13b-v0.5.Q2_K.gguf", version: "main"); - + //// This line will download the model and save it locally for future usage. After model is downloaded it will return path to the *.gguf file. //// _**You can manually download any model you want and insert path to it directly. Without using HuggingFaceModelDownloader.**_ //// @@ -62,13 +62,13 @@ public async Task GettingStarted() // load model var model = LLamaSharpModelInstruction.FromPath(modelPath).UseConsoleForDebug(); - + //// Now let's build a chain! //// //// # Building a chain //// //// This is minimal chain to make LLM work: - + // building a chain var prompt = @" You are an AI assistant that greets the world. @@ -80,7 +80,7 @@ You are an AI assistant that greets the world. | LLM(model, inputKey: "prompt"); await chain.RunAsync(); - + //// We can see here 2 chains(or links) working together: Set and LLM. //// //// * Set - setting value for the _chain context variable **prompt**_ diff --git a/src/Meta/test/WikiTests.HowToUseOpenAiProvider.cs b/src/Meta/test/WikiTests.HowToUseOpenAiProvider.cs index 109edb22..c972493d 100644 --- a/src/Meta/test/WikiTests.HowToUseOpenAiProvider.cs +++ b/src/Meta/test/WikiTests.HowToUseOpenAiProvider.cs @@ -21,7 +21,7 @@ public async Task HowToUseOpenAiProvider() var result = await chain.RunAsync("text", CancellationToken.None); // execute chain and get `text` context variable Console.WriteLine(result); // Hello! How can I assist you today? - + //// `inputKey` and `outputKey` here is more for understanding of what goes where. They have default values and can be omitted. Also there is classes like `Gpt35TurboModel` for simplicity. 
diff --git a/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs b/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs
index ff7d2d55..779cf06a 100644
--- a/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs
+++ b/src/Meta/test/WikiTests.ImageGenerationWithOllamaAndStableDiffusion.cs
@@ -72,7 +72,7 @@ public async Task ImageGenerationWithOllamaAndStableDiffusion()
         //// I took it from [here](https://github.com/vicuna-tools/Stablediffy/blob/main/Stablediffy.txt) with some minor modifications.
         //// Basically, we are showing some examples so the model can understand the principle of prompt generation. You can play around with the examples and instructions to better match your preferences.
         //// Now let's build a chain!
-        
+
         var template = @"[INST]Transcript of a dialog, where the User interacts with an Assistant named Stablediffy. Stablediffy knows much about prompt engineering for stable diffusion (an open-source image generation software). The User asks Stablediffy about prompts for stable diffusion Image Generation.
@@ -102,7 +102,7 @@ public async Task ImageGenerationWithOllamaAndStableDiffusion()
 
         // run the chain
         await chain.RunAsync();
-        
+
         //// If everything is done correctly, you should have `image.png` in your bin directory.
     }
 }
\ No newline at end of file
diff --git a/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs b/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs
index 68a40884..1a16c091 100644
--- a/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs
+++ b/src/Meta/test/WikiTests.RagWithOpenAiOllama.cs
@@ -54,14 +54,14 @@ public async Task RagWithOpenAiOllama()
         ////
         //// ### OpenAI
         //// To use this chat and embedding model, you will need an API key from OpenAI. This has a non-zero cost.
-        
+
         // prepare OpenAI embedding model
         var provider = new OpenAiProvider(apiKey:
             Environment.GetEnvironmentVariable("OPENAI_API_KEY") ??
             throw new InvalidOperationException("OPENAI_API_KEY key is not set"));
         var embeddingModel = new TextEmbeddingV3SmallModel(provider);
         var llm = new OpenAiLatestFastChatModel(provider);
-        
+
         //// ### Ollama
         //// To use this chat and embedding model, you will need an Ollama instance running.
         //// This is free, assuming it is running locally--this code assumes it is available at https://localhost:11434.
@@ -82,7 +82,7 @@ public async Task RagWithOpenAiOllama()
         var vectorCollection = await vectorDatabase.AddDocumentsFromAsync(
             embeddingModel,
             dimensions: 1536, // Should be 1536 for TextEmbeddingV3SmallModel
-            // First, specify the source to index. 
+            // First, specify the source to index.
             dataSource: DataSource.FromPath("E:\\AI\\Datasets\\Books\\Harry-Potter-Book-1.pdf"),
             collectionName: "harrypotter",
             // Second, configure how to extract chunks from the bigger document.
@@ -131,7 +131,7 @@ public async Task RagWithOpenAiOllama()
 
         var result = await chain.RunAsync("text", CancellationToken.None);
         Console.WriteLine(result);
-        
+
         //// We are done! Since we previously registered for events on the completion model, the output will be printed automatically.
         ////
         //// # Example
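The comment in the hunk above ("configure how to extract chunks from the bigger document") refers to splitting the PDF into smaller pieces before embedding them. The snippet below is only a plain .NET illustration of the sliding-window idea behind such chunking, with arbitrary size and overlap values; it is not the loader's actual splitter.

```csharp
using System;
using System.Collections.Generic;

// Fixed-size chunking with overlap, for illustration only.
static IEnumerable<string> Chunk(string text, int chunkSize = 1000, int overlap = 200)
{
    for (var start = 0; start < text.Length; start += chunkSize - overlap)
    {
        var length = Math.Min(chunkSize, text.Length - start);
        yield return text.Substring(start, length);

        // Stop once the last chunk reaches the end of the text.
        if (start + length >= text.Length)
        {
            yield break;
        }
    }
}
```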
diff --git a/src/Meta/test/WikiTests.UsingChainOutput.cs b/src/Meta/test/WikiTests.UsingChainOutput.cs
index 83ef845c..9ff6222a 100644
--- a/src/Meta/test/WikiTests.UsingChainOutput.cs
+++ b/src/Meta/test/WikiTests.UsingChainOutput.cs
@@ -33,24 +33,24 @@ You are an AI assistant that greets the world.
         //// Almost every possible link in a chain has at least one input and output.
         ////
         //// Look here:
-        
+
         var chain =
             Set(prompt, outputKey: "prompt")
             | LLM(model, inputKey: "prompt", outputKey: "result");
-        
+
         //// This means that, after the `Set` link is executed, its result is stored in the "prompt" variable inside the chain context.
         //// In turn, the `LLM` link gets the "prompt" variable from the chain context and uses it as its input.
         ////
         //// The `LLM` link also has an output key argument. Let's use it to save the result of the LLM.
 
         var result = await chain.RunAsync("result", CancellationToken.None);
-        
+
         //// Now the `LLM` link saves its result into the "result" variable inside the chain context. But how do we extract it from there?
         ////
         //// The `chain.Run()` method has an optional "resultKey" argument. It lets you specify which variable inside the chain context to return as the result.
-        
+
         Console.WriteLine(result);
-        
+
         //// Output:
         //// ```
         //// Hello, World! How can I help you today?
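The `UsingChainOutput` walkthrough ends on the point that `RunAsync` takes a `resultKey` naming which chain-context variable to return. Here is a short sketch of that behaviour, assuming `model` and the `Set`/`LLM` helpers are set up as in the test; note that each `RunAsync` call runs the chain again.

```csharp
// Assumes `model` and `using static LangChain.Chains.Chain;` as in the test above.
var chain =
    Set("Hello, world of AI!", outputKey: "prompt")
    | LLM(model, inputKey: "prompt", outputKey: "result");

// resultKey selects which chain-context variable RunAsync returns.
var answer = await chain.RunAsync("result", CancellationToken.None);   // the LLM output
var question = await chain.RunAsync("prompt", CancellationToken.None); // the value stored by Set

Console.WriteLine(answer);
Console.WriteLine(question);
```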