forked from tryAGI/LangChain
-
Notifications
You must be signed in to change notification settings - Fork 0
/
WikiTests.cs
286 lines (229 loc) · 11.6 KB
/
WikiTests.cs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
using LangChain.Chains.StackableChains.Agents.Tools.BuiltIn;
using LangChain.Databases;
using LangChain.Indexes;
using LangChain.Memory;
using LangChain.Providers;
using LangChain.Providers.Automatic1111;
using LangChain.Providers.HuggingFace.Downloader;
using LangChain.Providers.LLamaSharp;
using LangChain.Providers.OpenAI.Predefined;
using LangChain.Sources;
using LangChain.Splitters.Text;
using static LangChain.Chains.Chain;
namespace LangChain.IntegrationTests;
/// <summary>
/// Runnable examples backing the project wiki pages. Each test demonstrates one
/// documented scenario end-to-end. All of them depend on external resources
/// (a local Ollama server, OpenAI API keys, an Automatic1111 instance, model
/// downloads, local PDF files), hence the <c>[Explicit]</c> marker — they are
/// only run on demand, never as part of the normal suite.
/// </summary>
[TestFixture]
[Explicit]
public class WikiTests
{
    /// <summary>Minimal chain: a constant prompt piped into a local Ollama model.</summary>
    [Test]
    public async Task AgentWithOllama()
    {
        var model = new OllamaLanguageModelInstruction("mistral:latest",
            "http://localhost:11434",
            options: new OllamaLanguageModelOptions
            {
                Temperature = 0, // deterministic output for reproducible demos
            }).UseConsoleForDebug();

        var chain =
            Set("What is tryAGI/LangChain?")
            | LLM(model);

        await chain.Run();
    }

    /// <summary>ReAct-style agent with a Google Custom Search tool attached.</summary>
    [Test]
    public async Task AgentWithOllamaReact()
    {
        var model = new OllamaLanguageModelInstruction("mistral:latest",
            "http://localhost:11434",
            options: new OllamaLanguageModelOptions()
            {
                Stop = new[] { "Observation", "[END]" }, // add injection word `Observation` and `[END]` to stop the model(just as additional safety feature)
                Temperature = 0
            }).UseConsoleForDebug();

        // create a google search tool
        var searchTool = new GoogleCustomSearchTool(key: "<your key>", cx: "<your cx>", resultsLimit: 1);

        var chain =
            Set("What is tryAGI/LangChain?")
            | ReActAgentExecutor(model) // does the magic
                .UseTool(searchTool); // add the google search tool

        await chain.Run();
    }

    /// <summary>
    /// Interactive console chat with conversation memory. The outer chain is
    /// built once; each loop iteration prepends the user's fresh input to it.
    /// </summary>
    [Test]
    public async Task BuildingChatWithOpenAi()
    {
        // we will use GPT-3.5 model, but you can use any other model
        var model = new Gpt35TurboModel("your_key");

        // create simple template for conversation for AI to know what piece of text it is looking at
        var template =
            @"The following is a friendly conversation between a human and an AI.
{history}
Human: {input}
AI:";

        // To have a conversation that remembers previous messages we need to use memory.
        // For memory to work properly we need to specify AI and Human prefixes.
        // Since in our template we have "AI:" and "Human:" we need to specify them here. Pay attention to spaces after prefixes.
        var conversationBufferMemory = new ConversationBufferMemory(new ChatMessageHistory());// NOTE(review): prefixes { AiPrefix = "AI: ", HumanPrefix = "Human: " } not currently applied — confirm against current memory API

        // build chain. Notice that we don't set input key here. It will be set in the loop
        var chain =
            // load history. at first it will be empty, but UpdateMemory will update it every iteration
            LoadMemory(conversationBufferMemory, outputKey: "history")
            | Template(template)
            | LLM(model)
            // update memory with new request from Human and response from AI
            | UpdateMemory(conversationBufferMemory, requestKey: "input", responseKey: "text");

        // run the conversation loop until the user exits or stdin is exhausted
        while (true)
        {
            Console.Write("Human: ");
            var input = Console.ReadLine();

            // Console.ReadLine() returns null when stdin is closed or redirected
            // input runs out; without this guard the loop would spin forever and
            // pass null into the chain.
            if (input is null || input == "exit")
                break;

            // build a new chain using previous chain but with new input every time
            var chatChain = Set(input, "input")
                | chain;

            // get response from AI
            var res = await chatChain.Run("text");
            Console.Write("AI: ");
            Console.WriteLine(res);
        }
    }

    /// <summary>Download a GGUF model from HuggingFace and run a single prompt through it.</summary>
    [Test]
    public async Task GettingStarted()
    {
        // get model path
        var modelPath = await HuggingFaceModelDownloader.Instance.GetModel(
            repository: "TheBloke/Thespis-13B-v0.5-GGUF",
            fileName: "thespis-13b-v0.5.Q2_K.gguf",
            version: "main");

        // load model
        var model = LLamaSharpModelInstruction.FromPath(modelPath).UseConsoleForDebug();

        // building a chain
        var prompt = @"
You are an AI assistant that greets the world.
World: Hello, Assistant!
Assistant:";

        var chain =
            Set(prompt, outputKey: "prompt")
            | LLM(model, inputKey: "prompt");

        await chain.Run();
    }

    /// <summary>OpenAI provider usage with explicit input/output context keys.</summary>
    [Test]
    public async Task HowToUseOpenAiProvider()
    {
        var model = new Gpt35TurboModel("your_openAI_key");

        var chain =
            Set("Hello!", outputKey: "request") // set context variable `request` to "Hello"
            | LLM(model, inputKey: "request", outputKey: "text"); // get text from context variable `request`, pass it to the model and put result into `text`

        var result = await chain.Run("text"); // execute chain and get `text` context variable
        Console.WriteLine(result);
    }

    /// <summary>Same as <see cref="HowToUseOpenAiProvider"/> but relying on the default keys.</summary>
    [Test]
    public async Task HowToUseOpenAiProviderSmaller()
    {
        var model = new Gpt35TurboModel("your_openAI_key");

        var chain =
            Set("Hello!")
            | LLM(model);

        Console.WriteLine(await chain.Run("text"));
    }

    /// <summary>
    /// Two-model pipeline: Ollama expands a short description into a Stable
    /// Diffusion prompt, Automatic1111 renders it, and the image is saved to disk.
    /// </summary>
    [Test]
    public async Task ImageGenerationWithOllamaAndStableDiffusion()
    {
        var olmodel = new OllamaLanguageModelInstruction("mistral:latest",
            "http://localhost:11434",
            options: new OllamaLanguageModelOptions()
            {
                Stop = new[] { "\n" }, // single-line answer: stop at the first newline
                Temperature = 0
            }).UseConsoleForDebug();

        var sdmodel = new Automatic1111Model
        {
            Settings = new Automatic1111ModelSettings
            {
                NegativePrompt = "bad quality, blured, watermark, text, naked, nsfw",
                Seed = 42, // for results repeatability
                CfgScale = 6.0f,
                Width = 512,
                Height = 768,
            },
        };

        // Few-shot prompt teaching the LLM to act as a Stable Diffusion prompt engineer.
        var template =
            @"[INST]Transcript of a dialog, where the User interacts with an Assistant named Stablediffy. Stablediffy knows much about prompt engineering for stable diffusion (an open-source image generation software). The User asks Stablediffy about prompts for stable diffusion Image Generation.

Possible keywords for stable diffusion: ""cinematic, colorful background, concept art, dramatic lighting, high detail, highly detailed, hyper realistic, intricate, intricate sharp details, octane render, smooth, studio lighting, trending on artstation, landscape, scenery, cityscape, underwater, salt flat, tundra, jungle, desert mountain, ocean, beach, lake, waterfall, ripples, swirl, waves, avenue, horizon, pasture, plateau, garden, fields, floating island, forest, cloud forest, grasslands, flower field, flower ocean, volcano, cliff, snowy mountain
city, cityscape, street, downtown""
[/INST]
-- Transcript --
USER: suggest a prompt for a young girl from Swiss sitting by the window with headphones on
ASSISTANT: gorgeous young Swiss girl sitting by window with headphones on, wearing white bra with translucent shirt over, soft lips, beach blonde hair, octane render, unreal engine, photograph, realistic skin texture, photorealistic, hyper realism, highly detailed, 85mm portrait photography, award winning, hard rim lighting photography
USER: suggest a prompt for an mysterious city
ASSISTANT: Mysterious city, cityscape, urban, downtown, street, noir style, cinematic lightning, dramatic lightning, intricate, sharp details, octane render, unreal engine, highly detailed, night scene, dark lighting, gritty atmosphere
USER: suggest a prompt for a high quality render of a car in 1950
ASSISTANT: Car in 1950, highly detailed, classic car, 1950's, highly detailed, dramatic lightning, cinematic lightning, unreal engine
USER:suggest a prompt for {value}
ASSISTANT:";

        var chain = Set("a cute girl cosplaying a cat") // describe a desired image in simple words
            | Template(template, outputKey: "prompt") // insert our description into the template
            | LLM(olmodel, inputKey: "prompt", outputKey: "image_prompt") // ask ollama to generate a prompt for stable diffusion
            | GenerateImage(sdmodel, inputKey: "image_prompt", outputKey: "image") // generate an image using stable diffusion
            | SaveIntoFile("image.png", inputKey: "image"); // save the image into a file

        // run the chain
        await chain.Run();
    }

    /// <summary>
    /// Retrieval-augmented generation: OpenAI embeddings index a PDF into a
    /// SQLite vector store (built once, reused on later runs), then Ollama
    /// answers a question grounded in the retrieved chunks.
    /// </summary>
    [Test]
    public async Task RagWithOpenAiOllama()
    {
        // prepare OpenAI embedding model
        var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new InvalidOperationException("OpenAI API key is not set");
        var embeddings = new TextEmbeddingV3SmallModel(apiKey);

        // prepare Ollama with mistral model
        var model = new OllamaLanguageModelInstruction("mistral:latest", options: new OllamaLanguageModelOptions
        {
            Stop = new[] { "\n" },
            Temperature = 0.0f,
        }).UseConsoleForDebug();

        var pdfSource = new PdfPigPdfSource("E:\\AI\\Datasets\\Books\\Harry-Potter-Book-1.pdf");
        var documents = await pdfSource.LoadAsync();

        var textSplitter = new RecursiveCharacterTextSplitter(chunkSize: 200, chunkOverlap: 50);

        // building the index is expensive; only do it when the database is missing
        if (!File.Exists("vectors.db"))
        {
            await SQLiteVectorStore.CreateIndexFromDocuments(embeddings, documents, "vectors.db", "vectors", textSplitter: textSplitter);
        }

        var vectorStore = new SQLiteVectorStore("vectors.db", "vectors", embeddings);
        var index = new VectorStoreIndexWrapper(vectorStore);

        string promptText =
            @"Use the following pieces of context to answer the question at the end. If the answer is not in context then just say that you don't know, don't try to make up an answer. Keep the answer as short as possible.

{context}

Question: {question}
Helpful Answer:";

        var chain =
            Set("Who was drinking a unicorn blood?", outputKey: "question") // set the question
            | RetrieveDocuments(index, inputKey: "question", outputKey: "documents", amount: 5) // take 5 most similar documents
            | StuffDocuments(inputKey: "documents", outputKey: "context") // combine documents together and put them into context
            | Template(promptText) // replace context and question in the prompt with their values
            | LLM(model); // send the result to the language model

        var result = await chain.Run("text"); // get chain result
        Console.WriteLine(result);
    }

    /// <summary>Shows how to capture a chain's output into a named context key and read it back.</summary>
    [Test]
    public async Task UsingChainOutput()
    {
        // get model path
        var modelPath = await HuggingFaceModelDownloader.Instance.GetModel(
            repository: "TheBloke/Thespis-13B-v0.5-GGUF",
            fileName: "thespis-13b-v0.5.Q2_K.gguf",
            version: "main");

        // load model
        var model = LLamaSharpModelInstruction.FromPath(modelPath);

        // building a chain
        var prompt = @"
You are an AI assistant that greets the world.
World: Hello, Assistant!
Assistant:";

        var chain =
            Set(prompt, outputKey: "prompt")
            | LLM(model, inputKey: "prompt", outputKey: "result");

        var result = await chain.Run("result");
        Console.WriteLine("---");
        Console.WriteLine(result);
        Console.WriteLine("---");
    }
}