diff --git a/docs/30_function_calling/35_langchain_bia_choosing_algorithms.ipynb b/docs/30_function_calling/35_langchain_bia_choosing_algorithms.ipynb new file mode 100644 index 0000000..a705a03 --- /dev/null +++ b/docs/30_function_calling/35_langchain_bia_choosing_algorithms.ipynb @@ -0,0 +1,498 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "b97b3b00-8ff4-4e1b-b7c7-709f87aabc37", + "metadata": {}, + "source": [ + "## Allowing language models to choose the right algorithm\n", + "In this notebook we enable a language model to choose the right algorithm. We define multiple segmentation algorithms / tools and then let the language model choose which one to use for different inputs." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "f4ae3a80-b6ea-4409-95b7-caecd4e4211c", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.memory import ConversationBufferMemory\n", + "from langchain.chat_models import ChatOpenAI\n", + "from langchain.agents import initialize_agent\n", + "from langchain.agents import AgentType\n", + "from langchain.tools import tool\n", + "\n", + "from skimage.io import imread\n", + "from bia_utilities import voronoi_otsu_labeling, local_minima_seeded_watershed\n", + "\n", + "import stackview" + ] + }, + { + "cell_type": "markdown", + "id": "6b78c8e5-58d1-4750-b659-e639a2b99d2f", + "metadata": {}, + "source": [ + "Again, we define an image storage and a list of tools." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "8f8158b6-5a36-4cad-a28f-42cd375a0d4f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "image_storage = {}\n", + "tools = []" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "bc5b05a7-8ef6-458f-acbf-1c79e26cf9fb", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "@tools.append\n", + "@tool\n", + "def load_image(filename:str):\n", + " \"\"\"Useful for loading an image file and storing it.\"\"\"\n", + " print(\"loading\", filename)\n", + " image = imread(filename)\n", + " image_storage[filename] = image\n", + " return \"The image is now stored as \" + filename" + ] + }, + { + "cell_type": "markdown", + "id": "8bf722d8-5636-4cfc-a3c7-422e0f02fe68", + "metadata": {}, + "source": [ + "We define two segmentation algorithms, one for segmenting bright objects and one for segmenting dark objects." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "993a17aa-57b2-4e72-b546-0ec7199c40c6", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "@tools.append\n", + "@tool\n", + "def segment_bright_objects(image_name):\n", + " \"\"\"\n", + " Useful for segmenting bright objects (such as nuclei) in an image \n", + " that has been loaded and stored before.\n", + " \"\"\"\n", + " print(\"segmenting (Voronoi-Otsu-Labeling)\", image_name)\n", + " \n", + " image = image_storage[image_name]\n", + " label_image = voronoi_otsu_labeling(image, spot_sigma=4)\n", + " \n", + " label_image_name = \"segmented_\" + image_name\n", + " image_storage[label_image_name] = label_image\n", + " \n", + " return \"The segmented image has been stored as \" + label_image_name" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7de6cb09-06a3-4e68-a685-0512e3f5aad3", + "metadata": {}, + "outputs": [], + "source": [ + "@tools.append\n", + "@tool\n", + "def segment_dark_objects(image_name):\n", + " \"\"\"\n", + " Useful for segmenting dark objects with a bright border (e.g. 
cells with marked membranes) in an image \n", + " that has been loaded and stored before.\n", + " \"\"\"\n", + " print(\"segmenting (Local-minima-seeded watershed)\", image_name)\n", + " \n", + " image = image_storage[image_name]\n", + " label_image = local_minima_seeded_watershed(image, spot_sigma=10)\n", + " \n", + " label_image_name = \"segmented_\" + image_name\n", + " image_storage[label_image_name] = label_image\n", + " \n", + " return \"The segmented image has been stored as \" + label_image_name" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "a11fe914-4162-4ca3-b067-e5278711e3f3", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "@tools.append\n", + "@tool\n", + "def show_image(image_name):\n", + " \"\"\"Useful for showing an image that has been loaded and stored before.\"\"\"\n", + " print(\"showing\", image_name)\n", + " \n", + " image = image_storage[image_name]\n", + " display(stackview.insight(image))\n", + " \n", + " return \"The image \" + image_name + \" is shown above.\"" + ] + }, + { + "cell_type": "markdown", + "id": "c0524eb1-7633-45e7-982b-1c2cc5af0b16", + "metadata": {}, + "source": [ + "We create some memory and a large language model based on OpenAI's ChatGPT." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "5d032bf0-49d1-42d4-9654-394a9e660996", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\haase\\miniconda3\\envs\\genai-gpu\\Lib\\site-packages\\langchain_core\\_api\\deprecation.py:141: LangChainDeprecationWarning: The class `ChatOpenAI` was deprecated in LangChain 0.0.10 and will be removed in 0.3.0. An updated version of the class exists in the langchain-openai package and should be used instead. To use it run `pip install -U langchain-openai` and import as `from langchain_openai import ChatOpenAI`.\n", + " warn_deprecated(\n" + ] + } + ], + "source": [ + "memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n", + "llm = ChatOpenAI(temperature=0)" + ] + }, + { + "cell_type": "markdown", + "id": "7bda4152-8cd8-4257-8e7a-e31fca49ffad", + "metadata": { + "tags": [] + }, + "source": [ + "Given the list of tools, the large language model and the memory, we can create an agent." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "28afdf8e-87f2-44a7-9f8d-ef188e0f13b5", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\haase\\miniconda3\\envs\\genai-gpu\\Lib\\site-packages\\langchain_core\\_api\\deprecation.py:141: LangChainDeprecationWarning: The function `initialize_agent` was deprecated in LangChain 0.1.0 and will be removed in 1.0. Use Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc. instead.\n", + " warn_deprecated(\n" + ] + } + ], + "source": [ + "agent = initialize_agent(\n", + " tools, \n", + " llm, \n", + " agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, \n", + " memory=memory\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "23e3065d-8d55-46dc-b160-ff4349ee3beb", + "metadata": { + "tags": [] + }, + "source": [ + "This agent can then respond to prompts."
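A note on the tool-registration pattern used above: stacking `@tools.append` on top of `@tool` adds each function to the `tools` list, and the docstring becomes the description the language model reads when deciding which tool to call. A minimal sketch of this mechanism, using a simplified, hypothetical stand-in for LangChain's `@tool` decorator:

```python
tools = []

def tool(func):
    # hypothetical stand-in for langchain.tools.tool: the real decorator
    # wraps the function in a Tool object; here we only attach the
    # docstring as the description the LLM uses for tool selection
    func.description = func.__doc__
    return func

@tools.append
@tool
def example_tool(text: str):
    """Useful for demonstrating how docstrings guide tool selection."""
    return text

print(tools[0].description)  # the docstring, i.e. what the LLM reads
print(example_tool)          # None: list.append returns None, so only
                             # the entry in the tools list remains usable
```

This is why specific docstrings such as "Useful for segmenting bright objects (such as nuclei)" matter: they are the only information the model has about each algorithm.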
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "5bf8d165-de48-4052-8121-d0bedac8a3e2", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\haase\\miniconda3\\envs\\genai-gpu\\Lib\\site-packages\\langchain_core\\_api\\deprecation.py:141: LangChainDeprecationWarning: The method `Chain.run` was deprecated in langchain 0.1.0 and will be removed in 1.0. Use invoke instead.\n", + " warn_deprecated(\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "loading data/membranes.tif\n" + ] + }, + { + "data": { + "text/plain": [ + "'The image is now stored as data/membranes.tif'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent.run(\"Please load the image data/membranes.tif\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "20f555c5-f40e-4ca5-8f04-31716d21b9df", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "showing data/membranes.tif\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
shape(256, 256)
dtypeuint16
size128.0 kB
min277
max44092
\n", + "\n", + "
" + ], + "text/plain": [ + "StackViewNDArray([[4496, 5212, 6863, ..., 2917, 2680, 2642],\n", + " [4533, 5146, 7555, ..., 2843, 2857, 2748],\n", + " [4640, 6082, 8452, ..., 3372, 3039, 3128],\n", + " ...,\n", + " [1339, 1403, 1359, ..., 4458, 4314, 4795],\n", + " [1473, 1560, 1622, ..., 3967, 4531, 4204],\n", + " [1380, 1368, 1649, ..., 3091, 3558, 3682]], dtype=uint16)" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "'The image data/membranes.tif has been shown.'" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent.run(\"Show the image.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "3a78de42-7960-43f0-a62b-98106e57e75a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "segmenting (Local-minima-seeded watershed) data/membranes.tif\n" + ] + }, + { + "data": { + "text/plain": [ + "'The segmented image has been stored as segmented_data/membranes.tif'" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent.run(\"Please segment the image data/membranes.tif\")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "ae00622c-0d17-4d73-adfc-3a0622024ea4", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "showing segmented_data/membranes.tif\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
shape(256, 256)
dtypeint32
size256.0 kB
min1
max27
\n", + "\n", + "
" + ], + "text/plain": [ + "StackViewNDArray([[ 5, 5, 5, ..., 3, 3, 3],\n", + " [ 5, 5, 5, ..., 3, 3, 3],\n", + " [ 5, 5, 5, ..., 3, 3, 3],\n", + " ...,\n", + " [24, 24, 24, ..., 27, 27, 27],\n", + " [24, 24, 24, ..., 27, 27, 27],\n", + " [24, 24, 24, ..., 27, 27, 27]])" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "'The image segmented_data/membranes.tif is shown above.'" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent.run(\"Please show the segmented data/membranes.tif image.\")" + ] + }, + { + "cell_type": "markdown", + "id": "b48dd0a0-f41c-4804-88b6-35ad766455aa", + "metadata": {}, + "source": [ + "The segmentation does not look like a cell-segmentation. Thus, we should ask more specifically." + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "4b5c4ca0-e169-412a-85eb-46b9646750b6", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'I used an image segmentation algorithm to segment the image data/membranes.tif. This algorithm was chosen because it is effective at separating objects of interest in the image based on their characteristics, such as brightness or color, which can help in further analysis and processing of the image data.'" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "agent.run(\"Which algorithm did you use? Why did you use this algorithm?\")" + ] + }, + { + "cell_type": "markdown", + "id": "a1b93a57-f920-43c7-ae01-60dea0a09af1", + "metadata": {}, + "source": [ + "Note: The language model cannot see the image. Its tool selection depends on information you provided and information it acquired during the chat." + ] + }, + { + "cell_type": "markdown", + "id": "4fa24d32-5e1e-49d6-8484-0b31f69927e3", + "metadata": {}, + "source": [ + "## Exercise\n", + "Run the code a couple of times and study if it always uses the same algorithm.\n", + "Afterwards, rename \"membranes.tif\" to \"bright_blobs.tif\" and run the code again.\n", + "If the algorithm chooses a different algorithm then, why?" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "205c6fd8-32a3-4363-99b6-2af5a8d51e98", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/30_function_calling/bia_utilities.py b/docs/30_function_calling/bia_utilities.py new file mode 100644 index 0000000..828daca --- /dev/null +++ b/docs/30_function_calling/bia_utilities.py @@ -0,0 +1,77 @@ +def voronoi_otsu_labeling(image, spot_sigma: float = 2, outline_sigma: float = 2): + """Voronoi-Otsu-Labeling is a segmentation algorithm for blob-like structures such as nuclei and + granules with high signal intensity on low-intensity background. + + The two sigma parameters allow tuning the segmentation result. The first sigma controls how close detected cells + can be (spot_sigma) and the second controls how precise segmented objects are outlined (outline_sigma). 
Under the + hood, this filter applies two Gaussian blurs, spot detection, Otsu-thresholding and Voronoi-labeling. The + thresholded binary image is flooded using the Voronoi approach starting from the found local maxima. Noise-removal + sigma for spot detection and thresholding can be configured separately. + + This allows segmenting connected objects such as nuclei that are not too dense. + If the nuclei are too dense, consider using stardist [1] or cellpose [2]. + + See also + -------- + .. [0] https://github.com/clEsperanto/pyclesperanto_prototype/blob/master/demo/segmentation/voronoi_otsu_labeling.ipynb + .. [1] https://www.napari-hub.org/plugins/stardist-napari + .. [2] https://www.napari-hub.org/plugins/cellpose-napari + """ + import numpy as np + from skimage.filters import threshold_otsu as sk_threshold_otsu, gaussian + from skimage.segmentation import watershed + from skimage.measure import label + from skimage.morphology import local_maxima + + + image = np.asarray(image) + + # blur and detect local maxima + blurred_spots = gaussian(image, spot_sigma) + spot_centroids = local_maxima(blurred_spots) + + # blur and threshold + blurred_outline = gaussian(image, outline_sigma) + threshold = sk_threshold_otsu(blurred_outline) + binary_otsu = blurred_outline > threshold + + # determine local maxima within the thresholded area + remaining_spots = spot_centroids * binary_otsu + + # start from remaining spots and flood binary image with labels + labeled_spots = label(remaining_spots) + labels = watershed(binary_otsu, labeled_spots, mask=binary_otsu) + + return labels + + +def local_minima_seeded_watershed(image, spot_sigma: float = 10, outline_sigma: float = 0): + """ + Segment cells in images with fluorescently marked membranes. + + The two sigma parameters allow tuning the segmentation result. The first sigma controls how close detected cells + can be (spot_sigma) and the second controls how precise segmented objects are outlined (outline_sigma). Under the + hood, this filter applies two Gaussian blurs, local minima detection and a seeded watershed. + + See also + -------- + .. [1] https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html + """ + import numpy as np + from skimage.filters import gaussian + from skimage.segmentation import watershed + from skimage.measure import label + from skimage.morphology import local_minima + + image = np.asarray(image) + + spot_blurred = gaussian(image, sigma=spot_sigma) + + spots = label(local_minima(spot_blurred)) + + if outline_sigma == spot_sigma: + outline_blurred = spot_blurred + else: + outline_blurred = gaussian(image, sigma=outline_sigma) + + return watershed(outline_blurred, spots) \ No newline at end of file diff --git a/docs/30_function_calling/data/membrane2d.tif b/docs/30_function_calling/data/membranes.tif similarity index 100% rename from docs/30_function_calling/data/membrane2d.tif rename to docs/30_function_calling/data/membranes.tif diff --git a/docs/50_code_generation/02_generating_code.ipynb b/docs/50_code_generation/02_generating_code.ipynb new file mode 100644 index 0000000..660d7e6 --- /dev/null +++ b/docs/50_code_generation/02_generating_code.ipynb @@ -0,0 +1,242 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "d2203d9d-f5cc-4198-bd78-1d56f98369ac", + "metadata": {}, + "source": [ + "# Code generation\n", + "Code generation makes sense in the context of scientific data analysis, especially because code can be executed again and again, producing the same results. 
\n", + "\n", + "As example, we count bright blobs in an image." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "aaa383f2-762d-4816-95f3-b259e4723878", + "metadata": {}, + "outputs": [], + "source": [ + "import openai\n", + "from IPython.display import Markdown\n", + "\n", + "def prompt(message:str, model=\"gpt-4o-2024-08-06\"):\n", + " \"\"\"A prompt helper function that sends a message to openAI\n", + " and returns only the text response.\n", + " \"\"\"\n", + " client = openai.OpenAI()\n", + " response = client.chat.completions.create(\n", + " model=model,\n", + " messages=[{\"role\": \"user\", \"content\": message}]\n", + " )\n", + " return response.choices[0].message.content" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a8aec675-cde8-4870-ac52-fafac89d543e", + "metadata": {}, + "outputs": [], + "source": [ + "my_prompt = \"\"\"\n", + "Write Python code that loads blobs.tif, \n", + "counts bright blobs and prints the number. \n", + "Return this code only.\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "id": "dc649cf5-2194-4bd4-ac0f-52b98607430c", + "metadata": {}, + "source": [ + "## Reviewing generated code\n", + "When generating code, it is recommended to print / visualize before executing it. If you automatically execute code before reviewing it, it may harm your computer." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "06ddee05-5e7f-4656-a877-a8ee61c84c10", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "```python\n", + "import cv2\n", + "import numpy as np\n", + "\n", + "# Load the image\n", + "image = cv2.imread('blobs.tif', cv2.IMREAD_GRAYSCALE)\n", + "\n", + "# Thresholding the image to binary\n", + "_, binary_image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)\n", + "\n", + "# Find connected components (blobs)\n", + "num_labels, labels_im = cv2.connectedComponents(binary_image)\n", + "\n", + "# Print the number of bright blobs, subtracting one for the background label\n", + "print(num_labels - 1)\n", + "```" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "code = prompt(my_prompt)\n", + "Markdown(code)" + ] + }, + { + "cell_type": "markdown", + "id": "d5dc2999-f4b9-434d-a923-c286f8667a7f", + "metadata": {}, + "source": [ + "If we are ok with the code, we can execute it." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "83a50d24-d1cd-4541-a0f2-fb59af5608c6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "64\n" + ] + } + ], + "source": [ + "code = code.replace(\"```python\", \"\").replace(\"```\", \"\")\n", + "exec(code)" + ] + }, + { + "cell_type": "markdown", + "id": "a5838196-e538-4026-9b74-6638166e6c6e", + "metadata": {}, + "source": [ + "## A comment on reproducibility\n", + "Depending on which model you use, it may produce the same code again - or not." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "03adabb8-b14e-4467-8ce8-02dcd5efc9dc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "```python\n", + "import numpy as np\n", + "from skimage import io, measure, filters, morphology\n", + "\n", + "# Load the image\n", + "image = io.imread('blobs.tif')\n", + "\n", + "# Convert image to grayscale if it is not\n", + "if len(image.shape) > 2:\n", + " image = rgb2gray(image)\n", + "\n", + "# Apply a threshold to convert the image to binary\n", + "binary_image = image > filters.threshold_otsu(image)\n", + "\n", + "# Remove small objects to isolate blobs better\n", + "cleaned_image = morphology.remove_small_objects(binary_image, min_size=20)\n", + "\n", + "# Label connected components\n", + "labeled_image, num_blobs = measure.label(cleaned_image, return_num=True)\n", + "\n", + "# Print the number of blobs found\n", + "print(\"Number of bright blobs:\", num_blobs)\n", + "```" + ], + "text/plain": [ + "<IPython.core.display.Markdown object>" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "code = prompt(my_prompt)\n", + "Markdown(code)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d393a342-fea7-42b8-a069-31bfe81a340e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of bright blobs: 61\n" + ] + } + ], + "source": [ + "code = code.replace(\"```python\", \"\").replace(\"```\", \"\")\n", + "exec(code)" + ] + }, + { + "cell_type": "markdown", + "id": "6559154f-8bb3-4524-8a33-8b76e05d3679", + "metadata": {}, + "source": [ + "# Exercise\n", + "Rerun the code, wait a minute between the two `prompt()` calls above and see if the code is identical to what is saved now. Also check if the number of blobs is the same.\n", + "\n", + "Advanced, optional exercise: Modify the prompt function to use Anthropic's Claude." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4e3659ba-5b62-4ee9-997b-05d7fdde6d7b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/50_code_generation/03_generating_code.ipynb b/docs/50_code_generation/03_generating_code.ipynb index 5c4ac6b..5e8d5d7 100644 --- a/docs/50_code_generation/03_generating_code.ipynb +++ b/docs/50_code_generation/03_generating_code.ipynb @@ -5,7 +5,7 @@ "id": "85990a02-74d0-46f2-a638-00f37d8d1b3e", "metadata": {}, "source": [ - "## Generating code\n", + "## Prompt engineering for code generation\n", "In this notebook we will produce some image processing Python code and execute it to see if it works. We will build up the query gradually to demonstrate that short, concise, high-quality code depends on the details we present in the prompt." 
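As a small, hypothetical illustration of the idea explored in that notebook (neither prompt appears in the notebooks above): the more constraints a prompt names, the less room the model has to pick libraries and parameters on its own.

```python
# hypothetical example prompts illustrating vague versus specific requests
vague_prompt = "Write Python code that counts blobs in blobs.tif."

specific_prompt = """
Write Python code that
* loads blobs.tif using scikit-image,
* applies Otsu's threshold,
* labels connected components and
* prints the number of objects.
Use only scikit-image and numpy. Return the code only.
"""
```

The vague prompt leaves the library choice (cv2 versus scikit-image) and the thresholding strategy open, which is exactly the run-to-run variability observed in the notebook above.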
] }, diff --git a/docs/_toc.yml b/docs/_toc.yml index c4f7994..37d24ce 100644 --- a/docs/_toc.yml +++ b/docs/_toc.yml @@ -38,6 +38,7 @@ parts: - file: 30_function_calling/10_function_calling.ipynb - file: 30_function_calling/20_langchain.ipynb - file: 30_function_calling/30_langchain_bia.ipynb + - file: 30_function_calling/35_langchain_bia_choosing_algorithms.ipynb - file: 30_function_calling/50_blablado.ipynb - file: 30_function_calling/55_microscope_stage_demo.ipynb @@ -66,6 +67,7 @@ parts: - file: 50_code_generation/readme.md sections: + - file: 50_code_generation/02_generating_code.ipynb - file: 50_code_generation/03_generating_code.ipynb - file: 50_code_generation/04_generating_code_for_processing_images.ipynb - file: 50_code_generation/06_system_messages.ipynb
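For completeness, a minimal usage sketch for the two helpers added in `bia_utilities.py`, assuming `data/membranes.tif` (the file renamed in this diff) is a 2D grayscale image:

```python
from skimage.io import imread
from bia_utilities import voronoi_otsu_labeling, local_minima_seeded_watershed

image = imread("data/membranes.tif")

# blob-like bright objects, e.g. nuclei
nuclei = voronoi_otsu_labeling(image, spot_sigma=4, outline_sigma=2)

# cells outlined by bright membranes
cells = local_minima_seeded_watershed(image, spot_sigma=10)

# label images are integer arrays; the maximum label is the object count
print(nuclei.max(), "bright objects;", cells.max(), "cells")
```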