diff --git a/chapter15.md b/chapter15.md
new file mode 100644
index 0000000..9ae70df
--- /dev/null
+++ b/chapter15.md
@@ -0,0 +1,56 @@
+## Chapter 15: LangChain Best Practices and Optimization
+This chapter introduces best practices for using LangChain effectively and techniques for optimizing performance. The example below demonstrates response caching, prompt templating, timing measurements, error handling, and resource cleanup.
+
+```
+from langchain.llms import OpenAI
+from langchain.prompts import PromptTemplate
+from langchain.chains import LLMChain
+from langchain.cache import InMemoryCache
+import langchain
+import time
+
+# Enable in-memory caching so identical LLM calls are answered from the cache
+langchain.llm_cache = InMemoryCache()
+
+# Initialize the LLM with a low temperature for more deterministic answers
+llm = OpenAI(temperature=0.1)
+
+# Define a reusable prompt template with two input variables
+template = """
+You are an AI assistant specialized in {topic}.
+Please provide a concise answer to the following question:
+Question: {question}
+Answer:
+"""
+
+prompt = PromptTemplate(
+    input_variables=["topic", "question"],
+    template=template
+)
+
+# Combine the prompt template and the LLM into a chain
+chain = LLMChain(llm=llm, prompt=prompt)
+
+# Measure how long five identical calls take; only the first call hits the
+# API, the remaining four are served from the cache
+start_time = time.time()
+
+for _ in range(5):
+    response = chain.run(topic="Python programming", question="What is a list comprehension?")
+    print(response)
+
+end_time = time.time()
+print(f"Execution time: {end_time - start_time:.2f} seconds")
+
+# Calling the chain without a required input variable raises an exception;
+# wrapping chain calls in try/except keeps failures from crashing the program
+try:
+    invalid_response = chain.run(question="This call is missing the 'topic' variable")
+except Exception as e:
+    print(f"An error occurred: {e}")
+
+# Release references to the chain and LLM once they are no longer needed
+del llm
+del chain
+```
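+
+The `InMemoryCache` used above lives only for the lifetime of the process; for scripts that are run repeatedly, a persistent cache avoids paying for the same completions twice across runs. The sketch below assumes the legacy `langchain.cache.SQLiteCache` API (the same import vintage as the example above) and a hypothetical database file name, so verify both against your installed LangChain version.
+
+```
+from langchain.cache import SQLiteCache
+import langchain
+
+# Persist cached LLM responses to a local SQLite file so that repeated
+# runs of the script also benefit from caching.
+# ".langchain.db" is a hypothetical file name; any writable path works.
+langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
+
+# Identical chain.run(...) calls made after this point are served from
+# the SQLite cache instead of triggering new API requests.
+```
+
+Because the cache is configured globally on the `langchain` module, swapping the backend requires no changes to the chains themselves.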