Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions kendra_retriever_samples/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ Before you run the sample, you need to deploy a Large Language Model (or get an
| Bedrock Titan | None | | bedrock_titan|
| Bedrock Claude | None | | bedrock_claude|
| Bedrock Claude V2 | None | | bedrock_claudev2|
| Bedrock Jurassic Ultra | None | | bedrock_jurassic_ultra|


After deploying the LLM, set up environment variables for the Kendra index ID, the AWS region, and the endpoint name (or the API key for an external provider).
Expand All @@ -61,6 +62,7 @@ You can use commands as below to set the environment variables. Only set the env

```bash
export AWS_REGION=<YOUR-AWS-REGION>
export AWS_BEDROCK_REGION=<YOUR-AWS-BEDROCK-REGION>
export AWS_PROFILE=<AWS Profile>
export KENDRA_INDEX_ID=<YOUR-KENDRA-INDEX-ID>

Expand Down
6 changes: 5 additions & 1 deletion kendra_retriever_samples/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import kendra_chat_bedrock_titan as bedrock_titan
import kendra_chat_bedrock_claude as bedrock_claude
import kendra_chat_bedrock_claudev2 as bedrock_claudev2
import kendra_chat_bedrock_jurassic_ultra as bedrock_jurassic_ultra



Expand Down Expand Up @@ -76,10 +77,13 @@ def read_properties_file(filename):
elif (sys.argv[1] == 'bedrock_claudev2'):
st.session_state['llm_app'] = bedrock_claudev2
st.session_state['llm_chain'] = bedrock_claudev2.build_chain()
elif (sys.argv[1] == 'bedrock_jurassic_ultra'):
st.session_state['llm_app'] = bedrock_jurassic_ultra
st.session_state['llm_chain'] = bedrock_jurassic_ultra.build_chain()
else:
raise Exception("Unsupported LLM: ", sys.argv[1])
else:
raise Exception("Usage: streamlit run app.py <anthropic|flanxl|flanxxl|openai|bedrock_titan|bedrock_claude|bedrock|claudev2>")
        raise Exception("Usage: streamlit run app.py <anthropic|flanxl|flanxxl|openai|bedrock_titan|bedrock_claude|bedrock_claudev2|bedrock_jurassic_ultra>")

if 'chat_history' not in st.session_state:
st.session_state['chat_history'] = []
Expand Down
123 changes: 123 additions & 0 deletions kendra_retriever_samples/kendra_chat_bedrock_jurassic_ultra.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
# from aws_langchain.kendra import AmazonKendraRetriever #custom library
from langchain.retrievers import AmazonKendraRetriever
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms.bedrock import Bedrock
from langchain.chains.llm import LLMChain
import sys
import os

class bcolors:
  """ANSI terminal escape codes used to colorize the CLI chat output."""

  # Foreground colors.
  HEADER = '\033[95m'
  OKBLUE = '\033[94m'
  OKCYAN = '\033[96m'
  OKGREEN = '\033[92m'
  WARNING = '\033[93m'
  FAIL = '\033[91m'
  # Text attributes.
  BOLD = '\033[1m'
  UNDERLINE = '\033[4m'
  # Reset all styling back to the terminal default.
  ENDC = '\033[0m'

# Maximum number of (question, answer) turns kept as chat history; the
# oldest turn is dropped once this limit is reached (see the __main__ loop).
MAX_HISTORY_LENGTH = 5

def build_chain():
  """Build a Kendra-backed conversational QA chain using Bedrock Jurassic-2 Ultra.

  Configuration is read from environment variables:
    AWS_REGION          -- region of the Amazon Kendra index
    AWS_BEDROCK_REGION  -- region where Amazon Bedrock is invoked
    KENDRA_INDEX_ID     -- id of the Kendra index to retrieve from
    AWS_PROFILE         -- local AWS credentials profile name

  Returns:
    A ConversationalRetrievalChain that condenses a follow-up question into a
    standalone one, retrieves the top-5 Dutch-language Kendra documents, and
    answers in Dutch, returning source documents alongside the answer.

  Raises:
    KeyError: if any required environment variable is unset.
  """
  region = os.environ["AWS_REGION"]
  bedrock_region = os.environ["AWS_BEDROCK_REGION"]
  kendra_index_id = os.environ["KENDRA_INDEX_ID"]
  credentials_profile_name = os.environ["AWS_PROFILE"]

  llm = Bedrock(
    credentials_profile_name=credentials_profile_name,
    region_name=bedrock_region,
    # AI21 Jurassic-2 generation parameters: short, low-temperature answers
    # with all repetition penalties disabled.
    model_kwargs={
      "maxTokens": 200,
      "temperature": 0.1,
      "topP": 0.9,
      "countPenalty": {"scale": 0},
      "presencePenalty": {"scale": 0},
      "frequencyPenalty": {"scale": 0},
    },
    model_id="ai21.j2-ultra-v1",
  )

  # Restrict retrieval to documents indexed with Dutch ("nl") as language.
  attribute_filter = {
    "EqualsTo": {
      "Key": "_language_code",
      "Value": {
        "StringValue": "nl"
      }
    }
  }
  retriever = AmazonKendraRetriever(
    index_id=kendra_index_id,
    top_k=5,
    region_name=region,
    attribute_filter=attribute_filter,
  )

  # Dutch QA prompt: documents are injected as {context}, the (possibly
  # condensed) user question as {question}.
  prompt_template = """Mens: Dit is een vriendschappelijk gesprek tussen een mens en een AI.
De AI is spraakzaam en geeft specifieke details uit zijn context, maar beperkt dit tot 240 tokens.
Als de AI het antwoord op een vraag niet weet, zegt hij eerlijk dat hij het niet weet.

Assistent: OK, begrepen, ik zal een spraakzame waarheidsgetrouwe AI-assistent zijn.

Mens: Hier zijn een paar documenten in <documenten> tags:
<documenten>
{context}
</documenten>
Geef op basis van de bovenstaande documenten een gedetailleerd antwoord op {question}.
Antwoord "weet niet" indien niet aanwezig in het document.

Assistent:
"""

  PROMPT = PromptTemplate(
    template=prompt_template, input_variables=["context", "question"]
  )

  # Prompt that rewrites a follow-up question plus the chat history into a
  # single standalone question before retrieval.
  condense_qa_template = """{chat_history}
Mens:
Gegeven het vorige gesprek en een vervolgvraag hieronder, herformuleer de vervolgvraag
zodat het een op zichzelf staande vraag wordt.

Vervolgvraag: {question}
Op zichzelf staande vraag:

Assistent:"""

  standalone_question_prompt = PromptTemplate.from_template(condense_qa_template)

  qa = ConversationalRetrievalChain.from_llm(
    llm=llm,
    retriever=retriever,
    condense_question_prompt=standalone_question_prompt,
    return_source_documents=True,
    combine_docs_chain_kwargs={"prompt": PROMPT},
    verbose=True)

  return qa


def run_chain(chain, prompt: str, history=None):
  """Invoke *chain* with one user question and the prior chat history.

  Args:
    chain: a callable chain accepting {"question": ..., "chat_history": ...}.
    prompt: the user's question.
    history: list of (question, answer) tuples; defaults to no history.
      (Previously a mutable default argument ``[]``, which is shared across
      calls in Python — replaced with the ``None`` sentinel idiom.)

  Returns:
    Whatever the chain returns (a dict with at least an "answer" key).
  """
  if history is None:
    history = []
  return chain({"question": prompt, "chat_history": history})


# Interactive CLI loop: read questions from stdin, answer via the chain,
# and print colorized answers with their Kendra source documents.
if __name__ == "__main__":
  chat_history = []
  qa = build_chain()
  print(bcolors.OKBLUE + "Hello! How can I help you?" + bcolors.ENDC)
  print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
  print(">", end=" ", flush=True)
  for query in sys.stdin:
    # "new search:" prefix resets the conversation; note this also
    # lowercases the remainder of the query before it is sent to the chain.
    if (query.strip().lower().startswith("new search:")):
      query = query.strip().lower().replace("new search:","")
      chat_history = []
    elif (len(chat_history) == MAX_HISTORY_LENGTH):
      # Cap the history length by dropping the oldest (question, answer) turn.
      chat_history.pop(0)
    result = run_chain(qa, query, chat_history)
    chat_history.append((query, result["answer"]))
    print(bcolors.OKGREEN + result['answer'] + bcolors.ENDC)
    # The chain was built with return_source_documents=True, so list the
    # Kendra documents that backed the answer.
    if 'source_documents' in result:
      print(bcolors.OKGREEN + 'Sources:')
      for d in result['source_documents']:
        print(d.metadata['source'])
      print(bcolors.ENDC)
    print(bcolors.OKCYAN + "Ask a question, start a New search: or CTRL-D to exit." + bcolors.ENDC)
    print(">", end=" ", flush=True)
  # Reached on EOF (CTRL-D).
  print(bcolors.OKBLUE + "Bye" + bcolors.ENDC)