diff --git a/.github/workflows/chatgpt-cr.yaml b/.github/workflows/chatgpt-cr.yaml index 9926310a..a381bbc1 100644 --- a/.github/workflows/chatgpt-cr.yaml +++ b/.github/workflows/chatgpt-cr.yaml @@ -23,5 +23,5 @@ jobs: PROMPT: "请检查以下代码差异是否有混淆或不规范之处:" # example: Please check if there are any confusions or irregularities in the following code diff: top_p: 1 # https://platform.openai.com/docs/api-reference/chat/create#chat/create-top_p temperature: 1 # https://platform.openai.com/docs/api-reference/chat/create#chat/create-temperature - max_tokens: 10000 - MAX_PATCH_LENGTH: 10000 # if the patch/diff length is large than MAX_PATCH_LENGTH, will be ignored and won't review. By default, with no MAX_PATCH_LENGTH set, there is also no limit for the patch/diff length. \ No newline at end of file + max_tokens: 1024 + MAX_PATCH_LENGTH: 1024 # if the patch/diff length is larger than MAX_PATCH_LENGTH, it will be ignored and won't be reviewed. By default, with no MAX_PATCH_LENGTH set, there is also no limit for the patch/diff length. 
diff --git "a/tmp-test-gpt-cr/obsidian\347\232\204\345\211\257\346\234\2542.py" "b/tmp-test-gpt-cr/obsidian\347\232\204\345\211\257\346\234\2542.py" new file mode 100644 index 00000000..b0462f90 --- /dev/null +++ "b/tmp-test-gpt-cr/obsidian\347\232\204\345\211\257\346\234\2542.py" @@ -0,0 +1,30 @@ +## 1.终端里设置环境变量 + +# export OPENAI_API_TYPE=azure +# export OPENAI_API_VERSION=2023-05-15 +# export OPENAI_API_BASE=https://ingtubeopenai.openai.azure.com +# export OPENAI_API_KEY=<REDACTED-do-not-commit-secrets> + +# export PINECONE_API_KEY=<REDACTED-do-not-commit-secrets> +# export PINECONE_ENV=gcp-starter + +## 2.加载原始csv数据 + +# llm(documents1[0].page_content) + + +from langchain.document_loaders import ObsidianLoader + +loader = ObsidianLoader("/Users/yingtu/知识库/ingtube") +documents = loader.load() + +## 3.embeddings对象模型初始化,实际调用在后面。 +from langchain.embeddings import OpenAIEmbeddings +embeddings = OpenAIEmbeddings( + client="", + model="text-embedding-ada-002", + deployment="ingtube-ada", + # input="texts", + # chunk_size=1 + show_progress_bar=True, +)