{"payload":{"feedbackUrl":"https://github.com/orgs/community/discussions/53140","repo":{"id":758997638,"defaultBranch":"main","name":"parse","ownerLogin":"drewskidang","currentUserCanPush":false,"isFork":false,"isEmpty":false,"createdAt":"2024-02-17T17:25:28.000Z","ownerAvatar":"https://avatars.githubusercontent.com/u/120294157?v=4","public":true,"private":false,"isOrgOwned":false},"refInfo":{"name":"","listCacheKey":"v0:1708190764.0","currentOid":""},"activityList":{"items":[{"before":"8ca2b857b22acf1f06ac397375e05f32a0e3252a","after":"45a073901466c3c19c33f5d95c637c833cf6ccef","ref":"refs/heads/main","pushedAt":"2024-02-18T16:02:34.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"phase 5","shortMessageHtmlLink":"phase 5"}},{"before":"ad6dd3068abe505536605aeffc9e779ddb39aee0","after":"8ca2b857b22acf1f06ac397375e05f32a0e3252a","ref":"refs/heads/main","pushedAt":"2024-02-18T15:47:24.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"phase 5","shortMessageHtmlLink":"phase 5"}},{"before":"8ae9da2cf3654fcf5490ea2d750043af97867849","after":"ad6dd3068abe505536605aeffc9e779ddb39aee0","ref":"refs/heads/main","pushedAt":"2024-02-18T15:42:41.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"diff --git a/README.md b/README.md\nindex df047c7..2c413a2 100644\n--- a/README.md\n+++ b/README.md\n@@ -43,6 +43,9 @@ Before running `ingest.py`, ensure you have created the necessary Pinecone index\n\n Open a new terminal and navigate to the project directory. Run the following command:\n\n+### OR Try classifer.py\n+Super experimental having a model classify chunks maybe facts that could be relevant to the case rule of law etc, it takes alot longer. 
diff --git a/classifer.py b/classifer.py
new file mode 100644
index 0000000..b77be92
--- /dev/null
+++ b/classifer.py
@@ -0,0 +1,90 @@
+import logging
+import os
+
+from langchain_openai import ChatOpenAI
+from llama_index.core import Document, Settings, StorageContext, VectorStoreIndex
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.vector_stores.pinecone import PineconeVectorStore
+from llmsherpa.readers import LayoutPDFReader
+from pinecone import Pinecone
+import spacy_llm
+from spacy_llm.util import assemble
+
+# Log the LLM classification calls so progress is visible.
+spacy_llm.logger.addHandler(logging.StreamHandler())
+spacy_llm.logger.setLevel(logging.DEBUG)
+
+# spaCy pipeline whose single "llm" component classifies spans (see config.cfg).
+nlp = assemble("config.cfg")
+
+api_key = os.environ.get("PINECONE_API_KEY", "")
+pc = Pinecone(api_key=api_key)
+pinecone_index = pc.Index("antitrust")
+
+embed_model = HuggingFaceEmbedding(model_name="Drewskidang/ANTI_BERT")
+
+
+def get_extra_info():
+    print("\nPlease enter the following information for each document:")
+    print("Book/Case information, Relevant Rules, Case Name, Related Cases")
+    law_subject_area = input("Law subject area: ")
+    relevant_rules = input("Relevant Rules: ")
+    case_name = input("Case Name: ")
+    related_cases = input("Related Cases: ")
+
+    return {
+        "law_subject_area": law_subject_area,
+        "relevant_rules": relevant_rules,
+        "case_name": case_name,
+        "related_cases": related_cases,
+    }
+
+
+# Local OpenAI-compatible inference server (e.g. vLLM).
+inference_server_url = "http://localhost:8000/v1"
+
+llm = ChatOpenAI(
+    openai_api_key="EMPTY",
+    openai_api_base=inference_server_url,
+    model_name="Drewskidang/Marlin-AWQ",
+    max_tokens=512,
+)
+
+Settings.llm = llm
+Settings.embed_model = embed_model
+
+
+def process_pdfs(pdf_directory):
+    parser_api_url = "http://localhost:5010/api/parseDocument?renderFormat=all&applyOcr=yes"
+    pdf_reader = LayoutPDFReader(parser_api_url)
+
+    data = []
+    for filename in os.listdir(pdf_directory):
+        if filename.endswith(".pdf"):
+            pdf_path = os.path.join(pdf_directory, filename)
+            print(f"\nProcessing document: {filename}")
+            docs = pdf_reader.read_pdf(pdf_path)
+            extra_info = get_extra_info()  # asked once per document
+            for chunk in docs.chunks():
+                # Classify the chunk and keep the predicted span labels as
+                # metadata (Document has no `docs` keyword argument).
+                classified = nlp(chunk.to_context_text())
+                span_labels = [span.label_ for span in classified.spans.get("sc", [])]
+                document = Document(
+                    text=chunk.to_text(include_children=True, recurse=True),
+                    extra_info={**extra_info, "span_labels": span_labels},
+                )
+                data.append(document)
+    return data
+
+
+def convert_nodes(data):
+    namespace = "criminal"
+    vector_store = PineconeVectorStore(pinecone_index=pinecone_index, namespace=namespace)
+    storage_context = StorageContext.from_defaults(vector_store=vector_store)
+    VectorStoreIndex.from_documents(data, storage_context=storage_context)
+
+
+pdf_directory = "data"  # replace with your actual directory path
+processed_data = process_pdfs(pdf_directory)
+convert_nodes(processed_data)
diff --git a/config.cfg b/config.cfg
new file mode 100644
index 0000000..5cbf7db
--- /dev/null
+++ b/config.cfg
@@ -0,0 +1,16 @@
+[nlp]
+lang = "en"
+pipeline = ["llm"]
+
+[components]
+
+[components.llm]
+factory = "llm"
+
+[components.llm.task]
+@llm_tasks = "spacy.SpanCat.v3"
+labels = ["Import-Antitrust-Facts", "Arguments from company", "Arguments From EU", "Reasoning and Ruling", "Antitrust Laws"]
+
+[components.llm.model]
+@llm_models = "spacy.GPT-3.v1"
+config = {"temperature": 0.3}
diff --git a/requirements.txt b/requirements.txt
index d8fea3e..0750160 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,3 +5,4 @@ modal
 vllm
 llmsherpa
 nlm-ingestor
+spacy-llm
\ No newline at end of file
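A hypothetical usage sketch (not part of the commit above): exercising the spacy-llm pipeline that classifer.py assembles from config.cfg on one invented chunk of text. It assumes the "spacy.GPT-3.v1" model name resolves in the installed spacy-llm version, that an OPENAI_API_KEY is exported for it, and that the SpanCat task writes to its default "sc" spans key.

```python
from spacy_llm.util import assemble

# Build the single-component "llm" pipeline defined in config.cfg.
nlp = assemble("config.cfg")

# Invented stand-in for chunk.to_context_text() in classifer.py.
sample = (
    "The Commission concluded that the conduct foreclosed competitors "
    "and infringed Article 102 TFEU."
)
doc = nlp(sample)

# SpanCat predictions land in doc.spans under the task's spans key
# ("sc" by default); each span carries one label from config.cfg.
for span in doc.spans.get("sc", []):
    print(f"{span.label_}: {span.text}")
```

This mirrors what process_pdfs does per chunk before folding the labels into each Document's metadata.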
- 2024-02-17 21:35 UTC: drewskidang pushed 1 commit to main (0b75019 -> 8ae9da2): "change read me"
- 2024-02-17 21:34 UTC: drewskidang pushed 1 commit to main (3d54b88 -> 0b75019): "readme"
- 2024-02-17 18:24 UTC: drewskidang pushed 2 commits to main (b806ce6 -> 3d54b88): "change read me"
me"}},{"before":"4305c82f70e3b21fd311468d3145f68cfb4a7137","after":"b806ce6627f2586a08d660f5e41877391c382270","ref":"refs/heads/main","pushedAt":"2024-02-17T18:15:18.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"phase3","shortMessageHtmlLink":"phase3"}},{"before":"21f7c5c582d1ff113b5711db19a3ca374c695a5f","after":"4305c82f70e3b21fd311468d3145f68cfb4a7137","ref":"refs/heads/main","pushedAt":"2024-02-17T18:09:08.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"phase2","shortMessageHtmlLink":"phase2"}},{"before":"5bce9d820a267b6657c2bdd82de42774276af064","after":"21f7c5c582d1ff113b5711db19a3ca374c695a5f","ref":"refs/heads/main","pushedAt":"2024-02-17T17:48:52.000Z","pushType":"push","commitsCount":1,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"phase1","shortMessageHtmlLink":"phase1"}},{"before":null,"after":"5bce9d820a267b6657c2bdd82de42774276af064","ref":"refs/heads/main","pushedAt":"2024-02-17T17:26:04.000Z","pushType":"branch_creation","commitsCount":0,"pusher":{"login":"drewskidang","name":null,"path":"/drewskidang","primaryAvatarUrl":"https://avatars.githubusercontent.com/u/120294157?s=80&v=4"},"commit":{"message":"Initial commit","shortMessageHtmlLink":"Initial commit"}}],"hasNextPage":false,"hasPreviousPage":false,"activityType":"all","actor":null,"timePeriod":"all","sort":"DESC","perPage":30,"cursor":"djE6ks8AAAAD_e1lZgA","startCursor":null,"endCursor":null}},"title":"Activity ยท drewskidang/parse"}