Clean up trunk check recommendation
KastanDay committed Mar 8, 2024
1 parent 016e48b commit 3017603
Showing 8 changed files with 8 additions and 16 deletions.
2 changes: 1 addition & 1 deletion ai_ta_backend/beam/ingest.py
@@ -22,7 +22,7 @@
 import pytesseract
 import sentry_sdk
 import supabase
-from beam import App, QueueDepthAutoscaler, RequestLatencyAutoscaler, Runtime
+from beam import App, QueueDepthAutoscaler, Runtime  # RequestLatencyAutoscaler,
 from bs4 import BeautifulSoup
 from git.repo import Repo
 from langchain.document_loaders import (
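Note: the unused RequestLatencyAutoscaler import is parked in a trailing comment rather than deleted, which keeps the name greppable for a later re-enable. A minimal sketch of two alternatives that keep the linter quiet (both hypothetical; neither is what this commit does):

# Option 1: keep the import live but tell the linter it is intentional.
from beam import RequestLatencyAutoscaler  # noqa: F401  (kept for future use)


# Option 2: defer the import to the point of use, so nothing sits unused
# at module scope (function name is illustrative).
def build_latency_autoscaler():
  from beam import RequestLatencyAutoscaler
  return RequestLatencyAutoscaler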
2 changes: 0 additions & 2 deletions ai_ta_backend/executors/process_pool_executor.py
@@ -1,7 +1,5 @@
 from concurrent.futures import ProcessPoolExecutor
 
-from injector import inject
-
 
 class ProcessPoolExecutorInterface:
 
2 changes: 0 additions & 2 deletions ai_ta_backend/executors/thread_pool_executor.py
@@ -1,7 +1,5 @@
 from concurrent.futures import ThreadPoolExecutor
 
-from injector import inject
-
 
 class ThreadPoolExecutorInterface:
 
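Both executor modules dropped the same dead `from injector import inject` line: neither interface actually decorates anything with it. For contrast, a minimal sketch of a consumer that would justify the import (class and wiring are hypothetical, not part of this commit):

from concurrent.futures import ThreadPoolExecutor

from injector import inject


class HypotheticalWorkerService:

  @inject
  def __init__(self, executor: ThreadPoolExecutor):
    # injector resolves the executor from its bindings; without a decorated
    # consumer like this, the import is dead weight.
    self.executor = executor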
3 changes: 2 additions & 1 deletion ai_ta_backend/public_api/uiuc_chat_api.py
@@ -1,5 +1,6 @@
-import requests
 import json
+
+import requests
 """
 # Example usage
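The reorder restores the standard grouping (PEP 8 / isort): standard-library imports first, then a blank line, then third-party packages. In miniature:

import json  # standard library

import requests  # third party, separated by one blank line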
5 changes: 2 additions & 3 deletions ai_ta_backend/service/export_service.py
@@ -1,4 +1,3 @@
-import io
 import json
 import os
 import uuid
@@ -40,7 +39,7 @@ def export_documents_json(self, course_name: str, from_date='', to_date=''):
       # call background task to upload to s3
 
       filename = course_name + '_' + str(uuid.uuid4()) + '_documents.zip'
-      s3_filepath = s3_file = f"courses/{course_name}/{filename}"
+      s3_filepath = f"courses/{course_name}/{filename}"
       # background task of downloading data - map it with above ID
       executor = ProcessPoolExecutor()
       executor.submit(self.export_data_in_bg, response, "documents", course_name, s3_filepath)
@@ -214,7 +213,7 @@ def export_convo_history_json(self, course_name: str, from_date='', to_date=''):
     if response.count > 1000:
       # call background task to upload to s3
       filename = course_name + '_' + str(uuid.uuid4()) + '_convo_history.zip'
-      s3_filepath = s3_file = f"courses/{course_name}/{filename}"
+      s3_filepath = f"courses/{course_name}/{filename}"
       # background task of downloading data - map it with above ID
       executor = ProcessPoolExecutor()
       executor.submit(self.export_data_in_bg, response, "conversations", course_name, s3_filepath)
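In both hunks the removed `s3_file` name was written once and never read, so the chained assignment collapses to a single target. A tiny self-contained illustration (values are made up):

course_name, filename = "demo-course", "demo.zip"  # illustrative values

# Before: two names bound to the same string; `s3_file` is never read again.
s3_filepath = s3_file = f"courses/{course_name}/{filename}"

# After: one binding, same behavior everywhere `s3_filepath` is used.
s3_filepath = f"courses/{course_name}/{filename}"
assert s3_filepath == "courses/demo-course/demo.zip"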
4 changes: 2 additions & 2 deletions ai_ta_backend/service/nomic_service.py
@@ -677,10 +677,10 @@ def data_prep_for_doc_map(self, df: pd.DataFrame):
     embeddings = []
     texts = []
 
-    for index, row in df.iterrows():
+    for _index, row in df.iterrows():
 
       current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-      if row['url'] == None:
+      if row['url'] is None:
         row['url'] = ""
       # iterate through all contexts and create separate entries for each
       context_count = 0
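Two idioms in one hunk: the leading underscore in `_index` marks the loop variable as intentionally unused, and `is None` replaces `== None` because identity cannot be hijacked by a custom `__eq__`, while equality can. A contrived but runnable demonstration:

class AlwaysEqual:
  """Pathological __eq__ that says yes to everything."""

  def __eq__(self, other):
    return True


x = AlwaysEqual()
print(x == None)  # True  -- the overridden __eq__ answers, not None  # noqa: E711
print(x is None)  # False -- identity is checked by the interpreter itself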
4 changes: 1 addition & 3 deletions ai_ta_backend/service/retrieval_service.py
@@ -6,10 +6,8 @@
 
 import openai
 from injector import inject
-from langchain import hub
 from langchain.chat_models import AzureChatOpenAI
 from langchain.embeddings.openai import OpenAIEmbeddings
-from langchain.load import dumps, loads
 from langchain.schema import Document
 
 from ai_ta_backend.database.aws import AWSStorage
@@ -326,7 +324,7 @@ def delete_from_nomic_and_supabase(self, course_name: str, identifier_key: str,
       project_id = response.data[0]['doc_map_id']
     else:
       return "No document map found for this course"
-    res = self.nomicService.delete_from_document_map(project_id, nomic_ids_to_delete)
+    self.nomicService.delete_from_document_map(project_id, nomic_ids_to_delete)
 
     # delete from Supabase
     self.sqlDb.deleteMaterialsForCourseAndKeyAndValue(course_name, identifier_key, identifier_value)
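Dropping the `res =` binding removes a local that was assigned but never read (flake8's F841). A minimal sketch with a stand-in for the real service call:

def delete_from_document_map(project_id, ids):  # stand-in for the real method
  return len(ids)


# Before: `res` is bound and then forgotten (F841).
res = delete_from_document_map("proj-1", [1, 2, 3])

# After: call purely for its side effects and discard the result.
delete_from_document_map("proj-1", [1, 2, 3])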
2 changes: 0 additions & 2 deletions ai_ta_backend/utils/context_parent_doc_padding.py
@@ -4,8 +4,6 @@
 from functools import partial
 from multiprocessing import Manager
 
-import supabase
-
 DOCUMENTS_TABLE = os.environ['SUPABASE_DOCUMENTS_TABLE']
 # SUPABASE_CLIENT = supabase.create_client(supabase_url=os.environ['SUPABASE_URL'],
 #                                          supabase_key=os.environ['SUPABASE_API_KEY'])  # type: ignore
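Here the import only served the commented-out client below it, so it goes too. If that client is ever revived, a lazy accessor is one way to bring the dependency back without a module-level global (a sketch, assuming the same environment variables):

import os


def get_supabase_client():
  import supabase  # imported only when a client is actually requested
  return supabase.create_client(
      supabase_url=os.environ['SUPABASE_URL'],
      supabase_key=os.environ['SUPABASE_API_KEY'],
  )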
