Skip to content

Commit

Permalink
Merge pull request #43 from LyzrCore/imp/data-analyzr
Browse files Browse the repository at this point in the history
DataAnalyzr improvements - big PR
  • Loading branch information
gargimaheshwari authored Jun 13, 2024
2 parents a3325f5 + b22fbcd commit b3ab128
Show file tree
Hide file tree
Showing 64 changed files with 8,521 additions and 6,125 deletions.
2 changes: 0 additions & 2 deletions build/lib/lyzr/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
from lyzr.base.vector_store import LyzrVectorStoreIndex
from lyzr.formula_generator import FormulaGen
from lyzr.data_analyzr import DataAnalyzr
from lyzr.data_analyzr import DataConnector
from lyzr.voicebot import VoiceBot
from lyzr.qa.search_agent import SearchAgent
from lyzr.summarizer import Summarizer
Expand All @@ -19,7 +18,6 @@
"ChatBot",
"FormulaGen",
"DataAnalyzr",
"DataConnector",
"VoiceBot",
"SearchAgent",
"Summarizer",
Expand Down
3 changes: 0 additions & 3 deletions build/lib/lyzr/base/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from lyzr.base.file_utils import read_file, describe_dataset
from lyzr.base.llm import LyzrLLMFactory, LiteLLM
from lyzr.base.llms import LLM, get_model
from lyzr.base.service import LyzrService
Expand All @@ -13,8 +12,6 @@
"LyzrVectorStoreIndex",
"LLM",
"get_model",
"read_file",
"describe_dataset",
"LyzrRetriever",
"LiteLLM",
"LyzrPromptFactory",
Expand Down
12 changes: 12 additions & 0 deletions build/lib/lyzr/base/errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,3 +35,15 @@ class ValidationError(Exception):
"""Raise for validations"""

pass


class AnalysisFailedError(Exception):
    """Raised when a data-analysis run fails.

    Plain marker exception: carries no extra state beyond the message
    passed at raise time, matching the other error classes in this module.
    """


class PromptError(ValueError):
    """Raised for invalid or malformed prompt input.

    Subclasses ``ValueError`` so existing ``except ValueError`` handlers
    in callers continue to catch prompt problems unchanged.
    """
76 changes: 0 additions & 76 deletions build/lib/lyzr/base/file_utils.py

This file was deleted.

95 changes: 80 additions & 15 deletions build/lib/lyzr/base/llm.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,14 @@
"""
Classes and functions for interacting with LLMs.
"""

# standard library imports
import logging
import traceback
from typing import Union, Literal, Sequence

# third-party imports
import litellm
from llama_index.llms import LiteLLM
from llama_index.llms.base import LLM
from llama_index.llms.base import ChatMessage as LlamaChatMessage
Expand All @@ -12,18 +18,49 @@
from lyzr.base.errors import ImproperUsageError

DEFAULT_LLM = "gpt-4-0125-preview"
litellm.drop_params = True


class LyzrLLMFactory:
"""A factory class for creating instances of LiteLLM."""

@staticmethod
def from_defaults(model: str = DEFAULT_LLM, **kwargs) -> LLM:
def from_defaults(model: str = DEFAULT_LLM, **kwargs) -> LiteLLM:
# model_type -> api_type
# model_name -> model
# model_prompts -> Sequence[ChatMessage]
return LiteLLM(model=model, **kwargs)


class LiteLLM(LiteLLM):
"""
LiteLLM is a lightweight language model interface that supports chat,
text-to-speech (TTS), and speech-to-text (STT) functionalities.
Extends the `LiteLLM` class.
Properties:
_tts_kwargs (dict): Returns a dictionary of keyword arguments for TTS, including the voice setting.
_model_type (Literal["chat", "tts", "stt"]): Determines the type of model based on the model name.
Methods:
set_model_kwargs(model_kwargs: dict, force: Union[bool, dict] = True) -> dict:
Sets the model-specific keyword arguments. If `force` is a boolean, it applies to all arguments; otherwise, it can be a dictionary specifying which arguments to forcefully set.
set_messages(messages: Sequence[ChatMessage]):
Sets the messages for the chat model.
run(**kwargs):
Executes the model based on its type (chat, TTS, or STT) with the provided keyword arguments.
chat_complete(messages: Sequence[ChatMessage], stream: bool = False, logger: logging.Logger = None, **kwargs):
Completes a chat interaction with the provided messages. Supports streaming and logging.
tts(tts_input, voice: Literal["echo", "alloy", "fable", "onyx", "nova", "shimmer"], **kwargs):
Converts text to speech using the specified voice. Requires input text and voice.
stt(audiofile, **kwargs):
Converts speech to text from the provided audio file.
"""

@property
def _tts_kwargs(self) -> dict:
Expand All @@ -50,7 +87,7 @@ def set_model_kwargs(
if (not force.get(arg, True)) and (arg in all_kwargs):
continue
if arg in ["temperature", "max_tokens"]:
self.__dict__[arg] = model_kwargs[arg]
setattr(self, arg, model_kwargs[arg])
self.additional_kwargs[arg] = model_kwargs[arg]

def set_messages(self, messages: Sequence[ChatMessage]):
Expand All @@ -59,7 +96,7 @@ def set_messages(self, messages: Sequence[ChatMessage]):
def run(self, **kwargs):
if self._model_type == "chat":
return self.chat_complete(
messages=kwargs.pop("messages", self.__dict__.get("messages", None)),
messages=kwargs.pop("messages", getattr(self, "messages", None)),
stream=kwargs.pop("stream", False),
logger=self.additional_kwargs.pop("logger", kwargs.pop("logger", None)),
**kwargs,
Expand All @@ -84,22 +121,50 @@ def chat_complete(
if not messages:
raise ImproperUsageError("Please provide messages for chat.")
llama_messages = [
LlamaChatMessage(role=msg.role.value, content=msg.content)
LlamaChatMessage(role=msg.role.value, content=msg.content.strip())
for msg in messages
]
if stream:
response = self._stream_chat(messages=llama_messages, **kwargs)
return response # TODO: Convert response to ChatMessage
return self._stream_chat(messages=llama_messages, **kwargs)
else:
response = self._chat(messages=llama_messages, **kwargs)
return ChatResponse(
message=ChatMessage(
role=response.message.role, content=response.message.content
),
raw=response.raw,
delta=response.delta,
additional_kwargs=response.additional_kwargs,
)
try:
response = self._chat(messages=llama_messages, **kwargs)
logger.info(
f"LLM chat response received: {response.message.content}",
extra={
"function": "chat_complete",
"input_kwargs": {
"messages": messages,
"stream": stream,
**kwargs,
},
"response": response.message.content,
},
)
return ChatResponse(
message=ChatMessage(
role=response.message.role, content=response.message.content
),
raw=response.raw,
delta=response.delta,
additional_kwargs=response.additional_kwargs,
)
except Exception as e:
logger.error(
f"Error with getting response from LLM. {e.__class__.__name__}: {e}.",
extra={
"function": "chat_complete",
"traceback": traceback.format_exc().splitlines(),
"input_kwargs": {
"messages": messages,
"stream": stream,
**kwargs,
},
},
)
return None
finally:
self.additional_kwargs["logger"] = logger

def tts(
self,
Expand Down
2 changes: 1 addition & 1 deletion build/lib/lyzr/base/llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from openai import OpenAI

# local imports
from lyzr.base.prompt_dep import get_prompt_text
from lyzr.base.prompt_old import get_prompt_text
from lyzr.base.errors import MissingValueError


Expand Down
Loading

0 comments on commit b3ab128

Please sign in to comment.