Remove ModelConfig type as a temporary solution. #2836

Merged 7 commits on Apr 17, 2024. The diff below shows changes from 3 of the 7 commits.
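This change removes the AzureOpenAIModelConfiguration annotation from the evaluators' __init__ signatures so that the evaluator classes can be serialized as flex flows with pf.flows.save; presumably the class-typed parameter could not be expressed in the flex-flow port specification, so the untyped model_config is recorded as type: object in the generated flow.flex.yaml (see the new spec and test_save_eval.py below). A minimal, hedged sketch of the operation this unblocks, using the same API the new unit test exercises (the output path is illustrative):

from promptflow.client import PFClient
from promptflow.evals.evaluators import CoherenceEvaluator

pf = PFClient()
# Serialize the evaluator class as a flex flow; the target directory should then
# contain a flow.flex.yaml similar to the one added in this PR.
pf.flows.save(CoherenceEvaluator, path="./coherence_flow")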
@@ -11,15 +11,14 @@
 
 import numpy as np
 
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.evals.evaluators import CoherenceEvaluator, FluencyEvaluator, GroundednessEvaluator, RelevanceEvaluator
 
 logger = logging.getLogger(__name__)
 
 
 class ChatEvaluator:
     def __init__(
-        self, model_config: AzureOpenAIModelConfiguration, eval_last_turn: bool = False, parallel: bool = True
+        self, model_config, eval_last_turn: bool = False, parallel: bool = True
     ):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
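Only the annotation changes here; callers are presumably still expected to pass an Azure OpenAI model configuration at runtime. A hedged construction sketch follows (endpoint, key, and deployment are placeholders; the conversation format is assumed from the evaluator family's documented usage and is not part of this diff):

from promptflow.core import AzureOpenAIModelConfiguration
from promptflow.evals.evaluators import ChatEvaluator

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<your-resource>.openai.azure.com",  # placeholder
    api_key="<api-key>",  # placeholder
    azure_deployment="<deployment-name>",  # placeholder
)
chat_eval = ChatEvaluator(model_config, eval_last_turn=False, parallel=True)
conversation = [
    {"role": "user", "content": "What is the capital of France?"},
    {"role": "assistant", "content": "Paris is the capital of France."},
]
result = chat_eval(conversation=conversation)  # assumed input name: conversation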
@@ -7,12 +7,11 @@
 from pathlib import Path
 
 from promptflow.client import load_flow
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.core._prompty_utils import convert_model_configuration_to_connection
 
 
 class CoherenceEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+    def __init__(self, model_config):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
 
@@ -0,0 +1,9 @@
+inputs:
+  question:
+    type: string
+  answer:
+    type: string
+init:
+  model_config:
+    type: object
+entry: __init__:CoherenceEvaluator
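The spec above is presumably the flex-flow definition for CoherenceEvaluator (its entry point targets the class's __init__): model_config is an init parameter of type object, and question/answer are the string inputs of the call. A hedged usage sketch matching that contract:

from promptflow.core import AzureOpenAIModelConfiguration
from promptflow.evals.evaluators import CoherenceEvaluator

model_config = AzureOpenAIModelConfiguration(
    azure_endpoint="https://<your-resource>.openai.azure.com",  # placeholder
    api_key="<api-key>",  # placeholder
    azure_deployment="<deployment-name>",  # placeholder
)
coherence = CoherenceEvaluator(model_config)  # init: model_config
result = coherence(  # inputs: question, answer
    question="What is Azure AI?",
    answer="Azure AI is a collection of Microsoft AI services.",
)
# result is a dict holding the coherence score; the exact key is not shown in this diff.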
@@ -7,12 +7,11 @@
 from pathlib import Path
 
 from promptflow.client import load_flow
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.core._prompty_utils import convert_model_configuration_to_connection
 
 
 class FluencyEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+    def __init__(self, model_config):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
 
@@ -7,12 +7,11 @@
 from pathlib import Path
 
 from promptflow.client import load_flow
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.core._prompty_utils import convert_model_configuration_to_connection
 
 
 class GroundednessEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+    def __init__(self, model_config):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
 
@@ -4,7 +4,6 @@
 
 __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
 
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.evals.evaluators import (
     CoherenceEvaluator,
     F1ScoreEvaluator,
@@ -16,7 +15,7 @@
 
 
 class QAEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+    def __init__(self, model_config):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
 
@@ -7,12 +7,11 @@
 from pathlib import Path
 
 from promptflow.client import load_flow
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.core._prompty_utils import convert_model_configuration_to_connection
 
 
 class RelevanceEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+    def __init__(self, model_config):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
 
@@ -7,12 +7,11 @@
 from pathlib import Path
 
 from promptflow.client import load_flow
-from promptflow.core import AzureOpenAIModelConfiguration
 from promptflow.core._prompty_utils import convert_model_configuration_to_connection
 
 
 class SimilarityEvaluator:
-    def __init__(self, model_config: AzureOpenAIModelConfiguration):
+    def __init__(self, model_config):
         """
         Initialize an evaluator configured for a specific Azure OpenAI model.
 
src/promptflow-evals/tests/evals/unittests/test_save_eval.py (new file, 42 additions, 0 deletions)
@@ -0,0 +1,42 @@
+from typing import Any, List, Optional
+
+import inspect
+import os
+import tempfile
+import unittest
+from promptflow.client import PFClient
+from promptflow.evals import evaluators
+from promptflow.evals.evaluators import content_safety
+
+
+class TestSaveEval(unittest.TestCase):
+    """Test saving evaluators."""
+
+    def setUp(self) -> None:
+        self.pf = PFClient()
+        unittest.TestCase.setUp(self)
+
+    def _do_test_saving(self,
+                        namespace: Any,
+                        exceptions: Optional[List[str]] = None) -> None:
+        """Do the actual test on saving evaluators."""
+        for name, obj in inspect.getmembers(namespace):
+            if inspect.isclass(obj):
+                if exceptions and name in exceptions:
+                    continue
+                with tempfile.TemporaryDirectory() as d:
+                    self.pf.flows.save(obj, path=d)
+                    self.assertTrue(os.path.isfile(os.path.join(d, 'flow.flex.yaml')))
+
+    def test_save_evaluators(self) -> None:
+        """Test regular evaluator saving."""
+        self._do_test_saving(evaluators, ['ChatEvaluator'])
+
+    @unittest.skip('RAI models constructor contains credentials, which is not supported.')
+    def test_save_rai_evaluators(self):
+        """Test saving of RAI evaluators"""
+        self._do_test_saving(content_safety)
+
+
+if __name__ == "__main__":
+    unittest.main()
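The new test enumerates every class exposed by the promptflow.evals.evaluators namespace (skipping ChatEvaluator via the exceptions list) and asserts that pf.flows.save writes a flow.flex.yaml for each. It can be run directly, since it calls unittest.main(), or via pytest using the path from the diff header, e.g. python -m pytest src/promptflow-evals/tests/evals/unittests/test_save_eval.py. The content_safety (RAI) case is skipped because, per the skip message, those evaluators take credentials in their constructors, which saving does not support.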