99
1010import openlayer
1111
12- from . import utils
12+ from . import tasks , utils
1313
1414logger = logging .getLogger (__name__ )
1515
@@ -23,6 +23,8 @@ class OpenAIMonitor:
2323 Whether to publish the data to Openlayer as soon as it is available. If True,
2424 the Openlayer credentials must be provided (either as keyword arguments or as
2525 environment variables).
26+ client : openai.api_client.Client, optional
27+ The OpenAI client. It is required if you are using openai>=1.0.0.
2628 openlayer_api_key : str, optional
2729 The Openlayer API key. If not provided, it is read from the environment
2830 variable `OPENLAYER_API_KEY`. This is required if `publish` is set to True.
@@ -44,22 +46,32 @@ class OpenAIMonitor:
4446 1. Set the environment variables:
4547
4648 .. code-block:: bash
49+ export OPENAI_API_KEY=<your-openai-api-key>
4750
48- export OPENLAYER_API_KEY=<your-api-key>
51+ export OPENLAYER_API_KEY=<your-openlayer-api-key>
4952 export OPENLAYER_PROJECT_NAME=<your-project-name>
5053
5154 2. Instantiate the monitor:
5255
56+ ** If you are using openai<1.0.0 **
5357 >>> from openlayer import llm_monitors
5458 >>>
5559 >>> monitor = llm_monitors.OpenAIMonitor(publish=True)
5660
61+ ** If you are using openai>=1.0.0 **
62+ >>> from openlayer import llm_monitors
63+ >>> from openai import OpenAI
64+ >>>
65+ >>> openai_client = OpenAI()
66+ >>> monitor = llm_monitors.OpenAIMonitor(publish=True, client=openai_client)
67+
5768 3. Start monitoring:
5869
5970 >>> monitor.start_monitoring()
6071
6172 From this point onwards, you can continue making requests to your model normally:
6273
74+ ** If you are using openai<1.0.0 **
6375 >>> openai.ChatCompletion.create(
6476 >>> model="gpt-3.5-turbo",
6577 >>> messages=[
@@ -68,6 +80,15 @@ class OpenAIMonitor:
6880 >>> ],
6981 >>> )
7082
83+ ** If you are using openai>=1.0.0 **
84+ >>> openai_client.chat.completions.create(
85+ >>> model="gpt-3.5-turbo",
86+ >>> messages=[
87+ >>> {"role": "system", "content": "You are a helpful assistant."},
88+ >>> {"role": "user", "content": "How are you doing today?"}
89+ >>> ],
90+ >>> )
91+
7192 Your data is automatically being published to your Openlayer project!
7293
7394 If you no longer want to monitor your model, you can stop monitoring by calling:
@@ -79,6 +100,7 @@ class OpenAIMonitor:
79100 def __init__ (
80101 self ,
81102 publish : bool = False ,
103+ client = None ,
82104 openlayer_api_key : Optional [str ] = None ,
83105 openlayer_project_name : Optional [str ] = None ,
84106 openlayer_inference_pipeline_name : Optional [str ] = None ,
@@ -97,6 +119,13 @@ def __init__(
97119 self ._load_inference_pipeline ()
98120
99121 # OpenAI setup
122+ self .openai_version = openai .__version__
123+ if self .openai_version .split ("." )[0 ] == "1" and client is None :
124+ raise ValueError (
125+ "You must provide the OpenAI client for as the kwarg `client` for"
126+ " openai>=1.0.0."
127+ )
128+ self .openai_client = client
100129 self .create_chat_completion : callable = None
101130 self .create_completion : callable = None
102131 self .modified_create_chat_completion : callable = None
@@ -148,7 +177,9 @@ def _load_inference_pipeline(self) -> None:
148177 client = openlayer .OpenlayerClient (
149178 api_key = self .openlayer_api_key ,
150179 )
151- project = client .load_project (name = self .openlayer_project_name )
180+ project = client .create_project (
181+ name = self .openlayer_project_name , task_type = tasks .TaskType .LLM
182+ )
152183 if self .openlayer_inference_pipeline_name :
153184 inference_pipeline = project .load_inference_pipeline (
154185 name = self .openlayer_inference_pipeline_name
@@ -160,8 +191,14 @@ def _load_inference_pipeline(self) -> None:
160191
161192 def _initialize_openai (self ) -> None :
162193 """Initializes the OpenAI attributes."""
163- self .create_chat_completion = openai .ChatCompletion .create
164- self .create_completion = openai .Completion .create
194+ if self .openai_version .split ("." )[0 ] == "0" :
195+ openai_api_key = utils .get_env_variable ("OPENAI_API_KEY" )
196+ openai .api_key = openai_api_key
197+ self .create_chat_completion = openai .ChatCompletion .create
198+ self .create_completion = openai .Completion .create
199+ else :
200+ self .create_chat_completion = self .openai_client .chat .completions .create
201+ self .create_completion = self .openai_client .completions .create
165202 self .modified_create_chat_completion = (
166203 self ._get_modified_create_chat_completion ()
167204 )
@@ -178,7 +215,7 @@ def modified_create_chat_completion(*args, **kwargs) -> str:
178215 try :
179216 input_data = self ._format_user_messages (kwargs ["messages" ])
180217 output_data = response .choices [0 ].message .content .strip ()
181- num_of_tokens = response .usage [ " total_tokens" ]
218+ num_of_tokens = response .usage . total_tokens
182219
183220 self ._append_row_to_df (
184221 input_data = input_data ,
@@ -211,7 +248,7 @@ def modified_create_completion(*args, **kwargs):
211248
212249 for input_data , choices in zip (prompts , choices_splits ):
213250 output_data = choices [0 ].text .strip ()
214- num_of_tokens = int (response .usage [ " total_tokens" ] / len (prompts ))
251+ num_of_tokens = int (response .usage . total_tokens / len (prompts ))
215252
216253 self ._append_row_to_df (
217254 input_data = input_data ,
@@ -313,8 +350,14 @@ def start_monitoring(self) -> None:
313350
314351 def _overwrite_completion_methods (self ) -> None :
315352 """Overwrites OpenAI's completion methods with the modified versions."""
316- openai .ChatCompletion .create = self .modified_create_chat_completion
317- openai .Completion .create = self .modified_create_completion
353+ if self .openai_version .split ("." )[0 ] == "0" :
354+ openai .ChatCompletion .create = self .modified_create_chat_completion
355+ openai .Completion .create = self .modified_create_completion
356+ else :
357+ self .openai_client .chat .completions .create = (
358+ self .modified_create_chat_completion
359+ )
360+ self .openai_client .completions .create = self .modified_create_completion
318361
319362 def stop_monitoring (self ):
320363 """Switches monitoring for OpenAI LLMs off.
@@ -335,8 +378,12 @@ def stop_monitoring(self):
335378
336379 def _restore_completion_methods (self ) -> None :
337380 """Restores OpenAI's completion methods to their original versions."""
338- openai .ChatCompletion .create = self .create_chat_completion
339- openai .Completion .create = self .create_completion
381+ if self .openai_version .split ("." )[0 ] == "0" :
382+ openai .ChatCompletion .create = self .create_chat_completion
383+ openai .Completion .create = self .create_completion
384+ else :
385+ self .openai_client .chat .completions .create = self .create_chat_completion
386+ self .openai_client .completions .create = self .create_completion
340387
341388 def publish_batch_data (self ):
342389 """Manually publish the accumulated data to Openlayer when automatic publishing
0 commit comments