|
4 | 4 | """
|
5 | 5 |
|
from dataclasses import dataclass, fields
from typing import List, Literal, Optional
8 | 8 |
|
9 | 9 |
|
# pylint: disable=too-many-instance-attributes
@dataclass
class CompletionConfig:
    """
    Interface for the completion configuration portion of a prompt template.

    Attributes:
        completion_type (Optional[Literal['chat','text']]): Type of completion to use.
            Defaults to using the completion type of the configured default model.
            New in schema version 1.1.

        frequency_penalty (float): The model's frequency_penalty as a number between 0 and 1.
            Defaults to 0.

        include_history (bool): If true, the prompt will be augmented with the conversation history.
            Defaults to True.
            New in schema version 1.1.

        include_input (bool): If true, the prompt will be augmented with the user's input.
            Defaults to True.
            New in schema version 1.1.

        include_images (bool): If true, the prompt will be augmented with any images
            uploaded by the user. Defaults to False.
            New in schema version 1.1.

        max_tokens (int): The maximum number of tokens to generate.
            Defaults to 150.

        max_input_tokens (int): The maximum number of tokens allowed in the input.
            Defaults to 2048.
            New in schema version 1.1.

        model (Optional[str]): Name of the model to use otherwise the configured
            default model is used. Defaults to None.
            New in schema version 1.1.

        presence_penalty (float): The model's presence_penalty as a number between 0 and 1.
            Defaults to 0.

        stop_sequences (Optional[List[str]]): Array of stop sequences that when hit will
            stop generation. Defaults to None.

        temperature (float): The model's temperature as a number between 0 and 2.
            Defaults to 0.

        top_p (float): The model's top_p as a number between 0 and 2.
            Defaults to 0.
    """

    completion_type: Optional[Literal["chat", "text"]] = None
    frequency_penalty: float = 0
    include_history: bool = True
    include_input: bool = True
    include_images: bool = False
    max_tokens: int = 150
    max_input_tokens: int = 2048
    model: Optional[str] = None
    presence_penalty: float = 0
    stop_sequences: Optional[List[str]] = None
    temperature: float = 0
    top_p: float = 0

    @classmethod
    def from_dict(cls, data: dict) -> "CompletionConfig":
        """
        Create a CompletionConfig from a dictionary.

        Keys that do not correspond to a declared field are ignored; missing
        keys fall back to the dataclass field defaults, so every default is
        defined in exactly one place (the field declarations above) instead of
        being duplicated here.

        Args:
            data (dict): Dictionary of configuration values, typically loaded
                from a prompt template's config file.

        Returns:
            CompletionConfig: The populated configuration object.
        """
        known = {f.name for f in fields(cls)}
        return cls(**{key: value for key, value in data.items() if key in known})
|
0 commit comments