SDK regeneration
fern-api[bot] committed Sep 19, 2024
1 parent cdd01e4 commit ed4edd2
Showing 19 changed files with 258 additions and 288 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "cohere"
version = "5.9.3"
version = "5.9.4"
description = ""
readme = "README.md"
authors = []
82 changes: 76 additions & 6 deletions reference.md
@@ -2858,17 +2858,14 @@ If you want to learn more how to use the embedding model, have a look at the [Se
<dd>

```python
-from cohere import Client, ImageEmbedRequestV2
+from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)
client.v2.embed(
-    request=ImageEmbedRequestV2(
-        images=["string"],
-        model="string",
-    ),
+    model="model",
)

```
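With the `ImageEmbedRequestV2` wrapper removed, image inputs are now passed directly as keyword arguments. As a rough sketch of what an image call might look like under the new flattened signature (the model name, data URI, and `input_type="image"` value are placeholders/assumptions, not taken from this diff):

```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)

# Hypothetical image-embedding call with the flattened parameters; the data URI
# below is a stand-in and must be a real base64-encoded image/jpeg or image/png.
client.v2.embed(
    model="YOUR_MODEL",
    images=["data:image/png;base64,..."],
    input_type="image",
)
```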
@@ -2885,7 +2882,80 @@
<dl>
<dd>

-**request:** `EmbedRequestV2`
+**model:** `str`

Defaults to embed-english-v2.0

The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.

Available models and corresponding embedding dimensions:

* `embed-english-v3.0` 1024
* `embed-multilingual-v3.0` 1024
* `embed-english-light-v3.0` 384
* `embed-multilingual-light-v3.0` 384

* `embed-english-v2.0` 4096
* `embed-english-light-v2.0` 1024
* `embed-multilingual-v2.0` 768

</dd>
</dl>

<dl>
<dd>

**texts:** `typing.Optional[typing.Sequence[str]]` — An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.

</dd>
</dl>

<dl>
<dd>

**images:** `typing.Optional[typing.Sequence[str]]`

An array of image data URIs for the model to embed. Maximum number of images per call is `1`.

The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and have a maximum size of 5MB.

</dd>
</dl>

<dl>
<dd>

**input_type:** `typing.Optional[EmbedInputType]`

</dd>
</dl>

<dl>
<dd>

**embedding_types:** `typing.Optional[typing.Sequence[EmbeddingType]]`

Specifies the types of embeddings you want to get back. Not required, and the default is None, which returns the Embed Floats response type. Can be one or more of the following types.

* `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
* `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
* `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
* `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
* `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.

</dd>
</dl>

<dl>
<dd>

**truncate:** `typing.Optional[V2EmbedRequestTruncate]`

One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.

Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.

If `NONE` is selected, an error will be returned when the input exceeds the maximum input token length.

</dd>
</dl>
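For reference, a minimal sketch combining several of the parameters documented above (the model name, texts, and input type are illustrative placeholders, not values mandated by the API):

```python
from cohere import Client

client = Client(
    client_name="YOUR_CLIENT_NAME",
    token="YOUR_TOKEN",
)

# Embed a small batch of texts, requesting float embeddings and truncating
# any over-length input from the end.
response = client.v2.embed(
    model="embed-english-v3.0",
    texts=["hello", "goodbye"],
    input_type="search_document",
    embedding_types=["float"],
    truncate="END",
)
```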
20 changes: 2 additions & 18 deletions src/cohere/__init__.py
@@ -80,7 +80,6 @@
CitationStartEventDelta,
CitationStartEventDeltaMessage,
CitationStartStreamedChatResponseV2,
-ClassificationEmbedRequestV2,
ClassifyDataMetrics,
ClassifyExample,
ClassifyRequestTruncate,
@@ -89,7 +88,6 @@
ClassifyResponseClassificationsItemClassificationType,
ClassifyResponseClassificationsItemLabelsValue,
ClientClosedRequestErrorBody,
-ClusteringEmbedRequestV2,
CompatibleEndpoint,
Connector,
ConnectorAuthStatus,
@@ -120,7 +118,6 @@
EmbedJobStatus,
EmbedJobTruncate,
EmbedRequestTruncate,
-EmbedRequestV2,
EmbedResponse,
EmbeddingType,
EmbeddingsByTypeEmbedResponse,
@@ -141,8 +138,6 @@
Generation,
GetConnectorResponse,
GetModelResponse,
-ImageEmbedRequestV2,
-Images,
JsonObjectResponseFormat,
JsonObjectResponseFormatV2,
JsonResponseFormat,
@@ -169,9 +164,7 @@
RerankerDataMetrics,
ResponseFormat,
ResponseFormatV2,
-SearchDocumentEmbedRequestV2,
SearchQueriesGenerationStreamedChatResponse,
-SearchQueryEmbedRequestV2,
SearchResultsStreamedChatResponse,
SingleGeneration,
SingleGenerationInStream,
@@ -200,8 +193,6 @@
TextResponseFormatV2,
TextSystemMessageContentItem,
TextToolContent,
-Texts,
-TextsTruncate,
TokenizeResponse,
TooManyRequestsErrorBody,
Tool,
@@ -267,6 +258,7 @@
V2ChatRequestSafetyMode,
V2ChatStreamRequestDocumentsItem,
V2ChatStreamRequestSafetyMode,
+V2EmbedRequestTruncate,
V2RerankRequestDocumentsItem,
V2RerankResponse,
V2RerankResponseResultsItem,
@@ -359,7 +351,6 @@
"CitationStartEventDelta",
"CitationStartEventDeltaMessage",
"CitationStartStreamedChatResponseV2",
"ClassificationEmbedRequestV2",
"ClassifyDataMetrics",
"ClassifyExample",
"ClassifyRequestTruncate",
@@ -372,7 +363,6 @@
"ClientClosedRequestErrorBody",
"ClientEnvironment",
"ClientV2",
"ClusteringEmbedRequestV2",
"CompatibleEndpoint",
"Connector",
"ConnectorAuthStatus",
@@ -409,7 +399,6 @@
"EmbedJobStatus",
"EmbedJobTruncate",
"EmbedRequestTruncate",
"EmbedRequestV2",
"EmbedResponse",
"EmbeddingType",
"EmbeddingsByTypeEmbedResponse",
@@ -432,8 +421,6 @@
"Generation",
"GetConnectorResponse",
"GetModelResponse",
"ImageEmbedRequestV2",
"Images",
"InternalServerError",
"JsonObjectResponseFormat",
"JsonObjectResponseFormatV2",
@@ -464,9 +451,7 @@
"ResponseFormat",
"ResponseFormatV2",
"SagemakerClient",
"SearchDocumentEmbedRequestV2",
"SearchQueriesGenerationStreamedChatResponse",
"SearchQueryEmbedRequestV2",
"SearchResultsStreamedChatResponse",
"ServiceUnavailableError",
"SingleGeneration",
@@ -496,8 +481,6 @@
"TextResponseFormatV2",
"TextSystemMessageContentItem",
"TextToolContent",
"Texts",
"TextsTruncate",
"TokenizeResponse",
"TooManyRequestsError",
"TooManyRequestsErrorBody",
@@ -536,6 +519,7 @@
"V2ChatRequestSafetyMode",
"V2ChatStreamRequestDocumentsItem",
"V2ChatStreamRequestSafetyMode",
"V2EmbedRequestTruncate",
"V2RerankRequestDocumentsItem",
"V2RerankResponse",
"V2RerankResponseResultsItem",
2 changes: 1 addition & 1 deletion src/cohere/core/client_wrapper.py
@@ -24,7 +24,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "cohere",
"X-Fern-SDK-Version": "5.9.3",
"X-Fern-SDK-Version": "5.9.4",
}
if self._client_name is not None:
headers["X-Client-Name"] = self._client_name
2 changes: 2 additions & 0 deletions src/cohere/finetuning/__init__.py
@@ -13,6 +13,7 @@
ListEventsResponse,
ListFinetunedModelsResponse,
ListTrainingStepMetricsResponse,
+LoraTargetModules,
Settings,
Status,
Strategy,
@@ -33,6 +34,7 @@
"ListEventsResponse",
"ListFinetunedModelsResponse",
"ListTrainingStepMetricsResponse",
"LoraTargetModules",
"Settings",
"Status",
"Strategy",
2 changes: 2 additions & 0 deletions src/cohere/finetuning/finetuning/__init__.py
@@ -12,6 +12,7 @@
ListEventsResponse,
ListFinetunedModelsResponse,
ListTrainingStepMetricsResponse,
+LoraTargetModules,
Settings,
Status,
Strategy,
@@ -32,6 +33,7 @@
"ListEventsResponse",
"ListFinetunedModelsResponse",
"ListTrainingStepMetricsResponse",
"LoraTargetModules",
"Settings",
"Status",
"Strategy",
2 changes: 2 additions & 0 deletions src/cohere/finetuning/finetuning/types/__init__.py
@@ -11,6 +11,7 @@
from .list_events_response import ListEventsResponse
from .list_finetuned_models_response import ListFinetunedModelsResponse
from .list_training_step_metrics_response import ListTrainingStepMetricsResponse
+from .lora_target_modules import LoraTargetModules
from .settings import Settings
from .status import Status
from .strategy import Strategy
@@ -30,6 +31,7 @@
"ListEventsResponse",
"ListFinetunedModelsResponse",
"ListTrainingStepMetricsResponse",
"LoraTargetModules",
"Settings",
"Status",
"Strategy",
2 changes: 1 addition & 1 deletion src/cohere/finetuning/finetuning/types/base_model.py
@@ -30,7 +30,7 @@ class BaseModel(UncheckedBaseModel):

strategy: typing.Optional[Strategy] = pydantic.Field(default=None)
"""
-The fine-tuning strategy.
+Deprecated: The fine-tuning strategy.
"""

if IS_PYDANTIC_V2:
18 changes: 18 additions & 0 deletions src/cohere/finetuning/finetuning/types/hyperparameters.py
@@ -3,6 +3,7 @@
from ....core.unchecked_base_model import UncheckedBaseModel
import typing
import pydantic
+from .lora_target_modules import LoraTargetModules
from ....core.pydantic_utilities import IS_PYDANTIC_V2


@@ -38,6 +39,23 @@ class Hyperparameters(UncheckedBaseModel):
The learning rate to be used during training.
"""

lora_alpha: typing.Optional[int] = pydantic.Field(default=None)
"""
Controls the scaling factor for LoRA updates. Higher values make the
updates more impactful.
"""

lora_rank: typing.Optional[int] = pydantic.Field(default=None)
"""
Specifies the rank for low-rank matrices. Lower ranks reduce parameters
but may limit model flexibility.
"""

lora_target_modules: typing.Optional[LoraTargetModules] = pydantic.Field(default=None)
"""
The combination of LoRA modules to target.
"""

if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
13 changes: 13 additions & 0 deletions src/cohere/finetuning/finetuning/types/lora_target_modules.py
@@ -0,0 +1,13 @@
# This file was auto-generated by Fern from our API Definition.

import typing

LoraTargetModules = typing.Union[
    typing.Literal[
        "LORA_TARGET_MODULES_UNSPECIFIED",
        "LORA_TARGET_MODULES_QV",
        "LORA_TARGET_MODULES_QKVO",
        "LORA_TARGET_MODULES_QKVO_FFN",
    ],
    typing.Any,
]
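A rough usage sketch for the new LoRA hyperparameters, assuming `Hyperparameters` is exported from `cohere.finetuning` alongside `LoraTargetModules` (the numeric values are illustrative, not recommended defaults):

```python
from cohere.finetuning import Hyperparameters

# Hypothetical fine-tuning hyperparameter configuration using the new LoRA fields.
hp = Hyperparameters(
    learning_rate=0.01,
    lora_alpha=16,
    lora_rank=8,
    lora_target_modules="LORA_TARGET_MODULES_QV",
)
```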
