SDK regeneration
fern-api[bot] committed Mar 19, 2024
1 parent d476799 commit 2a4cdea
Showing 32 changed files with 100 additions and 193 deletions.
6 changes: 3 additions & 3 deletions poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "cohere"
version = "5.0.0a11"
version = "5.0.0a12"
description = ""
readme = "README.md"
authors = []
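The version bump is the only change to pyproject.toml and is mirrored in the request headers further down. A hedged way to confirm which build is installed after upgrading, using only the standard library and the distribution name declared in this file:

    from importlib.metadata import version

    # Reads the installed distribution version recorded from pyproject.toml.
    print(version("cohere"))  # expected to report 5.0.0a12 after upgrading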
40 changes: 40 additions & 0 deletions src/cohere/base_client.py
@@ -128,6 +128,7 @@ def chat_stream(
max_tokens: typing.Optional[int] = OMIT,
k: typing.Optional[int] = OMIT,
p: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
frequency_penalty: typing.Optional[float] = OMIT,
presence_penalty: typing.Optional[float] = OMIT,
raw_prompting: typing.Optional[bool] = OMIT,
@@ -210,6 +211,8 @@ def chat_stream(
- p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -353,6 +356,8 @@ def chat_stream(
_request["k"] = k
if p is not OMIT:
_request["p"] = p
if seed is not OMIT:
_request["seed"] = seed
if frequency_penalty is not OMIT:
_request["frequency_penalty"] = frequency_penalty
if presence_penalty is not OMIT:
@@ -420,6 +425,7 @@ def chat(
max_tokens: typing.Optional[int] = OMIT,
k: typing.Optional[int] = OMIT,
p: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
frequency_penalty: typing.Optional[float] = OMIT,
presence_penalty: typing.Optional[float] = OMIT,
raw_prompting: typing.Optional[bool] = OMIT,
@@ -502,6 +508,8 @@ def chat(
- p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -588,6 +596,8 @@ def chat(
_request["k"] = k
if p is not OMIT:
_request["p"] = p
if seed is not OMIT:
_request["seed"] = seed
if frequency_penalty is not OMIT:
_request["frequency_penalty"] = frequency_penalty
if presence_penalty is not OMIT:
@@ -643,6 +653,7 @@ def generate_stream(
max_tokens: typing.Optional[int] = OMIT,
truncate: typing.Optional[GenerateStreamRequestTruncate] = OMIT,
temperature: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
preset: typing.Optional[str] = OMIT,
end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -683,6 +694,8 @@ def generate_stream(
- temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
@@ -751,6 +764,8 @@ def generate_stream(
_request["truncate"] = truncate
if temperature is not OMIT:
_request["temperature"] = temperature
if seed is not OMIT:
_request["seed"] = seed
if preset is not OMIT:
_request["preset"] = preset
if end_sequences is not OMIT:
@@ -823,6 +838,7 @@ def generate(
max_tokens: typing.Optional[int] = OMIT,
truncate: typing.Optional[GenerateRequestTruncate] = OMIT,
temperature: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
preset: typing.Optional[str] = OMIT,
end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -863,6 +879,8 @@ def generate(
- temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
@@ -917,6 +935,8 @@ def generate(
_request["truncate"] = truncate
if temperature is not OMIT:
_request["temperature"] = temperature
if seed is not OMIT:
_request["seed"] = seed
if preset is not OMIT:
_request["preset"] = preset
if end_sequences is not OMIT:
@@ -1608,6 +1628,7 @@ async def chat_stream(
max_tokens: typing.Optional[int] = OMIT,
k: typing.Optional[int] = OMIT,
p: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
frequency_penalty: typing.Optional[float] = OMIT,
presence_penalty: typing.Optional[float] = OMIT,
raw_prompting: typing.Optional[bool] = OMIT,
@@ -1690,6 +1711,8 @@ async def chat_stream(
- p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -1833,6 +1856,8 @@ async def chat_stream(
_request["k"] = k
if p is not OMIT:
_request["p"] = p
if seed is not OMIT:
_request["seed"] = seed
if frequency_penalty is not OMIT:
_request["frequency_penalty"] = frequency_penalty
if presence_penalty is not OMIT:
@@ -1900,6 +1925,7 @@ async def chat(
max_tokens: typing.Optional[int] = OMIT,
k: typing.Optional[int] = OMIT,
p: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
frequency_penalty: typing.Optional[float] = OMIT,
presence_penalty: typing.Optional[float] = OMIT,
raw_prompting: typing.Optional[bool] = OMIT,
@@ -1982,6 +2008,8 @@ async def chat(
- p: typing.Optional[float]. Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -2068,6 +2096,8 @@ async def chat(
_request["k"] = k
if p is not OMIT:
_request["p"] = p
if seed is not OMIT:
_request["seed"] = seed
if frequency_penalty is not OMIT:
_request["frequency_penalty"] = frequency_penalty
if presence_penalty is not OMIT:
@@ -2123,6 +2153,7 @@ async def generate_stream(
max_tokens: typing.Optional[int] = OMIT,
truncate: typing.Optional[GenerateStreamRequestTruncate] = OMIT,
temperature: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
preset: typing.Optional[str] = OMIT,
end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -2163,6 +2194,8 @@ async def generate_stream(
- temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
@@ -2231,6 +2264,8 @@ async def generate_stream(
_request["truncate"] = truncate
if temperature is not OMIT:
_request["temperature"] = temperature
if seed is not OMIT:
_request["seed"] = seed
if preset is not OMIT:
_request["preset"] = preset
if end_sequences is not OMIT:
@@ -2303,6 +2338,7 @@ async def generate(
max_tokens: typing.Optional[int] = OMIT,
truncate: typing.Optional[GenerateRequestTruncate] = OMIT,
temperature: typing.Optional[float] = OMIT,
seed: typing.Optional[float] = OMIT,
preset: typing.Optional[str] = OMIT,
end_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -2343,6 +2379,8 @@ async def generate(
- temperature: typing.Optional[float]. A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
- seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
- preset: typing.Optional[str]. Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
@@ -2397,6 +2435,8 @@ async def generate(
_request["truncate"] = truncate
if temperature is not OMIT:
_request["temperature"] = temperature
if seed is not OMIT:
_request["seed"] = seed
if preset is not OMIT:
_request["preset"] = preset
if end_sequences is not OMIT:
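All eight entry points touched above (chat, chat_stream, generate and generate_stream, on both the sync and async clients) gain the same optional seed argument, and it is only written into the request body when it is not OMIT. Below is a minimal sketch of using it; the cohere.Client constructor form, the message/prompt arguments and the printed response fields sit outside this diff and are assumptions:

    import cohere

    co = cohere.Client("YOUR_API_KEY")  # constructor form assumed; see the package README

    # Repeated requests with the same seed and parameters should return the
    # same result on a best-effort basis; determinism is not fully guaranteed.
    first = co.chat(message="Name three blue things.", seed=42)
    second = co.chat(message="Name three blue things.", seed=42)
    print(first.text == second.text)  # usually True, but not guaranteed

    # generate() and generate_stream() thread the parameter through identically.
    gen = co.generate(prompt="Write one sentence about tides.", seed=42)
    print(gen.generations[0].text)  # response shape assumed, not shown in this diff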
2 changes: 1 addition & 1 deletion src/cohere/core/client_wrapper.py
@@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "cohere",
"X-Fern-SDK-Version": "5.0.0a11",
"X-Fern-SDK-Version": "5.0.0a12",
}
if self._client_name is not None:
headers["X-Client-Name"] = self._client_name
2 changes: 1 addition & 1 deletion src/cohere/types/create_embed_job_request_truncate.py
@@ -2,4 +2,4 @@

import typing

CreateEmbedJobRequestTruncate = typing.Literal["START", "END"]
CreateEmbedJobRequestTruncate = typing.Union[typing.AnyStr, typing.Literal["START", "END"]]
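The same widening repeats in every type-alias module below: each closed typing.Literal becomes a Union that also admits plain strings, so values the API introduces later still deserialize and type-check without an SDK update. A small sketch of the effect, mirroring the alias above; the normalize_truncate helper and the "MIDDLE" value are made up for illustration:

    import typing

    # Mirrors the widened alias from the diff above: the enumerated values
    # still type-check, and so does any other string the service might add.
    CreateEmbedJobRequestTruncate = typing.Union[typing.AnyStr, typing.Literal["START", "END"]]

    def normalize_truncate(value: CreateEmbedJobRequestTruncate) -> str:
        # Known literals pass through as before; unknown strings are kept as-is.
        if value in ("START", "END"):
            return value
        return str(value)

    print(normalize_truncate("END"))     # enumerated value
    print(normalize_truncate("MIDDLE"))  # not in the Literal, still accepted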
2 changes: 1 addition & 1 deletion src/cohere/types/auth_token_type.py
@@ -2,4 +2,4 @@

import typing

AuthTokenType = typing.Literal["bearer", "basic", "noscheme"]
AuthTokenType = typing.Union[typing.AnyStr, typing.Literal["bearer", "basic", "noscheme"]]
2 changes: 1 addition & 1 deletion src/cohere/types/chat_message_role.py
@@ -2,4 +2,4 @@

import typing

ChatMessageRole = typing.Literal["CHATBOT", "SYSTEM", "USER"]
ChatMessageRole = typing.Union[typing.AnyStr, typing.Literal["CHATBOT", "SYSTEM", "USER"]]
2 changes: 1 addition & 1 deletion src/cohere/types/chat_request_citation_quality.py
@@ -2,4 +2,4 @@

import typing

ChatRequestCitationQuality = typing.Literal["fast", "accurate"]
ChatRequestCitationQuality = typing.Union[typing.AnyStr, typing.Literal["fast", "accurate"]]
2 changes: 1 addition & 1 deletion src/cohere/types/chat_request_prompt_truncation.py
@@ -2,4 +2,4 @@

import typing

ChatRequestPromptTruncation = typing.Literal["OFF", "AUTO", "AUTO_PRESERVE_ORDER"]
ChatRequestPromptTruncation = typing.Union[typing.AnyStr, typing.Literal["OFF", "AUTO", "AUTO_PRESERVE_ORDER"]]
4 changes: 3 additions & 1 deletion src/cohere/types/chat_stream_end_event_finish_reason.py
@@ -2,4 +2,6 @@

import typing

ChatStreamEndEventFinishReason = typing.Literal["COMPLETE", "ERROR_LIMIT", "MAX_TOKENS", "ERROR", "ERROR_TOXIC"]
ChatStreamEndEventFinishReason = typing.Union[
typing.AnyStr, typing.Literal["COMPLETE", "ERROR_LIMIT", "MAX_TOKENS", "ERROR", "ERROR_TOXIC"]
]
2 changes: 1 addition & 1 deletion src/cohere/types/chat_stream_request_citation_quality.py
@@ -2,4 +2,4 @@

import typing

ChatStreamRequestCitationQuality = typing.Literal["fast", "accurate"]
ChatStreamRequestCitationQuality = typing.Union[typing.AnyStr, typing.Literal["fast", "accurate"]]
2 changes: 1 addition & 1 deletion src/cohere/types/chat_stream_request_prompt_truncation.py
@@ -2,4 +2,4 @@

import typing

ChatStreamRequestPromptTruncation = typing.Literal["OFF", "AUTO", "AUTO_PRESERVE_ORDER"]
ChatStreamRequestPromptTruncation = typing.Union[typing.AnyStr, typing.Literal["OFF", "AUTO", "AUTO_PRESERVE_ORDER"]]
2 changes: 1 addition & 1 deletion src/cohere/types/classify_request_truncate.py
@@ -2,4 +2,4 @@

import typing

ClassifyRequestTruncate = typing.Literal["NONE", "START", "END"]
ClassifyRequestTruncate = typing.Union[typing.AnyStr, typing.Literal["NONE", "START", "END"]]
4 changes: 3 additions & 1 deletion src/cohere/types/classify_response_classifications_item_classification_type.py
@@ -2,4 +2,6 @@

import typing

ClassifyResponseClassificationsItemClassificationType = typing.Literal["single-label", "multi-label"]
ClassifyResponseClassificationsItemClassificationType = typing.Union[
typing.AnyStr, typing.Literal["single-label", "multi-label"]
]
4 changes: 3 additions & 1 deletion src/cohere/types/compatible_endpoint.py
@@ -2,4 +2,6 @@

import typing

CompatibleEndpoint = typing.Literal["chat", "embed", "classify", "summarize", "rerank", "rate", "generate"]
CompatibleEndpoint = typing.Union[
typing.AnyStr, typing.Literal["chat", "embed", "classify", "summarize", "rerank", "rate", "generate"]
]
2 changes: 1 addition & 1 deletion src/cohere/types/connector_auth_status.py
@@ -2,4 +2,4 @@

import typing

ConnectorAuthStatus = typing.Literal["valid", "expired"]
ConnectorAuthStatus = typing.Union[typing.AnyStr, typing.Literal["valid", "expired"]]
23 changes: 13 additions & 10 deletions src/cohere/types/dataset_type.py
@@ -2,14 +2,17 @@

import typing

DatasetType = typing.Literal[
"embed-input",
"embed-result",
"cluster-result",
"cluster-outliers",
"reranker-finetune-input",
"prompt-completion-finetune-input",
"single-label-classification-finetune-input",
"chat-finetune-input",
"multi-label-classification-finetune-input",
DatasetType = typing.Union[
typing.AnyStr,
typing.Literal[
"embed-input",
"embed-result",
"cluster-result",
"cluster-outliers",
"reranker-finetune-input",
"prompt-completion-finetune-input",
"single-label-classification-finetune-input",
"chat-finetune-input",
"multi-label-classification-finetune-input",
],
]
4 changes: 3 additions & 1 deletion src/cohere/types/dataset_validation_status.py
@@ -2,4 +2,6 @@

import typing

DatasetValidationStatus = typing.Literal["unknown", "queued", "processing", "failed", "validated", "skipped"]
DatasetValidationStatus = typing.Union[
typing.AnyStr, typing.Literal["unknown", "queued", "processing", "failed", "validated", "skipped"]
]
4 changes: 3 additions & 1 deletion src/cohere/types/embed_input_type.py
@@ -2,4 +2,6 @@

import typing

EmbedInputType = typing.Literal["search_document", "search_query", "classification", "clustering"]
EmbedInputType = typing.Union[
typing.AnyStr, typing.Literal["search_document", "search_query", "classification", "clustering"]
]
4 changes: 3 additions & 1 deletion src/cohere/types/embed_job_status.py
@@ -2,4 +2,6 @@

import typing

EmbedJobStatus = typing.Literal["processing", "complete", "cancelling", "cancelled", "failed"]
EmbedJobStatus = typing.Union[
typing.AnyStr, typing.Literal["processing", "complete", "cancelling", "cancelled", "failed"]
]