Commit 96f29a9

SDK regeneration (#421)

Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com>

fern-api[bot] authored Mar 21, 2024
1 parent 681424e commit 96f29a9
Showing 3 changed files with 24 additions and 2 deletions.
pyproject.toml: 2 changes (1 addition, 1 deletion)

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "cohere"
-version = "5.0.0"
+version = "5.0.1"
 description = ""
 readme = "README.md"
 authors = []
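The version string also appears in the SDK request headers further down, so the two must move together. A quick sanity check that an installed environment picked up this release, using only the standard library; the distribution name "cohere" comes from the [tool.poetry] table above:

    from importlib.metadata import version

    # Reads the installed distribution's metadata, which Poetry builds
    # from pyproject.toml; prints "5.0.1" for this release.
    print(version("cohere"))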
src/cohere/base_client.py: 22 changes (22 additions, 0 deletions)

@@ -129,6 +129,7 @@ def chat_stream(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -213,6 +214,8 @@ def chat_stream(
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
                 Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -295,6 +298,7 @@ def chat_stream(
             k=1,
             p=1.1,
             seed=1.1,
+            stop_sequences=["string"],
             connectors_search_options=ChatStreamRequestConnectorsSearchOptions(
                 model={"key": "value"},
                 temperature={"key": "value"},
@@ -358,6 +362,8 @@ def chat_stream(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -426,6 +432,7 @@ def chat(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -510,6 +517,8 @@ def chat(
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
                 Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -598,6 +607,8 @@ def chat(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -1629,6 +1640,7 @@ async def chat_stream(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -1713,6 +1725,8 @@ async def chat_stream(
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
                 Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -1795,6 +1809,7 @@ async def chat_stream(
             k=1,
             p=1.1,
             seed=1.1,
+            stop_sequences=["string"],
             connectors_search_options=ChatStreamRequestConnectorsSearchOptions(
                 model={"key": "value"},
                 temperature={"key": "value"},
@@ -1858,6 +1873,8 @@ async def chat_stream(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
@@ -1926,6 +1943,7 @@ async def chat(
         k: typing.Optional[int] = OMIT,
         p: typing.Optional[float] = OMIT,
         seed: typing.Optional[float] = OMIT,
+        stop_sequences: typing.Optional[typing.Sequence[str]] = OMIT,
         frequency_penalty: typing.Optional[float] = OMIT,
         presence_penalty: typing.Optional[float] = OMIT,
         raw_prompting: typing.Optional[bool] = OMIT,
@@ -2010,6 +2028,8 @@ async def chat(
             - seed: typing.Optional[float]. If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
 
+            - stop_sequences: typing.Optional[typing.Sequence[str]]. A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+
             - frequency_penalty: typing.Optional[float]. Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
                 Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
@@ -2098,6 +2118,8 @@ async def chat(
             _request["p"] = p
         if seed is not OMIT:
             _request["seed"] = seed
+        if stop_sequences is not OMIT:
+            _request["stop_sequences"] = stop_sequences
         if frequency_penalty is not OMIT:
             _request["frequency_penalty"] = frequency_penalty
         if presence_penalty is not OMIT:
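The substantive change in this regeneration is that stop_sequences is now accepted by chat and chat_stream on both the sync and async clients. A minimal usage sketch follows; the cohere.Client(api_key=...) constructor, the message argument, and the streaming event interface come from the v5 SDK's public README rather than this diff, and the key is a placeholder:

    import cohere

    co = cohere.Client(api_key="YOUR_API_KEY")  # placeholder key

    # Up to 5 stop strings; generation halts as soon as the model emits
    # any of them, and the returned text excludes the matched sequence.
    response = co.chat(
        message="List three sea creatures, then write END.",
        stop_sequences=["END"],
    )
    print(response.text)

    # The same parameter applies to the streaming variant.
    for event in co.chat_stream(
        message="List three sea creatures, then write END.",
        stop_sequences=["END"],
    ):
        if event.event_type == "text-generation":
            print(event.text, end="")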
src/cohere/core/client_wrapper.py: 2 changes (1 addition, 1 deletion)

@@ -23,7 +23,7 @@ def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "cohere",
-            "X-Fern-SDK-Version": "5.0.0",
+            "X-Fern-SDK-Version": "5.0.1",
         }
         if self._client_name is not None:
             headers["X-Client-Name"] = self._client_name
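Together with the pyproject.toml bump, this keeps the version the wrapper reports in lockstep with the published package. Based on the method body above, a client constructed with a client name would send headers like the following sketch; the client name shown is hypothetical, and how the wrapper is constructed (and any Authorization header) is internal to the generated SDK and not part of this diff:

    expected_headers = {
        "X-Fern-Language": "Python",
        "X-Fern-SDK-Name": "cohere",
        "X-Fern-SDK-Version": "5.0.1",
        # Present only when a client_name was supplied at construction time.
        "X-Client-Name": "my-app",  # hypothetical client name
    }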
