hexo/node_modules/chatgpt/build/index.js.map

1 line
51 KiB
Plaintext

{"version":3,"sources":["../src/chatgpt-api.ts","../src/tokenizer.ts","../src/types.ts","../src/fetch.ts","../src/fetch-sse.ts","../src/stream-async-iterable.ts","../src/chatgpt-unofficial-proxy-api.ts","../src/utils.ts"],"sourcesContent":["import Keyv from 'keyv'\nimport pTimeout from 'p-timeout'\nimport QuickLRU from 'quick-lru'\nimport { v4 as uuidv4 } from 'uuid'\n\nimport * as tokenizer from './tokenizer'\nimport * as types from './types'\nimport { fetch as globalFetch } from './fetch'\nimport { fetchSSE } from './fetch-sse'\n\nconst CHATGPT_MODEL = 'gpt-3.5-turbo'\n\nconst USER_LABEL_DEFAULT = 'User'\nconst ASSISTANT_LABEL_DEFAULT = 'ChatGPT'\n\nexport class ChatGPTAPI {\n protected _apiKey: string\n protected _apiBaseUrl: string\n protected _apiOrg?: string\n protected _debug: boolean\n\n protected _systemMessage: string\n protected _completionParams: Omit<\n types.openai.CreateChatCompletionRequest,\n 'messages' | 'n'\n >\n protected _maxModelTokens: number\n protected _maxResponseTokens: number\n protected _fetch: types.FetchFn\n\n protected _getMessageById: types.GetMessageByIdFunction\n protected _upsertMessage: types.UpsertMessageFunction\n\n protected _messageStore: Keyv<types.ChatMessage>\n\n /**\n * Creates a new client wrapper around OpenAI's chat completion API, mimicing the official ChatGPT webapp's functionality as closely as possible.\n *\n * @param apiKey - OpenAI API key (required).\n * @param apiOrg - Optional OpenAI API organization (optional).\n * @param apiBaseUrl - Optional override for the OpenAI API base URL.\n * @param debug - Optional enables logging debugging info to stdout.\n * @param completionParams - Param overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.\n * @param maxModelTokens - Optional override for the maximum number of tokens allowed by the model's context. Defaults to 4096.\n * @param maxResponseTokens - Optional override for the minimum number of tokens allowed for the model's response. Defaults to 1000.\n * @param messageStore - Optional [Keyv](https://github.com/jaredwray/keyv) store to persist chat messages to. If not provided, messages will be lost when the process exits.\n * @param getMessageById - Optional function to retrieve a message by its ID. If not provided, the default implementation will be used (using an in-memory `messageStore`).\n * @param upsertMessage - Optional function to insert or update a message. If not provided, the default implementation will be used (using an in-memory `messageStore`).\n * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.\n */\n constructor(opts: types.ChatGPTAPIOptions) {\n const {\n apiKey,\n apiOrg,\n apiBaseUrl = 'https://api.openai.com/v1',\n debug = false,\n messageStore,\n completionParams,\n systemMessage,\n maxModelTokens = 4000,\n maxResponseTokens = 1000,\n getMessageById,\n upsertMessage,\n fetch = globalFetch\n } = opts\n\n this._apiKey = apiKey\n this._apiOrg = apiOrg\n this._apiBaseUrl = apiBaseUrl\n this._debug = !!debug\n this._fetch = fetch\n\n this._completionParams = {\n model: CHATGPT_MODEL,\n temperature: 0.8,\n top_p: 1.0,\n presence_penalty: 1.0,\n ...completionParams\n }\n\n this._systemMessage = systemMessage\n\n if (this._systemMessage === undefined) {\n const currentDate = new Date().toISOString().split('T')[0]\n this._systemMessage = `You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.\\nKnowledge cutoff: 2021-09-01\\nCurrent date: ${currentDate}`\n }\n\n this._maxModelTokens = maxModelTokens\n this._maxResponseTokens = maxResponseTokens\n\n this._getMessageById = getMessageById ?? this._defaultGetMessageById\n this._upsertMessage = upsertMessage ?? this._defaultUpsertMessage\n\n if (messageStore) {\n this._messageStore = messageStore\n } else {\n this._messageStore = new Keyv<types.ChatMessage, any>({\n store: new QuickLRU<string, types.ChatMessage>({ maxSize: 10000 })\n })\n }\n\n if (!this._apiKey) {\n throw new Error('OpenAI missing required apiKey')\n }\n\n if (!this._fetch) {\n throw new Error('Invalid environment; fetch is not defined')\n }\n\n if (typeof this._fetch !== 'function') {\n throw new Error('Invalid \"fetch\" is not a function')\n }\n }\n\n /**\n * Sends a message to the OpenAI chat completions endpoint, waits for the response\n * to resolve, and returns the response.\n *\n * If you want your response to have historical context, you must provide a valid `parentMessageId`.\n *\n * If you want to receive a stream of partial responses, use `opts.onProgress`.\n *\n * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI chat completions API. You can override the `systemMessage` in `opts` to customize the assistant's instructions.\n *\n * @param message - The prompt message to send\n * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)\n * @param opts.conversationId - Optional ID of the conversation (defaults to `undefined`)\n * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)\n * @param opts.systemMessage - Optional override for the chat \"system message\" which acts as instructions to the model (defaults to the ChatGPT system message)\n * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)\n * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated\n * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)\n * @param completionParams - Optional overrides to send to the [OpenAI chat completion API](https://platform.openai.com/docs/api-reference/chat/create). Options like `temperature` and `presence_penalty` can be tweaked to change the personality of the assistant.\n *\n * @returns The response from ChatGPT\n */\n async sendMessage(\n text: string,\n opts: types.SendMessageOptions = {}\n ): Promise<types.ChatMessage> {\n const {\n parentMessageId,\n messageId = uuidv4(),\n timeoutMs,\n onProgress,\n stream = onProgress ? true : false,\n completionParams,\n conversationId\n } = opts\n\n let { abortSignal } = opts\n\n let abortController: AbortController = null\n if (timeoutMs && !abortSignal) {\n abortController = new AbortController()\n abortSignal = abortController.signal\n }\n\n const message: types.ChatMessage = {\n role: 'user',\n id: messageId,\n conversationId,\n parentMessageId,\n text\n }\n\n const latestQuestion = message\n\n const { messages, maxTokens, numTokens } = await this._buildMessages(\n text,\n opts\n )\n\n const result: types.ChatMessage = {\n role: 'assistant',\n id: uuidv4(),\n conversationId,\n parentMessageId: messageId,\n text: ''\n }\n\n const responseP = new Promise<types.ChatMessage>(\n async (resolve, reject) => {\n const url = `${this._apiBaseUrl}/chat/completions`\n const headers = {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${this._apiKey}`\n }\n const body = {\n max_tokens: maxTokens,\n ...this._completionParams,\n ...completionParams,\n messages,\n stream\n }\n\n // Support multiple organizations\n // See https://platform.openai.com/docs/api-reference/authentication\n if (this._apiOrg) {\n headers['OpenAI-Organization'] = this._apiOrg\n }\n\n if (this._debug) {\n console.log(`sendMessage (${numTokens} tokens)`, body)\n }\n\n if (stream) {\n fetchSSE(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: abortSignal,\n onMessage: (data: string) => {\n if (data === '[DONE]') {\n result.text = result.text.trim()\n return resolve(result)\n }\n\n try {\n const response: types.openai.CreateChatCompletionDeltaResponse =\n JSON.parse(data)\n\n if (response.id) {\n result.id = response.id\n }\n\n if (response.choices?.length) {\n const delta = response.choices[0].delta\n result.delta = delta.content\n if (delta?.content) result.text += delta.content\n\n if (delta.role) {\n result.role = delta.role\n }\n\n result.detail = response\n onProgress?.(result)\n }\n } catch (err) {\n console.warn('OpenAI stream SEE event unexpected error', err)\n return reject(err)\n }\n }\n },\n this._fetch\n ).catch(reject)\n } else {\n try {\n const res = await this._fetch(url, {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: abortSignal\n })\n\n if (!res.ok) {\n const reason = await res.text()\n const msg = `OpenAI error ${\n res.status || res.statusText\n }: ${reason}`\n const error = new types.ChatGPTError(msg, { cause: res })\n error.statusCode = res.status\n error.statusText = res.statusText\n return reject(error)\n }\n\n const response: types.openai.CreateChatCompletionResponse =\n await res.json()\n if (this._debug) {\n console.log(response)\n }\n\n if (response?.id) {\n result.id = response.id\n }\n\n if (response?.choices?.length) {\n const message = response.choices[0].message\n result.text = message.content\n if (message.role) {\n result.role = message.role\n }\n } else {\n const res = response as any\n return reject(\n new Error(\n `OpenAI error: ${\n res?.detail?.message || res?.detail || 'unknown'\n }`\n )\n )\n }\n\n result.detail = response\n\n return resolve(result)\n } catch (err) {\n return reject(err)\n }\n }\n }\n ).then(async (message) => {\n if (message.detail && !message.detail.usage) {\n try {\n const promptTokens = numTokens\n const completionTokens = await this._getTokenCount(message.text)\n message.detail.usage = {\n prompt_tokens: promptTokens,\n completion_tokens: completionTokens,\n total_tokens: promptTokens + completionTokens,\n estimated: true\n }\n } catch (err) {\n // TODO: this should really never happen, but if it does,\n // we should handle notify the user gracefully\n }\n }\n\n return Promise.all([\n this._upsertMessage(latestQuestion),\n this._upsertMessage(message)\n ]).then(() => message)\n })\n\n if (timeoutMs) {\n if (abortController) {\n // This will be called when a timeout occurs in order for us to forcibly\n // ensure that the underlying HTTP request is aborted.\n ;(responseP as any).cancel = () => {\n abortController.abort()\n }\n }\n\n return pTimeout(responseP, {\n milliseconds: timeoutMs,\n message: 'OpenAI timed out waiting for response'\n })\n } else {\n return responseP\n }\n }\n\n get apiKey(): string {\n return this._apiKey\n }\n\n set apiKey(apiKey: string) {\n this._apiKey = apiKey\n }\n\n get apiOrg(): string {\n return this._apiOrg\n }\n\n set apiOrg(apiOrg: string) {\n this._apiOrg = apiOrg\n }\n\n protected async _buildMessages(text: string, opts: types.SendMessageOptions) {\n const { systemMessage = this._systemMessage } = opts\n let { parentMessageId } = opts\n\n const userLabel = USER_LABEL_DEFAULT\n const assistantLabel = ASSISTANT_LABEL_DEFAULT\n\n const maxNumTokens = this._maxModelTokens - this._maxResponseTokens\n let messages: types.openai.ChatCompletionRequestMessage[] = []\n\n if (systemMessage) {\n messages.push({\n role: 'system',\n content: systemMessage\n })\n }\n\n const systemMessageOffset = messages.length\n let nextMessages = text\n ? messages.concat([\n {\n role: 'user',\n content: text,\n name: opts.name\n }\n ])\n : messages\n let numTokens = 0\n\n do {\n const prompt = nextMessages\n .reduce((prompt, message) => {\n switch (message.role) {\n case 'system':\n return prompt.concat([`Instructions:\\n${message.content}`])\n case 'user':\n return prompt.concat([`${userLabel}:\\n${message.content}`])\n default:\n return prompt.concat([`${assistantLabel}:\\n${message.content}`])\n }\n }, [] as string[])\n .join('\\n\\n')\n\n const nextNumTokensEstimate = await this._getTokenCount(prompt)\n const isValidPrompt = nextNumTokensEstimate <= maxNumTokens\n\n if (prompt && !isValidPrompt) {\n break\n }\n\n messages = nextMessages\n numTokens = nextNumTokensEstimate\n\n if (!isValidPrompt) {\n break\n }\n\n if (!parentMessageId) {\n break\n }\n\n const parentMessage = await this._getMessageById(parentMessageId)\n if (!parentMessage) {\n break\n }\n\n const parentMessageRole = parentMessage.role || 'user'\n\n nextMessages = nextMessages.slice(0, systemMessageOffset).concat([\n {\n role: parentMessageRole,\n content: parentMessage.text,\n name: parentMessage.name\n },\n ...nextMessages.slice(systemMessageOffset)\n ])\n\n parentMessageId = parentMessage.parentMessageId\n } while (true)\n\n // Use up to 4096 tokens (prompt + response), but try to leave 1000 tokens\n // for the response.\n const maxTokens = Math.max(\n 1,\n Math.min(this._maxModelTokens - numTokens, this._maxResponseTokens)\n )\n\n return { messages, maxTokens, numTokens }\n }\n\n protected async _getTokenCount(text: string) {\n // TODO: use a better fix in the tokenizer\n text = text.replace(/<\\|endoftext\\|>/g, '')\n\n return tokenizer.encode(text).length\n }\n\n protected async _defaultGetMessageById(\n id: string\n ): Promise<types.ChatMessage> {\n const res = await this._messageStore.get(id)\n return res\n }\n\n protected async _defaultUpsertMessage(\n message: types.ChatMessage\n ): Promise<void> {\n await this._messageStore.set(message.id, message)\n }\n}\n","import { getEncoding } from 'js-tiktoken'\n\n// TODO: make this configurable\nconst tokenizer = getEncoding('cl100k_base')\n\nexport function encode(input: string): Uint32Array {\n return new Uint32Array(tokenizer.encode(input))\n}\n","import Keyv from 'keyv'\n\nexport type Role = 'user' | 'assistant' | 'system'\n\nexport type FetchFn = typeof fetch\n\nexport type ChatGPTAPIOptions = {\n apiKey: string\n\n /** @defaultValue `'https://api.openai.com'` **/\n apiBaseUrl?: string\n\n apiOrg?: string\n\n /** @defaultValue `false` **/\n debug?: boolean\n\n completionParams?: Partial<\n Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>\n >\n\n systemMessage?: string\n\n /** @defaultValue `4096` **/\n maxModelTokens?: number\n\n /** @defaultValue `1000` **/\n maxResponseTokens?: number\n\n messageStore?: Keyv\n getMessageById?: GetMessageByIdFunction\n upsertMessage?: UpsertMessageFunction\n\n fetch?: FetchFn\n}\n\nexport type SendMessageOptions = {\n /** The name of a user in a multi-user chat. */\n name?: string\n parentMessageId?: string\n conversationId?: string\n messageId?: string\n stream?: boolean\n systemMessage?: string\n timeoutMs?: number\n onProgress?: (partialResponse: ChatMessage) => void\n abortSignal?: AbortSignal\n completionParams?: Partial<\n Omit<openai.CreateChatCompletionRequest, 'messages' | 'n' | 'stream'>\n >\n}\n\nexport type MessageActionType = 'next' | 'variant'\n\nexport type SendMessageBrowserOptions = {\n conversationId?: string\n parentMessageId?: string\n messageId?: string\n action?: MessageActionType\n timeoutMs?: number\n onProgress?: (partialResponse: ChatMessage) => void\n abortSignal?: AbortSignal\n}\n\nexport interface ChatMessage {\n id: string\n text: string\n role: Role\n name?: string\n delta?: string\n detail?:\n | openai.CreateChatCompletionResponse\n | CreateChatCompletionStreamResponse\n\n // relevant for both ChatGPTAPI and ChatGPTUnofficialProxyAPI\n parentMessageId?: string\n\n // only relevant for ChatGPTUnofficialProxyAPI (optional for ChatGPTAPI)\n conversationId?: string\n}\n\nexport class ChatGPTError extends Error {\n statusCode?: number\n statusText?: string\n isFinal?: boolean\n accountId?: string\n}\n\n/** Returns a chat message from a store by it's ID (or null if not found). */\nexport type GetMessageByIdFunction = (id: string) => Promise<ChatMessage>\n\n/** Upserts a chat message to a store. */\nexport type UpsertMessageFunction = (message: ChatMessage) => Promise<void>\n\nexport interface CreateChatCompletionStreamResponse\n extends openai.CreateChatCompletionDeltaResponse {\n usage: CreateCompletionStreamResponseUsage\n}\n\nexport interface CreateCompletionStreamResponseUsage\n extends openai.CreateCompletionResponseUsage {\n estimated: true\n}\n\n/**\n * https://chat.openapi.com/backend-api/conversation\n */\nexport type ConversationJSONBody = {\n /**\n * The action to take\n */\n action: string\n\n /**\n * The ID of the conversation\n */\n conversation_id?: string\n\n /**\n * Prompts to provide\n */\n messages: Prompt[]\n\n /**\n * The model to use\n */\n model: string\n\n /**\n * The parent message ID\n */\n parent_message_id: string\n}\n\nexport type Prompt = {\n /**\n * The content of the prompt\n */\n content: PromptContent\n\n /**\n * The ID of the prompt\n */\n id: string\n\n /**\n * The role played in the prompt\n */\n role: Role\n}\n\nexport type ContentType = 'text'\n\nexport type PromptContent = {\n /**\n * The content type of the prompt\n */\n content_type: ContentType\n\n /**\n * The parts to the prompt\n */\n parts: string[]\n}\n\nexport type ConversationResponseEvent = {\n message?: Message\n conversation_id?: string\n error?: string | null\n}\n\nexport type Message = {\n id: string\n content: MessageContent\n role: Role\n user: string | null\n create_time: string | null\n update_time: string | null\n end_turn: null\n weight: number\n recipient: string\n metadata: MessageMetadata\n}\n\nexport type MessageContent = {\n content_type: string\n parts: string[]\n}\n\nexport type MessageMetadata = any\n\nexport namespace openai {\n export interface CreateChatCompletionDeltaResponse {\n id: string\n object: 'chat.completion.chunk'\n created: number\n model: string\n choices: [\n {\n delta: {\n role: Role\n content?: string\n }\n index: number\n finish_reason: string | null\n }\n ]\n }\n\n /**\n *\n * @export\n * @interface ChatCompletionRequestMessage\n */\n export interface ChatCompletionRequestMessage {\n /**\n * The role of the author of this message.\n * @type {string}\n * @memberof ChatCompletionRequestMessage\n */\n role: ChatCompletionRequestMessageRoleEnum\n /**\n * The contents of the message\n * @type {string}\n * @memberof ChatCompletionRequestMessage\n */\n content: string\n /**\n * The name of the user in a multi-user chat\n * @type {string}\n * @memberof ChatCompletionRequestMessage\n */\n name?: string\n }\n export declare const ChatCompletionRequestMessageRoleEnum: {\n readonly System: 'system'\n readonly User: 'user'\n readonly Assistant: 'assistant'\n }\n export declare type ChatCompletionRequestMessageRoleEnum =\n (typeof ChatCompletionRequestMessageRoleEnum)[keyof typeof ChatCompletionRequestMessageRoleEnum]\n /**\n *\n * @export\n * @interface ChatCompletionResponseMessage\n */\n export interface ChatCompletionResponseMessage {\n /**\n * The role of the author of this message.\n * @type {string}\n * @memberof ChatCompletionResponseMessage\n */\n role: ChatCompletionResponseMessageRoleEnum\n /**\n * The contents of the message\n * @type {string}\n * @memberof ChatCompletionResponseMessage\n */\n content: string\n }\n export declare const ChatCompletionResponseMessageRoleEnum: {\n readonly System: 'system'\n readonly User: 'user'\n readonly Assistant: 'assistant'\n }\n export declare type ChatCompletionResponseMessageRoleEnum =\n (typeof ChatCompletionResponseMessageRoleEnum)[keyof typeof ChatCompletionResponseMessageRoleEnum]\n /**\n *\n * @export\n * @interface CreateChatCompletionRequest\n */\n export interface CreateChatCompletionRequest {\n /**\n * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.\n * @type {string}\n * @memberof CreateChatCompletionRequest\n */\n model: string\n /**\n * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).\n * @type {Array<ChatCompletionRequestMessage>}\n * @memberof CreateChatCompletionRequest\n */\n messages: Array<ChatCompletionRequestMessage>\n /**\n * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n temperature?: number | null\n /**\n * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n top_p?: number | null\n /**\n * How many chat completion choices to generate for each input message.\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n n?: number | null\n /**\n * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.\n * @type {boolean}\n * @memberof CreateChatCompletionRequest\n */\n stream?: boolean | null\n /**\n *\n * @type {CreateChatCompletionRequestStop}\n * @memberof CreateChatCompletionRequest\n */\n stop?: CreateChatCompletionRequestStop\n /**\n * The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n max_tokens?: number\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\\'s likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n presence_penalty?: number | null\n /**\n * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\\'s likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)\n * @type {number}\n * @memberof CreateChatCompletionRequest\n */\n frequency_penalty?: number | null\n /**\n * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n * @type {object}\n * @memberof CreateChatCompletionRequest\n */\n logit_bias?: object | null\n /**\n * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).\n * @type {string}\n * @memberof CreateChatCompletionRequest\n */\n user?: string\n }\n /**\n * @type CreateChatCompletionRequestStop\n * Up to 4 sequences where the API will stop generating further tokens.\n * @export\n */\n export declare type CreateChatCompletionRequestStop = Array<string> | string\n /**\n *\n * @export\n * @interface CreateChatCompletionResponse\n */\n export interface CreateChatCompletionResponse {\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponse\n */\n id: string\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponse\n */\n object: string\n /**\n *\n * @type {number}\n * @memberof CreateChatCompletionResponse\n */\n created: number\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponse\n */\n model: string\n /**\n *\n * @type {Array<CreateChatCompletionResponseChoicesInner>}\n * @memberof CreateChatCompletionResponse\n */\n choices: Array<CreateChatCompletionResponseChoicesInner>\n /**\n *\n * @type {CreateCompletionResponseUsage}\n * @memberof CreateChatCompletionResponse\n */\n usage?: CreateCompletionResponseUsage\n }\n /**\n *\n * @export\n * @interface CreateChatCompletionResponseChoicesInner\n */\n export interface CreateChatCompletionResponseChoicesInner {\n /**\n *\n * @type {number}\n * @memberof CreateChatCompletionResponseChoicesInner\n */\n index?: number\n /**\n *\n * @type {ChatCompletionResponseMessage}\n * @memberof CreateChatCompletionResponseChoicesInner\n */\n message?: ChatCompletionResponseMessage\n /**\n *\n * @type {string}\n * @memberof CreateChatCompletionResponseChoicesInner\n */\n finish_reason?: string\n }\n /**\n *\n * @export\n * @interface CreateCompletionResponseUsage\n */\n export interface CreateCompletionResponseUsage {\n /**\n *\n * @type {number}\n * @memberof CreateCompletionResponseUsage\n */\n prompt_tokens: number\n /**\n *\n * @type {number}\n * @memberof CreateCompletionResponseUsage\n */\n completion_tokens: number\n /**\n *\n * @type {number}\n * @memberof CreateCompletionResponseUsage\n */\n total_tokens: number\n }\n}\n","/// <reference lib=\"dom\" />\n\nconst fetch = globalThis.fetch\n\nexport { fetch }\n","import { createParser } from 'eventsource-parser'\n\nimport * as types from './types'\nimport { fetch as globalFetch } from './fetch'\nimport { streamAsyncIterable } from './stream-async-iterable'\n\nexport async function fetchSSE(\n url: string,\n options: Parameters<typeof fetch>[1] & {\n onMessage: (data: string) => void\n onError?: (error: any) => void\n },\n fetch: types.FetchFn = globalFetch\n) {\n const { onMessage, onError, ...fetchOptions } = options\n const res = await fetch(url, fetchOptions)\n if (!res.ok) {\n let reason: string\n\n try {\n reason = await res.text()\n } catch (err) {\n reason = res.statusText\n }\n\n const msg = `ChatGPT error ${res.status}: ${reason}`\n const error = new types.ChatGPTError(msg, { cause: res })\n error.statusCode = res.status\n error.statusText = res.statusText\n throw error\n }\n\n const parser = createParser((event) => {\n if (event.type === 'event') {\n onMessage(event.data)\n }\n })\n\n // handle special response errors\n const feed = (chunk: string) => {\n let response = null\n\n try {\n response = JSON.parse(chunk)\n } catch {\n // ignore\n }\n\n if (response?.detail?.type === 'invalid_request_error') {\n const msg = `ChatGPT error ${response.detail.message}: ${response.detail.code} (${response.detail.type})`\n const error = new types.ChatGPTError(msg, { cause: response })\n error.statusCode = response.detail.code\n error.statusText = response.detail.message\n\n if (onError) {\n onError(error)\n } else {\n console.error(error)\n }\n\n // don't feed to the event parser\n return\n }\n\n parser.feed(chunk)\n }\n\n if (!res.body.getReader) {\n // Vercel polyfills `fetch` with `node-fetch`, which doesn't conform to\n // web standards, so this is a workaround...\n const body: NodeJS.ReadableStream = res.body as any\n\n if (!body.on || !body.read) {\n throw new types.ChatGPTError('unsupported \"fetch\" implementation')\n }\n\n body.on('readable', () => {\n let chunk: string | Buffer\n while (null !== (chunk = body.read())) {\n feed(chunk.toString())\n }\n })\n } else {\n for await (const chunk of streamAsyncIterable(res.body)) {\n const str = new TextDecoder().decode(chunk)\n feed(str)\n }\n }\n}\n","export async function* streamAsyncIterable<T>(stream: ReadableStream<T>) {\n const reader = stream.getReader()\n try {\n while (true) {\n const { done, value } = await reader.read()\n if (done) {\n return\n }\n yield value\n }\n } finally {\n reader.releaseLock()\n }\n}\n","import pTimeout from 'p-timeout'\nimport { v4 as uuidv4 } from 'uuid'\n\nimport * as types from './types'\nimport { fetch as globalFetch } from './fetch'\nimport { fetchSSE } from './fetch-sse'\nimport { isValidUUIDv4 } from './utils'\n\nexport class ChatGPTUnofficialProxyAPI {\n protected _accessToken: string\n protected _apiReverseProxyUrl: string\n protected _debug: boolean\n protected _model: string\n protected _headers: Record<string, string>\n protected _fetch: types.FetchFn\n\n /**\n * @param fetch - Optional override for the `fetch` implementation to use. Defaults to the global `fetch` function.\n */\n constructor(opts: {\n accessToken: string\n\n /** @defaultValue `https://bypass.duti.tech/api/conversation` **/\n apiReverseProxyUrl?: string\n\n /** @defaultValue `text-davinci-002-render-sha` **/\n model?: string\n\n /** @defaultValue `false` **/\n debug?: boolean\n\n /** @defaultValue `undefined` **/\n headers?: Record<string, string>\n\n fetch?: types.FetchFn\n }) {\n const {\n accessToken,\n apiReverseProxyUrl = 'https://bypass.duti.tech/api/conversation',\n model = 'text-davinci-002-render-sha',\n debug = false,\n headers,\n fetch = globalFetch\n } = opts\n\n this._accessToken = accessToken\n this._apiReverseProxyUrl = apiReverseProxyUrl\n this._debug = !!debug\n this._model = model\n this._fetch = fetch\n this._headers = headers\n\n if (!this._accessToken) {\n throw new Error('ChatGPT invalid accessToken')\n }\n\n if (!this._fetch) {\n throw new Error('Invalid environment; fetch is not defined')\n }\n\n if (typeof this._fetch !== 'function') {\n throw new Error('Invalid \"fetch\" is not a function')\n }\n }\n\n get accessToken(): string {\n return this._accessToken\n }\n\n set accessToken(value: string) {\n this._accessToken = value\n }\n\n /**\n * Sends a message to ChatGPT, waits for the response to resolve, and returns\n * the response.\n *\n * If you want your response to have historical context, you must provide a valid `parentMessageId`.\n *\n * If you want to receive a stream of partial responses, use `opts.onProgress`.\n * If you want to receive the full response, including message and conversation IDs,\n * you can use `opts.onConversationResponse` or use the `ChatGPTAPI.getConversation`\n * helper.\n *\n * Set `debug: true` in the `ChatGPTAPI` constructor to log more info on the full prompt sent to the OpenAI completions API. You can override the `promptPrefix` and `promptSuffix` in `opts` to customize the prompt.\n *\n * @param message - The prompt message to send\n * @param opts.conversationId - Optional ID of a conversation to continue (defaults to a random UUID)\n * @param opts.parentMessageId - Optional ID of the previous message in the conversation (defaults to `undefined`)\n * @param opts.messageId - Optional ID of the message to send (defaults to a random UUID)\n * @param opts.timeoutMs - Optional timeout in milliseconds (defaults to no timeout)\n * @param opts.onProgress - Optional callback which will be invoked every time the partial response is updated\n * @param opts.abortSignal - Optional callback used to abort the underlying `fetch` call using an [AbortController](https://developer.mozilla.org/en-US/docs/Web/API/AbortController)\n *\n * @returns The response from ChatGPT\n */\n async sendMessage(\n text: string,\n opts: types.SendMessageBrowserOptions = {}\n ): Promise<types.ChatMessage> {\n if (!!opts.conversationId !== !!opts.parentMessageId) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: conversationId and parentMessageId must both be set or both be undefined'\n )\n }\n\n if (opts.conversationId && !isValidUUIDv4(opts.conversationId)) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: conversationId is not a valid v4 UUID'\n )\n }\n\n if (opts.parentMessageId && !isValidUUIDv4(opts.parentMessageId)) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: parentMessageId is not a valid v4 UUID'\n )\n }\n\n if (opts.messageId && !isValidUUIDv4(opts.messageId)) {\n throw new Error(\n 'ChatGPTUnofficialProxyAPI.sendMessage: messageId is not a valid v4 UUID'\n )\n }\n\n const {\n conversationId,\n parentMessageId = uuidv4(),\n messageId = uuidv4(),\n action = 'next',\n timeoutMs,\n onProgress\n } = opts\n\n let { abortSignal } = opts\n\n let abortController: AbortController = null\n if (timeoutMs && !abortSignal) {\n abortController = new AbortController()\n abortSignal = abortController.signal\n }\n\n const body: types.ConversationJSONBody = {\n action,\n messages: [\n {\n id: messageId,\n role: 'user',\n content: {\n content_type: 'text',\n parts: [text]\n }\n }\n ],\n model: this._model,\n parent_message_id: parentMessageId\n }\n\n if (conversationId) {\n body.conversation_id = conversationId\n }\n\n const result: types.ChatMessage = {\n role: 'assistant',\n id: uuidv4(),\n parentMessageId: messageId,\n conversationId,\n text: ''\n }\n\n const responseP = new Promise<types.ChatMessage>((resolve, reject) => {\n const url = this._apiReverseProxyUrl\n const headers = {\n ...this._headers,\n Authorization: `Bearer ${this._accessToken}`,\n Accept: 'text/event-stream',\n 'Content-Type': 'application/json'\n }\n\n if (this._debug) {\n console.log('POST', url, { body, headers })\n }\n\n fetchSSE(\n url,\n {\n method: 'POST',\n headers,\n body: JSON.stringify(body),\n signal: abortSignal,\n onMessage: (data: string) => {\n if (data === '[DONE]') {\n return resolve(result)\n }\n\n try {\n const convoResponseEvent: types.ConversationResponseEvent =\n JSON.parse(data)\n if (convoResponseEvent.conversation_id) {\n result.conversationId = convoResponseEvent.conversation_id\n }\n\n if (convoResponseEvent.message?.id) {\n result.id = convoResponseEvent.message.id\n }\n\n const message = convoResponseEvent.message\n // console.log('event', JSON.stringify(convoResponseEvent, null, 2))\n\n if (message) {\n let text = message?.content?.parts?.[0]\n\n if (text) {\n result.text = text\n\n if (onProgress) {\n onProgress(result)\n }\n }\n }\n } catch (err) {\n if (this._debug) {\n console.warn('chatgpt unexpected JSON error', err)\n }\n // reject(err)\n }\n },\n onError: (err) => {\n reject(err)\n }\n },\n this._fetch\n ).catch((err) => {\n const errMessageL = err.toString().toLowerCase()\n\n if (\n result.text &&\n (errMessageL === 'error: typeerror: terminated' ||\n errMessageL === 'typeerror: terminated')\n ) {\n // OpenAI sometimes forcefully terminates the socket from their end before\n // the HTTP request has resolved cleanly. In my testing, these cases tend to\n // happen when OpenAI has already send the last `response`, so we can ignore\n // the `fetch` error in this case.\n return resolve(result)\n } else {\n return reject(err)\n }\n })\n })\n\n if (timeoutMs) {\n if (abortController) {\n // This will be called when a timeout occurs in order for us to forcibly\n // ensure that the underlying HTTP request is aborted.\n ;(responseP as any).cancel = () => {\n abortController.abort()\n }\n }\n\n return pTimeout(responseP, {\n milliseconds: timeoutMs,\n message: 'ChatGPT timed out waiting for response'\n })\n } else {\n return responseP\n }\n }\n}\n","const uuidv4Re =\n /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i\n\nexport function isValidUUIDv4(str: string): boolean {\n return str && uuidv4Re.test(str)\n}\n"],"mappings":";AAAA,OAAO,UAAU;AACjB,OAAO,cAAc;AACrB,OAAO,cAAc;AACrB,SAAS,MAAM,cAAc;;;ACH7B,SAAS,mBAAmB;AAG5B,IAAM,YAAY,YAAY,aAAa;AAEpC,SAAS,OAAO,OAA4B;AACjD,SAAO,IAAI,YAAY,UAAU,OAAO,KAAK,CAAC;AAChD;;;AC0EO,IAAM,eAAN,cAA2B,MAAM;AAKxC;AAyGO,IAAU;AAAA,CAAV,CAAUA,YAAV;AAAA,GAAU;;;AC7LjB,IAAM,QAAQ,WAAW;;;ACFzB,SAAS,oBAAoB;;;ACA7B,gBAAuB,oBAAuB,QAA2B;AACvE,QAAM,SAAS,OAAO,UAAU;AAChC,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAC1C,UAAI,MAAM;AACR;AAAA,MACF;AACA,YAAM;AAAA,IACR;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;;;ADPA,eAAsB,SACpB,KACA,SAIAC,SAAuB,OACvB;AACA,QAAM,EAAE,WAAW,SAAS,GAAG,aAAa,IAAI;AAChD,QAAM,MAAM,MAAMA,OAAM,KAAK,YAAY;AACzC,MAAI,CAAC,IAAI,IAAI;AACX,QAAI;AAEJ,QAAI;AACF,eAAS,MAAM,IAAI,KAAK;AAAA,IAC1B,SAAS,KAAP;AACA,eAAS,IAAI;AAAA,IACf;AAEA,UAAM,MAAM,iBAAiB,IAAI,WAAW;AAC5C,UAAM,QAAQ,IAAU,aAAa,KAAK,EAAE,OAAO,IAAI,CAAC;AACxD,UAAM,aAAa,IAAI;AACvB,UAAM,aAAa,IAAI;AACvB,UAAM;AAAA,EACR;AAEA,QAAM,SAAS,aAAa,CAAC,UAAU;AACrC,QAAI,MAAM,SAAS,SAAS;AAC1B,gBAAU,MAAM,IAAI;AAAA,IACtB;AAAA,EACF,CAAC;AAGD,QAAM,OAAO,CAAC,UAAkB;AAvClC;AAwCI,QAAI,WAAW;AAEf,QAAI;AACF,iBAAW,KAAK,MAAM,KAAK;AAAA,IAC7B,QAAE;AAAA,IAEF;AAEA,UAAI,0CAAU,WAAV,mBAAkB,UAAS,yBAAyB;AACtD,YAAM,MAAM,iBAAiB,SAAS,OAAO,YAAY,SAAS,OAAO,SAAS,SAAS,OAAO;AAClG,YAAM,QAAQ,IAAU,aAAa,KAAK,EAAE,OAAO,SAAS,CAAC;AAC7D,YAAM,aAAa,SAAS,OAAO;AACnC,YAAM,aAAa,SAAS,OAAO;AAEnC,UAAI,SAAS;AACX,gBAAQ,KAAK;AAAA,MACf,OAAO;AACL,gBAAQ,MAAM,KAAK;AAAA,MACrB;AAGA;AAAA,IACF;AAEA,WAAO,KAAK,KAAK;AAAA,EACnB;AAEA,MAAI,CAAC,IAAI,KAAK,WAAW;AAGvB,UAAM,OAA8B,IAAI;AAExC,QAAI,CAAC,KAAK,MAAM,CAAC,KAAK,MAAM;AAC1B,YAAM,IAAU,aAAa,oCAAoC;AAAA,IACnE;AAEA,SAAK,GAAG,YAAY,MAAM;AACxB,UAAI;AACJ,aAAO,UAAU,QAAQ,KAAK,KAAK,IAAI;AACrC,aAAK,MAAM,SAAS,CAAC;AAAA,MACvB;AAAA,IACF,CAAC;AAAA,EACH,OAAO;AACL,qBAAiB,SAAS,oBAAoB,IAAI,IAAI,GAAG;AACvD,YAAM,MAAM,IAAI,YAAY,EAAE,OAAO,KAAK;AAC1C,WAAK,GAAG;AAAA,IACV;AAAA,EACF;AACF;;;AJ9EA,IAAM,gBAAgB;AAEtB,IAAM,qBAAqB;AAC3B,IAAM,0BAA0B;AAEzB,IAAM,aAAN,MAAiB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmCtB,YAAY,MAA+B;AACzC,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA,aAAa;AAAA,MACb,QAAQ;AAAA,MACR;AAAA,MACA;AAAA,MACA;AAAA,MACA,iBAAiB;AAAA,MACjB,oBAAoB;AAAA,MACpB;AAAA,MACA;AAAA,MACA,OAAAC,SAAQ;AAAA,IACV,IAAI;AAEJ,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,cAAc;AACnB,SAAK,SAAS,CAAC,CAAC;AAChB,SAAK,SAASA;AAEd,SAAK,oBAAoB;AAAA,MACvB,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO;AAAA,MACP,kBAAkB;AAAA,MAClB,GAAG;AAAA,IACL;AAEA,SAAK,iBAAiB;AAEtB,QAAI,KAAK,mBAAmB,QAAW;AACrC,YAAM,eAAc,oBAAI,KAAK,GAAE,YAAY,EAAE,MAAM,GAAG,EAAE,CAAC;AACzD,WAAK,iBAAiB;AAAA;AAAA,gBAA4I;AAAA,IACpK;AAEA,SAAK,kBAAkB;AACvB,SAAK,qBAAqB;AAE1B,SAAK,kBAAkB,kBAAkB,KAAK;AAC9C,SAAK,iBAAiB,iBAAiB,KAAK;AAE5C,QAAI,cAAc;AAChB,WAAK,gBAAgB;AAAA,IACvB,OAAO;AACL,WAAK,gBAAgB,IAAI,KAA6B;AAAA,QACpD,OAAO,IAAI,SAAoC,EAAE,SAAS,IAAM,CAAC;AAAA,MACnE,CAAC;AAAA,IACH;AAEA,QAAI,CAAC,KAAK,SAAS;AACjB,YAAM,IAAI,MAAM,gCAAgC;AAAA,IAClD;AAEA,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,MAAM,2CAA2C;AAAA,IAC7D;AAEA,QAAI,OAAO,KAAK,WAAW,YAAY;AACrC,YAAM,IAAI,MAAM,mCAAmC;AAAA,IACrD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,YACJ,MACA,OAAiC,CAAC,GACN;AAC5B,UAAM;AAAA,MACJ;AAAA,MACA,YAAY,OAAO;AAAA,MACnB;AAAA,MACA;AAAA,MACA,SAAS,aAAa,OAAO;AAAA,MAC7B;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,QAAI,EAAE,YAAY,IAAI;AAEtB,QAAI,kBAAmC;AACvC,QAAI,aAAa,CAAC,aAAa;AAC7B,wBAAkB,IAAI,gBAAgB;AACtC,oBAAc,gBAAgB;AAAA,IAChC;AAEA,UAAM,UAA6B;AAAA,MACjC,MAAM;AAAA,MACN,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,UAAM,iBAAiB;AAEvB,UAAM,EAAE,UAAU,WAAW,UAAU,IAAI,MAAM,KAAK;AAAA,MACpD;AAAA,MACA;AAAA,IACF;AAEA,UAAM,SAA4B;AAAA,MAChC,MAAM;AAAA,MACN,IAAI,OAAO;AAAA,MACX;AAAA,MACA,iBAAiB;AAAA,MACjB,MAAM;AAAA,IACR;AAEA,UAAM,YAAY,IAAI;AAAA,MACpB,OAAO,SAAS,WAAW;AAtLjC;AAuLQ,cAAM,MAAM,GAAG,KAAK;AACpB,cAAM,UAAU;AAAA,UACd,gBAAgB;AAAA,UAChB,eAAe,UAAU,KAAK;AAAA,QAChC;AACA,cAAM,OAAO;AAAA,UACX,YAAY;AAAA,UACZ,GAAG,KAAK;AAAA,UACR,GAAG;AAAA,UACH;AAAA,UACA;AAAA,QACF;AAIA,YAAI,KAAK,SAAS;AAChB,kBAAQ,qBAAqB,IAAI,KAAK;AAAA,QACxC;AAEA,YAAI,KAAK,QAAQ;AACf,kBAAQ,IAAI,gBAAgB,qBAAqB,IAAI;AAAA,QACvD;AAEA,YAAI,QAAQ;AACV;AAAA,YACE;AAAA,YACA;AAAA,cACE,QAAQ;AAAA,cACR;AAAA,cACA,MAAM,KAAK,UAAU,IAAI;AAAA,cACzB,QAAQ;AAAA,cACR,WAAW,CAAC,SAAiB;AAtN3C,oBAAAC;AAuNgB,oBAAI,SAAS,UAAU;AACrB,yBAAO,OAAO,OAAO,KAAK,KAAK;AAC/B,yBAAO,QAAQ,MAAM;AAAA,gBACvB;AAEA,oBAAI;AACF,wBAAM,WACJ,KAAK,MAAM,IAAI;AAEjB,sBAAI,SAAS,IAAI;AACf,2BAAO,KAAK,SAAS;AAAA,kBACvB;AAEA,uBAAIA,MAAA,SAAS,YAAT,gBAAAA,IAAkB,QAAQ;AAC5B,0BAAM,QAAQ,SAAS,QAAQ,CAAC,EAAE;AAClC,2BAAO,QAAQ,MAAM;AACrB,wBAAI,+BAAO;AAAS,6BAAO,QAAQ,MAAM;AAEzC,wBAAI,MAAM,MAAM;AACd,6BAAO,OAAO,MAAM;AAAA,oBACtB;AAEA,2BAAO,SAAS;AAChB,6DAAa;AAAA,kBACf;AAAA,gBACF,SAAS,KAAP;AACA,0BAAQ,KAAK,4CAA4C,GAAG;AAC5D,yBAAO,OAAO,GAAG;AAAA,gBACnB;AAAA,cACF;AAAA,YACF;AAAA,YACA,KAAK;AAAA,UACP,EAAE,MAAM,MAAM;AAAA,QAChB,OAAO;AACL,cAAI;AACF,kBAAM,MAAM,MAAM,KAAK,OAAO,KAAK;AAAA,cACjC,QAAQ;AAAA,cACR;AAAA,cACA,MAAM,KAAK,UAAU,IAAI;AAAA,cACzB,QAAQ;AAAA,YACV,CAAC;AAED,gBAAI,CAAC,IAAI,IAAI;AACX,oBAAM,SAAS,MAAM,IAAI,KAAK;AAC9B,oBAAM,MAAM,gBACV,IAAI,UAAU,IAAI,eACf;AACL,oBAAM,QAAQ,IAAU,aAAa,KAAK,EAAE,OAAO,IAAI,CAAC;AACxD,oBAAM,aAAa,IAAI;AACvB,oBAAM,aAAa,IAAI;AACvB,qBAAO,OAAO,KAAK;AAAA,YACrB;AAEA,kBAAM,WACJ,MAAM,IAAI,KAAK;AACjB,gBAAI,KAAK,QAAQ;AACf,sBAAQ,IAAI,QAAQ;AAAA,YACtB;AAEA,gBAAI,qCAAU,IAAI;AAChB,qBAAO,KAAK,SAAS;AAAA,YACvB;AAEA,iBAAI,0CAAU,YAAV,mBAAmB,QAAQ;AAC7B,oBAAMC,WAAU,SAAS,QAAQ,CAAC,EAAE;AACpC,qBAAO,OAAOA,SAAQ;AACtB,kBAAIA,SAAQ,MAAM;AAChB,uBAAO,OAAOA,SAAQ;AAAA,cACxB;AAAA,YACF,OAAO;AACL,oBAAMC,OAAM;AACZ,qBAAO;AAAA,gBACL,IAAI;AAAA,kBACF,mBACE,KAAAA,QAAA,gBAAAA,KAAK,WAAL,mBAAa,aAAWA,QAAA,gBAAAA,KAAK,WAAU;AAAA,gBAE3C;AAAA,cACF;AAAA,YACF;AAEA,mBAAO,SAAS;AAEhB,mBAAO,QAAQ,MAAM;AAAA,UACvB,SAAS,KAAP;AACA,mBAAO,OAAO,GAAG;AAAA,UACnB;AAAA,QACF;AAAA,MACF;AAAA,IACF,EAAE,KAAK,OAAOD,aAAY;AACxB,UAAIA,SAAQ,UAAU,CAACA,SAAQ,OAAO,OAAO;AAC3C,YAAI;AACF,gBAAM,eAAe;AACrB,gBAAM,mBAAmB,MAAM,KAAK,eAAeA,SAAQ,IAAI;AAC/D,UAAAA,SAAQ,OAAO,QAAQ;AAAA,YACrB,eAAe;AAAA,YACf,mBAAmB;AAAA,YACnB,cAAc,eAAe;AAAA,YAC7B,WAAW;AAAA,UACb;AAAA,QACF,SAAS,KAAP;AAAA,QAGF;AAAA,MACF;AAEA,aAAO,QAAQ,IAAI;AAAA,QACjB,KAAK,eAAe,cAAc;AAAA,QAClC,KAAK,eAAeA,QAAO;AAAA,MAC7B,CAAC,EAAE,KAAK,MAAMA,QAAO;AAAA,IACvB,CAAC;AAED,QAAI,WAAW;AACb,UAAI,iBAAiB;AAGnB;AAAC,QAAC,UAAkB,SAAS,MAAM;AACjC,0BAAgB,MAAM;AAAA,QACxB;AAAA,MACF;AAEA,aAAO,SAAS,WAAW;AAAA,QACzB,cAAc;AAAA,QACd,SAAS;AAAA,MACX,CAAC;AAAA,IACH,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,IAAI,SAAiB;AACnB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,OAAO,QAAgB;AACzB,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,IAAI,SAAiB;AACnB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,OAAO,QAAgB;AACzB,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,MAAgB,eAAe,MAAc,MAAgC;AAC3E,UAAM,EAAE,gBAAgB,KAAK,eAAe,IAAI;AAChD,QAAI,EAAE,gBAAgB,IAAI;AAE1B,UAAM,YAAY;AAClB,UAAM,iBAAiB;AAEvB,UAAM,eAAe,KAAK,kBAAkB,KAAK;AACjD,QAAI,WAAwD,CAAC;AAE7D,QAAI,eAAe;AACjB,eAAS,KAAK;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,MACX,CAAC;AAAA,IACH;AAEA,UAAM,sBAAsB,SAAS;AACrC,QAAI,eAAe,OACf,SAAS,OAAO;AAAA,MACd;AAAA,QACE,MAAM;AAAA,QACN,SAAS;AAAA,QACT,MAAM,KAAK;AAAA,MACb;AAAA,IACF,CAAC,IACD;AACJ,QAAI,YAAY;AAEhB,OAAG;AACD,YAAM,SAAS,aACZ,OAAO,CAACE,SAAQ,YAAY;AAC3B,gBAAQ,QAAQ,MAAM;AAAA,UACpB,KAAK;AACH,mBAAOA,QAAO,OAAO,CAAC;AAAA,EAAkB,QAAQ,SAAS,CAAC;AAAA,UAC5D,KAAK;AACH,mBAAOA,QAAO,OAAO,CAAC,GAAG;AAAA,EAAe,QAAQ,SAAS,CAAC;AAAA,UAC5D;AACE,mBAAOA,QAAO,OAAO,CAAC,GAAG;AAAA,EAAoB,QAAQ,SAAS,CAAC;AAAA,QACnE;AAAA,MACF,GAAG,CAAC,CAAa,EAChB,KAAK,MAAM;AAEd,YAAM,wBAAwB,MAAM,KAAK,eAAe,MAAM;AAC9D,YAAM,gBAAgB,yBAAyB;AAE/C,UAAI,UAAU,CAAC,eAAe;AAC5B;AAAA,MACF;AAEA,iBAAW;AACX,kBAAY;AAEZ,UAAI,CAAC,eAAe;AAClB;AAAA,MACF;AAEA,UAAI,CAAC,iBAAiB;AACpB;AAAA,MACF;AAEA,YAAM,gBAAgB,MAAM,KAAK,gBAAgB,eAAe;AAChE,UAAI,CAAC,eAAe;AAClB;AAAA,MACF;AAEA,YAAM,oBAAoB,cAAc,QAAQ;AAEhD,qBAAe,aAAa,MAAM,GAAG,mBAAmB,EAAE,OAAO;AAAA,QAC/D;AAAA,UACE,MAAM;AAAA,UACN,SAAS,cAAc;AAAA,UACvB,MAAM,cAAc;AAAA,QACtB;AAAA,QACA,GAAG,aAAa,MAAM,mBAAmB;AAAA,MAC3C,CAAC;AAED,wBAAkB,cAAc;AAAA,IAClC,SAAS;AAIT,UAAM,YAAY,KAAK;AAAA,MACrB;AAAA,MACA,KAAK,IAAI,KAAK,kBAAkB,WAAW,KAAK,kBAAkB;AAAA,IACpE;AAEA,WAAO,EAAE,UAAU,WAAW,UAAU;AAAA,EAC1C;AAAA,EAEA,MAAgB,eAAe,MAAc;AAE3C,WAAO,KAAK,QAAQ,oBAAoB,EAAE;AAE1C,WAAiB,OAAO,IAAI,EAAE;AAAA,EAChC;AAAA,EAEA,MAAgB,uBACd,IAC4B;AAC5B,UAAM,MAAM,MAAM,KAAK,cAAc,IAAI,EAAE;AAC3C,WAAO;AAAA,EACT;AAAA,EAEA,MAAgB,sBACd,SACe;AACf,UAAM,KAAK,cAAc,IAAI,QAAQ,IAAI,OAAO;AAAA,EAClD;AACF;;;AMrdA,OAAOC,eAAc;AACrB,SAAS,MAAMC,eAAc;;;ACD7B,IAAM,WACJ;AAEK,SAAS,cAAc,KAAsB;AAClD,SAAO,OAAO,SAAS,KAAK,GAAG;AACjC;;;ADGO,IAAM,4BAAN,MAAgC;AAAA;AAAA;AAAA;AAAA,EAWrC,YAAY,MAgBT;AACD,UAAM;AAAA,MACJ;AAAA,MACA,qBAAqB;AAAA,MACrB,QAAQ;AAAA,MACR,QAAQ;AAAA,MACR;AAAA,MACA,OAAAC,SAAQ;AAAA,IACV,IAAI;AAEJ,SAAK,eAAe;AACpB,SAAK,sBAAsB;AAC3B,SAAK,SAAS,CAAC,CAAC;AAChB,SAAK,SAAS;AACd,SAAK,SAASA;AACd,SAAK,WAAW;AAEhB,QAAI,CAAC,KAAK,cAAc;AACtB,YAAM,IAAI,MAAM,6BAA6B;AAAA,IAC/C;AAEA,QAAI,CAAC,KAAK,QAAQ;AAChB,YAAM,IAAI,MAAM,2CAA2C;AAAA,IAC7D;AAEA,QAAI,OAAO,KAAK,WAAW,YAAY;AACrC,YAAM,IAAI,MAAM,mCAAmC;AAAA,IACrD;AAAA,EACF;AAAA,EAEA,IAAI,cAAsB;AACxB,WAAO,KAAK;AAAA,EACd;AAAA,EAEA,IAAI,YAAY,OAAe;AAC7B,SAAK,eAAe;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,MAAM,YACJ,MACA,OAAwC,CAAC,GACb;AAC5B,QAAI,CAAC,CAAC,KAAK,mBAAmB,CAAC,CAAC,KAAK,iBAAiB;AACpD,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI,KAAK,kBAAkB,CAAC,cAAc,KAAK,cAAc,GAAG;AAC9D,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI,KAAK,mBAAmB,CAAC,cAAc,KAAK,eAAe,GAAG;AAChE,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI,KAAK,aAAa,CAAC,cAAc,KAAK,SAAS,GAAG;AACpD,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,UAAM;AAAA,MACJ;AAAA,MACA,kBAAkBC,QAAO;AAAA,MACzB,YAAYA,QAAO;AAAA,MACnB,SAAS;AAAA,MACT;AAAA,MACA;AAAA,IACF,IAAI;AAEJ,QAAI,EAAE,YAAY,IAAI;AAEtB,QAAI,kBAAmC;AACvC,QAAI,aAAa,CAAC,aAAa;AAC7B,wBAAkB,IAAI,gBAAgB;AACtC,oBAAc,gBAAgB;AAAA,IAChC;AAEA,UAAM,OAAmC;AAAA,MACvC;AAAA,MACA,UAAU;AAAA,QACR;AAAA,UACE,IAAI;AAAA,UACJ,MAAM;AAAA,UACN,SAAS;AAAA,YACP,cAAc;AAAA,YACd,OAAO,CAAC,IAAI;AAAA,UACd;AAAA,QACF;AAAA,MACF;AAAA,MACA,OAAO,KAAK;AAAA,MACZ,mBAAmB;AAAA,IACrB;AAEA,QAAI,gBAAgB;AAClB,WAAK,kBAAkB;AAAA,IACzB;AAEA,UAAM,SAA4B;AAAA,MAChC,MAAM;AAAA,MACN,IAAIA,QAAO;AAAA,MACX,iBAAiB;AAAA,MACjB;AAAA,MACA,MAAM;AAAA,IACR;AAEA,UAAM,YAAY,IAAI,QAA2B,CAAC,SAAS,WAAW;AACpE,YAAM,MAAM,KAAK;AACjB,YAAM,UAAU;AAAA,QACd,GAAG,KAAK;AAAA,QACR,eAAe,UAAU,KAAK;AAAA,QAC9B,QAAQ;AAAA,QACR,gBAAgB;AAAA,MAClB;AAEA,UAAI,KAAK,QAAQ;AACf,gBAAQ,IAAI,QAAQ,KAAK,EAAE,MAAM,QAAQ,CAAC;AAAA,MAC5C;AAEA;AAAA,QACE;AAAA,QACA;AAAA,UACE,QAAQ;AAAA,UACR;AAAA,UACA,MAAM,KAAK,UAAU,IAAI;AAAA,UACzB,QAAQ;AAAA,UACR,WAAW,CAAC,SAAiB;AA7LvC;AA8LY,gBAAI,SAAS,UAAU;AACrB,qBAAO,QAAQ,MAAM;AAAA,YACvB;AAEA,gBAAI;AACF,oBAAM,qBACJ,KAAK,MAAM,IAAI;AACjB,kBAAI,mBAAmB,iBAAiB;AACtC,uBAAO,iBAAiB,mBAAmB;AAAA,cAC7C;AAEA,mBAAI,wBAAmB,YAAnB,mBAA4B,IAAI;AAClC,uBAAO,KAAK,mBAAmB,QAAQ;AAAA,cACzC;AAEA,oBAAM,UAAU,mBAAmB;AAGnC,kBAAI,SAAS;AACX,oBAAIC,SAAO,8CAAS,YAAT,mBAAkB,UAAlB,mBAA0B;AAErC,oBAAIA,OAAM;AACR,yBAAO,OAAOA;AAEd,sBAAI,YAAY;AACd,+BAAW,MAAM;AAAA,kBACnB;AAAA,gBACF;AAAA,cACF;AAAA,YACF,SAAS,KAAP;AACA,kBAAI,KAAK,QAAQ;AACf,wBAAQ,KAAK,iCAAiC,GAAG;AAAA,cACnD;AAAA,YAEF;AAAA,UACF;AAAA,UACA,SAAS,CAAC,QAAQ;AAChB,mBAAO,GAAG;AAAA,UACZ;AAAA,QACF;AAAA,QACA,KAAK;AAAA,MACP,EAAE,MAAM,CAAC,QAAQ;AACf,cAAM,cAAc,IAAI,SAAS,EAAE,YAAY;AAE/C,YACE,OAAO,SACN,gBAAgB,kCACf,gBAAgB,0BAClB;AAKA,iBAAO,QAAQ,MAAM;AAAA,QACvB,OAAO;AACL,iBAAO,OAAO,GAAG;AAAA,QACnB;AAAA,MACF,CAAC;AAAA,IACH,CAAC;AAED,QAAI,WAAW;AACb,UAAI,iBAAiB;AAGnB;AAAC,QAAC,UAAkB,SAAS,MAAM;AACjC,0BAAgB,MAAM;AAAA,QACxB;AAAA,MACF;AAEA,aAAOC,UAAS,WAAW;AAAA,QACzB,cAAc;AAAA,QACd,SAAS;AAAA,MACX,CAAC;AAAA,IACH,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AACF;","names":["openai","fetch","fetch","_a","message","res","prompt","pTimeout","uuidv4","fetch","uuidv4","text","pTimeout"]}