Merge ChatGPT-Next-Web v2.9.3 into AIChatWeb v0.9-pro #42

Open · wants to merge 12 commits into base: pro-v0.9
34 changes: 0 additions & 34 deletions .env.template

This file was deleted.

3 changes: 2 additions & 1 deletion .gitignore
@@ -43,4 +43,5 @@ dev
 .env
 
 *.key
-*.key.pub
\ No newline at end of file
+*.key.pub
+*.zip
4 changes: 0 additions & 4 deletions .husky/pre-commit

This file was deleted.

9 changes: 5 additions & 4 deletions app/api/common.ts
@@ -1,7 +1,7 @@
 import { NextRequest, NextResponse } from "next/server";
 
-export const OPENAI_URL = "api.openai.com";
-const DEFAULT_PROTOCOL = "https";
+export const OPENAI_URL = "aichat-admin:8080";
+const DEFAULT_PROTOCOL = "http";
 const PROTOCOL = process.env.PROTOCOL ?? DEFAULT_PROTOCOL;
 const BASE_URL = process.env.BASE_URL ?? OPENAI_URL;
 const DISABLE_GPT4 = !!process.env.DISABLE_GPT4;
@@ -10,7 +10,7 @@ export async function requestOpenai(req: NextRequest) {
   const controller = new AbortController();
   const authValue = req.headers.get("Authorization") ?? "";
   const openaiPath = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll(
-    "/api/",
+    "/api/openai/",
     "",
   );
 
@@ -87,6 +87,7 @@ export async function request(req: NextRequest) {
 
   try {
     console.log(`url = ${baseUrl}/${uri}`);
+    console.log(`uri = ${uri}`);
     const res = await fetch(`${baseUrl}/${uri}`, {
       headers: {
         "Content-Type": "application/json",
@@ -123,4 +124,4 @@ export interface Response<T> {
   message: string;
 
   data: T;
-}
\ No newline at end of file
+}
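
Note on app/api/common.ts: the proxy default now targets the internal aichat-admin service over plain HTTP instead of api.openai.com (the PROTOCOL and BASE_URL env vars still override), and the path rewrite strips "/api/openai/" rather than "/api/" to match the new client route. A minimal sketch of the resulting URL resolution, using only the defaults from this diff; the example request path is illustrative:

// Illustrative sketch, not part of the diff: how requestOpenai()
// resolves its upstream URL with the new defaults.
const PROTOCOL = process.env.PROTOCOL ?? "http";
const BASE_URL = process.env.BASE_URL ?? "aichat-admin:8080";
// A client call to /api/openai/v1/chat/completions loses its prefix:
const openaiPath = "/api/openai/v1/chat/completions".replaceAll(
  "/api/openai/",
  "",
);
console.log(`${PROTOCOL}://${BASE_URL}/${openaiPath}`);
// -> http://aichat-admin:8080/v1/chat/completions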
2 changes: 1 addition & 1 deletion app/api/config/route.ts
@@ -9,7 +9,7 @@ const serverConfig = getServerSideConfig();
 const DANGER_CONFIG = {
   needCode: serverConfig.needCode,
   hideUserApiKey: serverConfig.hideUserApiKey,
-  enableGPT4: serverConfig.enableGPT4,
+  disableGPT4: serverConfig.disableGPT4,
   hideBalanceQuery: serverConfig.hideBalanceQuery,
 };
 
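
Note on app/api/config/route.ts: the exposed flag flips from enableGPT4 to disableGPT4, matching the DISABLE_GPT4 env switch in common.ts. A hedged sketch of client-side consumption; only the disableGPT4 field is confirmed by this diff, the endpoint shape and method are assumptions:

// Hypothetical consumer sketch; field name taken from the diff.
async function fetchDangerConfig(): Promise<void> {
  const res = await fetch("/api/config", { method: "POST" });
  const config = (await res.json()) as { disableGPT4: boolean };
  if (config.disableGPT4) {
    // e.g. hide GPT-4 entries from the model picker
  }
}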
37 changes: 16 additions & 21 deletions app/api/openai/[...path]/route.ts
@@ -16,27 +16,22 @@ async function handle(
     return NextResponse.json({ body: "OK" }, { status: 200 });
   }
 
-  // const subpath = params.path.join("/");
-
-  // if (!ALLOWD_PATH.has(subpath)) {
-  //   console.log("[OpenAI Route] forbidden path ", subpath);
-  //   return NextResponse.json(
-  //     {
-  //       error: true,
-  //       msg: "you are not allowed to request " + subpath,
-  //     },
-  //     {
-  //       status: 403,
-  //     },
-  //   );
-  // }
-
-  // const authResult = auth(req);
-  // if (authResult.error) {
-  //   return NextResponse.json(authResult, {
-  //     status: 401,
-  //   });
-  // }
+  const subpath = params.path.join("/");
+
+  if (!ALLOWD_PATH.has(subpath)) {
+    console.log("[OpenAI Route] forbidden path ", subpath);
+    return NextResponse.json(
+      {
+        error: true,
+        msg: "you are not allowed to request " + subpath,
+      },
+      {
+        status: 403,
+      },
+    );
+  }
+
+
 
   try {
     return await requestOpenai(req);
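
Note on app/api/openai/[...path]/route.ts: the previously commented-out path allowlist is restored, while the commented-out auth() check is deleted outright, so request authentication must now happen elsewhere (presumably in the aichat-admin backend this fork proxies to). For context, ALLOWD_PATH (the spelling is upstream's) is a Set built from the OpenaiPath constants in upstream ChatGPT-Next-Web; its definition sits outside this diff, so the following is a hedged reference rather than part of the change:

// Hedged reference (matches upstream ChatGPT-Next-Web; not visible
// in this diff): the allowlist the restored check consults.
import { OpenaiPath } from "@/app/constant";
const ALLOWD_PATH = new Set(Object.values(OpenaiPath));
// ALLOWD_PATH.has("v1/chat/completions") -> true for OpenaiPath.ChatPath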
7 changes: 6 additions & 1 deletion app/client/api.ts
@@ -23,7 +23,6 @@ export interface RequestMessage {
 export interface LLMConfig {
   model: string;
   temperature?: number;
-  top_p?: number;
   stream?: boolean;
   presence_penalty?: number;
   frequency_penalty?: number;
@@ -45,9 +44,15 @@ export interface LLMUsage {
   total: number;
 }
 
+export interface LLMModel {
+  name: string;
+  available: boolean;
+}
+
 export abstract class LLMApi {
   abstract chat(options: ChatOptions): Promise<void>;
   abstract usage(): Promise<LLMUsage>;
+  abstract models(): Promise<LLMModel[]>;
 }
 
 type ProviderName = "openai" | "azure" | "claude" | "palm";
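
Note on app/client/api.ts: top_p is dropped from LLMConfig, and every LLMApi implementation must now provide models(). A short usage sketch of the new contract; the helper itself is illustrative, the types are from this diff:

// Illustrative helper showing the new models() contract in use.
async function listAvailableModels(api: LLMApi): Promise<string[]> {
  const models: LLMModel[] = await api.models();
  return models.filter((m) => m.available).map((m) => m.name);
}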
18 changes: 9 additions & 9 deletions app/client/controller.ts
@@ -3,17 +3,17 @@ export const ChatControllerPool = {
   controllers: {} as Record<string, AbortController>,
 
   addController(
-    sessionIndex: number,
-    messageId: number,
+    sessionId: string,
+    messageId: string,
     controller: AbortController,
   ) {
-    const key = this.key(sessionIndex, messageId);
+    const key = this.key(sessionId, messageId);
     this.controllers[key] = controller;
     return key;
   },
 
-  stop(sessionIndex: number, messageId: number) {
-    const key = this.key(sessionIndex, messageId);
+  stop(sessionId: string, messageId: string) {
+    const key = this.key(sessionId, messageId);
     const controller = this.controllers[key];
     controller?.abort();
   },
@@ -26,12 +26,12 @@ export const ChatControllerPool = {
     return Object.values(this.controllers).length > 0;
   },
 
-  remove(sessionIndex: number, messageId: number) {
-    const key = this.key(sessionIndex, messageId);
+  remove(sessionId: string, messageId: string) {
+    const key = this.key(sessionId, messageId);
     delete this.controllers[key];
   },
 
-  key(sessionIndex: number, messageIndex: number) {
-    return `${sessionIndex},${messageIndex}`;
+  key(sessionId: string, messageIndex: string) {
+    return `${sessionId},${messageIndex}`;
  },
 };
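
Note on app/client/controller.ts: pool keys switch from numeric session/message indexes to string ids, so a pending request stays addressable even after sessions are reordered or deleted. Usage sketch; the ids are hypothetical:

// Hypothetical ids; shows the string-keyed pool in use.
const controller = new AbortController();
ChatControllerPool.addController("session-uuid", "message-uuid", controller);
// later, e.g. when the user hits "stop generating":
ChatControllerPool.stop("session-uuid", "message-uuid");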
89 changes: 58 additions & 31 deletions app/client/platforms/openai.ts
@@ -1,37 +1,54 @@
 import {
+  DEFAULT_API_HOST,
+  DEFAULT_MODELS,
   OpenaiPath,
   REQUEST_TIMEOUT_MS,
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 
-import { ChatOptions, getHeaders, LLMApi, LLMUsage } from "../api";
+import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import Locale from "../../locales";
 import {
   EventStreamContentType,
   fetchEventSource,
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
+import { getClientConfig } from "@/app/config/client";
 
+export interface OpenAIListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
 export class ChatGPTApi implements LLMApi {
+  private disableListModels = true;
+
   path(path: string): string {
-    const BASE_URL = process.env.BASE_URL;
-    const mode = process.env.BUILD_MODE;
-    let baseUrl =
-      mode === "export" ? (BASE_URL ?? DEFAULT_API_HOST) + "/api" : "/api";
+    let openaiUrl = useAccessStore.getState().openaiUrl;
+    const apiPath = "/api/openai";
 
-    if (baseUrl.endsWith("/")) {
-      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    if (openaiUrl.length === 0) {
+      const isApp = !!getClientConfig()?.isApp;
+      openaiUrl = isApp ? DEFAULT_API_HOST : apiPath;
+    }
+    if (openaiUrl.endsWith("/")) {
+      openaiUrl = openaiUrl.slice(0, openaiUrl.length - 1);
     }
-    return [baseUrl, path].join("/");
+    if (!openaiUrl.startsWith("http") && !openaiUrl.startsWith(apiPath)) {
+      openaiUrl = "https://" + openaiUrl;
+    }
+    return [openaiUrl, path].join("/");
   }
 
   extractMessage(res: any) {
     return res.choices?.at(0)?.message?.content ?? "";
   }
 
   async chat(options: ChatOptions) {
-    const plugins = options.plugins;
     const messages = options.messages.map((v) => ({
       role: v.role,
       content: v.content,
@@ -52,26 +69,16 @@ export class ChatGPTApi implements LLMApi {
       temperature: modelConfig.temperature,
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
-      plugins: plugins.map((p) => {
-        return {
-          id: p.plugin.id,
-          uuid: p.plugin.uuid,
-          name: p.plugin.name,
-          value: p.value,
-        };
-      }),
     };
 
     console.log("[Request] openai payload: ", requestPayload);
 
     const shouldStream = !!options.config.stream;
-    console.log("shouldStream", shouldStream);
     const controller = new AbortController();
     options.onController?.(controller);
 
     try {
       const chatPath = this.path(OpenaiPath.ChatPath);
-      console.log("chatPath", chatPath);
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
@@ -122,13 +129,10 @@
       ) {
         const responseTexts = [responseText];
         let extraInfo = await res.clone().text();
-        // console.log('extraInfo', extraInfo)
-        // try {
-        //   const resJson = await res.clone().json();
-        //   console.log('resJson', resJson)
-        //   extraInfo = prettyObject(resJson);
-        //   console.log('extraInfo', extraInfo)
-        // } catch {}
+        try {
+          const resJson = await res.clone().json();
+          extraInfo = prettyObject(resJson);
+        } catch {}
 
         if (res.status === 401) {
           responseTexts.push(Locale.Error.Unauthorized);
@@ -148,9 +152,6 @@
           return finish();
         }
         const text = msg.data;
-        if (text === "") {
-          return;
-        }
         try {
           const json = JSON.parse(text);
           const delta = json.choices[0].delta.content;
@@ -180,7 +181,7 @@
         options.onFinish(message);
       }
     } catch (e) {
-      console.log("[Request] failed to make a chat reqeust", e);
+      console.log("[Request] failed to make a chat request", e);
       options.onError?.(e as Error);
     }
   }
@@ -249,5 +250,31 @@
       total: total.hard_limit_usd,
     } as LLMUsage;
   }
+
+  async models(): Promise<LLMModel[]> {
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(OpenaiPath.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as OpenAIListModelResponse;
+    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+    }));
+  }
 }
-export { OpenaiPath };
\ No newline at end of file
+export { OpenaiPath };
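
Note on app/client/platforms/openai.ts: path() now resolves against the user-configurable openaiUrl from the access store (falling back to DEFAULT_API_HOST in the desktop app, or the /api/openai proxy path on web), models() is added behind a disableListModels = true switch, and the debug logging and empty-message early return are cleaned up. One review flag worth stating: this hunk also removes the plugins field from the chat payload, so the pro plugin feature is silently dropped by the merge unless it is reinstated elsewhere. A sketch of the shipped models() behavior; the wrapper function is illustrative:

// Illustrative: with disableListModels = true, models() returns a copy
// of the static DEFAULT_MODELS list and performs no network fetch.
async function demo() {
  const api = new ChatGPTApi();
  const models = await api.models(); // DEFAULT_MODELS.slice()
  console.log(models.map((m) => m.name));
}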