import OpenAI from "openai";
import storage from 'node-persist'
import { sendMsg } from "../lib/qq";
import prompt from './prompt.txt'
import type { ResponseFunctionToolCall, ResponseOutputMessage } from "openai/resources/responses/responses.mjs";

// `MessageData` is used below but never imported or declared. The shape here
// is inferred from how it is used (an assumption); swap in the real type from
// the QQ/OneBot client if one exists.
type MessageData =
  | { type: 'text'; data: { text: string } }
  | { type: 'image'; data: { url: string } };

await storage.init();
// storage.clear();

const client = new OpenAI({
  baseURL: process.env.OPENAI_BASE_URL,
  apiKey: process.env.OPENAI_API_KEY,
  // logLevel: "debug"
})

// Only the hosted web_search tool is enabled; it runs on OpenAI's servers, so
// no local function handler is needed.
const tools: OpenAI.Responses.Tool[] = [{ type: 'web_search' }]

/**
 * Turn an error object into a concise, readable string so an uncaught
 * exception cannot crash the process.
 */
function formatError(err: any): string {
  try {
    if (!err) return 'Unknown error';
    if (typeof err === 'string') return err;
    const msg = err.message || err.toString?.() || Object.prototype.toString.call(err);
    const status = err.status ?? err.statusCode ?? err.response?.status;
    const detail = err.error?.message
      ?? err.response?.data?.error?.message
      ?? err.response?.data?.message
      ?? err.data?.error?.message
      ?? '';
    const parts = [] as string[];
    if (status) parts.push(`HTTP ${status}`);
    if (msg) parts.push(String(msg));
    if (detail && detail !== msg) parts.push(`Detail: ${String(detail)}`);
    return parts.join(' | ') || 'Unknown error';
  } catch {
    return String(err);
  }
}

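// For illustration (assumed shape of an OpenAI APIError-like object):
//   formatError({ status: 429, message: 'Rate limited', error: { message: 'Too many requests' } })
// returns "HTTP 429 | Rate limited | Detail: Too many requests".
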
/**
 * @param input the user's message
 * @param target_id the user's QQ number
 */
export async function chat(input: MessageData[], target_id: string) {
  const chatHistoryKey = `chat_history_${target_id}`;
  await waitForQueueEmpty(target_id);
  let chatHistory: OpenAI.Responses.ResponseInput = await storage.getItem(chatHistoryKey) || [];

  if (chatHistory.length > 40) {
    sendMsg(`[Notice] This conversation is ${chatHistory.length} turns long. Overly long conversations degrade output quality; when you are ready, consider sending "/reset" to clear the history.`, target_id);
  }

  if (chatHistory.length > 100) {
    sendMsg(`[Notice] This conversation is ${chatHistory.length} turns long. An overly long conversation may keep the model from working properly, so only the most recent 100 entries will be kept.`, target_id);
    chatHistory = chatHistory.slice(-100); // keep 100 entries, matching the notice above
    await storage.setItem(chatHistoryKey, chatHistory);
  }

  // Append the new input to the conversation history
  const userContent: OpenAI.Responses.ResponseInputContent[] = []
  for (const element of input) {
    if (element.type == 'text') {
      userContent.push({ type: "input_text", text: element.data.text });
    } else if (element.type == 'image') {
      userContent.push({ type: "input_image", image_url: element.data.url, detail: 'low' });
    }
  }
  if (userContent.length === 0) {
    console.log("[LLM] empty message, skipping");
    sendMsg("Unrecognized message type.", target_id);
    return
  }
  chatHistory.push({ role: "user", content: userContent });
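  // For illustration: a mixed message such as
  //   [{ type: 'text', data: { text: 'what is this?' } },
  //    { type: 'image', data: { url: 'https://…' } }]
  // becomes a single user turn holding an input_text part and a low-detail
  // input_image part.
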
  // Persist the updated conversation history
  console.log(`[LLM] using conversation, history:`, JSON.stringify(chatHistory, null, 0));

  await storage.setItem(chatHistoryKey, chatHistory);

  let response: OpenAI.Responses.Response | undefined;
  try {
    response = await client.responses.create({
      model: process.env.CHAT_MODEL || "gpt-5-nano",
      instructions: prompt,
      reasoning: { effort: process.env.CHAT_MODEL_REASONING_EFFORT as any || 'minimal' },
      input: chatHistory,
      tools
    });
  } catch (err) {
    const errText = formatError(err);
    console.error('[LLM] initial responses.create call failed:', errText);
    scheduleSendMsg(`[Error] Model API call failed: ${errText}`, target_id);
    return; // abort this turn so an uncaught exception cannot kill the process
  }

  await storage.setItem(chatHistoryKey, chatHistory);

  // Keep calling tools until the model issues no more tool calls

  if (!response?.output || response.output.length === 0) {
    console.warn('[LLM] responses.create returned empty output; ending this turn.');
    return;
  }

  await toolUseCycle(response.output);

  async function toolUseCycle(outputArr: OpenAI.Responses.ResponseOutputItem[]) {
    if (!outputArr || outputArr.length === 0) return;
    chatHistory.push(...outputArr);
    await storage.setItem(chatHistoryKey, chatHistory);

    const assistantReply = outputArr.filter(item => item.type === 'message' && item.role === 'assistant') as ResponseOutputMessage[];
    const functionCalls = outputArr.filter(item => item.type === 'function_call') as ResponseFunctionToolCall[];

    console.log("[LLM] entering toolUseCycle with", functionCalls.length, "function call(s)");
    console.log(JSON.stringify(chatHistory, null, 0));

    if (assistantReply.length > 0) {
      const replyText = assistantReply.map(item => item.content).flat().filter(con => con.type == 'output_text').map(con => con.text).join("[newline]");

      console.log(`[LLM] reply:`, replyText);
      scheduleSendMsg(replyText, target_id);
    }

    if (functionCalls.length == 0) {
      return
    }
    // Placeholder: with only the hosted web_search tool enabled, no locally
    // executed function calls are expected here.
    for (const item of functionCalls) {

    }
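    // Sketch (assumption): if local function tools were ever added to `tools`,
    // each call would be executed inside the loop above and its result pushed
    // back as a function_call_output item for the follow-up request, e.g.:
    //
    //   const result = await runLocalTool(item.name, JSON.parse(item.arguments)); // runLocalTool is hypothetical
    //   chatHistory.push({ type: 'function_call_output', call_id: item.call_id, output: JSON.stringify(result) });
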
    await storage.setItem(chatHistoryKey, chatHistory);

    let response: OpenAI.Responses.Response | undefined;
    try {
      response = await client.responses.create({
        model: process.env.CHAT_MODEL || "gpt-5-nano",
        instructions: prompt,
        reasoning: { effort: process.env.CHAT_MODEL_REASONING_EFFORT as any || 'minimal' },
        input: chatHistory,
        tools
      });
    } catch (err) {
      const errText = formatError(err);
      console.error('[LLM] responses.create failed inside the tool loop:', errText);
      scheduleSendMsg(`[Error] Tool-call stage failed: ${errText}`, target_id);
      return; // end the tool loop instead of crashing
    }

    if (!response?.output || response.output.length === 0) {
      console.warn('[LLM] responses.create returned empty output inside the tool loop; ending it.');
      return;
    }

    await toolUseCycle(response.output);
  }
}

const msgQueue: Record<string, string[]> = {}
const msgInQueue: Record<string, boolean> = {}
const queueEmptyPromises: Record<string, (() => void)[]> = {}

/** Single place to notify "queue is now empty" waiters */
function resolveDrain(target_id: string) {
  const waiters = queueEmptyPromises[target_id];
  if (waiters && waiters.length) {
    // Resolve each waiter, then clear the list
    waiters.splice(0).forEach(resolve => resolve());
  }
}

/** Start scheduling only when needed; resolves waiters when the queue is empty */
function startMsgScheduler(target_id: string) {
  // Make sure the queue exists
  const q = msgQueue[target_id] ?? (msgQueue[target_id] = []);

  // If a send is already in flight, the recursive call after it completes
  // will resume scheduling
  if (msgInQueue[target_id]) return;

  // Empty queue => notify waiters and return
  if (q.length === 0) {
    resolveDrain(target_id);
    return;
  }

  // Take the next message and send it
  msgInQueue[target_id] = true;
  const msg = q.shift()!; // guaranteed to have an element here

  const msgDelay = Math.sqrt(msg.length) * 200 + 500; // consider adding an upper bound if needed
  setTimeout(async () => {
    try {
      await sendMsg(msg, target_id);
    } catch (err) {
      console.error("[queue] sendMsg error:", err);
      // Optional: put the message back at the head of the queue to retry
      // q.unshift(msg);
    } finally {
      // This send is finished
      msgInQueue[target_id] = false;

      // If the queue is empty right now, notify waiters immediately
      if (q.length === 0) {
        resolveDrain(target_id);
      }

      // Continue scheduling remaining messages (if any)
      startMsgScheduler(target_id);
    }
  }, msgDelay);
}

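// For illustration, the square-root pacing above yields roughly:
//   10-char message  -> sqrt(10)  * 200 + 500 ≈ 1.1 s
//   100-char message -> sqrt(100) * 200 + 500 =  2.5 s
//   400-char message -> sqrt(400) * 200 + 500 =  4.5 s
// so longer messages wait longer, but sub-linearly.
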
function scheduleSendMsg(input: string, target_id: string) {
  if (!msgQueue[target_id]) {
    msgQueue[target_id] = [];
  }
  // Split on the delimiter and drop whitespace-only parts
  const parts = input
    .split("[newline]")
    .map(s => s.trim())
    .filter(Boolean);

  if (parts.length === 0) return; // all whitespace, skip

  msgQueue[target_id].push(...parts);
  startMsgScheduler(target_id);
}

function waitForQueueEmpty(target_id: string) {
  return new Promise<void>((resolve) => {
    const q = msgQueue[target_id];

    // Condition: nothing is being sent, and the queue is missing or empty
    const isIdle = !msgInQueue[target_id];
    const isEmpty = !q || q.length === 0;

    if (isIdle && isEmpty) {
      resolve();
      return;
    }

    if (!queueEmptyPromises[target_id]) {
      queueEmptyPromises[target_id] = [];
    }
    queueEmptyPromises[target_id].push(resolve);
  });
}

export async function resetChat(target_id: string) {
  const chatHistoryKey = `chat_history_${target_id}`;
  await storage.removeItem(chatHistoryKey);
  sendMsg("Your conversation history has been reset.", target_id);
}
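
// For illustration (assumption: an OneBot-style message event handler; the
// `event` fields below are hypothetical):
//
//   if (rawText === '/reset') {
//     await resetChat(String(event.user_id));
//   } else {
//     await chat(event.message as MessageData[], String(event.user_id));
//   }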