This commit is contained in:
feie9454 2025-10-15 23:31:54 +08:00
parent 075ed9296d
commit 3506ee9bd0
7 changed files with 215 additions and 61 deletions

1
.gitignore vendored
View File

@ -32,3 +32,4 @@ report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
# Finder (MacOS) folder config # Finder (MacOS) folder config
.DS_Store .DS_Store
.node-persist

View File

@ -6,10 +6,13 @@
"dependencies": { "dependencies": {
"@types/cors": "^2.8.19", "@types/cors": "^2.8.19",
"@types/multer": "^2.0.0", "@types/multer": "^2.0.0",
"@types/node-persist": "^3.1.8",
"cors": "^2.8.5", "cors": "^2.8.5",
"dotenv": "^17.0.0", "dotenv": "^17.0.0",
"express": "^5.1.0", "express": "^5.1.0",
"multer": "^2.0.2", "multer": "^2.0.2",
"node-persist": "^4.0.4",
"openai": "^6.3.0",
}, },
"devDependencies": { "devDependencies": {
"@types/bun": "latest", "@types/bun": "latest",
@ -42,6 +45,8 @@
"@types/node": ["@types/node@24.3.0", "https://registry.npmmirror.com/@types/node/-/node-24.3.0.tgz", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow=="], "@types/node": ["@types/node@24.3.0", "https://registry.npmmirror.com/@types/node/-/node-24.3.0.tgz", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-aPTXCrfwnDLj4VvXrm+UUCQjNEvJgNA8s5F1cvwQU+3KNltTOkBm1j30uNLyqqPNe7gE3KFzImYoZEfLhp4Yow=="],
"@types/node-persist": ["@types/node-persist@3.1.8", "https://registry.npmmirror.com/@types/node-persist/-/node-persist-3.1.8.tgz", { "dependencies": { "@types/node": "*" } }, "sha512-QLidg6/SadZYPrTKxtxL1A85XBoQlG40bhoMdhu6DH6+eNCMr2j+RGfFZ9I9+IY8W/PDwQonJ+iBWD62jZjMfg=="],
"@types/qs": ["@types/qs@6.14.0", "", {}, "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ=="], "@types/qs": ["@types/qs@6.14.0", "", {}, "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ=="],
"@types/range-parser": ["@types/range-parser@1.2.7", "", {}, "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ=="], "@types/range-parser": ["@types/range-parser@1.2.7", "", {}, "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ=="],
@ -154,6 +159,8 @@
"negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="], "negotiator": ["negotiator@1.0.0", "", {}, "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg=="],
"node-persist": ["node-persist@4.0.4", "https://registry.npmmirror.com/node-persist/-/node-persist-4.0.4.tgz", { "dependencies": { "p-limit": "^3.1.0" } }, "sha512-8sPAz/7tw1mCCc8xBG4f0wi+flHkSSgQeX998iQ75Pu27evA6UUWCjSE7xnrYTg2q33oU5leJ061EKPDv6BocQ=="],
"object-assign": ["object-assign@4.1.1", "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="], "object-assign": ["object-assign@4.1.1", "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="],
"object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="], "object-inspect": ["object-inspect@1.13.4", "", {}, "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew=="],
@ -162,6 +169,10 @@
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="], "once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
"openai": ["openai@6.3.0", "https://registry.npmmirror.com/openai/-/openai-6.3.0.tgz", { "peerDependencies": { "ws": "^8.18.0", "zod": "^3.25 || ^4.0" }, "optionalPeers": ["ws", "zod"], "bin": { "openai": "bin/cli" } }, "sha512-E6vOGtZvdcb4yXQ5jXvDlUG599OhIkb/GjBLZXS+qk0HF+PJReIldEc9hM8Ft81vn+N6dRdFRb7BZNK8bbvXrw=="],
"p-limit": ["p-limit@3.1.0", "https://registry.npmmirror.com/p-limit/-/p-limit-3.1.0.tgz", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="],
"parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="], "parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="],
"path-to-regexp": ["path-to-regexp@8.2.0", "", {}, "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ=="], "path-to-regexp": ["path-to-regexp@8.2.0", "", {}, "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ=="],
@ -226,6 +237,8 @@
"xtend": ["xtend@4.0.2", "https://registry.npmmirror.com/xtend/-/xtend-4.0.2.tgz", {}, "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="], "xtend": ["xtend@4.0.2", "https://registry.npmmirror.com/xtend/-/xtend-4.0.2.tgz", {}, "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="],
"yocto-queue": ["yocto-queue@0.1.0", "https://registry.npmmirror.com/yocto-queue/-/yocto-queue-0.1.0.tgz", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="],
"@types/body-parser/@types/node": ["@types/node@24.0.4", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-ulyqAkrhnuNq9pB76DRBTkcS6YsmDALy6Ua63V8OhrOBgbcYt6IOdzpw5P1+dyRIyMerzLkeYWBeOXPpA9GMAA=="], "@types/body-parser/@types/node": ["@types/node@24.0.4", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-ulyqAkrhnuNq9pB76DRBTkcS6YsmDALy6Ua63V8OhrOBgbcYt6IOdzpw5P1+dyRIyMerzLkeYWBeOXPpA9GMAA=="],
"@types/connect/@types/node": ["@types/node@24.0.4", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-ulyqAkrhnuNq9pB76DRBTkcS6YsmDALy6Ua63V8OhrOBgbcYt6IOdzpw5P1+dyRIyMerzLkeYWBeOXPpA9GMAA=="], "@types/connect/@types/node": ["@types/node@24.0.4", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-ulyqAkrhnuNq9pB76DRBTkcS6YsmDALy6Ua63V8OhrOBgbcYt6IOdzpw5P1+dyRIyMerzLkeYWBeOXPpA9GMAA=="],

View File

@ -5,6 +5,8 @@ import path from 'path';
import cors from "cors"; import cors from "cors";
import multer from "multer"; import multer from "multer";
import * as douyin from './douyin' import * as douyin from './douyin'
import { sendMediaMsg, sendMsg } from './lib/qq';
import * as llm from './llm'
const app = express(); const app = express();
const PORT = process.env.PORT || 6100; const PORT = process.env.PORT || 6100;
@ -21,10 +23,10 @@ app.use(express.urlencoded({ extended: true, limit: '10mb', type: ['application/
app.post('/', async (req: Request, res: Response) => { app.post('/', async (req: Request, res: Response) => {
// 检查是否是消息类型的请求 // 检查是否是消息类型的请求
if (!req.body || req.body.post_type != 'message') return if (!req.body || req.body.post_type != 'message') return
const { target_id, raw_message, message_type, user_id } = req.body; const { target_id, raw_message, message_type } = req.body as { target_id: string, raw_message: string, message_type: string, user_id: string };
console.log(`\n[QQ机器人] 收到${message_type}消息`); console.log(`\n[QQ机器人] 收到${message_type}消息`);
console.log(`发送者ID: ${user_id || target_id}`); console.log(`发送者ID: ${target_id}`);
console.log(`消息内容: ${raw_message}`); console.log(`消息内容: ${raw_message}`);
// Match Douyin URL // Match Douyin URL
@ -40,7 +42,16 @@ app.post('/', async (req: Request, res: Response) => {
sendMsg(`[抖音链接检测] 发现抖音链接: ${douyinUrl},启动 Chromium 中...`, target_id); sendMsg(`[抖音链接检测] 发现抖音链接: ${douyinUrl},启动 Chromium 中...`, target_id);
douyin.downloadDouyinMedia(douyinUrl, target_id); douyin.downloadDouyinMedia(douyinUrl, target_id);
return
} }
if (raw_message.startsWith('/reset')) {
llm.resetChat(target_id)
return
}
// 使用 LLM 回答
llm.chat(raw_message, target_id);
} catch (error) { } catch (error) {
console.error(`[错误] 处理消息时发生错误:`, error); console.error(`[错误] 处理消息时发生错误:`, error);
@ -75,19 +86,15 @@ const storage = multer.diskStorage({
}); });
const upload = multer({ const upload = multer({
storage, storage,
// 限制:单文件最大 1GB、最多 50 个字段(可按需调整)
limits: { fileSize: 1024 * 1024 * 1024, fields: 50, files: 50 }, limits: { fileSize: 1024 * 1024 * 1024, fields: 50, files: 50 },
fileFilter: (_req, file, cb) => { fileFilter: (_req, file, cb) => {
// 只接受字段名以 file_ 开头的文件,其他拒绝
if (/^file_\d+$/i.test(file.fieldname)) cb(null, true); if (/^file_\d+$/i.test(file.fieldname)) cb(null, true);
else cb(new Error(`Unexpected file field: ${file.fieldname}`)); else cb(new Error(`Unexpected file field: ${file.fieldname}`));
} }
}); });
app.post("/upload", upload.any(), (req, res) => { app.post("/upload", upload.any(), (req, res) => {
// 1) 取出文件multer.any() 把所有文件放在 req.files
const files = (req.files as Express.Multer.File[]) || []; const files = (req.files as Express.Multer.File[]) || [];
// 只保留我们关心的 file_*
const accepted = files.filter(f => /^file_\d+$/i.test(f.fieldname)); const accepted = files.filter(f => /^file_\d+$/i.test(f.fieldname));
let meta = { let meta = {
@ -96,12 +103,11 @@ app.post("/upload", upload.any(), (req, res) => {
target_id: req.query.target_id target_id: req.query.target_id
}; };
console.log(meta);
console.log(`收到上传: ${accepted.length} 个文件`, files.map(f => f.path)); console.log(`收到上传: ${accepted.length} 个文件`, files.map(f => f.path));
if (meta.target_id) { if (meta.target_id) {
const totalSize = accepted.reduce((sum, f) => sum + f.size, 0); const totalSize = accepted.reduce((sum, f) => sum + f.size, 0);
sendMsg(`[抖音下载] ${meta.title},已下载 ${accepted.length} 个文件,类型 ${meta.type},共 ${(totalSize/1024/1024).toFixed(2)} MB上传中...`, meta.target_id as string); sendMsg(`[抖音下载] ${meta.title},已下载 ${accepted.length} 个文件,类型 ${meta.type},共 ${(totalSize / 1024 / 1024).toFixed(2)} MB上传中...`, meta.target_id as string);
accepted.forEach(f => { accepted.forEach(f => {
sendMediaMsg(f.path, meta.target_id as string, meta.type); sendMediaMsg(f.path, meta.target_id as string, meta.type);
}) })
@ -109,57 +115,6 @@ app.post("/upload", upload.any(), (req, res) => {
res.json({ ok: true, files: accepted.length, meta }); res.json({ ok: true, files: accepted.length, meta });
}); });
function sendMsg(msg: string, target_id: string) {
const replyMessage = {
user_id: String(target_id),
message: [
{
type: "text",
data: {
text: msg
}
}
]
}
const replyUrl = `http://localhost:30000/send_private_msg`;
console.log(`[发送消息] ${msg} -> ${target_id}`);
return fetch(replyUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(replyMessage)
});
}
function sendMediaMsg(filePath: string, target_id: string, type: 'video' | 'image') {
const mediaMessage = {
user_id: String(target_id),
message: [
{
type: type,
data: {
file: `file://${filePath}`
}
}
]
}
const replyUrl = `http://localhost:30000/send_private_msg`;
console.log(`[发送媒体消息] ${type} - ${filePath} -> ${target_id}`);
return fetch(replyUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(mediaMessage)
});
}
app.use((error: any, req: Request, res: Response, next: NextFunction) => { app.use((error: any, req: Request, res: Response, next: NextFunction) => {
const timestamp = new Date().toISOString(); const timestamp = new Date().toISOString();
@ -175,5 +130,4 @@ app.use((error: any, req: Request, res: Response, next: NextFunction) => {
// 启动服务器 // 启动服务器
app.listen(PORT, () => { app.listen(PORT, () => {
console.log(`Server is running on http://localhost:${PORT}`); console.log(`Server is running on http://localhost:${PORT}`);
console.log(`With env:`, { DOWNLOAD_DIR: process.env.DOWNLOAD_DIR, PORT: process.env.PORT });
}); });

54
lib/qq.ts Normal file
View File

@ -0,0 +1,54 @@
/**
 * Send a plain-text private QQ message through the local bot HTTP API
 * (OneBot-style `send_private_msg` endpoint on port 30000).
 * Fire-and-forget: returns the fetch promise without awaiting it.
 */
function sendMsg(msg: string, target_id: string) {
    console.log(`[发送消息] ${msg} -> ${target_id}`);
    // OneBot message payload: a single text segment addressed to the user.
    const payload = {
        user_id: String(target_id),
        message: [{ type: "text", data: { text: msg } }]
    };
    return fetch(`http://localhost:30000/send_private_msg`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload)
    });
}
/**
 * Send a local media file (video or image) as a private QQ message through
 * the local bot HTTP API. The file is referenced by `file://` path, so the
 * bot process must be able to read it from the same filesystem.
 * Fire-and-forget: returns the fetch promise without awaiting it.
 */
function sendMediaMsg(filePath: string, target_id: string, type: 'video' | 'image') {
    console.log(`[发送媒体消息] ${type} - ${filePath} -> ${target_id}`);
    // OneBot message payload: a single media segment of the requested type.
    const payload = {
        user_id: String(target_id),
        message: [{ type, data: { file: `file://${filePath}` } }]
    };
    return fetch(`http://localhost:30000/send_private_msg`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload)
    });
}
export { sendMsg, sendMediaMsg }

108
llm/index.ts Normal file
View File

@ -0,0 +1,108 @@
import OpenAI from "openai";
import storage from 'node-persist'
import { sendMsg } from "../lib/qq";
// NOTE(review): importing a .txt file as a string relies on the bundler/runtime
// (presumably Bun's text loader) — this will not work under plain tsc/node.
import prompt from './prompt.txt'

// Initialize the on-disk key-value store used for per-user chat history.
await storage.init();
// NOTE(review): clearing storage on every startup wipes all persisted chat
// histories, which defeats the point of persisting them — confirm this is
// intentional (e.g. dev-only). Also the returned promise is not awaited, so
// clear() may still be running when the first chat() call reads history.
storage.clear();

// OpenAI client; base URL and key come from the environment (.env via dotenv).
const client = new OpenAI({
    baseURL: process.env.OPENAI_BASE_URL,
    apiKey: process.env.OPENAI_API_KEY,
    // logLevel: "debug"
})

// Single tool exposed to the model: the prompt instructs it to deliver ALL
// user-visible text through send_msg rather than plain assistant output.
const tools = [{
    type: "function" as const,
    name: "send_msg",
    description: "Send a message to the user. Always use this to respond to the user.",
    parameters: {
        type: "object",
        properties: {
            text: { type: "string", description: "The message content sent to the user through QQ." }
        },
        required: ["text"],
        additionalProperties: false
    },
    strict: true
}]
/**
*
* @param input
* @param target_id QQ
*/
/**
 * Run one user turn through the LLM, keeping a persisted per-target chat
 * history, and loop over `send_msg` tool calls until the model stops
 * requesting tools. Any final plain assistant text is also forwarded to QQ.
 *
 * @param input     Raw user message text.
 * @param target_id QQ id of the peer to reply to (also keys the history).
 */
export async function chat(input: string, target_id: string) {
    const chatHistoryKey = `chat_history_${target_id}`;
    let chatHistory: OpenAI.Responses.ResponseInput = await storage.getItem(chatHistoryKey) || [];

    // Append the new user turn and persist it before calling the model,
    // so the turn is not lost if the API request fails.
    chatHistory.push({ role: "user", content: input });
    console.log(`[LLM] 使用对话, 历史:`, chatHistory);
    await storage.setItem(chatHistoryKey, chatHistory);

    const response = await client.responses.create({
        model: process.env.CHAT_MODEL || "gpt-5-nano",
        instructions: prompt,
        reasoning: { effort: 'minimal' },
        input: chatHistory,
        tools
    });

    // Resolve tool calls until the model produces a plain message.
    await toolUseCycle(response.output);

    async function toolUseCycle(outputArr: OpenAI.Responses.ResponseOutputItem[]) {
        chatHistory.push(...outputArr);
        const functionCalls = (outputArr ?? []).filter(item => item.type === 'function_call');
        console.log("进入 toolUseCycle, with functionCalls", functionCalls.length, "个");
        console.log(JSON.stringify(chatHistory, null, 2));

        if (functionCalls.length == 0) {
            // FIX: persist here too — the assistant output pushed above was
            // previously dropped on this path (only the tool-call path saved).
            await storage.setItem(chatHistoryKey, chatHistory);
            // No tool calls left: forward the model's final text, if any.
            const lastMessage = outputArr.at(-1);
            if (!lastMessage) return
            if (lastMessage.type != 'message') return
            if (lastMessage.role != 'assistant') return
            const msg = lastMessage.content.map(c => c.type == 'output_text' ? c.text : '').join('');
            if (msg.trim().length > 0) {
                sendMsg(msg, target_id);
            }
            return
        }

        // Execute each requested tool call and record its output in history.
        for (const item of functionCalls) {
            if (item.name === "send_msg") {
                console.log(item.arguments);
                const { text } = JSON.parse(item.arguments);
                sendMsg(text, target_id);
                chatHistory.push({ type: "function_call_output", call_id: item.call_id, output: "OK" });
            }
        }
        await storage.setItem(chatHistoryKey, chatHistory);

        const response = await client.responses.create({
            model: process.env.CHAT_MODEL || "gpt-5-nano",
            instructions: prompt,
            reasoning: { effort: 'minimal' },
            input: chatHistory,
            tools
        });
        // FIX: await the recursive cycle — it was fire-and-forget, so chat()
        // resolved early and concurrent storage writes could interleave.
        await toolUseCycle(response.output);
    }
}
/**
 * Delete the persisted chat history for a QQ peer and send a confirmation
 * message back to them.
 *
 * @param target_id QQ id whose history should be cleared.
 */
export async function resetChat(target_id: string) {
    await storage.removeItem(`chat_history_${target_id}`);
    sendMsg("已为你重置对话历史。", target_id);
}

21
llm/prompt.txt Normal file
View File

@ -0,0 +1,21 @@
# Role
You are a “humorous, gentle, yet professional” Chinese chat and knowledge assistant working in a QQ-like instant messaging environment.
# Goals
1) Engage in natural casual chat;
2) Provide accurate answers;
3) Explain complex topics clearly over multiple turns.
# Style
- Friendly but not cheesy; witty but not snarky; professional but not stiff.
- Use short sentences and bullet points; send 1–3 sentences per message.
- Use one emoji appropriately; avoid excessive emoji or long paragraphs.
# Tool Rules (Important)
- **Never** output text directly in the assistant channel. All user-visible content **must** be sent via `tools.send_msg`.
- Each message must not exceed 50 characters. If it's longer, split it into multiple `tools.send_msg` messages to simulate natural chat flow.
- Keep lists/code snippets short; if long, split them into multiple messages.
# Clarification
- If a user's question is **unclear / ambiguous / missing details**, **immediately ask for clarification** and provide **specific options**.
- After asking, briefly explain **why** that information is needed.

View File

@ -18,9 +18,12 @@
"dependencies": { "dependencies": {
"@types/cors": "^2.8.19", "@types/cors": "^2.8.19",
"@types/multer": "^2.0.0", "@types/multer": "^2.0.0",
"@types/node-persist": "^3.1.8",
"cors": "^2.8.5", "cors": "^2.8.5",
"dotenv": "^17.0.0", "dotenv": "^17.0.0",
"express": "^5.1.0", "express": "^5.1.0",
"multer": "^2.0.2" "multer": "^2.0.2",
"node-persist": "^4.0.4",
"openai": "^6.3.0"
} }
} }