This commit is contained in:
从何开始123
2026-01-08 02:16:42 +08:00
parent 83b4df1167
commit 54e9bf5906
31 changed files with 2201 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
import { ModelOption, ExpertResult } from '../../types';
import { getExpertSystemInstruction } from './prompts';
/**
 * Streams a single expert persona's response, forwarding text and "thought"
 * deltas to the caller as each chunk arrives.
 *
 * @param ai      GenAI client (typed loosely; only `models.generateContentStream` is used).
 * @param model   Model identifier to query.
 * @param expert  Persona supplying the prompt, role, description and temperature.
 * @param context Shared conversation context embedded in the system instruction.
 * @param budget  Thinking-token budget forwarded via `thinkingConfig`.
 * @param signal  Abort signal; aborting cancels the request and resolves quietly.
 * @param onChunk Invoked once per streamed chunk that has parts, with (text, thought) deltas.
 */
export const streamExpertResponse = async (
  ai: any,
  model: ModelOption,
  expert: ExpertResult,
  context: string,
  budget: number,
  signal: AbortSignal,
  onChunk: (text: string, thought: string) => void
): Promise<void> => {
  const streamResult = await ai.models.generateContentStream({
    model: model,
    contents: expert.prompt,
    config: {
      systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
      temperature: expert.temperature,
      // Fix: forward the caller's AbortSignal to the SDK so aborting cancels
      // the underlying HTTP request instead of only abandoning the iterator.
      abortSignal: signal,
      thinkingConfig: {
        thinkingBudget: budget,
        includeThoughts: true
      }
    }
  });
  try {
    for await (const chunk of streamResult) {
      if (signal.aborted) break;
      let chunkText = "";
      let chunkThought = "";
      const parts = chunk.candidates?.[0]?.content?.parts;
      if (parts) {
        for (const part of parts) {
          if (part.thought) {
            // Thought parts carry their payload in `text` as well.
            chunkThought += (part.text || "");
          } else if (part.text) {
            chunkText += part.text;
          }
        }
        onChunk(chunkText, chunkThought);
      }
    }
  } catch (e) {
    // Cancellation may surface as an error from the SDK's stream iterator.
    // Preserve the original contract: resolve quietly when the caller aborted.
    if (!signal.aborted) throw e;
  }
};

View File

@@ -0,0 +1,61 @@
import { Type } from "@google/genai";
import { ModelOption, AnalysisResult } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT } from './prompts';
/**
 * Asks the "manager" model to plan a set of supplementary expert personas
 * for the given query, returning the parsed plan. On any parse/validation
 * failure, degrades gracefully to an empty expert list ("Direct processing.").
 *
 * @param ai      GenAI client (typed loosely; only `models.generateContent` is used).
 * @param model   Model identifier to query.
 * @param query   The user's current query.
 * @param context Conversation context embedded in the request contents.
 * @param budget  Thinking-token budget forwarded via `thinkingConfig`.
 */
export const executeManagerAnalysis = async (
  ai: any,
  model: ModelOption,
  query: string,
  context: string,
  budget: number
): Promise<AnalysisResult> => {
  // Schema for one planned expert persona.
  const expertItemSchema = {
    type: Type.OBJECT,
    properties: {
      role: { type: Type.STRING },
      description: { type: Type.STRING },
      temperature: { type: Type.NUMBER },
      prompt: { type: Type.STRING }
    },
    required: ["role", "description", "temperature", "prompt"]
  };
  // Overall response schema: a short rationale plus the expert list.
  const managerSchema = {
    type: Type.OBJECT,
    properties: {
      thought_process: { type: Type.STRING, description: "Brief explanation of why these supplementary experts were chosen." },
      experts: {
        type: Type.ARRAY,
        items: expertItemSchema
      }
    },
    required: ["thought_process", "experts"]
  };
  const analysisResp = await ai.models.generateContent({
    model,
    contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
    config: {
      systemInstruction: MANAGER_SYSTEM_PROMPT,
      responseMimeType: "application/json",
      responseSchema: managerSchema,
      thinkingConfig: { includeThoughts: true, thinkingBudget: budget }
    }
  });
  const rawText = analysisResp.text || '{}';
  const cleanText = cleanJsonString(rawText);
  try {
    const parsed = JSON.parse(cleanText) as AnalysisResult;
    // Enforce the minimal structural invariant callers rely on.
    if (!Array.isArray(parsed.experts)) {
      throw new Error("Invalid schema structure");
    }
    return parsed;
  } catch (e) {
    // An unparsable plan means we skip the expert fan-out entirely.
    console.error("JSON Parse Error:", e, rawText);
    return { thought_process: "Direct processing.", experts: [] };
  }
};

View File

@@ -0,0 +1,34 @@
import { ExpertResult } from '../../types';
// System instruction for the "manager" planning call (see executeManagerAnalysis):
// asks the model to propose 2-4 SUPPLEMENTARY expert personas — each with its own
// sampling temperature — rather than one that merely restates the user's query.
// NOTE: this is a runtime string sent to the model; edit wording with care.
export const MANAGER_SYSTEM_PROMPT = `
You are the "Dynamic Planning Engine". Your goal is to analyze a user query (considering the conversation context) and decompose it into a set of specialized expert personas (2 to 4) who can collaboratively solve specific aspects of the problem.
Your job is to create SUPPLEMENTARY experts to aid the Primary Responder.
DO NOT create an expert that just repeats the user query. The Primary Responder is already doing that.
Focus on specialized angles: specific coding patterns, historical context, devil's advocate, security analyst, etc.
For each expert, you must assign a specific 'temperature' (0.0 to 2.0).
`;
/**
 * Builds the per-expert system instruction embedding the persona's role,
 * its one-line description, and the shared conversation context.
 */
export const getExpertSystemInstruction = (role: string, description: string, context: string) => {
  const persona = `You are a ${role}. ${description}.`;
  return `${persona} Context: ${context}`;
};
/**
 * Builds the final synthesis prompt: conversation context, the original
 * query, every expert's transcript, and the integration instructions.
 */
export const getSynthesisPrompt = (recentHistory: string, query: string, expertResults: ExpertResult[]) => {
  // Render each expert transcript as a delimited section; experts that
  // produced nothing are shown explicitly as "(No output)".
  const panelSections = expertResults
    .map(e => `--- Expert: ${e.role} (Temp: ${e.temperature}) ---\n${e.content || "(No output)"}\n`)
    .join('\n');
  return `
You are the "Synthesis Engine".
Context:
${recentHistory}
Original User Query: "${query}"
Here are the analyses from your expert panel:
${panelSections}
Your Task:
1. Reflect on the experts' inputs. Identify conflicts and consensus.
2. Synthesize a final, comprehensive, and high-quality answer to the user's original query.
3. Do not simply summarize; integrate the knowledge into a cohesive response.
`;
};

View File

@@ -0,0 +1,44 @@
import { ModelOption, ExpertResult } from '../../types';
import { getSynthesisPrompt } from './prompts';
/**
 * Streams the final synthesis response built from the expert panel's
 * results, forwarding text and "thought" deltas to the caller.
 *
 * @param ai             GenAI client (typed loosely; only `models.generateContentStream` is used).
 * @param model          Model identifier to query.
 * @param query          The user's original query.
 * @param historyContext Recent conversation history embedded in the prompt.
 * @param expertResults  Expert transcripts to synthesize.
 * @param budget         Thinking-token budget forwarded via `thinkingConfig`.
 * @param signal         Abort signal; aborting cancels the request and resolves quietly.
 * @param onChunk        Invoked once per streamed chunk that has parts, with (text, thought) deltas.
 */
export const streamSynthesisResponse = async (
  ai: any,
  model: ModelOption,
  query: string,
  historyContext: string,
  expertResults: ExpertResult[],
  budget: number,
  signal: AbortSignal,
  onChunk: (text: string, thought: string) => void
): Promise<void> => {
  const prompt = getSynthesisPrompt(historyContext, query, expertResults);
  const synthesisStream = await ai.models.generateContentStream({
    model: model,
    contents: prompt,
    config: {
      // Fix: forward the caller's AbortSignal to the SDK so aborting cancels
      // the underlying HTTP request instead of only abandoning the iterator.
      abortSignal: signal,
      thinkingConfig: {
        thinkingBudget: budget,
        includeThoughts: true
      }
    }
  });
  try {
    for await (const chunk of synthesisStream) {
      if (signal.aborted) break;
      let chunkText = "";
      let chunkThought = "";
      const parts = chunk.candidates?.[0]?.content?.parts;
      if (parts) {
        for (const part of parts) {
          if (part.thought) {
            // Thought parts carry their payload in `text` as well.
            chunkThought += (part.text || "");
          } else if (part.text) {
            chunkText += part.text;
          }
        }
        onChunk(chunkText, chunkThought);
      }
    }
  } catch (e) {
    // Cancellation may surface as an error from the SDK's stream iterator.
    // Preserve the original contract: resolve quietly when the caller aborted.
    if (!signal.aborted) throw e;
  }
};