This commit is contained in:
从何开始123
2026-01-08 11:56:00 +08:00
parent 54e9bf5906
commit 1561c054b7
24 changed files with 1105 additions and 449 deletions

View File

@@ -1,7 +1,7 @@
import { Type } from "@google/genai";
import { ModelOption, AnalysisResult } from '../../types';
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT } from './prompts';
import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
export const executeManagerAnalysis = async (
ai: any,
@@ -59,3 +59,66 @@ export const executeManagerAnalysis = async (
return { thought_process: "Direct processing.", experts: [] };
}
};
/**
 * Asks the manager model to review the current expert outputs and decide
 * whether another refinement round is required.
 *
 * @param ai - Generative AI client (must expose `models.generateContent`).
 * @param model - Model identifier used for the review call.
 * @param query - The original user query being answered.
 * @param currentExperts - Expert outputs gathered so far (possibly across rounds).
 * @param budget - Thinking-token budget forwarded via `thinkingConfig`.
 * @returns The parsed ReviewResult. On any parse or shape failure a
 *          `satisfied: true` fallback is returned so the orchestrator
 *          cannot loop forever on malformed model output.
 */
export const executeManagerReview = async (
  ai: any,
  model: ModelOption,
  query: string,
  currentExperts: ExpertResult[],
  budget: number
): Promise<ReviewResult> => {
  // Structured-output schema the model must follow for its review verdict.
  const reviewSchema = {
    type: Type.OBJECT,
    properties: {
      satisfied: { type: Type.BOOLEAN, description: "True if the experts have fully answered the query with high quality." },
      critique: { type: Type.STRING, description: "If not satisfied, explain why and what is missing." },
      next_round_strategy: { type: Type.STRING, description: "Plan for the next iteration." },
      refined_experts: {
        type: Type.ARRAY,
        description: "The list of experts for the next round. Can be the same roles or new ones.",
        items: {
          type: Type.OBJECT,
          properties: {
            role: { type: Type.STRING },
            description: { type: Type.STRING },
            temperature: { type: Type.NUMBER },
            prompt: { type: Type.STRING }
          },
          required: ["role", "description", "temperature", "prompt"]
        }
      }
    },
    required: ["satisfied", "critique"]
  };

  // Truncate each expert's output to keep the review prompt bounded.
  const expertOutputs = currentExperts.map(e =>
    `--- [Round ${e.round}] Expert: ${e.role} ---\nOutput: ${e.content?.slice(0, 2000)}...`
  ).join('\n\n');

  const content = `User Query: "${query}"\n\nCurrent Expert Outputs:\n${expertOutputs}`;

  const resp = await ai.models.generateContent({
    model: model,
    contents: content,
    config: {
      systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
      responseMimeType: "application/json",
      responseSchema: reviewSchema,
      thinkingConfig: {
        includeThoughts: true,
        thinkingBudget: budget
      }
    }
  });

  const rawText = resp.text || '{}';
  const cleanText = cleanJsonString(rawText);
  try {
    const parsed = JSON.parse(cleanText) as ReviewResult;
    // BUG FIX: an empty response makes rawText fall back to '{}', which parses
    // successfully but yields `satisfied: undefined` (falsy) — bypassing the
    // intended "assume satisfied" safety net and risking an infinite loop.
    // Validate the shape and apply the safe fallback when it is missing.
    if (typeof parsed.satisfied !== 'boolean') {
      return { satisfied: true, critique: "Malformed review payload, proceeding to synthesis." };
    }
    return parsed;
  } catch (e) {
    console.error("Review JSON Parse Error:", e);
    // Fallback: Assume satisfied if JSON fails to avoid infinite loops due to format errors
    return { satisfied: true, critique: "JSON Error, proceeding to synthesis." };
  }
};

View File

@@ -1,13 +1,35 @@
import { ExpertResult } from '../../types';
export const MANAGER_SYSTEM_PROMPT = `
You are the "Dynamic Planning Engine". Your goal is to analyze a user query (considering the conversation context) and decompose it into a set of specialized expert personas (2 to 4) who can collaboratively solve specific aspects of the problem.
export const MANAGER_SYSTEM_PROMPT = `You are the "Dynamic Planning Engine". Your goal is to analyze a user query (considering the conversation context) and decompose it into a set of specialized expert personas (2 to 4) who can collaboratively solve specific aspects of the problem.
Your job is to create SUPPLEMENTARY experts to aid the Primary Responder.
DO NOT create an expert that just repeats the user query. The Primary Responder is already doing that.
Focus on specialized angles: specific coding patterns, historical context, devil's advocate, security analyst, etc.
Your job is to create SUPPLEMENTARY experts
For each expert, you must assign a specific 'temperature' (0.0 to 2.0).
For each expert, you must assign a specific 'temperature' (0.0 to 2.0) based on the nature of their task:
* High temperature (1.0 - 2.0)
* Low temperature (0.0 - 0.4)
* Medium temperature (0.4 - 1.0)`;
/**
 * System prompt for the quality-assurance review pass: the manager model
 * judges whether the expert outputs are sufficient and, if not, plans the
 * next refinement round. The field names mentioned in the prompt
 * (`satisfied`, `critique`, `next_round_strategy`, `refined_experts`) must
 * stay in sync with the review response schema used by the review call.
 */
export const MANAGER_REVIEW_SYSTEM_PROMPT = `
You are the "Quality Assurance & Orchestration Engine".
You have just received outputs from a team of AI experts.
Your goal is to evaluate if these outputs are sufficient to fully answer the user's complex request with high quality.
Criteria for "Not Satisfied":
- Conflicting information between experts that isn't resolved.
- Missing code implementation details or edge cases.
- Shallow analysis that doesn't go deep enough.
- Logic errors or hallucinations.
If you are NOT satisfied:
1. Provide a "critique" explaining exactly what is missing or wrong.
2. Define a "next_round_strategy" (briefly) to fix it.
3. Define the *refined_experts* for the next round. You can keep the same roles or create new ones. Their prompts MUST include the feedback/critique.
If you ARE satisfied:
1. Set satisfied to true.
2. Leave refined_experts empty.
`;
export const getExpertSystemInstruction = (role: string, description: string, context: string) => {
@@ -23,11 +45,11 @@ ${recentHistory}
Original User Query: "${query}"
Here are the analyses from your expert panel:
${expertResults.map(e => `--- Expert: ${e.role} (Temp: ${e.temperature}) ---\n${e.content || "(No output)"}\n`).join('\n')}
Here are the analyses from your expert panel (potentially across multiple rounds of refinement):
${expertResults.map(e => `--- [Round ${e.round || 1}] Expert: ${e.role} (Temp: ${e.temperature}) ---\n${e.content || "(No output)"}\n`).join('\n')}
Your Task:
1. Reflect on the experts' inputs. Identify conflicts and consensus.
1. Reflect on the experts' inputs. Identify conflicts, consensus, and evolution of thought across rounds.
2. Synthesize a final, comprehensive, and high-quality answer to the user's original query.
3. Do not simply summarize; integrate the knowledge into a cohesive response.
`;