从何开始123
2026-01-09 01:07:03 +08:00
parent 21842c2b50
commit a32f3a5faf
17 changed files with 334 additions and 135 deletions

View File

@@ -1,4 +1,5 @@
import { ModelOption, ExpertResult } from '../../types';
import { ModelOption, ExpertResult, MessageAttachment } from '../../types';
import { getExpertSystemInstruction } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
@@ -12,6 +13,7 @@ export const streamExpertResponse = async (
model: ModelOption,
expert: ExpertResult,
context: string,
attachments: MessageAttachment[],
budget: number,
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
@@ -19,9 +21,25 @@ export const streamExpertResponse = async (
const isGoogle = isGoogleProvider(ai);
if (isGoogle) {
const contents: any = {
role: 'user',
parts: [{ text: expert.prompt }]
};
if (attachments.length > 0) {
attachments.forEach(att => {
contents.parts.push({
inlineData: {
mimeType: att.mimeType,
data: att.data
}
});
});
}
const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
contents: contents,
config: {
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
temperature: expert.temperature,
@@ -55,10 +73,26 @@ export const streamExpertResponse = async (
throw streamError;
}
} else {
let contentPayload: any = expert.prompt;
if (attachments.length > 0) {
contentPayload = [
{ type: 'text', text: expert.prompt }
];
attachments.forEach(att => {
contentPayload.push({
type: 'image_url',
image_url: {
url: `data:${att.mimeType};base64,${att.data}`
}
});
});
}
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
content: expert.prompt,
content: contentPayload,
temperature: expert.temperature,
thinkingConfig: {
thinkingBudget: budget,
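
The Google and OpenAI attachment handling added here is repeated almost verbatim in the manager and synthesis services below. A minimal sketch of how the conversion could be shared, assuming only the MessageAttachment fields used in this diff (mimeType plus base64 data); the helper names are hypothetical and not part of this commit:

import { MessageAttachment } from '../../types';

// Google GenAI branch: a text part followed by one inlineData part per attachment.
export const toGoogleUserContent = (text: string, attachments: MessageAttachment[]) => ({
  role: 'user',
  parts: [
    { text },
    ...attachments.map(att => ({
      inlineData: { mimeType: att.mimeType, data: att.data }
    }))
  ]
});

// OpenAI-compatible branch: a plain string when there are no attachments,
// otherwise a content array with a text part and data-URL image parts.
export const toOpenAIUserContent = (text: string, attachments: MessageAttachment[]) =>
  attachments.length === 0
    ? text
    : [
        { type: 'text', text },
        ...attachments.map(att => ({
          type: 'image_url',
          image_url: { url: `data:${att.mimeType};base64,${att.data}` }
        }))
      ];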

View File

@@ -1,10 +1,10 @@
import { Type } from "@google/genai";
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, ApiProvider } from '../../types';
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, MessageAttachment } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContent as generateOpenAIContent } from './openaiClient';
import { getAIProvider } from '../../api';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContent !== undefined;
@@ -15,9 +15,11 @@ export const executeManagerAnalysis = async (
model: ModelOption,
query: string,
context: string,
attachments: MessageAttachment[],
budget: number
): Promise<AnalysisResult> => {
const isGoogle = isGoogleProvider(ai);
const textPrompt = `Context:\n${context}\n\nCurrent Query: "${query}"`;
if (isGoogle) {
const managerSchema = {
@@ -41,10 +43,26 @@ export const executeManagerAnalysis = async (
required: ["thought_process", "experts"]
};
const contents: any = {
role: 'user',
parts: [{ text: textPrompt }]
};
if (attachments.length > 0) {
attachments.forEach(att => {
contents.parts.push({
inlineData: {
mimeType: att.mimeType,
data: att.data
}
});
});
}
try {
const analysisResp = await withRetry(() => ai.models.generateContent({
model: model,
contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
contents: contents,
config: {
systemInstruction: MANAGER_SYSTEM_PROMPT,
responseMimeType: "application/json",
@@ -73,10 +91,37 @@ export const executeManagerAnalysis = async (
}
} else {
try {
let contentPayload: any = textPrompt;
if (attachments.length > 0) {
contentPayload = [
{ type: 'text', text: textPrompt }
];
attachments.forEach(att => {
contentPayload.push({
type: 'image_url',
image_url: {
url: `data:${att.mimeType};base64,${att.data}`
}
});
});
}
// Append an explicit JSON formatting instruction for OpenAI-compatible providers.
// responseFormat: 'json_object' plus the system prompt is usually enough, but these
// endpoints sometimes need the structure spelled out in the user text as well.
// The instruction goes onto the first text part (array payload) or onto the plain string.
const jsonInstruction = `\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`;
if (Array.isArray(contentPayload)) {
contentPayload[0].text += jsonInstruction;
} else {
contentPayload += jsonInstruction;
}
const response = await generateOpenAIContent(ai, {
model,
systemInstruction: MANAGER_SYSTEM_PROMPT,
content: `Context:\n${context}\n\nCurrent Query: "${query}"\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`,
content: contentPayload,
temperature: 0.7,
responseFormat: 'json_object',
thinkingConfig: {
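
A hedged sketch of the updated call site, assuming the leading provider-client parameter sits above the visible hunk and that MessageAttachment needs only the mimeType and data fields used in this diff; the variable names are illustrative, not from this commit:

// Illustrative call; `ai`, `model`, `query`, `context`, `budget` and `base64Png`
// are assumed to be in scope at the call site.
const attachments: MessageAttachment[] = [
  { mimeType: 'image/png', data: base64Png } // base64 payload without the data: prefix
];

const analysis: AnalysisResult = await executeManagerAnalysis(
  ai,           // Google GenAI or OpenAI-compatible client
  model,
  query,
  context,
  attachments,  // new parameter introduced by this commit
  budget
);

// Both provider branches are expected to return the same shape:
// { thought_process: string, experts: [{ role, description, temperature, prompt }] }
analysis.experts.forEach(e => console.log(e.role, e.temperature));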

View File

@@ -1,3 +1,4 @@
import OpenAI from "openai";
import { ModelOption } from '../../types';
import { withRetry } from '../utils/retry';
@@ -10,7 +11,7 @@ export interface OpenAIStreamChunk {
export interface OpenAIConfig {
model: ModelOption;
systemInstruction?: string;
content: string;
content: string | Array<any>;
temperature?: number;
responseFormat?: 'text' | 'json_object';
thinkingConfig?: {
@@ -49,7 +50,7 @@ export const generateContent = async (
messages.push({
role: 'user',
content: config.content
content: config.content as any
});
const requestOptions: any = {
@@ -93,7 +94,7 @@ export async function* generateContentStream(
messages.push({
role: 'user',
content: config.content
content: config.content as any
});
const requestOptions: any = {
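
Widening content to string | Array&lt;any&gt; mirrors the two user-message shapes the OpenAI chat-completions format accepts. A sketch of a stricter union that could stand in for Array&lt;any&gt;, under the assumption that only text and image_url parts are ever passed (the part type names are made up here, not part of this commit):

import { ModelOption } from '../../types';

// Hypothetical narrowing of Array<any>; not part of this commit.
type TextPart = { type: 'text'; text: string };
type ImageUrlPart = { type: 'image_url'; image_url: { url: string } };
type UserContent = string | Array<TextPart | ImageUrlPart>;

export interface OpenAIConfig {
  model: ModelOption;
  systemInstruction?: string;
  content: UserContent;            // plain prompt, or text part plus data-URL image parts
  temperature?: number;
  responseFormat?: 'text' | 'json_object';
  thinkingConfig?: {
    thinkingBudget: number;        // field used at the call sites; full shape is truncated in this hunk
  };
}

With a union like this, the `as any` casts on the user message could in principle give way to the SDK's own content-part types, though that is outside the scope of this commit.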

View File

@@ -1,4 +1,5 @@
import { ModelOption, ExpertResult } from '../../types';
import { ModelOption, ExpertResult, MessageAttachment } from '../../types';
import { getSynthesisPrompt } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
@@ -13,6 +14,7 @@ export const streamSynthesisResponse = async (
query: string,
historyContext: string,
expertResults: ExpertResult[],
attachments: MessageAttachment[],
budget: number,
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
@@ -21,9 +23,25 @@ export const streamSynthesisResponse = async (
const isGoogle = isGoogleProvider(ai);
if (isGoogle) {
const contents: any = {
role: 'user',
parts: [{ text: prompt }]
};
if (attachments.length > 0) {
attachments.forEach(att => {
contents.parts.push({
inlineData: {
mimeType: att.mimeType,
data: att.data
}
});
});
}
const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
contents: contents,
config: {
thinkingConfig: {
thinkingBudget: budget,
@@ -55,10 +73,26 @@ export const streamSynthesisResponse = async (
throw streamError;
}
} else {
let contentPayload: any = prompt;
if (attachments.length > 0) {
contentPayload = [
{ type: 'text', text: prompt }
];
attachments.forEach(att => {
contentPayload.push({
type: 'image_url',
image_url: {
url: `data:${att.mimeType};base64,${att.data}`
}
});
});
}
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: undefined,
content: prompt,
content: contentPayload,
temperature: 0.7,
thinkingConfig: {
thinkingBudget: budget,
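
A brief sketch of consuming the updated synthesis stream, assuming the leading parameters (provider client, model) above the visible hunk match the expert service; the surrounding variables are illustrative, not from this commit:

// Illustrative consumer; `ai`, `model`, `query`, `historyContext`, `expertResults`,
// `attachments` and `budget` are assumed to be in scope.
const controller = new AbortController();
let answer = '';
let reasoning = '';

await streamSynthesisResponse(
  ai,
  model,
  query,
  historyContext,
  expertResults,
  attachments,        // new parameter introduced by this commit
  budget,
  controller.signal,  // AbortSignal used to cancel the stream
  (text, thought) => {
    answer += text;       // streamed answer text
    reasoning += thought; // streamed thought/reasoning text
  }
);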