Add OpenAI model compatibility

jwangkun
2026-01-08 17:09:34 +08:00
parent 6558006a4d
commit 579071ac95
18 changed files with 5185 additions and 212 deletions

prisma/.env (Normal file, +11 lines)
View File

@@ -0,0 +1,11 @@
# API Keys Configuration
# Copy this file to .env.local and add your actual API keys
# Primary API Key (used by default)
# For Google Gemini: https://ai.google.dev/
# For OpenAI: https://platform.openai.com/
VITE_API_KEY=sk-d8f36ac74ac24875abcbdcae382177eb
# Alternative: Use provider-specific keys (optional)
# GEMINI_API_KEY=your_gemini_key_here
# OPENAI_API_KEY=your_openai_key_here

prisma/.env.example (Normal file, +11 lines)
View File

@@ -0,0 +1,11 @@
# API Keys Configuration
# Copy this file to .env.local and add your actual API keys
# Primary API Key (used by default)
# For Google Gemini: https://ai.google.dev/
# For OpenAI: https://platform.openai.com/
VITE_API_KEY=your_api_key_here
# Alternative: Use provider-specific keys (optional)
# GEMINI_API_KEY=your_gemini_key_here
# OPENAI_API_KEY=your_openai_key_here
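
For reference, a minimal sketch of how such a key is resolved at runtime, mirroring the fallback chain this commit adds to getAI (the helper name resolveApiKey is illustrative, not part of the commit):

// Illustrative sketch: explicit key first, then the Vite env var, then the
// Node env var, matching the lookup order used by getAI() below.
const resolveApiKey = (explicit?: string): string | undefined =>
  explicit || import.meta.env.VITE_API_KEY || process.env.API_KEY;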

View File

@@ -54,6 +54,7 @@ const App = () => {
onOpenSettings={() => setIsSettingsOpen(true)}
onToggleSidebar={() => setIsSidebarOpen(!isSidebarOpen)}
onNewChat={handleNewChat}
config={config}
/>
<div className="flex flex-1 overflow-hidden relative">

View File

@@ -3,6 +3,7 @@ import React from 'react';
import { Settings, X } from 'lucide-react';
import { AppConfig, ModelOption } from './types';
import ApiSection from './components/settings/ApiSection';
import ModelSection from './components/settings/ModelSection';
import ThinkingSection from './components/settings/ThinkingSection';
import GithubSection from './components/settings/GithubSection';
@@ -40,6 +41,7 @@ const SettingsModal = ({
{/* Body */}
<div className="p-6 space-y-6 overflow-y-auto custom-scrollbar">
<ModelSection config={config} setConfig={setConfig} />
<ApiSection config={config} setConfig={setConfig} />
<ThinkingSection

View File

@@ -1,13 +1,78 @@
import { GoogleGenAI } from "@google/genai";
import OpenAI from "openai";
import { ApiProvider, AppConfig, CustomModel } from './types';
export const getAI = (config?: { apiKey?: string; baseUrl?: string }) => {
const options: any = {
apiKey: config?.apiKey || process.env.API_KEY,
};
if (config?.baseUrl) {
options.baseUrl = config.baseUrl;
}
return new GoogleGenAI(options);
type AIProviderConfig = {
provider?: ApiProvider;
apiKey?: string;
baseUrl?: string;
};
/**
* Find custom model configuration by model name
*/
export const findCustomModel = (modelName: string, customModels?: CustomModel[]): CustomModel | undefined => {
return customModels?.find(m => m.name === modelName);
};
export const getAI = (config?: AIProviderConfig) => {
const provider = config?.provider || 'google';
// Support both Vite env vars (VITE_) and standard env vars for flexibility
const apiKey = config?.apiKey || (import.meta.env as any).VITE_API_KEY || process.env.API_KEY;
if (provider === 'openai' || provider === 'deepseek' || provider === 'custom' || provider === 'anthropic' || provider === 'xai' || provider === 'mistral') {
const options: any = {
apiKey: apiKey,
// WARNING: dangerouslyAllowBrowser enables client-side API calls
// This is acceptable for local development but NOT production
// In production, use a backend proxy to protect API keys
dangerouslyAllowBrowser: true,
};
if (config?.baseUrl) {
options.baseURL = config.baseUrl;
} else if (provider === 'deepseek') {
options.baseURL = 'https://api.deepseek.com/v1';
} else if (provider === 'anthropic') {
options.baseURL = 'https://api.anthropic.com/v1';
} else if (provider === 'xai') {
options.baseURL = 'https://api.x.ai/v1';
} else if (provider === 'mistral') {
options.baseURL = 'https://api.mistral.ai/v1';
}
return new OpenAI(options);
} else {
const options: any = {
apiKey: apiKey,
};
if (config?.baseUrl) {
options.baseUrl = config.baseUrl;
}
return new GoogleGenAI(options);
}
};
export const getAIProvider = (model: string): ApiProvider => {
if (model.startsWith('gpt-') || model.startsWith('o1-')) {
return 'openai';
}
if (model.startsWith('deepseek-')) {
return 'deepseek';
}
if (model.startsWith('claude-')) {
return 'anthropic';
}
if (model.startsWith('grok-')) {
return 'xai';
}
if (model.startsWith('mistral-') || model.startsWith('mixtral-')) {
return 'mistral';
}
if (model === 'custom') {
return 'custom';
}
return 'google';
};
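A hedged usage sketch of the two helpers above (the model name and key are placeholders; no such call site is added by this commit):

// 'deepseek-chat' maps to provider 'deepseek', so getAI returns an OpenAI
// SDK client preconfigured with baseURL https://api.deepseek.com/v1.
const model = 'deepseek-chat';
const provider = getAIProvider(model);                 // -> 'deepseek'
const ai = getAI({ provider, apiKey: 'sk-placeholder' });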

View File

@@ -1,8 +1,8 @@
import React from 'react';
import { Settings, ChevronDown, Menu } from 'lucide-react';
import { MODELS } from '../config';
import { ModelOption } from '../types';
import { getAllModels } from '../config';
import { ModelOption, AppConfig } from '../types';
import Logo from './Logo';
interface HeaderProps {
@@ -11,9 +11,12 @@ interface HeaderProps {
onOpenSettings: () => void;
onToggleSidebar: () => void;
onNewChat: () => void;
config: AppConfig;
}
const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSidebar, onNewChat }: HeaderProps) => {
const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSidebar, onNewChat, config }: HeaderProps) => {
const availableModels = getAllModels(config);
return (
<header className="sticky top-0 z-50 bg-white/80 backdrop-blur-md">
<div className="w-full px-4 h-16 flex items-center justify-between">
@@ -45,7 +48,7 @@ const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSideb
onChange={(e) => setSelectedModel(e.target.value as ModelOption)}
className="relative bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-auto p-2.5 outline-none appearance-none cursor-pointer pl-3 pr-8 shadow-sm font-medium hover:bg-slate-50 transition-colors"
>
{MODELS.map(m => (
{availableModels.map(m => (
<option key={m.value} value={m.value}>{m.label}</option>
))}
</select>

View File

@@ -1,6 +1,6 @@
import React from 'react';
import { Key, Globe } from 'lucide-react';
import { Key, Globe, Info } from 'lucide-react';
import { AppConfig } from '../../types';
interface ApiSectionProps {
@@ -12,7 +12,7 @@ const ApiSection = ({ config, setConfig }: ApiSectionProps) => {
return (
<div className="space-y-4 pt-1">
<div className="flex items-center justify-between mb-2">
<h3 className="text-xs font-bold text-slate-400 uppercase tracking-wider">API Connection</h3>
<h3 className="text-xs font-bold text-slate-400 uppercase tracking-wider">Default API Connection</h3>
<label className="relative inline-flex items-center cursor-pointer">
<input
type="checkbox"
@@ -21,15 +21,23 @@ const ApiSection = ({ config, setConfig }: ApiSectionProps) => {
className="sr-only peer"
/>
<div className="w-11 h-6 bg-slate-200 peer-focus:outline-none rounded-full peer peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:start-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-5 after:w-5 after:transition-all peer-checked:bg-blue-600"></div>
</label>
</label>
</div>
{config.enableCustomApi && (
<div className="space-y-4 p-4 bg-slate-50 rounded-lg border border-slate-100 animate-in fade-in slide-in-from-top-1 duration-200">
<div className="flex items-start gap-2 p-3 bg-blue-50 rounded-lg border border-blue-100">
<Info size={16} className="text-blue-600 mt-0.5 flex-shrink-0" />
<div className="text-xs text-blue-800">
<p className="font-medium mb-1">Custom Model Configuration</p>
<p>Each custom model can now be configured with its own API key and base URL in the Custom Models section below. This default configuration is used for preset models.</p>
</div>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Key size={14} className="text-slate-400" />
Custom API Key
Default API Key
</label>
<input
type="password"
@@ -43,11 +51,11 @@ const ApiSection = ({ config, setConfig }: ApiSectionProps) => {
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Globe size={14} className="text-slate-400" />
Custom Base URL
Default Base URL
</label>
<input
type="text"
placeholder="https://generativelanguage.googleapis.com"
placeholder="https://api.example.com/v1"
value={config.customBaseUrl || ''}
onChange={(e) => setConfig({ ...config, customBaseUrl: e.target.value })}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"

View File

@@ -0,0 +1,223 @@
import React, { useState } from 'react';
import { Plus, Trash2, Bot, Key, Globe, ChevronDown, ChevronUp } from 'lucide-react';
import { AppConfig, ApiProvider, CustomModel } from '../../types';
interface ModelSectionProps {
config: AppConfig;
setConfig: (c: AppConfig) => void;
}
const ModelSection = ({ config, setConfig }: ModelSectionProps) => {
const [newModelName, setNewModelName] = useState('');
const [newModelProvider, setNewModelProvider] = useState<ApiProvider>('custom');
const [newModelApiKey, setNewModelApiKey] = useState('');
const [newModelBaseUrl, setNewModelBaseUrl] = useState('');
const [expandedModelId, setExpandedModelId] = useState<string | null>(null);
const customModels = config.customModels || [];
const handleAddModel = () => {
if (!newModelName.trim()) return;
const newModel: CustomModel = {
id: `custom-${Date.now()}`,
name: newModelName.trim(),
provider: newModelProvider,
apiKey: newModelApiKey || undefined,
baseUrl: newModelBaseUrl || undefined
};
setConfig({
...config,
customModels: [...customModels, newModel]
});
setNewModelName('');
setNewModelApiKey('');
setNewModelBaseUrl('');
};
const handleDeleteModel = (modelId: string) => {
setConfig({
...config,
customModels: customModels.filter(m => m.id !== modelId)
});
if (expandedModelId === modelId) {
setExpandedModelId(null);
}
};
const handleUpdateModel = (modelId: string, updates: Partial<CustomModel>) => {
setConfig({
...config,
customModels: customModels.map(m =>
m.id === modelId ? { ...m, ...updates } : m
)
});
};
return (
<div className="space-y-4 pt-1">
<h3 className="text-xs font-bold text-slate-400 uppercase tracking-wider">Custom Models</h3>
<div className="p-4 bg-slate-50 rounded-lg border border-slate-100 space-y-4">
<div className="space-y-3">
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Bot size={14} className="text-slate-400" />
Model Name
</label>
<input
type="text"
placeholder="e.g., llama-3-8b-instruct, qwen-72b-chat"
value={newModelName}
onChange={(e) => setNewModelName(e.target.value)}
onKeyDown={(e) => e.key === 'Enter' && handleAddModel()}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700">Provider</label>
<select
value={newModelProvider}
onChange={(e) => setNewModelProvider(e.target.value as ApiProvider)}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none"
>
<option value="custom">Custom (OpenAI-compatible)</option>
<option value="openai">OpenAI</option>
<option value="deepseek">DeepSeek</option>
<option value="anthropic">Anthropic</option>
<option value="xai">xAI</option>
<option value="mistral">Mistral</option>
<option value="google">Google</option>
</select>
</div>
{(newModelProvider === 'custom' || newModelProvider === 'openai' || newModelProvider === 'anthropic' || newModelProvider === 'xai' || newModelProvider === 'mistral') && (
<>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Key size={14} className="text-slate-400" />
API Key (optional)
</label>
<input
type="password"
placeholder="sk-..."
value={newModelApiKey}
onChange={(e) => setNewModelApiKey(e.target.value)}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Globe size={14} className="text-slate-400" />
Base URL (optional)
</label>
<input
type="text"
placeholder="https://api.example.com/v1"
value={newModelBaseUrl}
onChange={(e) => setNewModelBaseUrl(e.target.value)}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
</>
)}
<button
onClick={handleAddModel}
disabled={!newModelName.trim()}
className="w-full flex items-center justify-center gap-2 px-4 py-2 bg-blue-600 hover:bg-blue-700 disabled:bg-slate-300 disabled:cursor-not-allowed text-white text-sm font-medium rounded-lg transition-all shadow-sm"
>
<Plus size={16} />
Add Model
</button>
</div>
{customModels.length > 0 && (
<div className="border-t border-slate-200 pt-4">
<div className="text-xs font-medium text-slate-500 mb-3">
Added Models ({customModels.length})
</div>
<div className="space-y-2">
{customModels.map((model) => (
<div
key={model.id}
className="bg-white rounded-lg border border-slate-200 hover:border-slate-300 transition-colors"
>
<div
className="flex items-center justify-between p-3 cursor-pointer"
onClick={() => setExpandedModelId(expandedModelId === model.id ? null : model.id)}
>
<div className="flex-1 min-w-0">
<div className="text-sm font-medium text-slate-800 truncate">
{model.name}
</div>
<div className="text-xs text-slate-500 capitalize">
{model.provider} {model.apiKey && '• Configured'}
</div>
</div>
<div className="flex items-center gap-1">
{expandedModelId === model.id ? (
<ChevronUp size={16} className="text-slate-400" />
) : (
<ChevronDown size={16} className="text-slate-400" />
)}
<button
onClick={(e) => {
e.stopPropagation();
handleDeleteModel(model.id);
}}
className="p-1.5 text-slate-400 hover:text-red-600 hover:bg-red-50 rounded-lg transition-colors"
title="Remove model"
>
<Trash2 size={16} />
</button>
</div>
</div>
{expandedModelId === model.id && (
<div className="px-3 pb-3 pt-0 space-y-3 animate-in fade-in slide-in-from-top-2 duration-200">
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Key size={14} className="text-slate-400" />
API Key
</label>
<input
type="password"
placeholder="sk-..."
value={model.apiKey || ''}
onChange={(e) => handleUpdateModel(model.id, { apiKey: e.target.value || undefined })}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Globe size={14} className="text-slate-400" />
Base URL
</label>
<input
type="text"
placeholder="https://api.example.com/v1"
value={model.baseUrl || ''}
onChange={(e) => handleUpdateModel(model.id, { baseUrl: e.target.value || undefined })}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
</div>
)}
</div>
))}
</div>
</div>
)}
</div>
</div>
);
};
export default ModelSection;

View File

@@ -1,16 +1,90 @@
import { ModelOption, ThinkingLevel, AppConfig } from './types';
import { ModelOption, ThinkingLevel, AppConfig, ApiProvider } from './types';
export const MODELS: { value: ModelOption; label: string; desc: string }[] = [
export const MODELS: { value: ModelOption; label: string; desc: string; provider: ApiProvider }[] = [
{
value: 'gemini-3-flash-preview',
label: 'Gemini 3 Flash',
desc: 'Low latency, high throughput, dynamic thinking.'
desc: 'Low latency, high throughput, dynamic thinking.',
provider: 'google'
},
{
value: 'gemini-3-pro-preview',
label: 'Gemini 3 Pro',
desc: 'Deep reasoning, complex tasks, higher intelligence.'
desc: 'Deep reasoning, complex tasks, higher intelligence.',
provider: 'google'
},
{
value: 'gpt-4o',
label: 'GPT-4o',
desc: 'OpenAI flagship model with vision capabilities.',
provider: 'openai'
},
{
value: 'gpt-4o-mini',
label: 'GPT-4o Mini',
desc: 'Fast, affordable small model for focused tasks.',
provider: 'openai'
},
{
value: 'o1-preview',
label: 'O1 Preview',
desc: 'Advanced reasoning model with chain-of-thought.',
provider: 'openai'
},
{
value: 'o1-mini',
label: 'O1 Mini',
desc: 'Fast reasoning model for efficient problem solving.',
provider: 'openai'
},
{
value: 'deepseek-chat',
label: 'DeepSeek Chat',
desc: 'Advanced language model optimized for conversational AI.',
provider: 'deepseek'
},
{
value: 'deepseek-coder',
label: 'DeepSeek Coder',
desc: 'Specialized model for code generation and programming tasks.',
provider: 'deepseek'
},
{
value: 'claude-3-5-sonnet-20241022',
label: 'Claude 3.5 Sonnet',
desc: 'Anthropic\'s most capable model with excellent reasoning.',
provider: 'anthropic'
},
{
value: 'claude-3-haiku-20240307',
label: 'Claude 3 Haiku',
desc: 'Fast and efficient model for quick responses.',
provider: 'anthropic'
},
{
value: 'grok-2-1212',
label: 'Grok 2',
desc: 'xAI\'s advanced model with real-time knowledge.',
provider: 'xai'
},
{
value: 'mistral-large-2411',
label: 'Mistral Large',
desc: 'Mistral\'s flagship model for complex reasoning.',
provider: 'mistral'
},
{
value: 'mixtral-8x7b-32768',
label: 'Mixtral 8x7B',
desc: 'Mixture of Experts model with excellent performance.',
provider: 'mistral'
},
{
value: 'custom',
label: 'Custom Model',
desc: 'Use any OpenAI-compatible API (LM Studio, Ollama, LocalAI, etc.) by configuring a custom base URL.',
provider: 'custom'
},
];
@@ -27,24 +101,51 @@ export const DEFAULT_CONFIG: AppConfig = {
customApiKey: '',
customBaseUrl: '',
enableCustomApi: false,
enableRecursiveLoop: false
enableRecursiveLoop: false,
apiProvider: 'google',
customModels: []
};
export const getValidThinkingLevels = (model: ModelOption): ThinkingLevel[] => {
if (model === 'gemini-3-pro-preview') {
return ['low', 'high'];
}
if (model === 'o1-preview' || model === 'o1-mini') {
return ['low', 'medium', 'high'];
}
return ['minimal', 'low', 'medium', 'high'];
};
export const getThinkingBudget = (level: ThinkingLevel, model: ModelOption): number => {
const isPro = model === 'gemini-3-pro-preview';
const isGeminiPro = model === 'gemini-3-pro-preview';
const isOpenAIReasoning = model === 'o1-preview' || model === 'o1-mini';
switch (level) {
case 'minimal': return 0; // Disables thinking
case 'minimal': return 0;
case 'low': return 2048;
case 'medium': return 8192;
case 'high': return isPro ? 32768 : 16384;
case 'high':
if (isOpenAIReasoning) return 65536;
if (isGeminiPro) return 32768;
return 16384;
default: return 0;
}
};
export const getProvider = (model: ModelOption): ApiProvider => {
const modelInfo = MODELS.find(m => m.value === model);
return modelInfo?.provider || 'google';
};
export const getAllModels = (config: AppConfig): { value: ModelOption; label: string; desc: string; provider: ApiProvider }[] => {
const presetModels = MODELS.filter(m => m.value !== 'custom');
const customModels = (config.customModels || []).map(m => ({
value: m.name as ModelOption,
label: m.name,
desc: `Custom ${m.provider} model`,
provider: m.provider
}));
return [...presetModels, ...customModels];
};
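A brief sketch, assuming the exports above, of how a custom model flows through these helpers (values are placeholders):

// Illustrative only: custom models are appended after the preset list
// (the 'custom' placeholder entry itself is filtered out).
const cfg: AppConfig = {
  ...DEFAULT_CONFIG,
  customModels: [{ id: 'custom-1', name: 'qwen-72b-chat', provider: 'custom' }]
};
getAllModels(cfg).map(m => m.value);     // [...presets, 'qwen-72b-chat']
getProvider('gpt-4o');                   // 'openai', from MODELS metadata
getThinkingBudget('high', 'o1-preview'); // 65536 under the new mapping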

View File

@@ -1,6 +1,6 @@
import { useCallback } from 'react';
import { getAI } from '../api';
import { getAI, getAIProvider, findCustomModel } from '../api';
import { getThinkingBudget } from '../config';
import { AppConfig, ModelOption, ExpertResult, ChatMessage } from '../types';
@@ -99,9 +99,13 @@ export const useDeepThink = () => {
setProcessStartTime(Date.now());
setProcessEndTime(null);
const provider = getAIProvider(model);
const customModelConfig = findCustomModel(model, config.customModels);
const ai = getAI({
apiKey: config.enableCustomApi ? config.customApiKey : undefined,
baseUrl: (config.enableCustomApi && config.customBaseUrl) ? config.customBaseUrl : undefined
provider,
apiKey: customModelConfig?.apiKey || config.customApiKey,
baseUrl: customModelConfig?.baseUrl || config.customBaseUrl
});
try {

prisma/package-lock.json (generated, Normal file, +4254 lines)

File diff suppressed because it is too large.

View File

@@ -9,14 +9,15 @@
"preview": "vite preview"
},
"dependencies": {
"react-markdown": "^10.1.0",
"remark-math": "6.0.0",
"rehype-katex": "7.0.0",
"@google/genai": "^1.34.0",
"lucide-react": "^0.562.0",
"react-dom": "^19.2.3",
"openai": "^6.15.0",
"react": "^19.2.3",
"react-syntax-highlighter": "^16.1.0"
"react-dom": "^19.2.3",
"react-markdown": "^10.1.0",
"react-syntax-highlighter": "^16.1.0",
"rehype-katex": "7.0.0",
"remark-math": "6.0.0"
},
"devDependencies": {
"@types/node": "^22.14.0",

View File

@@ -1,6 +1,11 @@
import { ModelOption, ExpertResult } from '../../types';
import { getExpertSystemInstruction } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContentStream !== undefined;
};
export const streamExpertResponse = async (
ai: any,
@@ -11,44 +16,65 @@ export const streamExpertResponse = async (
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
): Promise<void> => {
// We wrap the stream initiation in retry.
// If the stream is successfully established but fails during iteration,
// we catch that separately.
const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
config: {
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
temperature: expert.temperature,
thinkingConfig: {
const isGoogle = isGoogleProvider(ai);
if (isGoogle) {
const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
config: {
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
temperature: expert.temperature,
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
}
}));
try {
for await (const chunk of (streamResult as any)) {
if (signal.aborted) break;
let chunkText = "";
let chunkThought = "";
if (chunk.candidates?.[0]?.content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
}
onChunk(chunkText, chunkThought);
}
}
} catch (streamError) {
console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
throw streamError;
}
}));
} else {
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
content: expert.prompt,
temperature: expert.temperature,
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
});
try {
for await (const chunk of streamResult) {
if (signal.aborted) break;
try {
for await (const chunk of (stream as any)) {
if (signal.aborted) break;
let chunkText = "";
let chunkThought = "";
if (chunk.candidates?.[0]?.content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
}
onChunk(chunkText, chunkThought);
}
onChunk(chunk.text, chunk.thought || '');
}
} catch (streamError) {
console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
throw streamError;
}
} catch (streamError) {
console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
// We don't retry mid-stream automatically here to avoid complex state management,
// but the initial connection is protected by withRetry.
throw streamError;
}
};

View File

@@ -1,8 +1,14 @@
import { Type } from "@google/genai";
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult } from '../../types';
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, ApiProvider } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContent as generateOpenAIContent } from './openaiClient';
import { getAIProvider } from '../../api';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContent !== undefined;
};
export const executeManagerAnalysis = async (
ai: any,
@@ -11,57 +17,86 @@ export const executeManagerAnalysis = async (
context: string,
budget: number
): Promise<AnalysisResult> => {
const managerSchema = {
type: Type.OBJECT,
properties: {
thought_process: { type: Type.STRING, description: "Brief explanation of why these supplementary experts were chosen." },
experts: {
type: Type.ARRAY,
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
},
required: ["thought_process", "experts"]
};
const isGoogle = isGoogleProvider(ai);
try {
const analysisResp = await withRetry(() => ai.models.generateContent({
model: model,
contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
config: {
systemInstruction: MANAGER_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: managerSchema,
thinkingConfig: {
if (isGoogle) {
const managerSchema = {
type: Type.OBJECT,
properties: {
thought_process: { type: Type.STRING, description: "Brief explanation of why these supplementary experts were chosen." },
experts: {
type: Type.ARRAY,
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
},
required: ["thought_process", "experts"]
};
try {
const analysisResp = await withRetry(() => ai.models.generateContent({
model: model,
contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
config: {
systemInstruction: MANAGER_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: managerSchema,
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
}
}));
const rawText = (analysisResp as any).text || '{}';
const cleanText = cleanJsonString(rawText);
const analysisJson = JSON.parse(cleanText) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
}
}));
const rawText = analysisResp.text || '{}';
const cleanText = cleanJsonString(rawText);
const analysisJson = JSON.parse(cleanText) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
return analysisJson;
} catch (e) {
console.error("Manager Analysis Error:", e);
return {
thought_process: "Direct processing fallback due to analysis error.",
experts: []
};
}
} else {
try {
const response = await generateOpenAIContent(ai, {
model,
systemInstruction: MANAGER_SYSTEM_PROMPT,
content: `Context:\n${context}\n\nCurrent Query: "${query}"\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`,
temperature: 0.7,
responseFormat: 'json_object',
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
});
const analysisJson = JSON.parse(response.text) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
}
return analysisJson;
} catch (e) {
console.error("Manager Analysis Error:", e);
return {
thought_process: "Direct processing fallback due to analysis error.",
experts: []
};
}
return analysisJson;
} catch (e) {
console.error("Manager Analysis Error:", e);
// Return a fallback so the process doesn't completely die if planning fails
return {
thought_process: "Direct processing fallback due to analysis error.",
experts: []
};
}
};
@@ -72,57 +107,78 @@ export const executeManagerReview = async (
currentExperts: ExpertResult[],
budget: number
): Promise<ReviewResult> => {
const reviewSchema = {
type: Type.OBJECT,
properties: {
satisfied: { type: Type.BOOLEAN, description: "True if the experts have fully answered the query with high quality." },
critique: { type: Type.STRING, description: "If not satisfied, explain why and what is missing." },
next_round_strategy: { type: Type.STRING, description: "Plan for the next iteration." },
refined_experts: {
type: Type.ARRAY,
description: "The list of experts for the next round. Can be the same roles or new ones.",
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
},
required: ["satisfied", "critique"]
};
const isGoogle = isGoogleProvider(ai);
const expertOutputs = currentExperts.map(e =>
`--- [Round ${e.round}] Expert: ${e.role} ---\nOutput: ${e.content?.slice(0, 2000)}...`
).join('\n\n');
const content = `User Query: "${query}"\n\nCurrent Expert Outputs:\n${expertOutputs}`;
try {
const resp = await withRetry(() => ai.models.generateContent({
model: model,
contents: content,
config: {
systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: reviewSchema,
thinkingConfig: {
if (isGoogle) {
const reviewSchema = {
type: Type.OBJECT,
properties: {
satisfied: { type: Type.BOOLEAN, description: "True if the experts have fully answered the query with high quality." },
critique: { type: Type.STRING, description: "If not satisfied, explain why and what is missing." },
next_round_strategy: { type: Type.STRING, description: "Plan for the next iteration." },
refined_experts: {
type: Type.ARRAY,
description: "The list of experts for the next round. Can be the same roles or new ones.",
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
},
required: ["satisfied", "critique"]
};
try {
const resp = await withRetry(() => ai.models.generateContent({
model: model,
contents: content,
config: {
systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: reviewSchema,
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
}
}));
}
}));
const rawText = resp.text || '{}';
const cleanText = cleanJsonString(rawText);
return JSON.parse(cleanText) as ReviewResult;
} catch (e) {
console.error("Review Error:", e);
// Fallback: Assume satisfied if JSON or API fails to avoid infinite loops
return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
const rawText = (resp as any).text || '{}';
const cleanText = cleanJsonString(rawText);
return JSON.parse(cleanText) as ReviewResult;
} catch (e) {
console.error("Review Error:", e);
return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
}
} else {
try {
const response = await generateOpenAIContent(ai, {
model,
systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
content: `${content}\n\nReturn a JSON response with this structure:\n{\n "satisfied": boolean,\n "critique": "...",\n "next_round_strategy": "...",\n "refined_experts": [...]\n}`,
temperature: 0.7,
responseFormat: 'json_object',
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
});
return JSON.parse(response.text) as ReviewResult;
} catch (e) {
console.error("Review Error:", e);
return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
}
}
};
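The isGoogleProvider check used here (and in the expert and synthesis modules) is structural rather than flag-based; a small sketch of the assumption it relies on:

import { GoogleGenAI } from "@google/genai";
import OpenAI from "openai";

// Duck typing: a GoogleGenAI instance exposes ai.models.generateContent,
// while the OpenAI SDK exposes ai.chat.completions.create instead.
const isGoogleProvider = (ai: any): boolean =>
  ai?.models?.generateContent !== undefined;

isGoogleProvider(new GoogleGenAI({ apiKey: "test" }));                          // true
isGoogleProvider(new OpenAI({ apiKey: "test", dangerouslyAllowBrowser: true })); // false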

View File

@@ -0,0 +1,157 @@
import OpenAI from "openai";
import { ModelOption } from '../../types';
import { withRetry } from '../utils/retry';
export interface OpenAIStreamChunk {
text: string;
thought?: string;
}
export interface OpenAIConfig {
model: ModelOption;
systemInstruction?: string;
content: string;
temperature?: number;
responseFormat?: 'text' | 'json_object';
thinkingConfig?: {
includeThoughts: boolean;
thinkingBudget: number;
};
}
const parseThinkingTokens = (text: string): { thought: string; text: string } => {
const thinkPattern = /<thinking>([\s\S]*?)<\/thinking>/g;
let thought = '';
let cleanText = text;
const matches = text.matchAll(thinkPattern);
for (const match of matches) {
thought += match[1];
}
cleanText = text.replace(thinkPattern, '');
return { thought: thought.trim(), text: cleanText.trim() };
};
export const generateContent = async (
ai: OpenAI,
config: OpenAIConfig
): Promise<{ text: string; thought?: string }> => {
const messages: Array<OpenAI.Chat.ChatCompletionMessageParam> = [];
if (config.systemInstruction) {
messages.push({
role: 'system',
content: config.systemInstruction
});
}
messages.push({
role: 'user',
content: config.content
});
const requestOptions: any = {
model: config.model,
messages,
temperature: config.temperature,
};
if (config.responseFormat === 'json_object') {
requestOptions.response_format = { type: 'json_object' };
}
try {
const response = await withRetry(() => ai.chat.completions.create(requestOptions));
const content = response.choices[0]?.message?.content || '';
if (config.thinkingConfig?.includeThoughts) {
const { thought, text } = parseThinkingTokens(content);
return { text, thought };
}
return { text: content };
} catch (error) {
console.error('OpenAI generateContent error:', error);
throw error;
}
};
export async function* generateContentStream(
ai: OpenAI,
config: OpenAIConfig
): AsyncGenerator<OpenAIStreamChunk, void, unknown> {
const messages: Array<OpenAI.Chat.ChatCompletionMessageParam> = [];
if (config.systemInstruction) {
messages.push({
role: 'system',
content: config.systemInstruction
});
}
messages.push({
role: 'user',
content: config.content
});
const requestOptions: any = {
model: config.model,
messages,
temperature: config.temperature,
stream: true,
};
const stream = await withRetry(() => ai.chat.completions.create(requestOptions) as any);
let accumulatedText = '';
let inThinking = false;
let currentThought = '';
for await (const chunk of (stream as any)) {
const delta = chunk.choices[0]?.delta?.content || '';
if (!delta) continue;
accumulatedText += delta;
if (config.thinkingConfig?.includeThoughts) {
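// Note: a delta containing '<thinking>' is skipped wholesale, so any text
// sharing that delta (before or after the tag) is dropped; this assumes
// the tags arrive in their own chunks.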
if (delta.includes('<thinking>')) {
inThinking = true;
continue;
}
if (inThinking) {
if (delta.includes('</thinking>')) {
inThinking = false;
const parts = delta.split('</thinking>', 2);
currentThought += parts[0];
if (currentThought.trim()) {
yield { text: '', thought: currentThought };
currentThought = '';
}
if (parts[1]) {
yield { text: parts[1], thought: '' };
}
} else {
currentThought += delta;
if (currentThought.length > 100) {
yield { text: '', thought: currentThought };
currentThought = '';
}
}
} else {
yield { text: delta, thought: '' };
}
} else {
yield { text: delta, thought: '' };
}
}
if (currentThought.trim()) {
yield { text: '', thought: currentThought };
}
}
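A consumption sketch for the streaming wrapper above. The <thinking> tags are an application-level convention: the model must be prompted to emit them, since the OpenAI Chat Completions API does not produce them natively. The import path and model name are assumptions:

import OpenAI from "openai";
import { generateContentStream } from "./openaiClient"; // assumed path

async function demo() {
  const ai = new OpenAI({ apiKey: "sk-placeholder", dangerouslyAllowBrowser: true });
  for await (const chunk of generateContentStream(ai, {
    model: "gpt-4o-mini",
    content: "Say hi",
    temperature: 0.7,
    thinkingConfig: { includeThoughts: true, thinkingBudget: 2048 }
  })) {
    if (chunk.thought) console.log("[thought]", chunk.thought);
    if (chunk.text) console.log(chunk.text);
  }
}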

View File

@@ -1,6 +1,11 @@
import { ModelOption, ExpertResult } from '../../types';
import { getSynthesisPrompt } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContentStream !== undefined;
};
export const streamSynthesisResponse = async (
ai: any,
@@ -13,38 +18,63 @@ export const streamSynthesisResponse = async (
onChunk: (text: string, thought: string) => void
): Promise<void> => {
const prompt = getSynthesisPrompt(historyContext, query, expertResults);
const isGoogle = isGoogleProvider(ai);
const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
config: {
thinkingConfig: {
if (isGoogle) {
const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
config: {
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
}
}
}));
}));
try {
for await (const chunk of synthesisStream) {
if (signal.aborted) break;
try {
for await (const chunk of (synthesisStream as any)) {
if (signal.aborted) break;
let chunkText = "";
let chunkThought = "";
let chunkText = "";
let chunkThought = "";
if (chunk.candidates?.[0]?.content?.parts) {
if (chunk.candidates?.[0]?.content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
}
onChunk(chunkText, chunkThought);
}
}
} catch (streamError) {
console.error("Synthesis stream interrupted:", streamError);
throw streamError;
}
} else {
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: undefined,
content: prompt,
temperature: 0.7,
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
});
try {
for await (const chunk of (stream as any)) {
if (signal.aborted) break;
onChunk(chunk.text, chunk.thought || '');
}
} catch (streamError) {
console.error("Synthesis stream interrupted:", streamError);
throw streamError;
}
} catch (streamError) {
console.error("Synthesis stream interrupted:", streamError);
throw streamError;
}
};

View File

@@ -1,5 +1,14 @@
export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview';
export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview' | 'gpt-4.1' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1-mini' | 'deepseek-chat' | 'deepseek-coder' | 'custom' | string;
export type ThinkingLevel = 'minimal' | 'low' | 'medium' | 'high';
export type ApiProvider = 'google' | 'openai' | 'deepseek' | 'anthropic' | 'xai' | 'mistral' | 'custom';
export type CustomModel = {
id: string;
name: string;
provider: ApiProvider;
apiKey?: string;
baseUrl?: string;
};
export type ExpertConfig = {
id: string;
@@ -40,7 +49,9 @@ export type AppConfig = {
customApiKey?: string;
customBaseUrl?: string;
enableCustomApi?: boolean;
enableRecursiveLoop?: boolean; // New toggle for loop mode
enableRecursiveLoop?: boolean;
apiProvider?: ApiProvider;
customModels?: CustomModel[];
};
export type ChatMessage = {
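
For orientation, a hedged example of a CustomModel entry as it would be stored in AppConfig.customModels (the endpoint is a typical local OpenAI-compatible server; all values are placeholders):

const localModel: CustomModel = {
  id: `custom-${Date.now()}`,            // same id scheme as ModelSection uses
  name: 'llama-3-8b-instruct',
  provider: 'custom',                    // routed through the OpenAI SDK
  apiKey: undefined,                     // optional for local servers
  baseUrl: 'http://localhost:11434/v1'   // e.g. Ollama's OpenAI-compatible API
};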

prisma/vite-env.d.ts (vendored, Normal file, +9 lines)
View File

@@ -0,0 +1,9 @@
/// <reference types="vite/client" />
interface ImportMetaEnv {
readonly VITE_API_KEY?: string
}
interface ImportMeta {
readonly env: ImportMetaEnv
}