Merge pull request #1 from jwangkun/main

Add custom model support and OpenAI compatibility
This commit is contained in:
从何开始123
2026-01-08 22:51:55 +08:00
committed by GitHub
24 changed files with 5408 additions and 240 deletions

BIN
.DS_Store vendored

Binary file not shown.

6
package-lock.json generated Normal file
View File

@@ -0,0 +1,6 @@
{
"name": "Prisma",
"lockfileVersion": 3,
"requires": true,
"packages": {}
}

11
prisma/.env Normal file
View File

@@ -0,0 +1,11 @@
# API Keys Configuration
# Copy this file to .env.local and add your actual API keys
# Primary API Key (used by default)
# For Google Gemini: https://ai.google.dev/
# For OpenAI: https://platform.openai.com/
VITE_API_KEY=your_api_key_here
# Alternative: Use provider-specific keys (optional)
# GEMINI_API_KEY=your_gemini_key_here
# OPENAI_API_KEY=your_openai_key_here

11
prisma/.env.example Normal file
View File

@@ -0,0 +1,11 @@
# API Keys Configuration
# Copy this file to .env.local and add your actual API keys
# Primary API Key (used by default)
# For Google Gemini: https://ai.google.dev/
# For OpenAI: https://platform.openai.com/
VITE_API_KEY=your_api_key_here
# Alternative: Use provider-specific keys (optional)
# GEMINI_API_KEY=your_gemini_key_here
# OPENAI_API_KEY=your_openai_key_here

2
prisma/.gitignore vendored
View File

@@ -22,3 +22,5 @@ dist-ssr
*.njsproj
*.sln
*.sw?
# NOTE: paths in a .gitignore are relative to the directory containing it,
# so entries inside prisma/.gitignore must not repeat the "prisma/" prefix.
.env
.env.example

View File

@@ -48,12 +48,13 @@ const App = () => {
model={selectedModel}
/>
<Header
<Header
selectedModel={selectedModel}
setSelectedModel={setSelectedModel}
onOpenSettings={() => setIsSettingsOpen(true)}
onToggleSidebar={() => setIsSidebarOpen(!isSidebarOpen)}
onNewChat={handleNewChat}
config={config}
/>
<div className="flex flex-1 overflow-hidden relative">

View File

@@ -3,6 +3,7 @@ import React from 'react';
import { Settings, X } from 'lucide-react';
import { AppConfig, ModelOption } from './types';
import ApiSection from './components/settings/ApiSection';
import ModelSection from './components/settings/ModelSection';
import ThinkingSection from './components/settings/ThinkingSection';
import GithubSection from './components/settings/GithubSection';
@@ -40,12 +41,13 @@ const SettingsModal = ({
{/* Body */}
<div className="p-6 space-y-6 overflow-y-auto custom-scrollbar">
<ModelSection config={config} setConfig={setConfig} />
<ApiSection config={config} setConfig={setConfig} />
<ThinkingSection
config={config}
setConfig={setConfig}
model={model}
<ThinkingSection
config={config}
setConfig={setConfig}
model={model}
/>
<GithubSection isOpen={isOpen} />

View File

@@ -1,13 +1,138 @@
import { GoogleGenAI } from "@google/genai";
import OpenAI from "openai";
import { ApiProvider, CustomModel } from './types';
export const getAI = (config?: { apiKey?: string; baseUrl?: string }) => {
const options: any = {
apiKey: config?.apiKey || process.env.API_KEY,
// Options accepted by getAI(). All fields optional: provider defaults to
// 'google', apiKey falls back to VITE_API_KEY / process.env.API_KEY, and
// baseUrl overrides the provider's default endpoint.
type AIProviderConfig = {
provider?: ApiProvider;
apiKey?: string;
baseUrl?: string;
};
/**
 * Look up a user-defined model by its (unique) display name.
 * Returns the first matching entry, or undefined when the list is
 * missing/empty or no entry matches.
 */
export const findCustomModel = (modelName: string, customModels?: CustomModel[]): CustomModel | undefined => {
  if (!customModels) {
    return undefined;
  }
  for (const candidate of customModels) {
    if (candidate.name === modelName) {
      return candidate;
    }
  }
  return undefined;
};
// External API base URLs for production
// Keyed by ApiProvider. getAI() uses these when no explicit baseUrl is
// configured; in development the same provider key doubles as the local
// proxy path prefix (see getAI below).
const PROVIDER_BASE_URLS: Record<string, string> = {
openai: 'https://api.openai.com/v1',
deepseek: 'https://api.deepseek.com/v1',
anthropic: 'https://api.anthropic.com/v1',
xai: 'https://api.x.ai/v1',
mistral: 'https://api.mistral.ai/v1',
// 'custom' has no fixed endpoint — the user supplies a baseUrl in settings.
custom: '',
};
// Check if we're in development mode
const isDevelopment = import.meta.env?.MODE === 'development' || process.env.NODE_ENV === 'development';
// Store the current custom API target URL
let currentCustomApiUrl: string | null = null;
// Setup fetch interceptor to add X-Target-URL header for custom API proxy
const originalFetch = typeof window !== 'undefined' ? window.fetch.bind(window) : null;
if (typeof window !== 'undefined' && originalFetch) {
window.fetch = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
let urlString: string;
if (typeof input === 'string') {
urlString = input;
} else if (input instanceof URL) {
urlString = input.toString();
} else {
urlString = input.url;
}
// If this is a custom-api request and we have a target URL, add the header
if (urlString.includes('/custom-api') && currentCustomApiUrl) {
const headers = new Headers(init?.headers);
headers.set('X-Target-URL', currentCustomApiUrl);
console.log('[Fetch] Adding X-Target-URL header:', currentCustomApiUrl);
return originalFetch(input, {
...init,
headers,
});
}
return originalFetch(input, init);
};
if (config?.baseUrl) {
options.baseUrl = config.baseUrl;
}
}
return new GoogleGenAI(options);
};
/**
 * Construct an AI SDK client for the requested provider.
 *
 * OpenAI-compatible providers (openai, deepseek, custom, anthropic, xai,
 * mistral) get an `OpenAI` client; any other provider (default 'google')
 * gets a `GoogleGenAI` client.
 *
 * API key resolution order: explicit config.apiKey → VITE_API_KEY →
 * process.env.API_KEY.
 *
 * Base URL resolution for the OpenAI branch:
 *  - explicit config.baseUrl: in development it is stashed in
 *    `currentCustomApiUrl` (read by the fetch interceptor above) and the
 *    client is pointed at the local `/custom-api` proxy; in production the
 *    URL is used directly.
 *  - otherwise the provider's entry in PROVIDER_BASE_URLS (proxied as
 *    `/<provider>/v1` in development to avoid CORS).
 *
 * NOTE(review): anthropic is routed through the OpenAI SDK here — confirm
 * the target endpoint actually speaks the OpenAI wire format.
 */
export const getAI = (config?: AIProviderConfig) => {
const provider = config?.provider || 'google';
const apiKey = config?.apiKey || (import.meta.env as any).VITE_API_KEY || process.env.API_KEY;
if (provider === 'openai' || provider === 'deepseek' || provider === 'custom' || provider === 'anthropic' || provider === 'xai' || provider === 'mistral') {
const options: any = {
apiKey: apiKey,
// Keys are intentionally used client-side in this app.
dangerouslyAllowBrowser: true,
};
if (config?.baseUrl) {
// Custom baseUrl from Configuration UI
if (isDevelopment) {
// Store the target URL for the fetch interceptor
currentCustomApiUrl = config.baseUrl;
// Use proxy path
options.baseURL = `${window.location.origin}/custom-api`;
console.log('[API] Using custom API proxy:', {
proxyPath: options.baseURL,
targetUrl: currentCustomApiUrl,
});
} else {
// In production, use the URL directly
options.baseURL = config.baseUrl;
}
} else {
const providerBaseUrl = PROVIDER_BASE_URLS[provider];
if (providerBaseUrl) {
if (isDevelopment) {
// In development, use proxy to avoid CORS for known providers
options.baseURL = `${window.location.origin}/${provider}/v1`;
} else {
options.baseURL = providerBaseUrl;
}
}
}
console.log('[API] OpenAI client config:', {
provider,
baseURL: options.baseURL,
hasApiKey: !!options.apiKey,
customTarget: currentCustomApiUrl,
});
return new OpenAI(options);
} else {
// Google GenAI path (default provider).
const options: any = {
apiKey: apiKey,
};
if (config?.baseUrl) {
// GoogleGenAI expects `baseUrl` (lowercase), unlike OpenAI's `baseURL`.
options.baseUrl = config.baseUrl;
}
return new GoogleGenAI(options);
}
};
/**
 * Infer the API provider from a model identifier.
 * Known prefixes map to their vendor; the literal 'custom' selects the
 * user-configured OpenAI-compatible endpoint; everything else is Google.
 */
export const getAIProvider = (model: string): ApiProvider => {
  if (model === 'custom') {
    return 'custom';
  }
  const prefixToProvider: Array<[string, ApiProvider]> = [
    ['gpt-', 'openai'],
    ['o1-', 'openai'],
    ['deepseek-', 'deepseek'],
    ['claude-', 'anthropic'],
    ['grok-', 'xai'],
    ['mistral-', 'mistral'],
    ['mixtral-', 'mistral'],
  ];
  for (const [prefix, provider] of prefixToProvider) {
    if (model.startsWith(prefix)) {
      return provider;
    }
  }
  return 'google';
};

View File

@@ -1,8 +1,8 @@
import React from 'react';
import { Settings, ChevronDown, Menu } from 'lucide-react';
import { MODELS } from '../config';
import { ModelOption } from '../types';
import { getAllModels } from '../config';
import { ModelOption, AppConfig } from '../types';
import Logo from './Logo';
interface HeaderProps {
@@ -11,22 +11,25 @@ interface HeaderProps {
onOpenSettings: () => void;
onToggleSidebar: () => void;
onNewChat: () => void;
config: AppConfig;
}
const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSidebar, onNewChat }: HeaderProps) => {
const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSidebar, onNewChat, config }: HeaderProps) => {
const availableModels = getAllModels(config);
return (
<header className="sticky top-0 z-50 bg-white/80 backdrop-blur-md">
<div className="w-full px-4 h-16 flex items-center justify-between">
<div className="flex items-center gap-4">
<button
<button
onClick={onToggleSidebar}
className="p-2 -ml-2 text-slate-500 hover:bg-slate-100 rounded-lg transition-colors"
title="Toggle History"
>
<Menu size={20} />
</button>
<div
<div
className="flex items-center gap-3 cursor-pointer group"
onClick={onNewChat}
title="Start New Chat"
@@ -37,22 +40,22 @@ const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSideb
</h1>
</div>
</div>
<div className="flex items-center gap-2 sm:gap-3">
<div className="relative group">
<select
<select
value={selectedModel}
onChange={(e) => setSelectedModel(e.target.value as ModelOption)}
className="relative bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-auto p-2.5 outline-none appearance-none cursor-pointer pl-3 pr-8 shadow-sm font-medium hover:bg-slate-50 transition-colors"
>
{MODELS.map(m => (
<option key={m.value} value={m.value}>{m.label}</option>
{availableModels.map(m => (
<option key={`${m.provider}-${m.value}`} value={m.value}>{m.label}</option>
))}
</select>
<ChevronDown className="absolute right-3 top-3 text-slate-400 pointer-events-none group-hover:text-slate-600 transition-colors" size={14} />
</div>
<button
<button
onClick={onOpenSettings}
className="p-2.5 rounded-lg bg-white border border-slate-200 hover:bg-slate-50 hover:border-slate-300 transition-colors text-slate-500 hover:text-slate-900 shadow-sm"
title="Configuration"

View File

@@ -1,6 +1,6 @@
import React from 'react';
import { Key, Globe } from 'lucide-react';
import { Key, Globe, Info } from 'lucide-react';
import { AppConfig } from '../../types';
interface ApiSectionProps {
@@ -12,26 +12,34 @@ const ApiSection = ({ config, setConfig }: ApiSectionProps) => {
return (
<div className="space-y-4 pt-1">
<div className="flex items-center justify-between mb-2">
<h3 className="text-xs font-bold text-slate-400 uppercase tracking-wider">API Connection</h3>
<h3 className="text-xs font-bold text-slate-400 uppercase tracking-wider">Default API Connection</h3>
<label className="relative inline-flex items-center cursor-pointer">
<input
type="checkbox"
checked={config.enableCustomApi ?? false}
onChange={(e) => setConfig({ ...config, enableCustomApi: e.target.checked })}
className="sr-only peer"
<input
type="checkbox"
checked={config.enableCustomApi ?? false}
onChange={(e) => setConfig({ ...config, enableCustomApi: e.target.checked })}
className="sr-only peer"
/>
<div className="w-11 h-6 bg-slate-200 peer-focus:outline-none rounded-full peer peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:start-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-5 after:w-5 after:transition-all peer-checked:bg-blue-600"></div>
</label>
</label>
</div>
{config.enableCustomApi && (
<div className="space-y-4 p-4 bg-slate-50 rounded-lg border border-slate-100 animate-in fade-in slide-in-from-top-1 duration-200">
<div className="flex items-start gap-2 p-3 bg-blue-50 rounded-lg border border-blue-100">
<Info size={16} className="text-blue-600 mt-0.5 flex-shrink-0" />
<div className="text-xs text-blue-800">
<p className="font-medium mb-1">Custom Model Configuration</p>
<p>Each custom model can now be configured with its own API key and base URL in the Custom Models section below. This default configuration is used for preset models.</p>
</div>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Key size={14} className="text-slate-400" />
Custom API Key
Default API Key
</label>
<input
<input
type="password"
placeholder="sk-..."
value={config.customApiKey || ''}
@@ -43,11 +51,11 @@ const ApiSection = ({ config, setConfig }: ApiSectionProps) => {
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Globe size={14} className="text-slate-400" />
Custom Base URL
Default Base URL
</label>
<input
<input
type="text"
placeholder="https://generativelanguage.googleapis.com"
placeholder="https://api.example.com/v1"
value={config.customBaseUrl || ''}
onChange={(e) => setConfig({ ...config, customBaseUrl: e.target.value })}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"

View File

@@ -0,0 +1,240 @@
import React, { useState } from 'react';
import { Plus, Trash2, Bot, Key, Globe, ChevronDown, ChevronUp } from 'lucide-react';
import { AppConfig, ApiProvider, CustomModel } from '../../types';
import { MODELS } from '../../config';
// Props for ModelSection: the current app config plus a setter that
// replaces the whole config object (state lives in the parent).
interface ModelSectionProps {
config: AppConfig;
setConfig: (c: AppConfig) => void;
}
/**
 * Settings section for user-defined ("custom") models.
 *
 * Renders an add-model form (name, provider, optional per-model API key and
 * base URL) and a list of already-added models, each expandable in place to
 * edit its key/URL. All persistence happens through setConfig, which
 * replaces config.customModels immutably.
 */
const ModelSection = ({ config, setConfig }: ModelSectionProps) => {
// Draft state for the add-model form.
const [newModelName, setNewModelName] = useState('');
const [newModelProvider, setNewModelProvider] = useState<ApiProvider>('custom');
const [newModelApiKey, setNewModelApiKey] = useState('');
const [newModelBaseUrl, setNewModelBaseUrl] = useState('');
// id of the saved-model row currently expanded for inline editing (null = none).
const [expandedModelId, setExpandedModelId] = useState<string | null>(null);
const customModels = config.customModels || [];
// Validate the draft name against presets and existing customs, then append
// a new CustomModel and clear the form. Rejections are reported via alert().
const handleAddModel = () => {
if (!newModelName.trim()) return;
const trimmedName = newModelName.trim();
// Check if model name already exists in preset models
const existingPresetModel = MODELS.find(m => m.value === trimmedName);
if (existingPresetModel) {
alert(`Model name "${trimmedName}" already exists as a preset model. Please choose a different name.`);
return;
}
// Check if model name already exists in custom models
const existingCustomModel = customModels.find(m => m.name === trimmedName);
if (existingCustomModel) {
alert(`Model name "${trimmedName}" already exists. Please choose a different name.`);
return;
}
const newModel: CustomModel = {
// Millisecond timestamp id — unique enough for interactive adds.
id: `custom-${Date.now()}`,
name: trimmedName,
provider: newModelProvider,
// Empty strings are normalized to undefined so getAI falls back to defaults.
apiKey: newModelApiKey || undefined,
baseUrl: newModelBaseUrl || undefined
};
setConfig({
...config,
customModels: [...customModels, newModel]
});
setNewModelName('');
setNewModelApiKey('');
setNewModelBaseUrl('');
};
// Remove a model by id, collapsing its row first if it was expanded.
const handleDeleteModel = (modelId: string) => {
setConfig({
...config,
customModels: customModels.filter(m => m.id !== modelId)
});
if (expandedModelId === modelId) {
setExpandedModelId(null);
}
};
// Shallow-merge field updates into the matching model entry.
const handleUpdateModel = (modelId: string, updates: Partial<CustomModel>) => {
setConfig({
...config,
customModels: customModels.map(m =>
m.id === modelId ? { ...m, ...updates } : m
)
});
};
return (
<div className="space-y-4 pt-1">
<h3 className="text-xs font-bold text-slate-400 uppercase tracking-wider">Custom Models</h3>
<div className="p-4 bg-slate-50 rounded-lg border border-slate-100 space-y-4">
<div className="space-y-3">
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Bot size={14} className="text-slate-400" />
Model Name
</label>
<input
type="text"
placeholder="e.g., llama-3-8b-instruct, qwen-72b-chat"
value={newModelName}
onChange={(e) => setNewModelName(e.target.value)}
onKeyDown={(e) => e.key === 'Enter' && handleAddModel()}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700">Provider</label>
<select
value={newModelProvider}
onChange={(e) => setNewModelProvider(e.target.value as ApiProvider)}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none"
>
<option value="custom">Custom (OpenAI-compatible)</option>
<option value="openai">OpenAI</option>
<option value="deepseek">DeepSeek</option>
<option value="anthropic">Anthropic</option>
<option value="xai">xAI</option>
<option value="mistral">Mistral</option>
<option value="google">Google</option>
</select>
</div>
{(newModelProvider === 'custom' || newModelProvider === 'openai' || newModelProvider === 'anthropic' || newModelProvider === 'xai' || newModelProvider === 'mistral') && (
<>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Key size={14} className="text-slate-400" />
API Key (optional)
</label>
<input
type="password"
placeholder="sk-..."
value={newModelApiKey}
onChange={(e) => setNewModelApiKey(e.target.value)}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Globe size={14} className="text-slate-400" />
Base URL (optional)
</label>
<input
type="text"
placeholder="https://api.example.com/v1"
value={newModelBaseUrl}
onChange={(e) => setNewModelBaseUrl(e.target.value)}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
</>
)}
<button
onClick={handleAddModel}
disabled={!newModelName.trim()}
className="w-full flex items-center justify-center gap-2 px-4 py-2 bg-blue-600 hover:bg-blue-700 disabled:bg-slate-300 disabled:cursor-not-allowed text-white text-sm font-medium rounded-lg transition-all shadow-sm"
>
<Plus size={16} />
Add Model
</button>
</div>
{customModels.length > 0 && (
<div className="border-t border-slate-200 pt-4">
<div className="text-xs font-medium text-slate-500 mb-3">
Added Models ({customModels.length})
</div>
<div className="space-y-2">
{customModels.map((model) => (
<div
key={model.id}
className="bg-white rounded-lg border border-slate-200 hover:border-slate-300 transition-colors"
>
<div
className="flex items-center justify-between p-3 cursor-pointer"
onClick={() => setExpandedModelId(expandedModelId === model.id ? null : model.id)}
>
<div className="flex-1 min-w-0">
<div className="text-sm font-medium text-slate-800 truncate">
{model.name}
</div>
<div className="text-xs text-slate-500 capitalize">
{model.provider} {model.apiKey && '• Configured'}
</div>
</div>
<div className="flex items-center gap-1">
{expandedModelId === model.id ? (
<ChevronUp size={16} className="text-slate-400" />
) : (
<ChevronDown size={16} className="text-slate-400" />
)}
<button
onClick={(e) => {
e.stopPropagation();
handleDeleteModel(model.id);
}}
className="p-1.5 text-slate-400 hover:text-red-600 hover:bg-red-50 rounded-lg transition-colors"
title="Remove model"
>
<Trash2 size={16} />
</button>
</div>
</div>
{expandedModelId === model.id && (
<div className="px-3 pb-3 pt-0 space-y-3 animate-in fade-in slide-in-from-top-2 duration-200">
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Key size={14} className="text-slate-400" />
API Key
</label>
<input
type="password"
placeholder="sk-..."
value={model.apiKey || ''}
onChange={(e) => handleUpdateModel(model.id, { apiKey: e.target.value || undefined })}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
<div className="space-y-2">
<label className="text-sm font-medium text-slate-700 flex items-center gap-2">
<Globe size={14} className="text-slate-400" />
Base URL
</label>
<input
type="text"
placeholder="https://api.example.com/v1"
value={model.baseUrl || ''}
onChange={(e) => handleUpdateModel(model.id, { baseUrl: e.target.value || undefined })}
className="w-full bg-white border border-slate-200 text-slate-800 text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block p-2.5 outline-none placeholder:text-slate-400"
/>
</div>
</div>
)}
</div>
))}
</div>
</div>
)}
</div>
</div>
);
};
export default ModelSection;

View File

@@ -1,16 +1,90 @@
import { ModelOption, ThinkingLevel, AppConfig } from './types';
import { ModelOption, ThinkingLevel, AppConfig, ApiProvider } from './types';
export const MODELS: { value: ModelOption; label: string; desc: string }[] = [
{
value: 'gemini-3-flash-preview',
label: 'Gemini 3 Flash',
desc: 'Low latency, high throughput, dynamic thinking.'
export const MODELS: { value: ModelOption; label: string; desc: string; provider: ApiProvider }[] = [
{
value: 'gemini-3-flash-preview',
label: 'Gemini 3 Flash',
desc: 'Low latency, high throughput, dynamic thinking.',
provider: 'google'
},
{
value: 'gemini-3-pro-preview',
label: 'Gemini 3 Pro',
desc: 'Deep reasoning, complex tasks, higher intelligence.'
{
value: 'gemini-3-pro-preview',
label: 'Gemini 3 Pro',
desc: 'Deep reasoning, complex tasks, higher intelligence.',
provider: 'google'
},
{
value: 'gpt-4o',
label: 'GPT-4o',
desc: 'OpenAI flagship model with vision capabilities.',
provider: 'openai'
},
{
value: 'gpt-4o-mini',
label: 'GPT-4o Mini',
desc: 'Fast, affordable small model for focused tasks.',
provider: 'openai'
},
{
value: 'o1-preview',
label: 'O1 Preview',
desc: 'Advanced reasoning model with chain-of-thought.',
provider: 'openai'
},
{
value: 'o1-mini',
label: 'O1 Mini',
desc: 'Fast reasoning model for efficient problem solving.',
provider: 'openai'
},
{
value: 'deepseek-chat',
label: 'DeepSeek Chat',
desc: 'Advanced language model optimized for conversational AI.',
provider: 'deepseek'
},
{
value: 'deepseek-coder',
label: 'DeepSeek Coder',
desc: 'Specialized model for code generation and programming tasks.',
provider: 'deepseek'
},
{
value: 'claude-3-5-sonnet-20241022',
label: 'Claude 3.5 Sonnet',
desc: 'Anthropic\'s most capable model with excellent reasoning.',
provider: 'anthropic'
},
{
value: 'claude-3-haiku-20240307',
label: 'Claude 3 Haiku',
desc: 'Fast and efficient model for quick responses.',
provider: 'anthropic'
},
{
value: 'grok-2-1212',
label: 'Grok 2',
desc: 'xAI\'s advanced model with real-time knowledge.',
provider: 'xai'
},
{
value: 'mistral-large-2411',
label: 'Mistral Large',
desc: 'Mistral\'s flagship model for complex reasoning.',
provider: 'mistral'
},
{
value: 'mixtral-8x7b-32768',
label: 'Mixtral 8x7B',
desc: 'Mixture of Experts model with excellent performance.',
provider: 'mistral'
},
{
value: 'custom',
label: 'Custom Model',
desc: 'Use any OpenAI-compatible API (LM Studio, Ollama, LocalAI, etc.) by configuring custom base URL.',
provider: 'custom'
},
];
@@ -27,24 +101,51 @@ export const DEFAULT_CONFIG: AppConfig = {
customApiKey: '',
customBaseUrl: '',
enableCustomApi: false,
enableRecursiveLoop: false
enableRecursiveLoop: false,
apiProvider: 'google',
customModels: []
};
/**
 * Thinking levels a given model supports: Gemini Pro only low/high,
 * OpenAI o1 models low/medium/high, everything else the full range.
 */
export const getValidThinkingLevels = (model: ModelOption): ThinkingLevel[] => {
  switch (model) {
    case 'gemini-3-pro-preview':
      return ['low', 'high'];
    case 'o1-preview':
    case 'o1-mini':
      return ['low', 'medium', 'high'];
    default:
      return ['minimal', 'low', 'medium', 'high'];
  }
};
/**
 * Map a thinking level to a token budget for the given model.
 *
 * Fix: the merged diff left stale lines from the old implementation in place
 * (an unused `isPro`, a duplicate `case 'minimal'`, and an old
 * `case 'high': return isPro ? 32768 : 16384;` ahead of the new 'high'
 * clause). Because the first matching case wins, OpenAI reasoning models
 * could never receive their intended 65536 budget. This restores the clean
 * version: minimal disables thinking, low/medium are fixed, and 'high' is
 * model-dependent (o1 → 65536, Gemini Pro → 32768, otherwise 16384).
 */
export const getThinkingBudget = (level: ThinkingLevel, model: ModelOption): number => {
  const isGeminiPro = model === 'gemini-3-pro-preview';
  const isOpenAIReasoning = model === 'o1-preview' || model === 'o1-mini';
  switch (level) {
    case 'minimal': return 0; // Disables thinking
    case 'low': return 2048;
    case 'medium': return 8192;
    case 'high':
      if (isOpenAIReasoning) return 65536;
      if (isGeminiPro) return 32768;
      return 16384;
    default: return 0;
  }
};
/**
 * Resolve the provider of a preset model from the MODELS table;
 * unknown models default to 'google'.
 */
export const getProvider = (model: ModelOption): ApiProvider => {
  const entry = MODELS.find(candidate => candidate.value === model);
  return entry ? entry.provider : 'google';
};
/**
 * All selectable models: every preset except the 'custom' placeholder,
 * followed by the user's custom models rendered in the same shape.
 */
export const getAllModels = (config: AppConfig): { value: ModelOption; label: string; desc: string; provider: ApiProvider }[] => {
  // The 'custom' entry only exists to drive the settings UI; hide it here.
  const presets = MODELS.filter(entry => entry.value !== 'custom');
  const userDefined = (config.customModels || []).map(entry => ({
    value: entry.name as ModelOption,
    label: entry.name,
    desc: `Custom ${entry.provider} model`,
    provider: entry.provider
  }));
  return presets.concat(userDefined);
};

View File

@@ -1,6 +1,6 @@
import { useCallback } from 'react';
import { getAI } from '../api';
import { getAI, getAIProvider, findCustomModel } from '../api';
import { getThinkingBudget } from '../config';
import { AppConfig, ModelOption, ExpertResult, ChatMessage } from '../types';
@@ -99,9 +99,13 @@ export const useDeepThink = () => {
setProcessStartTime(Date.now());
setProcessEndTime(null);
const customModelConfig = findCustomModel(model, config.customModels);
const provider = customModelConfig?.provider || getAIProvider(model);
const ai = getAI({
apiKey: config.enableCustomApi ? config.customApiKey : undefined,
baseUrl: (config.enableCustomApi && config.customBaseUrl) ? config.customBaseUrl : undefined
provider,
apiKey: customModelConfig?.apiKey || config.customApiKey,
baseUrl: customModelConfig?.baseUrl || config.customBaseUrl
});
try {

1
prisma/index.css Normal file
View File

@@ -0,0 +1 @@
/* Base styles are handled by Tailwind CSS */

View File

@@ -1,18 +1,5 @@
/**
* Network Interceptor
*
* Intercepts global fetch calls to redirect Gemini API requests
* from the default endpoint to a user-defined custom base URL.
*
* Uses Object.defineProperty to bypass "getter-only" restrictions on window.fetch
* in certain sandboxed or strict environments.
*/
const originalFetch = window.fetch;
/**
* Robustly applies a function to the window.fetch property.
*/
const applyFetch = (fn: typeof window.fetch) => {
try {
Object.defineProperty(window, 'fetch', {
@@ -22,7 +9,6 @@ const applyFetch = (fn: typeof window.fetch) => {
enumerable: true
});
} catch (e) {
// Fallback for environments where defineProperty on window might fail
try {
(window as any).fetch = fn;
} catch (err) {
@@ -33,15 +19,12 @@ const applyFetch = (fn: typeof window.fetch) => {
export const setInterceptorUrl = (baseUrl: string | null) => {
if (!baseUrl) {
// Restore original fetch when disabled
applyFetch(originalFetch);
return;
}
// Normalize the base URL
let normalizedBase = baseUrl.trim();
try {
// Basic validation
new URL(normalizedBase);
} catch (e) {
console.warn("[Prisma] Invalid Base URL provided:", normalizedBase);
@@ -65,28 +48,22 @@ export const setInterceptorUrl = (baseUrl: string | null) => {
const defaultHost = 'generativelanguage.googleapis.com';
// Check if the request is targeting the Google Gemini API
if (urlString.includes(defaultHost)) {
try {
const url = new URL(urlString);
const proxy = new URL(normalizedBase);
// Replace protocol and host
url.protocol = proxy.protocol;
url.host = proxy.host;
// Prepend proxy path if it exists (e.g., /v1/proxy)
if (proxy.pathname !== '/') {
const cleanPath = proxy.pathname.endsWith('/') ? proxy.pathname.slice(0, -1) : proxy.pathname;
// Ensure we don't double up slashes
url.pathname = cleanPath + url.pathname;
}
const newUrl = url.toString();
// Handle the different types of fetch inputs
if (input instanceof Request) {
// Re-create the request with the new URL and original properties
const requestData: RequestInit = {
method: input.method,
headers: input.headers,
@@ -99,9 +76,7 @@ export const setInterceptorUrl = (baseUrl: string | null) => {
integrity: input.integrity,
};
// Merge with init if provided
const mergedInit = { ...requestData, ...init };
return originalFetch(new URL(newUrl), mergedInit);
}

4254
prisma/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -9,14 +9,15 @@
"preview": "vite preview"
},
"dependencies": {
"react-markdown": "^10.1.0",
"remark-math": "6.0.0",
"rehype-katex": "7.0.0",
"@google/genai": "^1.34.0",
"lucide-react": "^0.562.0",
"react-dom": "^19.2.3",
"openai": "^6.15.0",
"react": "^19.2.3",
"react-syntax-highlighter": "^16.1.0"
"react-dom": "^19.2.3",
"react-markdown": "^10.1.0",
"react-syntax-highlighter": "^16.1.0",
"rehype-katex": "7.0.0",
"remark-math": "6.0.0"
},
"devDependencies": {
"@types/node": "^22.14.0",

View File

@@ -1,6 +1,11 @@
import { ModelOption, ExpertResult } from '../../types';
import { getExpertSystemInstruction } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContentStream !== undefined;
};
export const streamExpertResponse = async (
ai: any,
@@ -11,44 +16,65 @@ export const streamExpertResponse = async (
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
): Promise<void> => {
// We wrap the stream initiation in retry.
// If the stream is successfully established but fails during iteration,
// we catch that separately.
const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
config: {
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
temperature: expert.temperature,
thinkingConfig: {
const isGoogle = isGoogleProvider(ai);
if (isGoogle) {
const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
config: {
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
temperature: expert.temperature,
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
includeThoughts: true
}
}
}));
try {
for await (const chunk of (streamResult as any)) {
if (signal.aborted) break;
let chunkText = "";
let chunkThought = "";
if (chunk.candidates?.[0]?.content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
}
onChunk(chunkText, chunkThought);
}
}
} catch (streamError) {
console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
throw streamError;
}
}));
} else {
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
content: expert.prompt,
temperature: expert.temperature,
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
});
try {
for await (const chunk of streamResult) {
if (signal.aborted) break;
try {
for await (const chunk of (stream as any)) {
if (signal.aborted) break;
let chunkText = "";
let chunkThought = "";
if (chunk.candidates?.[0]?.content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
}
onChunk(chunkText, chunkThought);
}
onChunk(chunk.text, chunk.thought || '');
}
} catch (streamError) {
console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
throw streamError;
}
} catch (streamError) {
console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
// We don't retry mid-stream automatically here to avoid complex state management,
// but the initial connection is protected by withRetry.
throw streamError;
}
};

View File

@@ -1,8 +1,14 @@
import { Type } from "@google/genai";
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult } from '../../types';
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, ApiProvider } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContent as generateOpenAIContent } from './openaiClient';
import { getAIProvider } from '../../api';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContent !== undefined;
};
export const executeManagerAnalysis = async (
ai: any,
@@ -11,57 +17,86 @@ export const executeManagerAnalysis = async (
context: string,
budget: number
): Promise<AnalysisResult> => {
const managerSchema = {
type: Type.OBJECT,
properties: {
thought_process: { type: Type.STRING, description: "Brief explanation of why these supplementary experts were chosen." },
experts: {
type: Type.ARRAY,
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
},
required: ["thought_process", "experts"]
};
const isGoogle = isGoogleProvider(ai);
try {
const analysisResp = await withRetry(() => ai.models.generateContent({
model: model,
contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
config: {
systemInstruction: MANAGER_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: managerSchema,
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
if (isGoogle) {
const managerSchema = {
type: Type.OBJECT,
properties: {
thought_process: { type: Type.STRING, description: "Brief explanation of why these supplementary experts were chosen." },
experts: {
type: Type.ARRAY,
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
}
}));
const rawText = analysisResp.text || '{}';
const cleanText = cleanJsonString(rawText);
const analysisJson = JSON.parse(cleanText) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
}
return analysisJson;
} catch (e) {
console.error("Manager Analysis Error:", e);
// Return a fallback so the process doesn't completely die if planning fails
return {
thought_process: "Direct processing fallback due to analysis error.",
experts: []
},
required: ["thought_process", "experts"]
};
try {
const analysisResp = await withRetry(() => ai.models.generateContent({
model: model,
contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
config: {
systemInstruction: MANAGER_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: managerSchema,
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
}
}));
const rawText = (analysisResp as any).text || '{}';
const cleanText = cleanJsonString(rawText);
const analysisJson = JSON.parse(cleanText) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
}
return analysisJson;
} catch (e) {
console.error("Manager Analysis Error:", e);
return {
thought_process: "Direct processing fallback due to analysis error.",
experts: []
};
}
} else {
try {
const response = await generateOpenAIContent(ai, {
model,
systemInstruction: MANAGER_SYSTEM_PROMPT,
content: `Context:\n${context}\n\nCurrent Query: "${query}"\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`,
temperature: 0.7,
responseFormat: 'json_object',
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
});
const analysisJson = JSON.parse(response.text) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
}
return analysisJson;
} catch (e) {
console.error("Manager Analysis Error:", e);
return {
thought_process: "Direct processing fallback due to analysis error.",
experts: []
};
}
}
};
@@ -72,57 +107,78 @@ export const executeManagerReview = async (
currentExperts: ExpertResult[],
budget: number
): Promise<ReviewResult> => {
const reviewSchema = {
type: Type.OBJECT,
properties: {
satisfied: { type: Type.BOOLEAN, description: "True if the experts have fully answered the query with high quality." },
critique: { type: Type.STRING, description: "If not satisfied, explain why and what is missing." },
next_round_strategy: { type: Type.STRING, description: "Plan for the next iteration." },
refined_experts: {
type: Type.ARRAY,
description: "The list of experts for the next round. Can be the same roles or new ones.",
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
},
required: ["satisfied", "critique"]
};
const expertOutputs = currentExperts.map(e =>
const isGoogle = isGoogleProvider(ai);
const expertOutputs = currentExperts.map(e =>
`--- [Round ${e.round}] Expert: ${e.role} ---\nOutput: ${e.content?.slice(0, 2000)}...`
).join('\n\n');
const content = `User Query: "${query}"\n\nCurrent Expert Outputs:\n${expertOutputs}`;
try {
const resp = await withRetry(() => ai.models.generateContent({
model: model,
contents: content,
config: {
systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: reviewSchema,
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
if (isGoogle) {
const reviewSchema = {
type: Type.OBJECT,
properties: {
satisfied: { type: Type.BOOLEAN, description: "True if the experts have fully answered the query with high quality." },
critique: { type: Type.STRING, description: "If not satisfied, explain why and what is missing." },
next_round_strategy: { type: Type.STRING, description: "Plan for the next iteration." },
refined_experts: {
type: Type.ARRAY,
description: "The list of experts for the next round. Can be the same roles or new ones.",
items: {
type: Type.OBJECT,
properties: {
role: { type: Type.STRING },
description: { type: Type.STRING },
temperature: { type: Type.NUMBER },
prompt: { type: Type.STRING }
},
required: ["role", "description", "temperature", "prompt"]
}
}
}
}));
},
required: ["satisfied", "critique"]
};
const rawText = resp.text || '{}';
const cleanText = cleanJsonString(rawText);
return JSON.parse(cleanText) as ReviewResult;
} catch (e) {
console.error("Review Error:", e);
// Fallback: Assume satisfied if JSON or API fails to avoid infinite loops
return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
try {
const resp = await withRetry(() => ai.models.generateContent({
model: model,
contents: content,
config: {
systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
responseMimeType: "application/json",
responseSchema: reviewSchema,
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
}
}));
const rawText = (resp as any).text || '{}';
const cleanText = cleanJsonString(rawText);
return JSON.parse(cleanText) as ReviewResult;
} catch (e) {
console.error("Review Error:", e);
return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
}
} else {
try {
const response = await generateOpenAIContent(ai, {
model,
systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
content: `${content}\n\nReturn a JSON response with this structure:\n{\n "satisfied": boolean,\n "critique": "...",\n "next_round_strategy": "...",\n "refined_experts": [...]\n}`,
temperature: 0.7,
responseFormat: 'json_object',
thinkingConfig: {
includeThoughts: true,
thinkingBudget: budget
}
});
return JSON.parse(response.text) as ReviewResult;
} catch (e) {
console.error("Review Error:", e);
return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
}
}
};

View File

@@ -0,0 +1,157 @@
import OpenAI from "openai";
import { ModelOption } from '../../types';
import { withRetry } from '../utils/retry';
// One unit of streamed output from an OpenAI-compatible endpoint, as yielded
// by generateContentStream below.
export interface OpenAIStreamChunk {
  // Visible answer text in this chunk (may be empty when the chunk carries
  // only thought content).
  text: string;
  // Reasoning text extracted from <thinking>...</thinking> tags, when
  // thinking extraction is enabled.
  thought?: string;
}
// Request options shared by the OpenAI-compatible helpers in this module.
export interface OpenAIConfig {
  // Model identifier forwarded verbatim to the API.
  model: ModelOption;
  // Optional system prompt; sent as the first ('system') message when set.
  systemInstruction?: string;
  // The user message content.
  content: string;
  // Sampling temperature; forwarded as-is (undefined lets the API default apply).
  temperature?: number;
  // 'json_object' requests JSON mode via response_format; 'text' / undefined
  // leaves the response format unset.
  responseFormat?: 'text' | 'json_object';
  // When includeThoughts is true, <thinking> sections in the response are
  // parsed out and surfaced separately. NOTE: thinkingBudget is accepted for
  // parity with the Gemini client's config shape but is not forwarded to the
  // OpenAI-style API anywhere in this module.
  thinkingConfig?: {
    includeThoughts: boolean;
    thinkingBudget: number;
  };
}
// Splits a complete model response into its <thinking> sections and the
// remaining visible text. All thinking sections are concatenated (in order)
// into `thought`; `text` is the input with those sections removed. Both
// results are trimmed. An unclosed <thinking> tag is left in the text
// untouched, matching the non-greedy closed-pair pattern below.
const parseThinkingTokens = (text: string): { thought: string; text: string } => {
  const thinkPattern = /<thinking>([\s\S]*?)<\/thinking>/g;
  const thoughtParts: string[] = [];
  for (const match of text.matchAll(thinkPattern)) {
    thoughtParts.push(match[1]);
  }
  const visibleText = text.replace(thinkPattern, '');
  return { thought: thoughtParts.join('').trim(), text: visibleText.trim() };
};
/**
 * Performs a single non-streaming chat completion against an
 * OpenAI-compatible client.
 *
 * Builds a (system?, user) message list from the config, forwards the
 * temperature and optional JSON response format, and retries the initial
 * request via withRetry. When `thinkingConfig.includeThoughts` is set, any
 * <thinking> sections in the returned content are stripped out and returned
 * separately as `thought`. Errors are logged and rethrown to the caller.
 */
export const generateContent = async (
  ai: OpenAI,
  config: OpenAIConfig
): Promise<{ text: string; thought?: string }> => {
  const messages: Array<OpenAI.Chat.ChatCompletionMessageParam> = config.systemInstruction
    ? [
        { role: 'system', content: config.systemInstruction },
        { role: 'user', content: config.content },
      ]
    : [{ role: 'user', content: config.content }];

  const requestOptions: any = {
    model: config.model,
    messages,
    temperature: config.temperature,
  };
  if (config.responseFormat === 'json_object') {
    requestOptions.response_format = { type: 'json_object' };
  }

  try {
    const response = await withRetry(() => ai.chat.completions.create(requestOptions));
    const content = response.choices[0]?.message?.content || '';
    if (!config.thinkingConfig?.includeThoughts) {
      return { text: content };
    }
    const { thought, text } = parseThinkingTokens(content);
    return { text, thought };
  } catch (error) {
    console.error('OpenAI generateContent error:', error);
    throw error;
  }
};
/**
 * Streams a chat completion from an OpenAI-compatible client, yielding
 * OpenAIStreamChunk items as deltas arrive.
 *
 * When `thinkingConfig.includeThoughts` is enabled, <thinking>...</thinking>
 * sections are routed to the `thought` field and everything else to `text`.
 * Fixes over the previous version: visible text before a '<thinking>' tag
 * (or after a '</thinking>' tag) inside the same delta is no longer dropped,
 * multiple tag boundaries within one delta are handled, and the unused
 * accumulated-text buffer is removed.
 *
 * NOTE(review): a tag split across two deltas (e.g. '<thin' + 'king>') is
 * still not detected — that matches the original behavior; confirm whether
 * the targeted providers ever split tags across stream chunks.
 */
export async function* generateContentStream(
  ai: OpenAI,
  config: OpenAIConfig
): AsyncGenerator<OpenAIStreamChunk, void, unknown> {
  const messages: Array<OpenAI.Chat.ChatCompletionMessageParam> = [];
  if (config.systemInstruction) {
    messages.push({
      role: 'system',
      content: config.systemInstruction
    });
  }
  messages.push({
    role: 'user',
    content: config.content
  });

  const requestOptions: any = {
    model: config.model,
    messages,
    temperature: config.temperature,
    stream: true,
  };

  // Only the initial connection is retried; mid-stream failures propagate.
  const stream = await withRetry(() => ai.chat.completions.create(requestOptions) as any);

  const OPEN_TAG = '<thinking>';
  const CLOSE_TAG = '</thinking>';
  let inThinking = false;
  let thoughtBuffer = '';

  for await (const chunk of (stream as any)) {
    const delta = chunk.choices[0]?.delta?.content || '';
    if (!delta) continue;

    if (!config.thinkingConfig?.includeThoughts) {
      yield { text: delta, thought: '' };
      continue;
    }

    // Scan the delta, alternating between visible-text and thought modes at
    // each tag boundary so no surrounding text is lost.
    let rest = delta;
    while (rest.length > 0) {
      if (inThinking) {
        const close = rest.indexOf(CLOSE_TAG);
        if (close === -1) {
          thoughtBuffer += rest;
          rest = '';
          // Flush long in-progress thoughts periodically so the UI can
          // render reasoning as it streams (same 100-char heuristic as before).
          if (thoughtBuffer.length > 100) {
            yield { text: '', thought: thoughtBuffer };
            thoughtBuffer = '';
          }
        } else {
          thoughtBuffer += rest.slice(0, close);
          inThinking = false;
          if (thoughtBuffer.trim()) {
            yield { text: '', thought: thoughtBuffer };
          }
          thoughtBuffer = '';
          rest = rest.slice(close + CLOSE_TAG.length);
        }
      } else {
        const open = rest.indexOf(OPEN_TAG);
        if (open === -1) {
          yield { text: rest, thought: '' };
          rest = '';
        } else {
          const before = rest.slice(0, open);
          if (before) {
            yield { text: before, thought: '' };
          }
          inThinking = true;
          rest = rest.slice(open + OPEN_TAG.length);
        }
      }
    }
  }

  // Emit any trailing thought text left by an unclosed <thinking> tag.
  if (thoughtBuffer.trim()) {
    yield { text: '', thought: thoughtBuffer };
  }
}

View File

@@ -1,6 +1,11 @@
import { ModelOption, ExpertResult } from '../../types';
import { getSynthesisPrompt } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContentStream !== undefined;
};
export const streamSynthesisResponse = async (
ai: any,
@@ -13,38 +18,63 @@ export const streamSynthesisResponse = async (
onChunk: (text: string, thought: string) => void
): Promise<void> => {
const prompt = getSynthesisPrompt(historyContext, query, expertResults);
const isGoogle = isGoogleProvider(ai);
const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
config: {
thinkingConfig: {
if (isGoogle) {
const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
config: {
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
}
}));
}
}
}));
try {
for await (const chunk of synthesisStream) {
if (signal.aborted) break;
try {
for await (const chunk of (synthesisStream as any)) {
if (signal.aborted) break;
let chunkText = "";
let chunkThought = "";
let chunkText = "";
let chunkThought = "";
if (chunk.candidates?.[0]?.content?.parts) {
if (chunk.candidates?.[0]?.content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
if (part.thought) {
chunkThought += (part.text || "");
} else if (part.text) {
chunkText += part.text;
}
}
onChunk(chunkText, chunkThought);
}
}
} catch (streamError) {
console.error("Synthesis stream interrupted:", streamError);
throw streamError;
}
} else {
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: undefined,
content: prompt,
temperature: 0.7,
thinkingConfig: {
thinkingBudget: budget,
includeThoughts: true
}
});
try {
for await (const chunk of (stream as any)) {
if (signal.aborted) break;
onChunk(chunk.text, chunk.thought || '');
}
} catch (streamError) {
console.error("Synthesis stream interrupted:", streamError);
throw streamError;
}
} catch (streamError) {
console.error("Synthesis stream interrupted:", streamError);
throw streamError;
}
};

View File

@@ -1,5 +1,14 @@
export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview';
export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview' | 'gpt-4.1' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1-mini' | 'deepseek-chat' | 'deepseek-coder' | 'custom' | string;
export type ThinkingLevel = 'minimal' | 'low' | 'medium' | 'high';
export type ApiProvider = 'google' | 'openai' | 'deepseek' | 'anthropic' | 'xai' | 'mistral' | 'custom';
// A user-defined model entry added via settings.
export type CustomModel = {
  // Stable unique identifier for this entry.
  id: string;
  // Display name / model identifier for this custom model.
  name: string;
  // Which API provider family this model talks to.
  provider: ApiProvider;
  // Presumably a per-model override of the global API key — confirm against
  // the API client wiring.
  apiKey?: string;
  // Presumably a per-model override of the provider base URL (for custom /
  // self-hosted endpoints) — confirm against the API client wiring.
  baseUrl?: string;
};
export type ExpertConfig = {
id: string;
@@ -40,7 +49,9 @@ export type AppConfig = {
customApiKey?: string;
customBaseUrl?: string;
enableCustomApi?: boolean;
enableRecursiveLoop?: boolean; // New toggle for loop mode
enableRecursiveLoop?: boolean;
apiProvider?: ApiProvider;
customModels?: CustomModel[];
};
export type ChatMessage = {

9
prisma/vite-env.d.ts vendored Normal file
View File

@@ -0,0 +1,9 @@
/// <reference types="vite/client" />
// Ambient typings for environment variables Vite exposes to client code.
interface ImportMetaEnv {
  // Primary API key injected at build time; the VITE_ prefix is required for
  // Vite to expose the variable to the client bundle.
  readonly VITE_API_KEY?: string
}
// Augments import.meta so `import.meta.env` carries the shape above.
interface ImportMeta {
  readonly env: ImportMetaEnv
}

View File

@@ -1,15 +1,149 @@
import path from 'path';
import { defineConfig, loadEnv } from 'vite';
import react from '@vitejs/plugin-react';
import type { Connect } from 'vite';
// Dev-server middleware that dynamically proxies any request under
// /custom-api to the origin named in the request's X-Target-URL header.
// Used so the browser can talk to arbitrary OpenAI-compatible endpoints
// without CORS issues. Responds 400 when the header is missing and 502 on
// any upstream/network failure.
function customApiProxyMiddleware(): Connect.NextHandleFunction {
  return async (req, res, next) => {
    if (!req.url?.startsWith('/custom-api')) {
      return next();
    }
    const targetUrl = req.headers['x-target-url'] as string;
    if (!targetUrl) {
      res.statusCode = 400;
      res.end(JSON.stringify({ error: 'Missing X-Target-URL header' }));
      return;
    }
    try {
      const url = new URL(targetUrl);
      // Only the ORIGIN of X-Target-URL is used; the upstream path is taken
      // from the request's own /custom-api/... suffix.
      // NOTE(review): any path component in X-Target-URL (e.g. a /v1 base
      // path) is silently discarded here — confirm this is intended.
      const targetPath = req.url.replace(/^\/custom-api/, '');
      const fullUrl = `${url.origin}${targetPath}`;
      console.log('[Custom Proxy] Forwarding:', req.method, req.url, '->', fullUrl);
      // Collect request body (buffered fully; requests are small API calls)
      const chunks: Buffer[] = [];
      for await (const chunk of req) {
        chunks.push(chunk as Buffer);
      }
      const body = Buffer.concat(chunks);
      // Forward headers (excluding hop-by-hop headers and our control header)
      const forwardHeaders: Record<string, string> = {};
      const skipHeaders = ['host', 'connection', 'x-target-url', 'transfer-encoding'];
      for (const [key, value] of Object.entries(req.headers)) {
        if (!skipHeaders.includes(key.toLowerCase()) && value) {
          forwardHeaders[key] = Array.isArray(value) ? value[0] : value;
        }
      }
      // NOTE(review): 'host' is a forbidden header for fetch in some
      // runtimes and may be ignored or rejected — verify under the Node
      // version used for dev.
      forwardHeaders['host'] = url.host;
      // Make the upstream request; GET/HEAD must not carry a body
      const response = await fetch(fullUrl, {
        method: req.method,
        headers: forwardHeaders,
        body: ['GET', 'HEAD'].includes(req.method || '') ? undefined : body,
      });
      // Forward response status and headers (minus hop-by-hop headers)
      res.statusCode = response.status;
      response.headers.forEach((value, key) => {
        if (!['transfer-encoding', 'connection'].includes(key.toLowerCase())) {
          res.setHeader(key, value);
        }
      });
      // Stream the response body back so SSE/streaming completions work
      if (response.body) {
        const reader = response.body.getReader();
        try {
          while (true) {
            const { done, value } = await reader.read();
            if (done) break;
            res.write(value);
          }
        } finally {
          reader.releaseLock();
        }
      }
      res.end();
    } catch (error: any) {
      console.error('[Custom Proxy] Error:', error.message);
      res.statusCode = 502;
      res.end(JSON.stringify({ error: 'Proxy error', message: error.message }));
    }
  };
}
export default defineConfig(({ mode }) => {
const env = loadEnv(mode, '.', '');
return {
server: {
port: 3000,
host: '0.0.0.0',
proxy: {
// Proxy for OpenAI API
'/openai/v1': {
target: 'https://api.openai.com',
changeOrigin: true,
secure: true,
rewrite: (path) => path.replace(/^\/openai\/v1/, '/v1'),
},
// Proxy for DeepSeek API
'/deepseek/v1': {
target: 'https://api.deepseek.com',
changeOrigin: true,
secure: true,
rewrite: (path) => path.replace(/^\/deepseek\/v1/, '/v1'),
},
// Proxy for Anthropic API
'/anthropic/v1': {
target: 'https://api.anthropic.com',
changeOrigin: true,
secure: true,
rewrite: (path) => path.replace(/^\/anthropic\/v1/, '/v1'),
},
// Proxy for xAI API
'/xai/v1': {
target: 'https://api.x.ai',
changeOrigin: true,
secure: true,
rewrite: (path) => path.replace(/^\/xai\/v1/, '/v1'),
},
// Proxy for Mistral API
'/mistral/v1': {
target: 'https://api.mistral.ai',
changeOrigin: true,
secure: true,
rewrite: (path) => path.replace(/^\/mistral\/v1/, '/v1'),
},
// Proxy for Google Gemini API
'/v1beta/models': {
target: 'https://generativelanguage.googleapis.com',
changeOrigin: true,
secure: true,
},
'/v1/models': {
target: 'https://generativelanguage.googleapis.com',
changeOrigin: true,
secure: true,
},
}
},
plugins: [react()],
plugins: [
react(),
{
name: 'custom-api-proxy',
configureServer(server) {
server.middlewares.use(customApiProxyMiddleware());
},
},
],
define: {
'process.env.API_KEY': JSON.stringify(env.GEMINI_API_KEY),
'process.env.GEMINI_API_KEY': JSON.stringify(env.GEMINI_API_KEY)