commit a32f3a5faf (parent 21842c2b50)
Author: 从何开始123
Date: 2026-01-09 01:07:03 +08:00

17 changed files with 334 additions and 135 deletions

BIN .DS_Store vendored (binary file not shown)

@@ -1,11 +0,0 @@
# API Keys Configuration
# Copy this file to .env.local and add your actual API keys
# Primary API Key (used by default)
# For Google Gemini: https://ai.google.dev/
# For OpenAI: https://platform.openai.com/
VITE_API_KEY=your_api_key_here
# Alternative: Use provider-specific keys (optional)
# GEMINI_API_KEY=your_gemini_key_here
# OPENAI_API_KEY=your_openai_key_here

prisma/.gitignore vendored (2 changes)

@@ -22,5 +22,3 @@ dist-ssr
*.njsproj
*.sln
*.sw?
.env
# .env.example is allowed by default


@@ -32,7 +32,7 @@ let currentCustomApiUrl: string | null = null;
const originalFetch = typeof window !== 'undefined' ? window.fetch.bind(window) : null;
if (typeof window !== 'undefined' && originalFetch) {
window.fetch = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
const proxyFetch = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
let urlString: string;
if (typeof input === 'string') {
urlString = input;
@@ -56,11 +56,26 @@ if (typeof window !== 'undefined' && originalFetch) {
return originalFetch(input, init);
};
try {
window.fetch = proxyFetch;
} catch (e) {
try {
Object.defineProperty(window, 'fetch', {
value: proxyFetch,
writable: true,
configurable: true,
enumerable: true
});
} catch (e2) {
console.error('[API] Failed to intercept fetch:', e2);
}
}
}
export const getAI = (config?: AIProviderConfig) => {
const provider = config?.provider || 'google';
const apiKey = config?.apiKey || (import.meta.env as any).VITE_API_KEY || process.env.API_KEY;
const apiKey = config?.apiKey || import.meta.env?.VITE_API_KEY || process.env.API_KEY;
if (provider === 'openai' || provider === 'deepseek' || provider === 'custom' || provider === 'anthropic' || provider === 'xai' || provider === 'mistral') {
const options: any = {
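The try/catch fallback above exists because some environments expose window.fetch as a read-only or accessor property, where plain assignment throws. A minimal standalone sketch of the same install pattern, reusing the proxyFetch defined above (the descriptor probe is an illustration, not part of the commit):

// Prefer plain assignment; fall back to defineProperty when fetch is read-only.
const installInterceptor = (proxyFetch: typeof window.fetch) => {
  const desc = Object.getOwnPropertyDescriptor(window, 'fetch');
  if (!desc || desc.writable || desc.set) {
    window.fetch = proxyFetch; // writable data property, or inherited: assignment works
  } else if (desc.configurable) {
    // Read-only but reconfigurable: redefine as a writable data property.
    Object.defineProperty(window, 'fetch', { value: proxyFetch, writable: true, configurable: true });
  } else {
    console.error('[API] Failed to intercept fetch: property is locked');
  }
};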


@@ -102,6 +102,21 @@ const ChatMessageItem = ({ message, isLast }: ChatMessageProps) => {
</div>
)}
{/* Attachments */}
{message.attachments && message.attachments.length > 0 && (
<div className="flex flex-wrap gap-2 mb-3">
{message.attachments.map(att => (
<img
key={att.id}
src={att.url || `data:${att.mimeType};base64,${att.data}`}
alt="attachment"
className="h-32 w-32 object-cover rounded-lg border border-slate-200 shadow-sm cursor-pointer hover:opacity-90 transition-opacity"
onClick={() => window.open(att.url || `data:${att.mimeType};base64,${att.data}`, '_blank')}
/>
))}
</div>
)}
{/* Text Content */}
<div className="prose prose-slate max-w-none prose-p:leading-7 prose-pre:bg-slate-900 prose-pre:text-slate-50">
{message.content ? (
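In the src fallback above, att.url (the object URL created when the file was attached) is preferred, with the base64 data URL as the fallback; presumably the data URL path covers messages restored from a saved session, where object URLs no longer resolve.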


@@ -1,12 +1,13 @@
import React, { useRef, useLayoutEffect, useState, useEffect } from 'react';
import { ArrowUp, Square } from 'lucide-react';
import { AppState } from '../types';
import { ArrowUp, Square, Paperclip, X, Image as ImageIcon } from 'lucide-react';
import { AppState, MessageAttachment } from '../types';
import { fileToBase64 } from '../utils';
interface InputSectionProps {
query: string;
setQuery: (q: string) => void;
onRun: () => void;
onRun: (attachments: MessageAttachment[]) => void;
onStop: () => void;
appState: AppState;
focusTrigger?: number;
@@ -14,7 +15,9 @@ interface InputSectionProps {
const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }: InputSectionProps) => {
const textareaRef = useRef<HTMLTextAreaElement>(null);
const fileInputRef = useRef<HTMLInputElement>(null);
const [isComposing, setIsComposing] = useState(false);
const [attachments, setAttachments] = useState<MessageAttachment[]>([]);
const adjustHeight = () => {
if (textareaRef.current) {
@@ -36,8 +39,7 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }
}
};
// Focus input on mount and when app becomes idle (e.g. after "New Chat" or completion)
// or when explicitly triggered by focusTrigger
// Focus input on mount and when app becomes idle
useEffect(() => {
if (appState === 'idle' && textareaRef.current) {
textareaRef.current.focus();
@@ -49,39 +51,125 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }
adjustHeight();
}, [query]);
const processFile = async (file: File) => {
if (!file.type.startsWith('image/')) return;
try {
const base64 = await fileToBase64(file);
const newAttachment: MessageAttachment = {
id: Math.random().toString(36).substring(7),
type: 'image',
mimeType: file.type,
data: base64,
url: URL.createObjectURL(file)
};
setAttachments(prev => [...prev, newAttachment]);
} catch (e) {
console.error("Failed to process file", e);
}
};
const handlePaste = (e: React.ClipboardEvent) => {
const items = e.clipboardData.items;
for (let i = 0; i < items.length; i++) {
if (items[i].type.indexOf('image') !== -1) {
const file = items[i].getAsFile();
if (file) {
e.preventDefault();
processFile(file);
}
}
}
};
const handleFileSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
if (e.target.files) {
Array.from(e.target.files).forEach(processFile);
}
// Reset input so same file can be selected again
if (fileInputRef.current) fileInputRef.current.value = '';
};
const removeAttachment = (id: string) => {
setAttachments(prev => prev.filter(a => a.id !== id));
};
const handleKeyDown = (e: React.KeyboardEvent) => {
// If user presses Enter without Shift
if (e.key === 'Enter' && !e.shiftKey) {
// robust check for IME composition (e.g. Chinese/Japanese inputs)
if (isComposing || (e.nativeEvent as any).isComposing) {
return;
}
e.preventDefault();
if (query.trim() && appState === 'idle') {
onRun();
if ((query.trim() || attachments.length > 0) && appState === 'idle') {
handleSubmit();
}
}
};
const handleSubmit = () => {
if (!query.trim() && attachments.length === 0) return;
onRun(attachments);
setAttachments([]);
};
const isRunning = appState !== 'idle';
return (
<div className="w-full">
{/* Container: Flex items-end ensures button stays at bottom right as text grows */}
{/* Attachments Preview */}
{attachments.length > 0 && (
<div className="flex gap-2 mb-2 overflow-x-auto px-1 py-1">
{attachments.map(att => (
<div key={att.id} className="relative group shrink-0">
<img
src={att.url}
alt="attachment"
className="h-16 w-16 object-cover rounded-lg border border-slate-200 shadow-sm"
/>
<button
onClick={() => removeAttachment(att.id)}
className="absolute -top-1.5 -right-1.5 bg-slate-900 text-white rounded-full p-0.5 opacity-0 group-hover:opacity-100 transition-opacity shadow-md"
>
<X size={12} />
</button>
</div>
))}
</div>
)}
{/* Input Container */}
<div className="w-full flex items-end p-2 bg-white/70 backdrop-blur-xl border border-slate-200/50 rounded-[26px] shadow-2xl focus-within:ring-2 focus-within:ring-blue-500/20 focus-within:bg-white/90 transition-colors duration-200">
<input
type="file"
ref={fileInputRef}
className="hidden"
accept="image/*"
multiple
onChange={handleFileSelect}
/>
<button
onClick={() => fileInputRef.current?.click()}
className="flex-shrink-0 p-2.5 mb-0.5 ml-1 rounded-full text-slate-400 hover:text-slate-600 hover:bg-slate-100 transition-colors"
title="Attach Image"
disabled={isRunning}
>
<Paperclip size={20} />
</button>
<textarea
ref={textareaRef}
value={query}
onChange={(e) => setQuery(e.target.value)}
onKeyDown={handleKeyDown}
onPaste={handlePaste}
onCompositionStart={() => setIsComposing(true)}
onCompositionEnd={() => setIsComposing(false)}
placeholder="Ask a complex question..."
rows={1}
autoFocus
className="flex-1 max-h-[200px] py-3 pl-4 pr-2 bg-transparent border-none focus:ring-0 resize-none outline-none text-slate-800 placeholder:text-slate-400 leading-relaxed custom-scrollbar text-base"
className="flex-1 max-h-[200px] py-3 pl-2 pr-2 bg-transparent border-none focus:ring-0 resize-none outline-none text-slate-800 placeholder:text-slate-400 leading-relaxed custom-scrollbar text-base"
style={{ minHeight: '48px' }}
/>
@@ -95,10 +183,8 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }
</button>
) : (
<button
onClick={() => {
if (query.trim()) onRun();
}}
disabled={!query.trim()}
onClick={handleSubmit}
disabled={!query.trim() && attachments.length === 0}
className="flex items-center justify-center w-10 h-10 rounded-full bg-blue-600 text-white hover:bg-blue-700 disabled:bg-slate-200 disabled:text-slate-400 transition-all shadow-md hover:scale-105 active:scale-95"
>
<ArrowUp size={20} />
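After this change, Enter submits when there is either text or at least one attachment; Shift+Enter, and Enter during IME composition, still insert a newline. Pasted images are intercepted before the default paste handling, and handleSubmit clears the attachment list once it hands the attachments to onRun.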


@@ -14,72 +14,6 @@ export const MODELS: { value: ModelOption; label: string; desc: string; provider
desc: 'Deep reasoning, complex tasks, higher intelligence.',
provider: 'google'
},
{
value: 'gpt-4o',
label: 'GPT-4o',
desc: 'OpenAI flagship model with vision capabilities.',
provider: 'openai'
},
{
value: 'gpt-4o-mini',
label: 'GPT-4o Mini',
desc: 'Fast, affordable small model for focused tasks.',
provider: 'openai'
},
{
value: 'o1-preview',
label: 'O1 Preview',
desc: 'Advanced reasoning model with chain-of-thought.',
provider: 'openai'
},
{
value: 'o1-mini',
label: 'O1 Mini',
desc: 'Fast reasoning model for efficient problem solving.',
provider: 'openai'
},
{
value: 'deepseek-chat',
label: 'DeepSeek Chat',
desc: 'Advanced language model optimized for conversational AI.',
provider: 'deepseek'
},
{
value: 'deepseek-coder',
label: 'DeepSeek Coder',
desc: 'Specialized model for code generation and programming tasks.',
provider: 'deepseek'
},
{
value: 'claude-3-5-sonnet-20241022',
label: 'Claude 3.5 Sonnet',
desc: 'Anthropic\'s most capable model with excellent reasoning.',
provider: 'anthropic'
},
{
value: 'claude-3-haiku-20240307',
label: 'Claude 3 Haiku',
desc: 'Fast and efficient model for quick responses.',
provider: 'anthropic'
},
{
value: 'grok-2-1212',
label: 'Grok 2',
desc: 'xAI\'s advanced model with real-time knowledge.',
provider: 'xai'
},
{
value: 'mistral-large-2411',
label: 'Mistral Large',
desc: 'Mistral\'s flagship model for complex reasoning.',
provider: 'mistral'
},
{
value: 'mixtral-8x7b-32768',
label: 'Mixtral 8x7B',
desc: 'Mixture of Experts model with excellent performance.',
provider: 'mistral'
},
{
value: 'custom',
label: 'Custom Model',


@@ -1,6 +1,6 @@
import { useState, useEffect, useCallback } from 'react';
import { ModelOption, AppConfig, ChatMessage } from '../types';
import { ModelOption, AppConfig, ChatMessage, MessageAttachment } from '../types';
import { STORAGE_KEYS, DEFAULT_CONFIG, getValidThinkingLevels } from '../config';
import { useDeepThink } from './useDeepThink';
import { useChatSessions } from './useChatSessions';
@@ -155,13 +155,14 @@ export const useAppLogic = () => {
}
}, [appState, finalOutput, managerAnalysis, experts, synthesisThoughts, resetDeepThink, processStartTime, processEndTime, currentSessionId, messages, selectedModel, createSession, updateSessionMessages]);
const handleRun = useCallback(() => {
if (!query.trim()) return;
const handleRun = useCallback((attachments: MessageAttachment[] = []) => {
if (!query.trim() && attachments.length === 0) return;
const userMsg: ChatMessage = {
id: `user-${Date.now()}`,
role: 'user',
content: query
content: query,
attachments: attachments
};
const newMessages = [...messages, userMsg];
@@ -174,7 +175,7 @@ export const useAppLogic = () => {
updateSessionMessages(activeSessionId, newMessages);
}
runDynamicDeepThink(query, messages, selectedModel, config);
runDynamicDeepThink(query, newMessages, selectedModel, config);
setQuery('');
}, [query, messages, currentSessionId, selectedModel, config, createSession, updateSessionMessages, runDynamicDeepThink]);
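The switch from messages to newMessages in the runDynamicDeepThink call is the functional part of this hunk: the pipeline now reads the current attachments off the last history entry (see useDeepThink below), so it must receive the history that already includes userMsg.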


@@ -2,7 +2,7 @@
import { useCallback } from 'react';
import { getAI, getAIProvider, findCustomModel } from '../api';
import { getThinkingBudget } from '../config';
import { AppConfig, ModelOption, ExpertResult, ChatMessage } from '../types';
import { AppConfig, ModelOption, ExpertResult, ChatMessage, MessageAttachment } from '../types';
import { executeManagerAnalysis, executeManagerReview } from '../services/deepThink/manager';
import { streamExpertResponse } from '../services/deepThink/expert';
@@ -35,6 +35,7 @@ export const useDeepThink = () => {
ai: any,
model: ModelOption,
context: string,
attachments: MessageAttachment[],
budget: number,
signal: AbortSignal
): Promise<ExpertResult> => {
@@ -52,6 +53,7 @@ export const useDeepThink = () => {
model,
expert,
context,
attachments,
budget,
signal,
(textChunk, thoughtChunk) => {
@@ -84,7 +86,7 @@ export const useDeepThink = () => {
model: ModelOption,
config: AppConfig
) => {
if (!query.trim()) return;
if (!query.trim() && (!history.length || !history[history.length - 1].attachments?.length)) return;
if (abortControllerRef.current) abortControllerRef.current.abort();
abortControllerRef.current = new AbortController();
@@ -109,7 +111,11 @@ export const useDeepThink = () => {
});
try {
const recentHistory = history.slice(-5).map(msg =>
// Get the last message (which is the user's current query) to retrieve attachments
const lastMessage = history[history.length - 1];
const currentAttachments = lastMessage.role === 'user' ? (lastMessage.attachments || []) : [];
const recentHistory = history.slice(0, -1).slice(-5).map(msg =>
`${msg.role === 'user' ? 'User' : 'Model'}: ${msg.content}`
).join('\n');
@@ -120,6 +126,7 @@ export const useDeepThink = () => {
model,
query,
recentHistory,
currentAttachments,
getThinkingBudget(config.planningLevel, model)
);
@@ -135,8 +142,9 @@ export const useDeepThink = () => {
setInitialExperts([primaryExpert]);
// Primary expert sees the images
const primaryTask = runExpertLifecycle(
primaryExpert, 0, ai, model, recentHistory,
primaryExpert, 0, ai, model, recentHistory, currentAttachments,
getThinkingBudget(config.expertLevel, model), signal
);
@@ -154,8 +162,12 @@ export const useDeepThink = () => {
appendExperts(round1Experts);
setAppState('experts_working');
// Supplementary experts may not strictly need the images, but passing them
// keeps every expert working from the full context. If token/bandwidth cost
// becomes a concern, this is the place to limit it.
const round1Tasks = round1Experts.map((exp, idx) =>
runExpertLifecycle(exp, idx + 1, ai, model, recentHistory,
runExpertLifecycle(exp, idx + 1, ai, model, recentHistory, currentAttachments,
getThinkingBudget(config.expertLevel, model), signal)
);
@@ -195,7 +207,7 @@ export const useDeepThink = () => {
setAppState('experts_working');
const nextRoundTasks = nextRoundExperts.map((exp, idx) =>
runExpertLifecycle(exp, startIndex + idx, ai, model, recentHistory,
runExpertLifecycle(exp, startIndex + idx, ai, model, recentHistory, currentAttachments,
getThinkingBudget(config.expertLevel, model), signal)
);
@@ -213,6 +225,7 @@ export const useDeepThink = () => {
await streamSynthesisResponse(
ai, model, query, recentHistory, expertsDataRef.current,
currentAttachments,
getThinkingBudget(config.synthesisLevel, model), signal,
(textChunk, thoughtChunk) => {
fullFinalText += textChunk;
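Because handleRun now passes newMessages, the history already ends with the current user turn; slice(0, -1) drops that turn (its text and images travel separately as query and currentAttachments), and slice(-5) then keeps the five turns before it.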


@@ -6,7 +6,6 @@
<title>Prisma</title>
<!-- SVG Favicon -->
<link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg viewBox='0 0 600 600' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='300' cy='300' r='300' fill='white'/%3E%3Cg stroke-width='16' stroke-linecap='round' stroke-linejoin='round' fill='none' stroke='%23334155'%3E%3Cpath d='M300 180 L200 420 L400 420 Z'/%3E%3Cpath d='M300 50 L300 180'/%3E%3Cpath d='M100 480 L200 420'/%3E%3Cpath d='M500 480 L400 420'/%3E%3Cpath d='M300 50 L100 480 L500 480 Z'/%3E%3C/g%3E%3Cg stroke-width='12' stroke-linejoin='round' fill='none'%3E%3Cline x1='0' y1='275' x2='195' y2='275' stroke='%23334155'/%3E%3Cpolyline points='194,270 380,225 600,245' stroke='%232563eb' opacity='0.95'/%3E%3Cpolyline points='194,275 400,275 600,305' stroke='%234ade80' opacity='0.95'/%3E%3Cpolyline points='194,280 420,325 600,370' stroke='%239333ea' opacity='0.95'/%3E%3C/g%3E%3C/svg%3E">
<script src="https://cdn.tailwindcss.com"></script>
<script src="https://cdn.tailwindcss.com?plugins=typography"></script>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;700&display=swap" rel="stylesheet">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.8/dist/katex.min.css">
@@ -73,17 +72,20 @@
<script type="importmap">
{
"imports": {
"react-markdown": "https://esm.sh/react-markdown@^10.1.0",
"remark-math": "https://esm.sh/remark-math@6.0.0",
"rehype-katex": "https://esm.sh/rehype-katex@7.0.0",
"@google/genai": "https://esm.sh/@google/genai@^1.34.0",
"lucide-react": "https://esm.sh/lucide-react@^0.562.0",
"openai": "https://esm.sh/openai@^6.15.0",
"path": "https://esm.sh/path@^0.12.7",
"vite": "https://esm.sh/vite@^7.3.1",
"@vitejs/plugin-react": "https://esm.sh/@vitejs/plugin-react@^5.1.2",
"react-dom/": "https://esm.sh/react-dom@^19.2.3/",
"lucide-react": "https://esm.sh/lucide-react@^0.562.0",
"react/": "https://esm.sh/react@^19.2.3/",
"react": "https://esm.sh/react@^19.2.3",
"react-syntax-highlighter": "https://esm.sh/react-syntax-highlighter@15.6.1?external=react,react-dom",
"react-syntax-highlighter/dist/esm/styles/prism": "https://esm.sh/react-syntax-highlighter@15.6.1/dist/esm/styles/prism?external=react,react-dom",
"react-syntax-highlighter/": "https://esm.sh/react-syntax-highlighter@^16.1.0/"
"react-markdown": "https://esm.sh/react-markdown@^10.1.0",
"@google/genai": "https://esm.sh/@google/genai@^1.34.0",
"react-syntax-highlighter/": "https://esm.sh/react-syntax-highlighter@^16.1.0/",
"react-syntax-highlighter": "https://esm.sh/react-syntax-highlighter@^16.1.0",
"remark-math": "https://esm.sh/remark-math@^6.0.0",
"rehype-katex": "https://esm.sh/rehype-katex@^7.0.1"
}
}
</script>

prisma/metadata.json (new file, +5 lines)

@@ -0,0 +1,5 @@
{
"description": "Prisma - Deep multi-agent reasoning application.",
"requestFramePermissions": [],
"name": "Prisma"
}


@@ -1,4 +1,5 @@
import { ModelOption, ExpertResult } from '../../types';
import { ModelOption, ExpertResult, MessageAttachment } from '../../types';
import { getExpertSystemInstruction } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
@@ -12,6 +13,7 @@ export const streamExpertResponse = async (
model: ModelOption,
expert: ExpertResult,
context: string,
attachments: MessageAttachment[],
budget: number,
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
@@ -19,9 +21,25 @@ export const streamExpertResponse = async (
const isGoogle = isGoogleProvider(ai);
if (isGoogle) {
const contents: any = {
role: 'user',
parts: [{ text: expert.prompt }]
};
if (attachments.length > 0) {
attachments.forEach(att => {
contents.parts.push({
inlineData: {
mimeType: att.mimeType,
data: att.data
}
});
});
}
const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
contents: contents,
config: {
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
temperature: expert.temperature,
@@ -55,10 +73,26 @@ export const streamExpertResponse = async (
throw streamError;
}
} else {
let contentPayload: any = expert.prompt;
if (attachments.length > 0) {
contentPayload = [
{ type: 'text', text: expert.prompt }
];
attachments.forEach(att => {
contentPayload.push({
type: 'image_url',
image_url: {
url: `data:${att.mimeType};base64,${att.data}`
}
});
});
}
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
content: expert.prompt,
content: contentPayload,
temperature: expert.temperature,
thinkingConfig: {
thinkingBudget: budget,
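The same Gemini/OpenAI payload split recurs in executeManagerAnalysis and streamSynthesisResponse below, so it is worth seeing in isolation. A sketch of the two mappings as standalone helpers (helper names hypothetical, not part of the commit):

import { MessageAttachment } from '../../types';

// Gemini: a parts array, with inlineData blocks carrying the raw base64.
const toGeminiContents = (text: string, attachments: MessageAttachment[]) => ({
  role: 'user',
  parts: [
    { text },
    ...attachments.map(att => ({ inlineData: { mimeType: att.mimeType, data: att.data } })),
  ],
});

// OpenAI-compatible: a plain string when there are no images, otherwise an
// array of text + image_url parts with the base64 wrapped in a data: URL.
const toOpenAIContent = (text: string, attachments: MessageAttachment[]) =>
  attachments.length === 0
    ? text
    : [
        { type: 'text', text },
        ...attachments.map(att => ({
          type: 'image_url',
          image_url: { url: `data:${att.mimeType};base64,${att.data}` },
        })),
      ];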


@@ -1,10 +1,10 @@
import { Type } from "@google/genai";
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, ApiProvider } from '../../types';
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, MessageAttachment } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContent as generateOpenAIContent } from './openaiClient';
import { getAIProvider } from '../../api';
const isGoogleProvider = (ai: any): boolean => {
return ai?.models?.generateContent !== undefined;
@@ -15,9 +15,11 @@ export const executeManagerAnalysis = async (
model: ModelOption,
query: string,
context: string,
attachments: MessageAttachment[],
budget: number
): Promise<AnalysisResult> => {
const isGoogle = isGoogleProvider(ai);
const textPrompt = `Context:\n${context}\n\nCurrent Query: "${query}"`;
if (isGoogle) {
const managerSchema = {
@@ -41,10 +43,26 @@ export const executeManagerAnalysis = async (
required: ["thought_process", "experts"]
};
const contents: any = {
role: 'user',
parts: [{ text: textPrompt }]
};
if (attachments.length > 0) {
attachments.forEach(att => {
contents.parts.push({
inlineData: {
mimeType: att.mimeType,
data: att.data
}
});
});
}
try {
const analysisResp = await withRetry(() => ai.models.generateContent({
model: model,
contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
contents: contents,
config: {
systemInstruction: MANAGER_SYSTEM_PROMPT,
responseMimeType: "application/json",
@@ -73,10 +91,37 @@ export const executeManagerAnalysis = async (
}
} else {
try {
let contentPayload: any = textPrompt;
if (attachments.length > 0) {
contentPayload = [
{ type: 'text', text: textPrompt }
];
attachments.forEach(att => {
contentPayload.push({
type: 'image_url',
image_url: {
url: `data:${att.mimeType};base64,${att.data}`
}
});
});
}
// OpenAI-style endpoints usually honor responseFormat: 'json_object' plus the
// system prompt, but stating the expected shape in the user text is more
// reliable, so append it to the text part (array payload) or the raw string.
const jsonInstruction = `\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`;
if (Array.isArray(contentPayload)) {
contentPayload[0].text += jsonInstruction;
} else {
contentPayload += jsonInstruction;
}
const response = await generateOpenAIContent(ai, {
model,
systemInstruction: MANAGER_SYSTEM_PROMPT,
content: `Context:\n${context}\n\nCurrent Query: "${query}"\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`,
content: contentPayload,
temperature: 0.7,
responseFormat: 'json_object',
thinkingConfig: {


@@ -1,3 +1,4 @@
import OpenAI from "openai";
import { ModelOption } from '../../types';
import { withRetry } from '../utils/retry';
@@ -10,7 +11,7 @@ export interface OpenAIStreamChunk {
export interface OpenAIConfig {
model: ModelOption;
systemInstruction?: string;
content: string;
content: string | Array<any>;
temperature?: number;
responseFormat?: 'text' | 'json_object';
thinkingConfig?: {
@@ -49,7 +50,7 @@ export const generateContent = async (
messages.push({
role: 'user',
content: config.content
content: config.content as any
});
const requestOptions: any = {
@@ -93,7 +94,7 @@ export async function* generateContentStream(
messages.push({
role: 'user',
content: config.content
content: config.content as any
});
const requestOptions: any = {


@@ -1,4 +1,5 @@
import { ModelOption, ExpertResult } from '../../types';
import { ModelOption, ExpertResult, MessageAttachment } from '../../types';
import { getSynthesisPrompt } from './prompts';
import { withRetry } from '../utils/retry';
import { generateContentStream as generateOpenAIStream } from './openaiClient';
@@ -13,6 +14,7 @@ export const streamSynthesisResponse = async (
query: string,
historyContext: string,
expertResults: ExpertResult[],
attachments: MessageAttachment[],
budget: number,
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
@@ -21,9 +23,25 @@ export const streamSynthesisResponse = async (
const isGoogle = isGoogleProvider(ai);
if (isGoogle) {
const contents: any = {
role: 'user',
parts: [{ text: prompt }]
};
if (attachments.length > 0) {
attachments.forEach(att => {
contents.parts.push({
inlineData: {
mimeType: att.mimeType,
data: att.data
}
});
});
}
const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
contents: contents,
config: {
thinkingConfig: {
thinkingBudget: budget,
@@ -55,10 +73,26 @@ export const streamSynthesisResponse = async (
throw streamError;
}
} else {
let contentPayload: any = prompt;
if (attachments.length > 0) {
contentPayload = [
{ type: 'text', text: prompt }
];
attachments.forEach(att => {
contentPayload.push({
type: 'image_url',
image_url: {
url: `data:${att.mimeType};base64,${att.data}`
}
});
});
}
const stream = generateOpenAIStream(ai, {
model,
systemInstruction: undefined,
content: prompt,
content: contentPayload,
temperature: 0.7,
thinkingConfig: {
thinkingBudget: budget,


@@ -1,4 +1,5 @@
export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview' | 'gpt-4.1' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1-mini' | 'deepseek-chat' | 'deepseek-coder' | 'custom' | string;
export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview' | 'custom' | string;
export type ThinkingLevel = 'minimal' | 'low' | 'medium' | 'high';
export type ApiProvider = 'google' | 'openai' | 'deepseek' | 'anthropic' | 'xai' | 'mistral' | 'custom';
@@ -54,10 +55,19 @@ export type AppConfig = {
customModels?: CustomModel[];
};
export type MessageAttachment = {
id: string;
type: 'image';
mimeType: string;
data: string; // Base64 string
url?: string; // For display
};
export type ChatMessage = {
id: string;
role: 'user' | 'model';
content: string;
attachments?: MessageAttachment[];
// DeepThink Artifacts (only for model messages)
analysis?: AnalysisResult | null;
experts?: ExpertResult[];


@@ -21,3 +21,20 @@ export const cleanJsonString = (str: string) => {
// 3. Fallback: return original if it looks like JSON, otherwise empty object
return str.trim().startsWith('{') ? str : "{}";
};
export const fileToBase64 = (file: File): Promise<string> => {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.readAsDataURL(file);
reader.onload = () => {
if (typeof reader.result === 'string') {
// Remove the Data URL prefix (e.g., "data:image/png;base64,")
const base64 = reader.result.split(',')[1];
resolve(base64);
} else {
reject(new Error('Failed to convert file to base64'));
}
};
reader.onerror = error => reject(error);
});
};
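A usage sketch mirroring processFile in InputSection, showing where the stripped base64 and the object URL each end up (the './types' import path is an assumption):

import { MessageAttachment } from './types';

// Hypothetical standalone helper; `file` would come from a paste or file input.
const toAttachment = async (file: File): Promise<MessageAttachment> => ({
  id: Math.random().toString(36).substring(7),
  type: 'image',
  mimeType: file.type,
  data: await fileToBase64(file), // raw base64, no "data:...;base64," prefix
  url: URL.createObjectURL(file), // display-only preview; not persisted
});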