commit a32f3a5faf
parent 21842c2b50
Author: 从何开始123
Date:   2026-01-09 01:07:03 +08:00

17 changed files with 334 additions and 135 deletions

.DS_Store (vendored): binary file, not shown

prisma/.env.example (deleted)

@@ -1,11 +0,0 @@
-# API Keys Configuration
-# Copy this file to .env.local and add your actual API keys
-
-# Primary API Key (used by default)
-# For Google Gemini: https://ai.google.dev/
-# For OpenAI: https://platform.openai.com/
-VITE_API_KEY=your_api_key_here
-
-# Alternative: Use provider-specific keys (optional)
-# GEMINI_API_KEY=your_gemini_key_here
-# OPENAI_API_KEY=your_openai_key_here

prisma/.gitignore (vendored)

@@ -22,5 +22,3 @@ dist-ssr
 *.njsproj
 *.sln
 *.sw?
-.env
-# .env.example is allowed by default

prisma/api.ts

@@ -32,7 +32,7 @@ let currentCustomApiUrl: string | null = null;
 const originalFetch = typeof window !== 'undefined' ? window.fetch.bind(window) : null;
 if (typeof window !== 'undefined' && originalFetch) {
-  window.fetch = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
+  const proxyFetch = async (input: RequestInfo | URL, init?: RequestInit): Promise<Response> => {
     let urlString: string;
     if (typeof input === 'string') {
       urlString = input;
@@ -56,11 +56,26 @@ if (typeof window !== 'undefined' && originalFetch) {
     return originalFetch(input, init);
   };
+
+  try {
+    window.fetch = proxyFetch;
+  } catch (e) {
+    try {
+      Object.defineProperty(window, 'fetch', {
+        value: proxyFetch,
+        writable: true,
+        configurable: true,
+        enumerable: true
+      });
+    } catch (e2) {
+      console.error('[API] Failed to intercept fetch:', e2);
+    }
+  }
 }

 export const getAI = (config?: AIProviderConfig) => {
   const provider = config?.provider || 'google';
-  const apiKey = config?.apiKey || (import.meta.env as any).VITE_API_KEY || process.env.API_KEY;
+  const apiKey = config?.apiKey || import.meta.env?.VITE_API_KEY || process.env.API_KEY;

   if (provider === 'openai' || provider === 'deepseek' || provider === 'custom' || provider === 'anthropic' || provider === 'xai' || provider === 'mistral') {
     const options: any = {
@@ -135,4 +150,4 @@ export const getAIProvider = (model: string): ApiProvider => {
     return 'custom';
   }
   return 'google';
 };
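
A note on the hunk above: some embedders expose window.fetch as a read-only property, so a bare assignment can throw; that is what the try/defineProperty fallback handles. A condensed, self-contained sketch of the pattern, with an illustrative rewrite predicate standing in for the real currentCustomApiUrl check:

const nativeFetch = window.fetch.bind(window);

const proxyFetch: typeof window.fetch = async (input, init) => {
  const url =
    typeof input === 'string' ? input :
    input instanceof URL ? input.href : input.url;
  if (url.includes('/custom-api/')) {
    // Placeholder predicate: the real proxy rewrites requests aimed at the custom API URL.
  }
  return nativeFetch(input, init);
};

try {
  window.fetch = proxyFetch; // throws if the property is read-only
} catch {
  // Redefine the property instead of assigning it.
  Object.defineProperty(window, 'fetch', {
    value: proxyFetch,
    writable: true,
    configurable: true
  });
}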

prisma/components/ChatMessageItem.tsx

@@ -102,6 +102,21 @@ const ChatMessageItem = ({ message, isLast }: ChatMessageProps) => {
         </div>
       )}

+      {/* Attachments */}
+      {message.attachments && message.attachments.length > 0 && (
+        <div className="flex flex-wrap gap-2 mb-3">
+          {message.attachments.map(att => (
+            <img
+              key={att.id}
+              src={att.url || `data:${att.mimeType};base64,${att.data}`}
+              alt="attachment"
+              className="h-32 w-32 object-cover rounded-lg border border-slate-200 shadow-sm cursor-pointer hover:opacity-90 transition-opacity"
+              onClick={() => window.open(att.url || `data:${att.mimeType};base64,${att.data}`, '_blank')}
+            />
+          ))}
+        </div>
+      )}
+
       {/* Text Content */}
       <div className="prose prose-slate max-w-none prose-p:leading-7 prose-pre:bg-slate-900 prose-pre:text-slate-50">
         {message.content ? (

prisma/components/InputSection.tsx

@@ -1,12 +1,13 @@
 import React, { useRef, useLayoutEffect, useState, useEffect } from 'react';
-import { ArrowUp, Square } from 'lucide-react';
-import { AppState } from '../types';
+import { ArrowUp, Square, Paperclip, X, Image as ImageIcon } from 'lucide-react';
+import { AppState, MessageAttachment } from '../types';
+import { fileToBase64 } from '../utils';

 interface InputSectionProps {
   query: string;
   setQuery: (q: string) => void;
-  onRun: () => void;
+  onRun: (attachments: MessageAttachment[]) => void;
   onStop: () => void;
   appState: AppState;
   focusTrigger?: number;
@@ -14,7 +15,9 @@ interface InputSectionProps {
 const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }: InputSectionProps) => {
   const textareaRef = useRef<HTMLTextAreaElement>(null);
+  const fileInputRef = useRef<HTMLInputElement>(null);
   const [isComposing, setIsComposing] = useState(false);
+  const [attachments, setAttachments] = useState<MessageAttachment[]>([]);

   const adjustHeight = () => {
     if (textareaRef.current) {
@@ -36,8 +39,7 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }
     }
   };

-  // Focus input on mount and when app becomes idle (e.g. after "New Chat" or completion)
-  // or when explicitly triggered by focusTrigger
+  // Focus input on mount and when app becomes idle
   useEffect(() => {
     if (appState === 'idle' && textareaRef.current) {
       textareaRef.current.focus();
@@ -49,39 +51,125 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }
     adjustHeight();
   }, [query]);

+  const processFile = async (file: File) => {
+    if (!file.type.startsWith('image/')) return;
+    try {
+      const base64 = await fileToBase64(file);
+      const newAttachment: MessageAttachment = {
+        id: Math.random().toString(36).substring(7),
+        type: 'image',
+        mimeType: file.type,
+        data: base64,
+        url: URL.createObjectURL(file)
+      };
+      setAttachments(prev => [...prev, newAttachment]);
+    } catch (e) {
+      console.error("Failed to process file", e);
+    }
+  };
+
+  const handlePaste = (e: React.ClipboardEvent) => {
+    const items = e.clipboardData.items;
+    for (let i = 0; i < items.length; i++) {
+      if (items[i].type.indexOf('image') !== -1) {
+        const file = items[i].getAsFile();
+        if (file) {
+          e.preventDefault();
+          processFile(file);
+        }
+      }
+    }
+  };
+
+  const handleFileSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
+    if (e.target.files) {
+      Array.from(e.target.files).forEach(processFile);
+    }
+    // Reset input so same file can be selected again
+    if (fileInputRef.current) fileInputRef.current.value = '';
+  };
+
+  const removeAttachment = (id: string) => {
+    setAttachments(prev => prev.filter(a => a.id !== id));
+  };
+
   const handleKeyDown = (e: React.KeyboardEvent) => {
-    // If user presses Enter without Shift
     if (e.key === 'Enter' && !e.shiftKey) {
-      // robust check for IME composition (e.g. Chinese/Japanese inputs)
       if (isComposing || (e.nativeEvent as any).isComposing) {
         return;
       }
       e.preventDefault();
-      if (query.trim() && appState === 'idle') {
-        onRun();
+      if ((query.trim() || attachments.length > 0) && appState === 'idle') {
+        handleSubmit();
       }
     }
   };

+  const handleSubmit = () => {
+    if (!query.trim() && attachments.length === 0) return;
+    onRun(attachments);
+    setAttachments([]);
+  };
+
   const isRunning = appState !== 'idle';

   return (
     <div className="w-full">
-      {/* Container: Flex items-end ensures button stays at bottom right as text grows */}
+      {/* Attachments Preview */}
+      {attachments.length > 0 && (
+        <div className="flex gap-2 mb-2 overflow-x-auto px-1 py-1">
+          {attachments.map(att => (
+            <div key={att.id} className="relative group shrink-0">
+              <img
+                src={att.url}
+                alt="attachment"
+                className="h-16 w-16 object-cover rounded-lg border border-slate-200 shadow-sm"
+              />
+              <button
+                onClick={() => removeAttachment(att.id)}
+                className="absolute -top-1.5 -right-1.5 bg-slate-900 text-white rounded-full p-0.5 opacity-0 group-hover:opacity-100 transition-opacity shadow-md"
+              >
+                <X size={12} />
+              </button>
+            </div>
+          ))}
+        </div>
+      )}
+
+      {/* Input Container */}
       <div className="w-full flex items-end p-2 bg-white/70 backdrop-blur-xl border border-slate-200/50 rounded-[26px] shadow-2xl focus-within:ring-2 focus-within:ring-blue-500/20 focus-within:bg-white/90 transition-colors duration-200">
+        <input
+          type="file"
+          ref={fileInputRef}
+          className="hidden"
+          accept="image/*"
+          multiple
+          onChange={handleFileSelect}
+        />
+        <button
+          onClick={() => fileInputRef.current?.click()}
+          className="flex-shrink-0 p-2.5 mb-0.5 ml-1 rounded-full text-slate-400 hover:text-slate-600 hover:bg-slate-100 transition-colors"
+          title="Attach Image"
+          disabled={isRunning}
+        >
+          <Paperclip size={20} />
+        </button>
         <textarea
           ref={textareaRef}
           value={query}
           onChange={(e) => setQuery(e.target.value)}
           onKeyDown={handleKeyDown}
+          onPaste={handlePaste}
           onCompositionStart={() => setIsComposing(true)}
           onCompositionEnd={() => setIsComposing(false)}
           placeholder="Ask a complex question..."
           rows={1}
           autoFocus
-          className="flex-1 max-h-[200px] py-3 pl-4 pr-2 bg-transparent border-none focus:ring-0 resize-none outline-none text-slate-800 placeholder:text-slate-400 leading-relaxed custom-scrollbar text-base"
+          className="flex-1 max-h-[200px] py-3 pl-2 pr-2 bg-transparent border-none focus:ring-0 resize-none outline-none text-slate-800 placeholder:text-slate-400 leading-relaxed custom-scrollbar text-base"
           style={{ minHeight: '48px' }}
         />
@@ -95,10 +183,8 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }
         </button>
       ) : (
         <button
-          onClick={() => {
-            if (query.trim()) onRun();
-          }}
-          disabled={!query.trim()}
+          onClick={handleSubmit}
+          disabled={!query.trim() && attachments.length === 0}
           className="flex items-center justify-center w-10 h-10 rounded-full bg-blue-600 text-white hover:bg-blue-700 disabled:bg-slate-200 disabled:text-slate-400 transition-all shadow-md hover:scale-105 active:scale-95"
         >
           <ArrowUp size={20} />

prisma/config.ts

@@ -14,72 +14,6 @@ export const MODELS: { value: ModelOption; label: string; desc: string; provider
     desc: 'Deep reasoning, complex tasks, higher intelligence.',
     provider: 'google'
   },
-  {
-    value: 'gpt-4o',
-    label: 'GPT-4o',
-    desc: 'OpenAI flagship model with vision capabilities.',
-    provider: 'openai'
-  },
-  {
-    value: 'gpt-4o-mini',
-    label: 'GPT-4o Mini',
-    desc: 'Fast, affordable small model for focused tasks.',
-    provider: 'openai'
-  },
-  {
-    value: 'o1-preview',
-    label: 'O1 Preview',
-    desc: 'Advanced reasoning model with chain-of-thought.',
-    provider: 'openai'
-  },
-  {
-    value: 'o1-mini',
-    label: 'O1 Mini',
-    desc: 'Fast reasoning model for efficient problem solving.',
-    provider: 'openai'
-  },
-  {
-    value: 'deepseek-chat',
-    label: 'DeepSeek Chat',
-    desc: 'Advanced language model optimized for conversational AI.',
-    provider: 'deepseek'
-  },
-  {
-    value: 'deepseek-coder',
-    label: 'DeepSeek Coder',
-    desc: 'Specialized model for code generation and programming tasks.',
-    provider: 'deepseek'
-  },
-  {
-    value: 'claude-3-5-sonnet-20241022',
-    label: 'Claude 3.5 Sonnet',
-    desc: 'Anthropic\'s most capable model with excellent reasoning.',
-    provider: 'anthropic'
-  },
-  {
-    value: 'claude-3-haiku-20240307',
-    label: 'Claude 3 Haiku',
-    desc: 'Fast and efficient model for quick responses.',
-    provider: 'anthropic'
-  },
-  {
-    value: 'grok-2-1212',
-    label: 'Grok 2',
-    desc: 'xAI\'s advanced model with real-time knowledge.',
-    provider: 'xai'
-  },
-  {
-    value: 'mistral-large-2411',
-    label: 'Mistral Large',
-    desc: 'Mistral\'s flagship model for complex reasoning.',
-    provider: 'mistral'
-  },
-  {
-    value: 'mixtral-8x7b-32768',
-    label: 'Mixtral 8x7B',
-    desc: 'Mixture of Experts model with excellent performance.',
-    provider: 'mistral'
-  },
   {
     value: 'custom',
     label: 'Custom Model',

prisma/hooks/useAppLogic.ts

@@ -1,6 +1,6 @@
 import { useState, useEffect, useCallback } from 'react';
-import { ModelOption, AppConfig, ChatMessage } from '../types';
+import { ModelOption, AppConfig, ChatMessage, MessageAttachment } from '../types';
 import { STORAGE_KEYS, DEFAULT_CONFIG, getValidThinkingLevels } from '../config';
 import { useDeepThink } from './useDeepThink';
 import { useChatSessions } from './useChatSessions';
@@ -155,13 +155,14 @@ export const useAppLogic = () => {
     }
   }, [appState, finalOutput, managerAnalysis, experts, synthesisThoughts, resetDeepThink, processStartTime, processEndTime, currentSessionId, messages, selectedModel, createSession, updateSessionMessages]);

-  const handleRun = useCallback(() => {
-    if (!query.trim()) return;
+  const handleRun = useCallback((attachments: MessageAttachment[] = []) => {
+    if (!query.trim() && attachments.length === 0) return;

     const userMsg: ChatMessage = {
       id: `user-${Date.now()}`,
       role: 'user',
-      content: query
+      content: query,
+      attachments: attachments
     };

     const newMessages = [...messages, userMsg];
@@ -174,7 +175,7 @@ export const useAppLogic = () => {
       updateSessionMessages(activeSessionId, newMessages);
     }

-    runDynamicDeepThink(query, messages, selectedModel, config);
+    runDynamicDeepThink(query, newMessages, selectedModel, config);
     setQuery('');
   }, [query, messages, currentSessionId, selectedModel, config, createSession, updateSessionMessages, runDynamicDeepThink]);

prisma/hooks/useDeepThink.ts

@@ -2,7 +2,7 @@
 import { useCallback } from 'react';
 import { getAI, getAIProvider, findCustomModel } from '../api';
 import { getThinkingBudget } from '../config';
-import { AppConfig, ModelOption, ExpertResult, ChatMessage } from '../types';
+import { AppConfig, ModelOption, ExpertResult, ChatMessage, MessageAttachment } from '../types';
 import { executeManagerAnalysis, executeManagerReview } from '../services/deepThink/manager';
 import { streamExpertResponse } from '../services/deepThink/expert';
@@ -35,6 +35,7 @@ export const useDeepThink = () => {
     ai: any,
     model: ModelOption,
     context: string,
+    attachments: MessageAttachment[],
     budget: number,
     signal: AbortSignal
   ): Promise<ExpertResult> => {
@@ -52,6 +53,7 @@ export const useDeepThink = () => {
       model,
       expert,
       context,
+      attachments,
       budget,
       signal,
       (textChunk, thoughtChunk) => {
@@ -84,7 +86,7 @@ export const useDeepThink = () => {
     model: ModelOption,
     config: AppConfig
   ) => {
-    if (!query.trim()) return;
+    if (!query.trim() && (!history.length || !history[history.length - 1].attachments?.length)) return;
     if (abortControllerRef.current) abortControllerRef.current.abort();
     abortControllerRef.current = new AbortController();
@@ -109,7 +111,11 @@ export const useDeepThink = () => {
     });

     try {
-      const recentHistory = history.slice(-5).map(msg =>
+      // Get the last message (which is the user's current query) to retrieve attachments
+      const lastMessage = history[history.length - 1];
+      const currentAttachments = lastMessage.role === 'user' ? (lastMessage.attachments || []) : [];
+
+      const recentHistory = history.slice(0, -1).slice(-5).map(msg =>
         `${msg.role === 'user' ? 'User' : 'Model'}: ${msg.content}`
       ).join('\n');
@@ -119,7 +125,8 @@ export const useDeepThink = () => {
         ai,
         model,
         query,
         recentHistory,
+        currentAttachments,
         getThinkingBudget(config.planningLevel, model)
       );
@@ -135,8 +142,9 @@ export const useDeepThink = () => {
       setInitialExperts([primaryExpert]);

+      // Primary expert sees the images
       const primaryTask = runExpertLifecycle(
-        primaryExpert, 0, ai, model, recentHistory,
+        primaryExpert, 0, ai, model, recentHistory, currentAttachments,
         getThinkingBudget(config.expertLevel, model), signal
       );
@@ -154,8 +162,12 @@ export const useDeepThink = () => {
       appendExperts(round1Experts);
       setAppState('experts_working');

+      // Supplementary experts usually don't need the images unless specified,
+      // but for simplicity/consistency we pass them if the model supports it.
+      // However, to save tokens/bandwidth, we might limit this.
+      // For now, let's pass them to ensure they have full context.
       const round1Tasks = round1Experts.map((exp, idx) =>
-        runExpertLifecycle(exp, idx + 1, ai, model, recentHistory,
+        runExpertLifecycle(exp, idx + 1, ai, model, recentHistory, currentAttachments,
           getThinkingBudget(config.expertLevel, model), signal)
       );
@@ -195,7 +207,7 @@ export const useDeepThink = () => {
       setAppState('experts_working');

       const nextRoundTasks = nextRoundExperts.map((exp, idx) =>
-        runExpertLifecycle(exp, startIndex + idx, ai, model, recentHistory,
+        runExpertLifecycle(exp, startIndex + idx, ai, model, recentHistory, currentAttachments,
           getThinkingBudget(config.expertLevel, model), signal)
       );
@@ -213,6 +225,7 @@ export const useDeepThink = () => {
       await streamSynthesisResponse(
         ai, model, query, recentHistory, expertsDataRef.current,
+        currentAttachments,
         getThinkingBudget(config.synthesisLevel, model), signal,
         (textChunk, thoughtChunk) => {
           fullFinalText += textChunk;
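
A note on the history slicing above: history now arrives already ending with the user's current message (useAppLogic passes newMessages), so slice(0, -1) drops it before the five-turn window is taken; the current query reaches the pipeline separately as query plus currentAttachments. A tiny worked example of what recentHistory ends up holding:

type Msg = { role: 'user' | 'model'; content: string };

const history: Msg[] = [
  { role: 'user', content: 'q1' }, { role: 'model', content: 'a1' },
  { role: 'user', content: 'q2' }, { role: 'model', content: 'a2' },
  { role: 'user', content: 'q3' }, { role: 'model', content: 'a3' },
  { role: 'user', content: 'current question' }
];

// slice(0, -1) drops "current question"; slice(-5) keeps the five turns before it.
const recentHistory = history.slice(0, -1).slice(-5)
  .map(m => `${m.role === 'user' ? 'User' : 'Model'}: ${m.content}`)
  .join('\n');
// -> "Model: a1\nUser: q2\nModel: a2\nUser: q3\nModel: a3"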

prisma/index.html

@@ -6,7 +6,6 @@
   <title>Prisma</title>
   <!-- SVG Favicon -->
   <link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg viewBox='0 0 600 600' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='300' cy='300' r='300' fill='white'/%3E%3Cg stroke-width='16' stroke-linecap='round' stroke-linejoin='round' fill='none' stroke='%23334155'%3E%3Cpath d='M300 180 L200 420 L400 420 Z'/%3E%3Cpath d='M300 50 L300 180'/%3E%3Cpath d='M100 480 L200 420'/%3E%3Cpath d='M500 480 L400 420'/%3E%3Cpath d='M300 50 L100 480 L500 480 Z'/%3E%3C/g%3E%3Cg stroke-width='12' stroke-linejoin='round' fill='none'%3E%3Cline x1='0' y1='275' x2='195' y2='275' stroke='%23334155'/%3E%3Cpolyline points='194,270 380,225 600,245' stroke='%232563eb' opacity='0.95'/%3E%3Cpolyline points='194,275 400,275 600,305' stroke='%234ade80' opacity='0.95'/%3E%3Cpolyline points='194,280 420,325 600,370' stroke='%239333ea' opacity='0.95'/%3E%3C/g%3E%3C/svg%3E">
-  <script src="https://cdn.tailwindcss.com"></script>
   <script src="https://cdn.tailwindcss.com?plugins=typography"></script>
   <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;700&display=swap" rel="stylesheet">
   <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.8/dist/katex.min.css">
@@ -70,20 +69,23 @@
       z-index: 0;
     }
   </style>
   <script type="importmap">
   {
     "imports": {
-      "react-markdown": "https://esm.sh/react-markdown@^10.1.0",
-      "remark-math": "https://esm.sh/remark-math@6.0.0",
-      "rehype-katex": "https://esm.sh/rehype-katex@7.0.0",
-      "@google/genai": "https://esm.sh/@google/genai@^1.34.0",
-      "lucide-react": "https://esm.sh/lucide-react@^0.562.0",
+      "openai": "https://esm.sh/openai@^6.15.0",
+      "path": "https://esm.sh/path@^0.12.7",
+      "vite": "https://esm.sh/vite@^7.3.1",
+      "@vitejs/plugin-react": "https://esm.sh/@vitejs/plugin-react@^5.1.2",
       "react-dom/": "https://esm.sh/react-dom@^19.2.3/",
+      "lucide-react": "https://esm.sh/lucide-react@^0.562.0",
       "react/": "https://esm.sh/react@^19.2.3/",
       "react": "https://esm.sh/react@^19.2.3",
-      "react-syntax-highlighter": "https://esm.sh/react-syntax-highlighter@15.6.1?external=react,react-dom",
-      "react-syntax-highlighter/dist/esm/styles/prism": "https://esm.sh/react-syntax-highlighter@15.6.1/dist/esm/styles/prism?external=react,react-dom",
-      "react-syntax-highlighter/": "https://esm.sh/react-syntax-highlighter@^16.1.0/"
+      "react-markdown": "https://esm.sh/react-markdown@^10.1.0",
+      "@google/genai": "https://esm.sh/@google/genai@^1.34.0",
+      "react-syntax-highlighter/": "https://esm.sh/react-syntax-highlighter@^16.1.0/",
+      "react-syntax-highlighter": "https://esm.sh/react-syntax-highlighter@^16.1.0",
+      "remark-math": "https://esm.sh/remark-math@^6.0.0",
+      "rehype-katex": "https://esm.sh/rehype-katex@^7.0.1"
     }
   }
   </script>

prisma/metadata.json (new file)

@@ -0,0 +1,5 @@
+{
+  "description": "Prisma - Deep multi-agent reasoning application.",
+  "requestFramePermissions": [],
+  "name": "Prisma"
+}

prisma/services/deepThink/expert.ts

@@ -1,4 +1,5 @@
-import { ModelOption, ExpertResult } from '../../types';
+import { ModelOption, ExpertResult, MessageAttachment } from '../../types';
 import { getExpertSystemInstruction } from './prompts';
 import { withRetry } from '../utils/retry';
 import { generateContentStream as generateOpenAIStream } from './openaiClient';
@@ -12,6 +13,7 @@ export const streamExpertResponse = async (
   model: ModelOption,
   expert: ExpertResult,
   context: string,
+  attachments: MessageAttachment[],
   budget: number,
   signal: AbortSignal,
   onChunk: (text: string, thought: string) => void
@@ -19,9 +21,25 @@ export const streamExpertResponse = async (
   const isGoogle = isGoogleProvider(ai);

   if (isGoogle) {
+    const contents: any = {
+      role: 'user',
+      parts: [{ text: expert.prompt }]
+    };
+
+    if (attachments.length > 0) {
+      attachments.forEach(att => {
+        contents.parts.push({
+          inlineData: {
+            mimeType: att.mimeType,
+            data: att.data
+          }
+        });
+      });
+    }
+
     const streamResult = await withRetry(() => ai.models.generateContentStream({
       model: model,
-      contents: expert.prompt,
+      contents: contents,
       config: {
         systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
         temperature: expert.temperature,
@@ -55,10 +73,26 @@ export const streamExpertResponse = async (
       throw streamError;
     }
   } else {
+    let contentPayload: any = expert.prompt;
+
+    if (attachments.length > 0) {
+      contentPayload = [
+        { type: 'text', text: expert.prompt }
+      ];
+      attachments.forEach(att => {
+        contentPayload.push({
+          type: 'image_url',
+          image_url: {
+            url: `data:${att.mimeType};base64,${att.data}`
+          }
+        });
+      });
+    }
+
     const stream = generateOpenAIStream(ai, {
       model,
       systemInstruction: getExpertSystemInstruction(expert.role, expert.description, context),
-      content: expert.prompt,
+      content: contentPayload,
       temperature: expert.temperature,
       thinkingConfig: {
         thinkingBudget: budget,
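
expert.ts, manager.ts, and synthesis.ts now repeat the same attachment fan-out for both provider families. A hypothetical shared helper (not part of the diff) that shows the two payload shapes side by side, assuming the MessageAttachment type from types.ts:

import { MessageAttachment } from '../../types';

// Gemini-style part: raw base64 under inlineData.
const toGeminiPart = (att: MessageAttachment) => ({
  inlineData: { mimeType: att.mimeType, data: att.data }
});

// OpenAI-compatible part: a base64 data URL under image_url.
const toOpenAIPart = (att: MessageAttachment) => ({
  type: 'image_url' as const,
  image_url: { url: `data:${att.mimeType};base64,${att.data}` }
});

export const buildGoogleContents = (text: string, attachments: MessageAttachment[]) => ({
  role: 'user',
  parts: [{ text }, ...attachments.map(toGeminiPart)]
});

export const buildOpenAIContent = (text: string, attachments: MessageAttachment[]) =>
  attachments.length === 0
    ? text
    : [{ type: 'text' as const, text }, ...attachments.map(toOpenAIPart)];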

prisma/services/deepThink/manager.ts

@@ -1,10 +1,10 @@
 import { Type } from "@google/genai";
-import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, ApiProvider } from '../../types';
+import { ModelOption, AnalysisResult, ExpertResult, ReviewResult, MessageAttachment } from '../../types';
 import { cleanJsonString } from '../../utils';
 import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
 import { withRetry } from '../utils/retry';
 import { generateContent as generateOpenAIContent } from './openaiClient';
-import { getAIProvider } from '../../api';

 const isGoogleProvider = (ai: any): boolean => {
   return ai?.models?.generateContent !== undefined;
@@ -15,9 +15,11 @@ export const executeManagerAnalysis = async (
   model: ModelOption,
   query: string,
   context: string,
+  attachments: MessageAttachment[],
   budget: number
 ): Promise<AnalysisResult> => {
   const isGoogle = isGoogleProvider(ai);
+  const textPrompt = `Context:\n${context}\n\nCurrent Query: "${query}"`;

   if (isGoogle) {
     const managerSchema = {
@@ -41,10 +43,26 @@ export const executeManagerAnalysis = async (
       required: ["thought_process", "experts"]
     };

+    const contents: any = {
+      role: 'user',
+      parts: [{ text: textPrompt }]
+    };
+
+    if (attachments.length > 0) {
+      attachments.forEach(att => {
+        contents.parts.push({
+          inlineData: {
+            mimeType: att.mimeType,
+            data: att.data
+          }
+        });
+      });
+    }
+
     try {
       const analysisResp = await withRetry(() => ai.models.generateContent({
         model: model,
-        contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
+        contents: contents,
         config: {
           systemInstruction: MANAGER_SYSTEM_PROMPT,
           responseMimeType: "application/json",
@@ -73,10 +91,37 @@ export const executeManagerAnalysis = async (
     }
   } else {
     try {
+      let contentPayload: any = textPrompt;
+
+      if (attachments.length > 0) {
+        contentPayload = [
+          { type: 'text', text: textPrompt }
+        ];
+        attachments.forEach(att => {
+          contentPayload.push({
+            type: 'image_url',
+            image_url: {
+              url: `data:${att.mimeType};base64,${att.data}`
+            }
+          });
+        });
+      }
+
+      // Append formatting instruction to prompt if needed (OpenAI sometimes needs this explicit in text)
+      // but usually responseFormat: json_object + system prompt is enough.
+      // We append it to the text part or the string.
+      const jsonInstruction = `\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`;
+      if (Array.isArray(contentPayload)) {
+        contentPayload[0].text += jsonInstruction;
+      } else {
+        contentPayload += jsonInstruction;
+      }
+
       const response = await generateOpenAIContent(ai, {
         model,
         systemInstruction: MANAGER_SYSTEM_PROMPT,
-        content: `Context:\n${context}\n\nCurrent Query: "${query}"\n\nReturn a JSON response with this structure:\n{\n "thought_process": "...",\n "experts": [\n { "role": "...", "description": "...", "temperature": number, "prompt": "..." }\n ]\n}`,
+        content: contentPayload,
         temperature: 0.7,
         responseFormat: 'json_object',
         thinkingConfig: {
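
On the consuming side, providers driven through responseFormat: 'json_object' can still wrap the object in code fences or prose, which is why this file imports cleanJsonString. A sketch of defensive parsing into the shape the appended instruction requests; the ManagerAnalysis type here is illustrative, mirroring that instruction rather than the real AnalysisResult import:

import { cleanJsonString } from '../../utils';

type ManagerAnalysis = {
  thought_process: string;
  experts: { role: string; description: string; temperature: number; prompt: string }[];
};

const parseManagerAnalysis = (raw: string): ManagerAnalysis => {
  // cleanJsonString strips surrounding fences/prose so JSON.parse sees a bare object.
  const parsed = JSON.parse(cleanJsonString(raw));
  if (!Array.isArray(parsed?.experts)) {
    throw new Error('Manager analysis is missing the experts array');
  }
  return parsed as ManagerAnalysis;
};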

prisma/services/deepThink/openaiClient.ts

@@ -1,3 +1,4 @@
 import OpenAI from "openai";
 import { ModelOption } from '../../types';
 import { withRetry } from '../utils/retry';
@@ -10,7 +11,7 @@ export interface OpenAIStreamChunk {
 export interface OpenAIConfig {
   model: ModelOption;
   systemInstruction?: string;
-  content: string;
+  content: string | Array<any>;
   temperature?: number;
   responseFormat?: 'text' | 'json_object';
   thinkingConfig?: {
@@ -49,7 +50,7 @@ export const generateContent = async (
   messages.push({
     role: 'user',
-    content: config.content
+    content: config.content as any
   });

   const requestOptions: any = {
@@ -93,7 +94,7 @@ export async function* generateContentStream(
   messages.push({
     role: 'user',
-    content: config.content
+    content: config.content as any
   });

   const requestOptions: any = {

prisma/services/deepThink/synthesis.ts

@@ -1,4 +1,5 @@
-import { ModelOption, ExpertResult } from '../../types';
+import { ModelOption, ExpertResult, MessageAttachment } from '../../types';
 import { getSynthesisPrompt } from './prompts';
 import { withRetry } from '../utils/retry';
 import { generateContentStream as generateOpenAIStream } from './openaiClient';
@@ -13,6 +14,7 @@ export const streamSynthesisResponse = async (
   query: string,
   historyContext: string,
   expertResults: ExpertResult[],
+  attachments: MessageAttachment[],
   budget: number,
   signal: AbortSignal,
   onChunk: (text: string, thought: string) => void
@@ -21,9 +23,25 @@ export const streamSynthesisResponse = async (
   const isGoogle = isGoogleProvider(ai);

   if (isGoogle) {
+    const contents: any = {
+      role: 'user',
+      parts: [{ text: prompt }]
+    };
+
+    if (attachments.length > 0) {
+      attachments.forEach(att => {
+        contents.parts.push({
+          inlineData: {
+            mimeType: att.mimeType,
+            data: att.data
+          }
+        });
+      });
+    }
+
     const synthesisStream = await withRetry(() => ai.models.generateContentStream({
       model: model,
-      contents: prompt,
+      contents: contents,
       config: {
         thinkingConfig: {
           thinkingBudget: budget,
@@ -55,10 +73,26 @@ export const streamSynthesisResponse = async (
       throw streamError;
     }
   } else {
+    let contentPayload: any = prompt;
+
+    if (attachments.length > 0) {
+      contentPayload = [
+        { type: 'text', text: prompt }
+      ];
+      attachments.forEach(att => {
+        contentPayload.push({
+          type: 'image_url',
+          image_url: {
+            url: `data:${att.mimeType};base64,${att.data}`
+          }
+        });
+      });
+    }
+
     const stream = generateOpenAIStream(ai, {
       model,
       systemInstruction: undefined,
-      content: prompt,
+      content: contentPayload,
       temperature: 0.7,
       thinkingConfig: {
         thinkingBudget: budget,

prisma/types.ts

@@ -1,4 +1,5 @@
-export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview' | 'gpt-4.1' | 'gpt-4o' | 'gpt-4o-mini' | 'o1-preview' | 'o1-mini' | 'deepseek-chat' | 'deepseek-coder' | 'custom' | string;
+export type ModelOption = 'gemini-3-flash-preview' | 'gemini-3-pro-preview' | 'custom' | string;

 export type ThinkingLevel = 'minimal' | 'low' | 'medium' | 'high';
 export type ApiProvider = 'google' | 'openai' | 'deepseek' | 'anthropic' | 'xai' | 'mistral' | 'custom';
@@ -54,10 +55,19 @@ export type AppConfig = {
   customModels?: CustomModel[];
 };

+export type MessageAttachment = {
+  id: string;
+  type: 'image';
+  mimeType: string;
+  data: string; // Base64 string
+  url?: string; // For display
+};
+
 export type ChatMessage = {
   id: string;
   role: 'user' | 'model';
   content: string;
+  attachments?: MessageAttachment[];

   // DeepThink Artifacts (only for model messages)
   analysis?: AnalysisResult | null;
   experts?: ExpertResult[];
@@ -72,4 +82,4 @@ export type ChatSession = {
   messages: ChatMessage[];
   createdAt: number;
   model: ModelOption;
 };
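
The split between data and url above is deliberate: data is the raw base64 that goes to the model APIs, while url is an optional object URL used only for local previews. When url is absent the UI rebuilds a data URL, which is exactly the fallback ChatMessageItem applies. A small illustration (the payload string is a placeholder):

const att: MessageAttachment = {
  id: Math.random().toString(36).substring(7), // same id scheme InputSection uses
  type: 'image',
  mimeType: 'image/png',
  data: 'iVBORw0KGgo...', // base64 payload only, no "data:...;base64," prefix
  url: undefined           // set via URL.createObjectURL(file) for fresh uploads
};

const src = att.url || `data:${att.mimeType};base64,${att.data}`;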

prisma/utils.ts

@@ -20,4 +20,21 @@ export const cleanJsonString = (str: string) => {
   // 3. Fallback: return original if it looks like JSON, otherwise empty object
   return str.trim().startsWith('{') ? str : "{}";
 };
+
+export const fileToBase64 = (file: File): Promise<string> => {
+  return new Promise((resolve, reject) => {
+    const reader = new FileReader();
+    reader.readAsDataURL(file);
+    reader.onload = () => {
+      if (typeof reader.result === 'string') {
+        // Remove the Data URL prefix (e.g., "data:image/png;base64,")
+        const base64 = reader.result.split(',')[1];
+        resolve(base64);
+      } else {
+        reject(new Error('Failed to convert file to base64'));
+      }
+    };
+    reader.onerror = error => reject(error);
+  });
+};
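
Usage note: fileToBase64 resolves to the payload after the comma only, so the MIME type has to travel separately on the attachment, which is how processFile in InputSection uses it. A minimal sketch of that pairing; the revoke comment is a suggestion, not something this diff adds:

import { MessageAttachment } from './types';

const attachImage = async (file: File): Promise<MessageAttachment | null> => {
  if (!file.type.startsWith('image/')) return null;
  const data = await fileToBase64(file); // no "data:image/png;base64," prefix
  return {
    id: Math.random().toString(36).substring(7),
    type: 'image',
    mimeType: file.type, // recombined with data when building API payloads
    data,
    url: URL.createObjectURL(file) // preview; consider URL.revokeObjectURL on removal
  };
};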