Commit 37b0e5f8e6 by 从何开始123, 2026-01-08 12:56:00 +08:00 (parent 1561c054b7)
13 changed files with 258 additions and 140 deletions

BIN .DS_Store (vendored): binary file not shown.

Changed file: App component

@@ -33,7 +33,8 @@ const App = () => {
     handleNewChat,
     handleSelectSession,
     handleDeleteSession,
-    stopDeepThink
+    stopDeepThink,
+    focusTrigger
   } = useAppLogic();

   return (
@@ -85,6 +86,7 @@ const App = () => {
             onRun={handleRun}
             onStop={stopDeepThink}
             appState={appState}
+            focusTrigger={focusTrigger}
           />
         </div>
       </div>

Changed file: ChatArea component

@@ -24,67 +24,69 @@ const ChatArea = ({
   processStartTime,
   processEndTime
 }: ChatAreaProps) => {
+  const isIdle = messages.length === 0 && appState === 'idle';
+
   return (
     <div className="flex-1 overflow-y-auto custom-scrollbar scroll-smooth">
-      <div className="pb-40">
-        {messages.length === 0 && appState === 'idle' && (
-          <div className="h-full flex flex-col items-center justify-center pt-32 opacity-70 px-4 text-center">
-            <Logo className="w-24 h-24 mb-6 drop-shadow-xl animate-pulse-slow" />
-            <p className="text-xl font-bold text-slate-900">Prisma</p>
-            <p className="text-sm text-slate-500 max-w-xs mt-2">
-              Deep multi-agent reasoning.
-            </p>
-          </div>
-        )}
-        {/* History */}
-        {messages.map((msg, idx) => (
-          <ChatMessageItem
-            key={msg.id}
-            message={msg}
-            isLast={idx === messages.length - 1}
-          />
-        ))}
-        {/* Active Generation (Ghost Message) */}
-        {appState !== 'idle' && appState !== 'completed' && (
-          <div className="group w-full bg-transparent text-slate-800">
-            <div className="max-w-6xl mx-auto px-4 py-8 flex gap-6">
-              <div className="flex-shrink-0 w-8 h-8 rounded-full bg-white border border-blue-200 shadow-sm flex items-center justify-center">
-                <div className="animate-spin w-4 h-4 border-2 border-blue-600 border-t-transparent rounded-full"></div>
-              </div>
-              <div className="flex-1 min-w-0">
-                <div className="font-semibold text-sm text-slate-900 mb-2">Prisma</div>
-                {/* Active Thinking Process */}
-                <div className="mb-4 bg-white border border-blue-100 rounded-xl p-4 shadow-sm">
-                  <ProcessFlow
-                    appState={appState}
-                    managerAnalysis={managerAnalysis}
-                    experts={experts}
-                    processStartTime={processStartTime}
-                    processEndTime={processEndTime}
-                  />
-                </div>
-                {/* Streaming Output */}
-                {finalOutput && (
-                  <div className="prose prose-slate max-w-none">
-                    <ChatMessageItem
-                      message={{
-                        id: 'streaming',
-                        role: 'model',
-                        content: finalOutput,
-                        isThinking: false
-                      }}
-                    />
-                  </div>
-                )}
-              </div>
-            </div>
-          </div>
-        )}
-      </div>
+      {isIdle ? (
+        <div className="h-full flex flex-col items-center justify-center opacity-70 px-4 text-center">
+          <Logo className="w-24 h-24 mb-6 drop-shadow-xl animate-pulse-slow" />
+          <p className="text-xl font-bold text-slate-900">Prisma</p>
+          <p className="text-sm text-slate-500 max-w-xs mt-2">
+            Deep multi-agent reasoning.
+          </p>
+        </div>
+      ) : (
+        <div className="pb-40">
+          {/* History */}
+          {messages.map((msg, idx) => (
+            <ChatMessageItem
+              key={msg.id}
+              message={msg}
+              isLast={idx === messages.length - 1}
+            />
+          ))}
+          {/* Active Generation (Ghost Message) */}
+          {appState !== 'idle' && appState !== 'completed' && (
+            <div className="group w-full bg-transparent text-slate-800">
+              <div className="max-w-6xl mx-auto px-4 py-8 flex gap-6">
+                <div className="flex-shrink-0 w-8 h-8 rounded-full bg-white border border-blue-200 shadow-sm flex items-center justify-center">
+                  <div className="animate-spin w-4 h-4 border-2 border-blue-600 border-t-transparent rounded-full"></div>
+                </div>
+                <div className="flex-1 min-w-0">
+                  <div className="font-semibold text-sm text-slate-900 mb-2">Prisma</div>
+                  {/* Active Thinking Process */}
+                  <div className="mb-4 bg-white border border-blue-100 rounded-xl p-4 shadow-sm">
+                    <ProcessFlow
+                      appState={appState}
+                      managerAnalysis={managerAnalysis}
+                      experts={experts}
+                      processStartTime={processStartTime}
+                      processEndTime={processEndTime}
+                    />
+                  </div>
+                  {/* Streaming Output */}
+                  {finalOutput && (
+                    <div className="prose prose-slate max-w-none">
+                      <ChatMessageItem
+                        message={{
+                          id: 'streaming',
+                          role: 'model',
+                          content: finalOutput,
+                          isThinking: false
+                        }}
+                      />
+                    </div>
+                  )}
+                </div>
+              </div>
+            </div>
+          )}
+        </div>
+      )}
     </div>
   );
 };
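Net effect of the restructure above: the idle hero and the conversation list become mutually exclusive branches of a single ternary keyed off the extracted isIdle flag, instead of the hero being conditionally stacked inside the always-rendered pb-40 wrapper. That also lets the hero center itself with h-full rather than being pushed down by pt-32.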

Changed file: ChatMessageItem component

@@ -1,5 +1,6 @@
 import React, { useState } from 'react';
-import { User, Sparkles, ChevronDown, ChevronRight } from 'lucide-react';
+import { User, Sparkles, ChevronDown, ChevronRight, Copy, Check } from 'lucide-react';
 import MarkdownRenderer from './MarkdownRenderer';
 import ProcessFlow from './ProcessFlow';
 import { ChatMessage } from '../types';
@@ -12,10 +13,18 @@ interface ChatMessageProps {
 const ChatMessageItem = ({ message, isLast }: ChatMessageProps) => {
   const isUser = message.role === 'user';
   const [showThinking, setShowThinking] = useState(false);
+  const [copied, setCopied] = useState(false);

   // Check if there is any thinking data to show
   const hasThinkingData = message.analysis || (message.experts && message.experts.length > 0);

+  const handleCopy = () => {
+    if (!message.content) return;
+    navigator.clipboard.writeText(message.content);
+    setCopied(true);
+    setTimeout(() => setCopied(false), 2000);
+  };
+
   return (
     <div className={`group w-full text-slate-800 ${isUser ? 'bg-transparent' : 'bg-transparent'}`}>
       <div className="max-w-6xl mx-auto px-4 py-8 flex gap-4 md:gap-6">
@@ -36,8 +45,30 @@ const ChatMessageItem = ({ message, isLast }: ChatMessageProps) => {
         {/* Content */}
         <div className="relative flex-1 overflow-hidden">
-          <div className="font-semibold text-sm text-slate-900 mb-1">
-            {isUser ? 'You' : 'Prisma'}
+          <div className="flex items-center justify-between mb-1">
+            <div className="font-semibold text-sm text-slate-900">
+              {isUser ? 'You' : 'Prisma'}
+            </div>
+            {message.content && (
+              <button
+                onClick={handleCopy}
+                className={`p-1.5 rounded-md transition-all duration-200 flex items-center gap-1.5
+                  ${copied
+                    ? 'text-emerald-600 bg-emerald-50'
+                    : 'text-slate-400 hover:text-slate-600 hover:bg-slate-100 opacity-0 group-hover:opacity-100 focus:opacity-100'
+                  }`}
+                title="Copy message"
+              >
+                {copied ? (
+                  <>
+                    <Check size={14} />
+                    <span className="text-[10px] font-medium uppercase tracking-wider">Copied</span>
+                  </>
+                ) : (
+                  <Copy size={14} />
+                )}
+              </button>
+            )}
           </div>

           {/* Thinking Process Accordion (Only for AI) */}
@@ -100,4 +131,4 @@ const ChatMessageItem = ({ message, isLast }: ChatMessageProps) => {
   );
 };

-export default ChatMessageItem;
\ No newline at end of file
+export default ChatMessageItem;
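The handleCopy introduced above calls navigator.clipboard.writeText without awaiting it, so a rejected write would still flash the "Copied" state; the Clipboard API is also only defined in secure contexts (HTTPS or localhost). A defensive variant, offered as a sketch only (handleCopySafe is a hypothetical name, not part of this commit):

// Sketch: hardened copy handler; assumes the same copied-state setter as above.
const handleCopySafe = async (content: string, setCopied: (v: boolean) => void) => {
  if (!content || !navigator.clipboard) return; // clipboard is undefined outside secure contexts
  try {
    await navigator.clipboard.writeText(content);
    setCopied(true);
    setTimeout(() => setCopied(false), 2000);
  } catch (err) {
    console.warn('Clipboard write failed:', err);
  }
};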

Changed file: Header component

@@ -15,7 +15,7 @@ interface HeaderProps {
 const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSidebar, onNewChat }: HeaderProps) => {
   return (
-    <header className="sticky top-0 z-50 bg-white/80 backdrop-blur-md border-b border-slate-100">
+    <header className="sticky top-0 z-50 bg-white/80 backdrop-blur-md">
       <div className="w-full px-4 h-16 flex items-center justify-between">
         <div className="flex items-center gap-4">
           <button
@@ -65,4 +65,4 @@ const Header = ({ selectedModel, setSelectedModel, onOpenSettings, onToggleSideb
   );
 };

-export default Header;
\ No newline at end of file
+export default Header;

Changed file: InputSection component

@@ -1,3 +1,4 @@
 import React, { useRef, useLayoutEffect, useState, useEffect } from 'react';
 import { ArrowUp, Square } from 'lucide-react';
 import { AppState } from '../types';
@@ -8,9 +9,10 @@ interface InputSectionProps {
   onRun: () => void;
   onStop: () => void;
   appState: AppState;
+  focusTrigger?: number;
 }

-const InputSection = ({ query, setQuery, onRun, onStop, appState }: InputSectionProps) => {
+const InputSection = ({ query, setQuery, onRun, onStop, appState, focusTrigger }: InputSectionProps) => {
   const textareaRef = useRef<HTMLTextAreaElement>(null);
   const [isComposing, setIsComposing] = useState(false);
@@ -35,11 +37,12 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState }: InputSection
   };

   // Focus input on mount and when app becomes idle (e.g. after "New Chat" or completion)
+  // or when explicitly triggered by focusTrigger
   useEffect(() => {
     if (appState === 'idle' && textareaRef.current) {
       textareaRef.current.focus();
     }
-  }, [appState]);
+  }, [appState, focusTrigger]);

   // useLayoutEffect prevents visual flickering by adjusting height before paint
   useLayoutEffect(() => {
@@ -107,4 +110,4 @@ const InputSection = ({ query, setQuery, onRun, onStop, appState }: InputSection
   );
 };

-export default InputSection;
\ No newline at end of file
+export default InputSection;
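Note that focusTrigger is never read inside the effect body; it sits in the dependency array purely so that each increment from useAppLogic re-runs the effect and re-focuses the textarea. The same counter-in-deps pattern isolated as a reusable hook, as a sketch (useFocusSignal is a hypothetical helper, not in this commit):

// Sketch: a counter dependency acts as an imperative "focus again" signal.
import { useEffect, useRef } from 'react';

export function useFocusSignal<T extends HTMLElement>(signal: number | undefined, enabled: boolean) {
  const ref = useRef<T>(null);
  useEffect(() => {
    if (enabled && ref.current) ref.current.focus();
    // signal is intentionally unused in the body; changing it just forces a re-run.
  }, [enabled, signal]);
  return ref;
}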

Changed file: ProcessFlow component

@@ -1,3 +1,4 @@
 import React, { useState, useEffect } from 'react';
 import { Users, Zap, Brain, Loader2, CheckCircle2, Clock } from 'lucide-react';
 import { AppState, AnalysisResult, ExpertResult } from '../types';
@@ -66,7 +67,7 @@ const ProcessFlow = ({ appState, managerAnalysis, experts, defaultExpanded = tru
   const expertsStatus = anyExpertWorking ? 'active' : (allExpertsDone ? 'completed' : 'idle');

   return (
-    <div className="relative space-y-4 pt-4">
+    <div className="relative space-y-4 pt-4 w-full">
       {/* Global Timer Overlay */}
       <GlobalTimer start={processStartTime} end={processEndTime} appState={appState} />
@@ -115,7 +116,7 @@ const ProcessFlow = ({ appState, managerAnalysis, experts, defaultExpanded = tru
           isExpanded={isExpanded}
           onToggle={() => setIsExpanded(!isExpanded)}
         >
-          <div className="grid grid-cols-1 gap-3 pt-2">
+          <div className="grid grid-cols-1 md:grid-cols-2 xl:grid-cols-3 gap-4 pt-2">
             {experts.map((expert) => (
               <ExpertCard key={expert.id} expert={expert} />
             ))}
@@ -152,4 +153,4 @@ const ProcessFlow = ({ appState, managerAnalysis, experts, defaultExpanded = tru
   );
 };

-export default ProcessFlow;
\ No newline at end of file
+export default ProcessFlow;
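With Tailwind's default breakpoints, the grid change means expert cards render one per row on small screens, two per row from md (min-width 768px), and three from xl (min-width 1280px), with the gap widened from gap-3 to gap-4.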

Changed file: useAppLogic hook

@@ -21,6 +21,7 @@ export const useAppLogic = () => {
   // UI State
   const [isSidebarOpen, setIsSidebarOpen] = useState(true);
   const [isSettingsOpen, setIsSettingsOpen] = useState(false);
+  const [focusTrigger, setFocusTrigger] = useState(0); // Trigger for input focus

   // Active Chat State
   const [messages, setMessages] = useState<ChatMessage[]>([]);
@@ -149,6 +150,8 @@ export const useAppLogic = () => {
       }
       resetDeepThink();
+      // Refocus after completion
+      setFocusTrigger(prev => prev + 1);
     }
   }, [appState, finalOutput, managerAnalysis, experts, synthesisThoughts, resetDeepThink, processStartTime, processEndTime, currentSessionId, messages, selectedModel, createSession, updateSessionMessages]);
@@ -181,6 +184,7 @@ export const useAppLogic = () => {
     setMessages([]);
     setQuery('');
     resetDeepThink();
+    setFocusTrigger(prev => prev + 1); // Trigger focus
     if (window.innerWidth < 1024) setIsSidebarOpen(false);
   }, [stopDeepThink, setCurrentSessionId, resetDeepThink]);
@@ -188,6 +192,7 @@ export const useAppLogic = () => {
     stopDeepThink();
     resetDeepThink();
     setCurrentSessionId(id);
+    setFocusTrigger(prev => prev + 1); // Trigger focus
     if (window.innerWidth < 1024) setIsSidebarOpen(false);
   }, [stopDeepThink, resetDeepThink, setCurrentSessionId]);
@@ -223,6 +228,7 @@ export const useAppLogic = () => {
     handleNewChat,
     handleSelectSession,
     handleDeleteSession,
-    stopDeepThink
+    stopDeepThink,
+    focusTrigger
   };
 };

Changed file: index.html

@@ -4,6 +4,8 @@
     <meta charset="UTF-8" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>Prisma</title>
+    <!-- SVG Favicon -->
+    <link rel="icon" type="image/svg+xml" href="data:image/svg+xml,%3Csvg viewBox='0 0 600 600' xmlns='http://www.w3.org/2000/svg'%3E%3Cg stroke-width='16' stroke-linecap='round' stroke-linejoin='round' fill='none' stroke='%23334155'%3E%3Cpath d='M300 180 L200 420 L400 420 Z'/%3E%3Cpath d='M300 50 L300 180'/%3E%3Cpath d='M100 480 L200 420'/%3E%3Cpath d='M500 480 L400 420'/%3E%3Cpath d='M300 50 L100 480 L500 480 Z'/%3E%3C/g%3E%3Cg stroke-width='12' stroke-linejoin='round' fill='none'%3E%3Cline x1='0' y1='275' x2='195' y2='275' stroke='%23334155'/%3E%3Cpolyline points='194,270 380,225 600,245' stroke='%232563eb' opacity='0.95'/%3E%3Cpolyline points='194,275 400,275 600,305' stroke='%234ade80' opacity='0.95'/%3E%3Cpolyline points='194,280 420,325 600,370' stroke='%239333ea' opacity='0.95'/%3E%3C/g%3E%3C/svg%3E">
     <script src="https://cdn.tailwindcss.com"></script>
     <script src="https://cdn.tailwindcss.com?plugins=typography"></script>
     <link href="https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&family=JetBrains+Mono:wght@400;700&display=swap" rel="stylesheet">
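For reference, the URL-encoded data URI above decodes to the following standalone SVG (a prism outline with three refracted beams in blue, green, and purple):

<svg viewBox='0 0 600 600' xmlns='http://www.w3.org/2000/svg'>
  <g stroke-width='16' stroke-linecap='round' stroke-linejoin='round' fill='none' stroke='#334155'>
    <path d='M300 180 L200 420 L400 420 Z'/>
    <path d='M300 50 L300 180'/>
    <path d='M100 480 L200 420'/>
    <path d='M500 480 L400 420'/>
    <path d='M300 50 L100 480 L500 480 Z'/>
  </g>
  <g stroke-width='12' stroke-linejoin='round' fill='none'>
    <line x1='0' y1='275' x2='195' y2='275' stroke='#334155'/>
    <polyline points='194,270 380,225 600,245' stroke='#2563eb' opacity='0.95'/>
    <polyline points='194,275 400,275 600,305' stroke='#4ade80' opacity='0.95'/>
    <polyline points='194,280 420,325 600,370' stroke='#9333ea' opacity='0.95'/>
  </g>
</svg>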

Changed file: expert stream service (streamExpertResponse)

@@ -1,5 +1,6 @@
 import { ModelOption, ExpertResult } from '../../types';
 import { getExpertSystemInstruction } from './prompts';
+import { withRetry } from '../utils/retry';

 export const streamExpertResponse = async (
   ai: any,
@@ -10,7 +11,10 @@ export const streamExpertResponse = async (
   signal: AbortSignal,
   onChunk: (text: string, thought: string) => void
 ): Promise<void> => {
-  const streamResult = await ai.models.generateContentStream({
+  // We wrap the stream initiation in retry.
+  // If the stream is successfully established but fails during iteration,
+  // we catch that separately.
+  const streamResult = await withRetry(() => ai.models.generateContentStream({
     model: model,
     contents: expert.prompt,
     config: {
@@ -21,23 +25,30 @@ export const streamExpertResponse = async (
       includeThoughts: true
       }
     }
-  });
+  }));

-  for await (const chunk of streamResult) {
-    if (signal.aborted) break;
-
-    let chunkText = "";
-    let chunkThought = "";
-
-    if (chunk.candidates?.[0]?.content?.parts) {
-      for (const part of chunk.candidates[0].content.parts) {
-        if (part.thought) {
-          chunkThought += (part.text || "");
-        } else if (part.text) {
-          chunkText += part.text;
-        }
-      }
-      onChunk(chunkText, chunkThought);
-    }
-  }
+  try {
+    for await (const chunk of streamResult) {
+      if (signal.aborted) break;
+
+      let chunkText = "";
+      let chunkThought = "";
+
+      if (chunk.candidates?.[0]?.content?.parts) {
+        for (const part of chunk.candidates[0].content.parts) {
+          if (part.thought) {
+            chunkThought += (part.text || "");
+          } else if (part.text) {
+            chunkText += part.text;
+          }
+        }
+        onChunk(chunkText, chunkThought);
+      }
+    }
+  } catch (streamError) {
+    console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
+    // We don't retry mid-stream automatically here to avoid complex state management,
+    // but the initial connection is protected by withRetry.
+    throw streamError;
+  }
 };
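One behavioral note: as written, signal.aborted is only consulted between chunks, so a stop request takes effect at the next chunk boundary rather than immediately, and only the loop is exited; whether the underlying request is also cancelled depends on the signal being wired into the SDK call, which this diff does not show.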

Changed file: manager service (executeManagerAnalysis / executeManagerReview)

@@ -2,6 +2,7 @@ import { Type } from "@google/genai";
 import { ModelOption, AnalysisResult, ExpertResult, ReviewResult } from '../../types';
 import { cleanJsonString } from '../../utils';
 import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
+import { withRetry } from '../utils/retry';

 export const executeManagerAnalysis = async (
   ai: any,
@@ -31,32 +32,36 @@ export const executeManagerAnalysis = async (
     required: ["thought_process", "experts"]
   };

-  const analysisResp = await ai.models.generateContent({
-    model: model,
-    contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
-    config: {
-      systemInstruction: MANAGER_SYSTEM_PROMPT,
-      responseMimeType: "application/json",
-      responseSchema: managerSchema,
-      thinkingConfig: {
-        includeThoughts: true,
-        thinkingBudget: budget
-      }
-    }
-  });
-
-  const rawText = analysisResp.text || '{}';
-  const cleanText = cleanJsonString(rawText);
-
   try {
+    const analysisResp = await withRetry(() => ai.models.generateContent({
+      model: model,
+      contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
+      config: {
+        systemInstruction: MANAGER_SYSTEM_PROMPT,
+        responseMimeType: "application/json",
+        responseSchema: managerSchema,
+        thinkingConfig: {
+          includeThoughts: true,
+          thinkingBudget: budget
+        }
+      }
+    }));
+
+    const rawText = analysisResp.text || '{}';
+    const cleanText = cleanJsonString(rawText);
+
     const analysisJson = JSON.parse(cleanText) as AnalysisResult;
     if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
       throw new Error("Invalid schema structure");
     }
     return analysisJson;
   } catch (e) {
-    console.error("JSON Parse Error:", e, rawText);
-    return { thought_process: "Direct processing.", experts: [] };
+    console.error("Manager Analysis Error:", e);
+    // Return a fallback so the process doesn't completely die if planning fails
+    return {
+      thought_process: "Direct processing fallback due to analysis error.",
+      experts: []
+    };
   }
 };
@@ -97,28 +102,27 @@ export const executeManagerReview = async (
   const content = `User Query: "${query}"\n\nCurrent Expert Outputs:\n${expertOutputs}`;

-  const resp = await ai.models.generateContent({
-    model: model,
-    contents: content,
-    config: {
-      systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
-      responseMimeType: "application/json",
-      responseSchema: reviewSchema,
-      thinkingConfig: {
-        includeThoughts: true,
-        thinkingBudget: budget
-      }
-    }
-  });
-
-  const rawText = resp.text || '{}';
-  const cleanText = cleanJsonString(rawText);
-
   try {
+    const resp = await withRetry(() => ai.models.generateContent({
+      model: model,
+      contents: content,
+      config: {
+        systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
+        responseMimeType: "application/json",
+        responseSchema: reviewSchema,
+        thinkingConfig: {
+          includeThoughts: true,
+          thinkingBudget: budget
+        }
+      }
+    }));
+
+    const rawText = resp.text || '{}';
+    const cleanText = cleanJsonString(rawText);
+
     return JSON.parse(cleanText) as ReviewResult;
   } catch (e) {
-    console.error("Review JSON Parse Error:", e);
-    // Fallback: Assume satisfied if JSON fails to avoid infinite loops due to format errors
-    return { satisfied: true, critique: "JSON Error, proceeding to synthesis." };
+    console.error("Review Error:", e);
+    // Fallback: Assume satisfied if JSON or API fails to avoid infinite loops
+    return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
   }
 };
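Both fallback paths run the raw model text through cleanJsonString (imported from ../../utils, not shown in this commit) before JSON.parse. Its actual implementation is not visible here; a typical shape for such a helper, sketched under that assumption, strips the markdown fences models often wrap around JSON:

// Sketch only: assumed behavior of a cleanJsonString-style helper; the real one
// lives in ../../utils and may differ.
function cleanJsonStringSketch(raw: string): string {
  const fenced = raw.match(/```(?:json)?\s*([\s\S]*?)```/i);
  return (fenced ? fenced[1] : raw).trim();
}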

Changed file: synthesis stream service (streamSynthesisResponse)

@@ -1,5 +1,6 @@
 import { ModelOption, ExpertResult } from '../../types';
 import { getSynthesisPrompt } from './prompts';
+import { withRetry } from '../utils/retry';

 export const streamSynthesisResponse = async (
   ai: any,
@@ -13,7 +14,7 @@ export const streamSynthesisResponse = async (
 ): Promise<void> => {
   const prompt = getSynthesisPrompt(historyContext, query, expertResults);

-  const synthesisStream = await ai.models.generateContentStream({
+  const synthesisStream = await withRetry(() => ai.models.generateContentStream({
     model: model,
     contents: prompt,
     config: {
@@ -22,23 +23,28 @@ export const streamSynthesisResponse = async (
       includeThoughts: true
       }
     }
-  });
+  }));

-  for await (const chunk of synthesisStream) {
-    if (signal.aborted) break;
-
-    let chunkText = "";
-    let chunkThought = "";
-
-    if (chunk.candidates?.[0]?.content?.parts) {
-      for (const part of chunk.candidates[0].content.parts) {
-        if (part.thought) {
-          chunkThought += (part.text || "");
-        } else if (part.text) {
-          chunkText += part.text;
-        }
-      }
-      onChunk(chunkText, chunkThought);
-    }
-  }
+  try {
+    for await (const chunk of synthesisStream) {
+      if (signal.aborted) break;
+
+      let chunkText = "";
+      let chunkThought = "";
+
+      if (chunk.candidates?.[0]?.content?.parts) {
+        for (const part of chunk.candidates[0].content.parts) {
+          if (part.thought) {
+            chunkThought += (part.text || "");
+          } else if (part.text) {
+            chunkText += part.text;
+          }
+        }
+        onChunk(chunkText, chunkThought);
+      }
+    }
+  } catch (streamError) {
+    console.error("Synthesis stream interrupted:", streamError);
+    throw streamError;
+  }
 };

Changed file: retry utility (withRetry, new file)

@@ -0,0 +1,50 @@
+/**
+ * Retry Utility for API calls
+ * Implements exponential backoff and handles transient errors (429, 5xx).
+ */
+export async function withRetry<T>(
+  fn: () => Promise<T>,
+  maxRetries: number = 3,
+  initialDelay: number = 1500
+): Promise<T> {
+  let lastError: any;
+
+  for (let attempt = 1; attempt <= maxRetries; attempt++) {
+    try {
+      return await fn();
+    } catch (error: any) {
+      lastError = error;
+
+      // Determine if the error is transient:
+      // 429: Too Many Requests
+      // 5xx: Server Errors
+      // Network failures (no status)
+      const status = error?.status || error?.response?.status;
+      const message = error?.message || "";
+
+      const isRateLimit = status === 429;
+      const isServerError = status >= 500 && status < 600;
+      const isNetworkError = !status;
+
+      const isTransient = isRateLimit || isServerError || isNetworkError;
+
+      // If we reached max retries or the error isn't transient, throw immediately
+      if (attempt === maxRetries || !isTransient) {
+        console.error(`[Prisma] Final attempt ${attempt} failed:`, error);
+        throw error;
+      }
+
+      // Calculate delay with exponential backoff: 1.5s, 3s, 6s...
+      const delay = initialDelay * Math.pow(2, attempt - 1);
+      console.warn(
+        `[Prisma] API call failed (Attempt ${attempt}/${maxRetries}). ` +
+        `Status: ${status || 'Network Error'}. Retrying in ${delay}ms...`
+      );
+
+      await new Promise(resolve => setTimeout(resolve, delay));
+    }
+  }
+
+  throw lastError || new Error("Maximum retries reached without success");
+}
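A quick usage sketch for withRetry (a test double, not app code): the factory must start a fresh request on each attempt, which is why the services above pass () => ai.models.generateContent(...) rather than an already-started promise. Incidentally, the message variable computed in the catch block is never used; it could be dropped or matched against quota-error strings.

// Sketch: exercising withRetry with a deliberately flaky async call.
import { withRetry } from './retry'; // hypothetical relative path

let calls = 0;
const flaky = async (): Promise<string> => {
  calls += 1;
  if (calls < 3) {
    // A 503 matches the transient (5xx) branch, so withRetry backs off and retries.
    throw Object.assign(new Error('boom'), { status: 503 });
  }
  return 'ok';
};

withRetry(flaky, 3, 10).then(result => {
  console.log(result, 'after', calls, 'attempts'); // logs: ok after 3 attempts
});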