@@ -115,7 +116,7 @@ const ProcessFlow = ({ appState, managerAnalysis, experts, defaultExpanded = tru
isExpanded={isExpanded}
onToggle={() => setIsExpanded(!isExpanded)}
>
-
+
{experts.map((expert) => (
))}
@@ -152,4 +153,4 @@ const ProcessFlow = ({ appState, managerAnalysis, experts, defaultExpanded = tru
);
};
-export default ProcessFlow;
\ No newline at end of file
+export default ProcessFlow;
diff --git a/prisma/hooks/useAppLogic.ts b/prisma/hooks/useAppLogic.ts
index d4b0553..f7c550b 100644
--- a/prisma/hooks/useAppLogic.ts
+++ b/prisma/hooks/useAppLogic.ts
@@ -21,6 +21,7 @@ export const useAppLogic = () => {
// UI State
const [isSidebarOpen, setIsSidebarOpen] = useState(true);
const [isSettingsOpen, setIsSettingsOpen] = useState(false);
+ const [focusTrigger, setFocusTrigger] = useState(0); // Trigger for input focus
// Active Chat State
const [messages, setMessages] = useState([]);
@@ -149,6 +150,8 @@ export const useAppLogic = () => {
}
resetDeepThink();
+ // Refocus after completion
+ setFocusTrigger(prev => prev + 1);
}
}, [appState, finalOutput, managerAnalysis, experts, synthesisThoughts, resetDeepThink, processStartTime, processEndTime, currentSessionId, messages, selectedModel, createSession, updateSessionMessages]);
@@ -181,6 +184,7 @@ export const useAppLogic = () => {
setMessages([]);
setQuery('');
resetDeepThink();
+ setFocusTrigger(prev => prev + 1); // Trigger focus
if (window.innerWidth < 1024) setIsSidebarOpen(false);
}, [stopDeepThink, setCurrentSessionId, resetDeepThink]);
@@ -188,6 +192,7 @@ export const useAppLogic = () => {
stopDeepThink();
resetDeepThink();
setCurrentSessionId(id);
+ setFocusTrigger(prev => prev + 1); // Trigger focus
if (window.innerWidth < 1024) setIsSidebarOpen(false);
}, [stopDeepThink, resetDeepThink, setCurrentSessionId]);
@@ -223,6 +228,7 @@ export const useAppLogic = () => {
handleNewChat,
handleSelectSession,
handleDeleteSession,
- stopDeepThink
+ stopDeepThink,
+ focusTrigger
};
};
diff --git a/prisma/index.html b/prisma/index.html
index efe80fc..945af3e 100644
--- a/prisma/index.html
+++ b/prisma/index.html
@@ -4,6 +4,8 @@
Prisma
+
+
diff --git a/prisma/services/deepThink/expert.ts b/prisma/services/deepThink/expert.ts
index dd602a9..49c0269 100644
--- a/prisma/services/deepThink/expert.ts
+++ b/prisma/services/deepThink/expert.ts
@@ -1,5 +1,6 @@
import { ModelOption, ExpertResult } from '../../types';
import { getExpertSystemInstruction } from './prompts';
+import { withRetry } from '../utils/retry';
export const streamExpertResponse = async (
ai: any,
@@ -10,7 +11,10 @@ export const streamExpertResponse = async (
signal: AbortSignal,
onChunk: (text: string, thought: string) => void
): Promise<void> => {
- const streamResult = await ai.models.generateContentStream({
+ // We wrap the stream initiation in retry.
+ // If the stream is successfully established but fails during iteration,
+ // we catch that separately.
+ const streamResult = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: expert.prompt,
config: {
@@ -21,23 +25,30 @@ export const streamExpertResponse = async (
includeThoughts: true
}
}
- });
+ }));
- for await (const chunk of streamResult) {
- if (signal.aborted) break;
+ try {
+ for await (const chunk of streamResult) {
+ if (signal.aborted) break;
- let chunkText = "";
- let chunkThought = "";
+ let chunkText = "";
+ let chunkThought = "";
- if (chunk.candidates?.[0]?.content?.parts) {
- for (const part of chunk.candidates[0].content.parts) {
- if (part.thought) {
- chunkThought += (part.text || "");
- } else if (part.text) {
- chunkText += part.text;
- }
- }
- onChunk(chunkText, chunkThought);
- }
+ if (chunk.candidates?.[0]?.content?.parts) {
+ for (const part of chunk.candidates[0].content.parts) {
+ if (part.thought) {
+ chunkThought += (part.text || "");
+ } else if (part.text) {
+ chunkText += part.text;
+ }
+ }
+ onChunk(chunkText, chunkThought);
+ }
+ }
+ } catch (streamError) {
+ console.error(`Stream interrupted for expert ${expert.role}:`, streamError);
+ // We don't retry mid-stream automatically here to avoid complex state management,
+ // but the initial connection is protected by withRetry.
+ throw streamError;
}
};
diff --git a/prisma/services/deepThink/manager.ts b/prisma/services/deepThink/manager.ts
index 77d0be9..7663e9e 100644
--- a/prisma/services/deepThink/manager.ts
+++ b/prisma/services/deepThink/manager.ts
@@ -2,6 +2,7 @@ import { Type } from "@google/genai";
import { ModelOption, AnalysisResult, ExpertResult, ReviewResult } from '../../types';
import { cleanJsonString } from '../../utils';
import { MANAGER_SYSTEM_PROMPT, MANAGER_REVIEW_SYSTEM_PROMPT } from './prompts';
+import { withRetry } from '../utils/retry';
export const executeManagerAnalysis = async (
ai: any,
@@ -31,32 +32,36 @@ export const executeManagerAnalysis = async (
required: ["thought_process", "experts"]
};
- const analysisResp = await ai.models.generateContent({
- model: model,
- contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
- config: {
- systemInstruction: MANAGER_SYSTEM_PROMPT,
- responseMimeType: "application/json",
- responseSchema: managerSchema,
- thinkingConfig: {
- includeThoughts: true,
- thinkingBudget: budget
- }
- }
- });
-
- const rawText = analysisResp.text || '{}';
- const cleanText = cleanJsonString(rawText);
-
try {
+ const analysisResp = await withRetry(() => ai.models.generateContent({
+ model: model,
+ contents: `Context:\n${context}\n\nCurrent Query: "${query}"`,
+ config: {
+ systemInstruction: MANAGER_SYSTEM_PROMPT,
+ responseMimeType: "application/json",
+ responseSchema: managerSchema,
+ thinkingConfig: {
+ includeThoughts: true,
+ thinkingBudget: budget
+ }
+ }
+ }));
+
+ const rawText = analysisResp.text || '{}';
+ const cleanText = cleanJsonString(rawText);
+
const analysisJson = JSON.parse(cleanText) as AnalysisResult;
if (!analysisJson.experts || !Array.isArray(analysisJson.experts)) {
throw new Error("Invalid schema structure");
}
return analysisJson;
} catch (e) {
- console.error("JSON Parse Error:", e, rawText);
- return { thought_process: "Direct processing.", experts: [] };
+ console.error("Manager Analysis Error:", e);
+ // Return a fallback so the process doesn't completely die if planning fails
+ return {
+ thought_process: "Direct processing fallback due to analysis error.",
+ experts: []
+ };
}
};
@@ -97,28 +102,27 @@ export const executeManagerReview = async (
const content = `User Query: "${query}"\n\nCurrent Expert Outputs:\n${expertOutputs}`;
- const resp = await ai.models.generateContent({
- model: model,
- contents: content,
- config: {
- systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
- responseMimeType: "application/json",
- responseSchema: reviewSchema,
- thinkingConfig: {
- includeThoughts: true,
- thinkingBudget: budget
- }
- }
- });
-
- const rawText = resp.text || '{}';
- const cleanText = cleanJsonString(rawText);
-
try {
+ const resp = await withRetry(() => ai.models.generateContent({
+ model: model,
+ contents: content,
+ config: {
+ systemInstruction: MANAGER_REVIEW_SYSTEM_PROMPT,
+ responseMimeType: "application/json",
+ responseSchema: reviewSchema,
+ thinkingConfig: {
+ includeThoughts: true,
+ thinkingBudget: budget
+ }
+ }
+ }));
+
+ const rawText = resp.text || '{}';
+ const cleanText = cleanJsonString(rawText);
return JSON.parse(cleanText) as ReviewResult;
} catch (e) {
- console.error("Review JSON Parse Error:", e);
- // Fallback: Assume satisfied if JSON fails to avoid infinite loops due to format errors
- return { satisfied: true, critique: "JSON Error, proceeding to synthesis." };
+ console.error("Review Error:", e);
+ // Fallback: Assume satisfied if JSON or API fails to avoid infinite loops
+ return { satisfied: true, critique: "Processing Error, proceeding to synthesis." };
}
-};
\ No newline at end of file
+};
diff --git a/prisma/services/deepThink/synthesis.ts b/prisma/services/deepThink/synthesis.ts
index d0ee74b..a8f0754 100644
--- a/prisma/services/deepThink/synthesis.ts
+++ b/prisma/services/deepThink/synthesis.ts
@@ -1,5 +1,6 @@
import { ModelOption, ExpertResult } from '../../types';
import { getSynthesisPrompt } from './prompts';
+import { withRetry } from '../utils/retry';
export const streamSynthesisResponse = async (
ai: any,
@@ -13,7 +14,7 @@ export const streamSynthesisResponse = async (
): Promise<void> => {
const prompt = getSynthesisPrompt(historyContext, query, expertResults);
- const synthesisStream = await ai.models.generateContentStream({
+ const synthesisStream = await withRetry(() => ai.models.generateContentStream({
model: model,
contents: prompt,
config: {
@@ -22,23 +23,28 @@ export const streamSynthesisResponse = async (
includeThoughts: true
}
}
- });
+ }));
- for await (const chunk of synthesisStream) {
- if (signal.aborted) break;
+ try {
+ for await (const chunk of synthesisStream) {
+ if (signal.aborted) break;
- let chunkText = "";
- let chunkThought = "";
+ let chunkText = "";
+ let chunkThought = "";
- if (chunk.candidates?.[0]?.content?.parts) {
- for (const part of chunk.candidates[0].content.parts) {
- if (part.thought) {
- chunkThought += (part.text || "");
- } else if (part.text) {
- chunkText += part.text;
- }
- }
- onChunk(chunkText, chunkThought);
+ if (chunk.candidates?.[0]?.content?.parts) {
+ for (const part of chunk.candidates[0].content.parts) {
+ if (part.thought) {
+ chunkThought += (part.text || "");
+ } else if (part.text) {
+ chunkText += part.text;
+ }
+ }
+ onChunk(chunkText, chunkThought);
+ }
}
+ } catch (streamError) {
+ console.error("Synthesis stream interrupted:", streamError);
+ throw streamError;
}
};
diff --git a/prisma/services/utils/retry.ts b/prisma/services/utils/retry.ts
new file mode 100644
index 0000000..e2f157f
--- /dev/null
+++ b/prisma/services/utils/retry.ts
@@ -0,0 +1,50 @@
+/**
+ * Retry Utility for API calls
+ * Implements exponential backoff and handles transient errors (429, 5xx).
+ */
+
+export async function withRetry<T>(
+  fn: () => Promise<T>,
+  maxRetries: number = 3,
+  initialDelay: number = 1500
+): Promise<T> {
+ let lastError: any;
+
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
+ try {
+ return await fn();
+ } catch (error: any) {
+ lastError = error;
+
+ // Determine if the error is transient
+ // 429: Too Many Requests
+ // 5xx: Server Errors
+ // Network failures (no status)
+ const status = error?.status || error?.response?.status;
+ const message = error?.message || "";
+
+ const isRateLimit = status === 429;
+ const isServerError = status >= 500 && status < 600;
+ const isNetworkError = !status;
+ const isTransient = isRateLimit || isServerError || isNetworkError;
+
+ // If we reached max retries or the error isn't transient, throw immediately
+ if (attempt === maxRetries || !isTransient) {
+ console.error(`[Prisma] Final attempt ${attempt} failed:`, error);
+ throw error;
+ }
+
+ // Calculate delay with exponential backoff: 1.5s, 3s, 6s...
+ const delay = initialDelay * Math.pow(2, attempt - 1);
+
+ console.warn(
+ `[Prisma] API call failed (Attempt ${attempt}/${maxRetries}). ` +
+ `Status: ${status || 'Network Error'}. Retrying in ${delay}ms...`
+ );
+
+ await new Promise(resolve => setTimeout(resolve, delay));
+ }
+ }
+
+ throw lastError || new Error("Maximum retries reached without success");
+}