 src/client/views/nodes/ChatBox/Agent.ts             | 112
 src/client/views/nodes/ChatBox/AnswerParser.ts      |  17
 src/client/views/nodes/ChatBox/ChatBox.tsx          |  25
 src/client/views/nodes/ChatBox/MessageComponent.tsx |  32
 src/client/views/nodes/ChatBox/prompts.ts           |  50
 src/client/views/nodes/ChatBox/types.ts             |  17
 6 files changed, 126 insertions(+), 127 deletions(-)
diff --git a/src/client/views/nodes/ChatBox/Agent.ts b/src/client/views/nodes/ChatBox/Agent.ts
index 413ecbd41..ae08271ee 100644
--- a/src/client/views/nodes/ChatBox/Agent.ts
+++ b/src/client/views/nodes/ChatBox/Agent.ts
@@ -1,5 +1,5 @@
import OpenAI from 'openai';
-import { Tool, AgentMessage, AssistantMessage, TEXT_TYPE, CHUNK_TYPE, ASSISTANT_ROLE } from './types';
+import { Tool, AgentMessage, AssistantMessage, TEXT_TYPE, CHUNK_TYPE, ASSISTANT_ROLE, ProcessingInfo, PROCESSING_TYPE } from './types';
import { getReactPrompt } from './prompts';
import { XMLParser, XMLBuilder } from 'fast-xml-parser';
import { Vectorstore } from './vectorstore/Vectorstore';
@@ -29,6 +29,8 @@ export class Agent {
private _csvData: () => { filename: string; id: string; text: string }[];
private actionNumber: number = 0;
private thoughtNumber: number = 0;
+ private processingNumber: number = 0;
+ private processingInfo: ProcessingInfo[] = [];
constructor(_vectorstore: Vectorstore, summaries: () => string, history: () => string, csvData: () => { filename: string; id: string; text: string }[], addLinkedUrlDoc: (url: string, id: string) => void) {
this.client = new OpenAI({ apiKey: process.env.OPENAI_KEY, dangerouslyAllowBrowser: true });
@@ -46,7 +48,7 @@ export class Agent {
};
}
- async askAgent(question: string, maxTurns: number = 30, onUpdate: (update: AssistantMessage) => void): Promise<AssistantMessage> {
+ async askAgent(question: string, onUpdate: (update: ProcessingInfo[]) => void, maxTurns: number = 30): Promise<AssistantMessage> {
console.log(`Starting query: ${question}`);
this.messages.push({ role: 'user', content: question });
const chatHistory = this._history();
@@ -57,48 +59,34 @@ export class Agent {
const builder = new XMLBuilder({ ignoreAttributes: false, attributeNamePrefix: '@_' });
let currentAction: string | undefined;
- let assistantMessage: AssistantMessage = {
- role: ASSISTANT_ROLE.ASSISTANT,
- content: [],
- thoughts: [],
- actions: [],
- citations: [],
- };
+ this.processingInfo = [];
for (let i = 2; i < maxTurns; i += 2) {
console.log(`Turn ${i}/${maxTurns}`);
- const result = await this.execute(assistantMessage, onUpdate);
+ const result = await this.execute(onUpdate);
this.interMessages.push({ role: 'assistant', content: result });
let parsedResult;
try {
parsedResult = parser.parse(result);
} catch (error) {
- console.log('Error: Invalid XML response from bot');
- assistantMessage.content.push({ index: assistantMessage.content.length, type: TEXT_TYPE.ERROR, text: 'Invalid response from bot', citation_ids: null });
- return assistantMessage;
+ throw new Error(`Error parsing response: ${error}`);
}
const stage = parsedResult.stage;
if (!stage) {
- console.log('Error: No stage found in response');
- assistantMessage.content.push({ index: assistantMessage.content.length, type: TEXT_TYPE.ERROR, text: 'Invalid response from bot', citation_ids: null });
- return assistantMessage;
+ throw new Error(`Error: No stage found in response`);
}
for (const key in stage) {
- if (!assistantMessage.actions) {
- assistantMessage.actions = [];
- }
if (key === 'thought') {
console.log(`Thought: ${stage[key]}`);
- this.thoughtNumber++;
+ this.processingNumber++;
} else if (key === 'action') {
currentAction = stage[key] as string;
console.log(`Action: ${currentAction}`);
- onUpdate({ ...assistantMessage });
if (this.tools[currentAction]) {
const nextPrompt = [
{
@@ -118,35 +106,29 @@ export class Agent {
console.log(`Action input: ${actionInput}`);
if (currentAction) {
try {
- const observation = await this.processAction(currentAction, stage[key]);
+ const observation = await this.processAction(currentAction, stage[key].inputs);
const nextPrompt = [{ type: 'text', text: `<stage number="${i + 1}" role="user"> <observation>` }, ...observation, { type: 'text', text: '</observation></stage>' }];
console.log(observation);
this.interMessages.push({ role: 'user', content: nextPrompt });
- this.actionNumber++; //might not work with no tool
+ this.processingNumber++;
break;
} catch (error) {
- console.log(`Error processing action: ${error}`);
- assistantMessage.content.push({ index: assistantMessage.content.length, type: TEXT_TYPE.ERROR, text: 'Invalid response from bot', citation_ids: null });
- return assistantMessage;
+ throw new Error(`Error processing action: ${error}`);
}
} else {
- console.log('Error: Action input without a valid action');
- assistantMessage.content.push({ index: assistantMessage.content.length, type: TEXT_TYPE.ERROR, text: 'Invalid response from bot', citation_ids: null });
- return assistantMessage;
+ throw new Error('Error: Action input without a valid action');
}
} else if (key === 'answer') {
console.log('Answer found. Ending query.');
- const parsedAnswer = AnswerParser.parse(result, assistantMessage);
- onUpdate({ ...parsedAnswer });
+ const parsedAnswer = AnswerParser.parse(result, this.processingInfo);
return parsedAnswer;
}
}
}
- console.log('Reached maximum turns. Ending query.');
- return assistantMessage;
+ throw new Error('Reached maximum turns. Ending query.');
}
- private async execute(assistantMessage: AssistantMessage, onUpdate: (update: AssistantMessage) => void): Promise<string> {
+ private async execute(onUpdate: (update: ProcessingInfo[]) => void): Promise<string> {
const stream = await this.client.chat.completions.create({
model: 'gpt-4o',
messages: this.interMessages as ChatCompletionMessageParam[],
@@ -158,12 +140,6 @@ export class Agent {
let currentTag: string = '';
let currentContent: string = '';
let isInsideTag: boolean = false;
- let isInsideActionInput: boolean = false;
- let actionInputContent: string = '';
-
- if (!assistantMessage.actions) {
- assistantMessage.actions = [];
- }
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || '';
@@ -172,40 +148,20 @@ export class Agent {
for (const char of content) {
if (char === '<') {
isInsideTag = true;
- if (currentTag && currentContent) {
- if (currentTag === 'action_input') {
- assistantMessage.actions[assistantMessage.actions.length - 1].action_input = actionInputContent;
- actionInputContent = '';
- } else {
- this.processStreamedContent(currentTag, currentContent, assistantMessage);
- }
- onUpdate({ ...assistantMessage });
- }
currentTag = '';
currentContent = '';
} else if (char === '>') {
isInsideTag = false;
- if (currentTag === 'action_input') {
- isInsideActionInput = true;
- } else if (currentTag === '/action_input') {
- isInsideActionInput = false;
- console.log('Action input:', actionInputContent);
- assistantMessage.actions[assistantMessage.actions.length - 1].action_input = actionInputContent;
- actionInputContent = '';
- onUpdate({ ...assistantMessage });
- }
if (currentTag.startsWith('/')) {
currentTag = '';
}
} else if (isInsideTag) {
currentTag += char;
- } else if (isInsideActionInput) {
- actionInputContent += char;
} else {
currentContent += char;
- if (currentTag === 'thought' || currentTag === 'action') {
- this.processStreamedContent(currentTag, currentContent, assistantMessage);
- onUpdate({ ...assistantMessage });
+ if (currentTag === 'thought' || currentTag === 'action_input_description') {
+ this.processStreamedContent(currentTag, currentContent);
+ onUpdate(this.processingInfo);
}
}
}
@@ -214,24 +170,24 @@ export class Agent {
return fullResponse;
}
- private processStreamedContent(tag: string, content: string, assistantMessage: AssistantMessage) {
- if (!assistantMessage.thoughts) {
- assistantMessage.thoughts = [];
- }
- if (!assistantMessage.actions) {
- assistantMessage.actions = [];
- }
+ private processStreamedContent(tag: string, streamed_content: string) {
+ const current_info = this.processingInfo.find(info => info.index === this.processingNumber);
switch (tag) {
case 'thought':
- assistantMessage.thoughts[this.thoughtNumber] = content;
- break;
- case 'action':
- assistantMessage.actions[this.actionNumber].action = content;
-
- break;
- case 'action_input':
- assistantMessage.actions[this.actionNumber].action_input = content;
+ if (current_info) {
+ current_info.content = streamed_content;
+ } else {
+ console.log(`Adding thought: ${streamed_content}`);
+ this.processingInfo.push({ index: this.processingNumber, type: PROCESSING_TYPE.THOUGHT, content: streamed_content.trim() });
+ }
break;
+ case 'action_input_description':
+ if (current_info) {
+ current_info.content = streamed_content;
+ } else {
+ console.log(`Adding thought: ${streamed_content}`);
+ this.processingInfo.push({ index: this.processingNumber, type: PROCESSING_TYPE.ACTION, content: streamed_content.trim() });
+ }
}
}
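With this change, onUpdate streams the running list of ProcessingInfo entries instead of a partial AssistantMessage, and maxTurns becomes the optional final parameter. A minimal caller sketch (hypothetical wrapper names, mirroring the ChatBox.tsx change below):

import { Agent } from './Agent';
import { AssistantMessage, ProcessingInfo } from './types';

// Hypothetical wiring: assumes the caller already has an Agent instance and a render callback.
async function ask(agent: Agent, question: string, render: (info: ProcessingInfo[]) => void): Promise<AssistantMessage> {
    // Each streamed thought / action description arrives as the accumulated ProcessingInfo array.
    const onUpdate = (update: ProcessingInfo[]) => render([...update]);
    return agent.askAgent(question, onUpdate); // maxTurns is now the trailing optional argument (default 30)
}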
diff --git a/src/client/views/nodes/ChatBox/AnswerParser.ts b/src/client/views/nodes/ChatBox/AnswerParser.ts
index 68637b7c7..1d46a366d 100644
--- a/src/client/views/nodes/ChatBox/AnswerParser.ts
+++ b/src/client/views/nodes/ChatBox/AnswerParser.ts
@@ -1,8 +1,8 @@
-import { ASSISTANT_ROLE, AssistantMessage, Citation, CHUNK_TYPE, TEXT_TYPE, getChunkType } from './types';
+import { ASSISTANT_ROLE, AssistantMessage, Citation, CHUNK_TYPE, TEXT_TYPE, getChunkType, ProcessingInfo } from './types';
import { v4 as uuid } from 'uuid';
export class AnswerParser {
- static parse(xml: string, currentMessage: AssistantMessage): AssistantMessage {
+ static parse(xml: string, processingInfo: ProcessingInfo[]): AssistantMessage {
const answerRegex = /<answer>([\s\S]*?)<\/answer>/;
const citationsRegex = /<citations>([\s\S]*?)<\/citations>/;
const citationRegex = /<citation index="([^"]+)" chunk_id="([^"]+)" type="([^"]+)">([\s\S]*?)<\/citation>/g;
@@ -102,10 +102,15 @@ export class AnswerParser {
followUpQuestions.push(questionMatch[1].trim());
}
}
- currentMessage.content = currentMessage.content.concat(content);
- currentMessage.citations = citations;
- currentMessage.follow_up_questions = followUpQuestions;
- return currentMessage;
+ const assistantResponse: AssistantMessage = {
+ role: ASSISTANT_ROLE.ASSISTANT,
+ content,
+ follow_up_questions: followUpQuestions,
+ citations,
+ processing_info: processingInfo,
+ };
+
+ return assistantResponse;
}
}
diff --git a/src/client/views/nodes/ChatBox/ChatBox.tsx b/src/client/views/nodes/ChatBox/ChatBox.tsx
index 099c0298e..36416a330 100644
--- a/src/client/views/nodes/ChatBox/ChatBox.tsx
+++ b/src/client/views/nodes/ChatBox/ChatBox.tsx
@@ -11,7 +11,7 @@ import { ViewBoxAnnotatableComponent } from '../../DocComponent';
import { FieldView, FieldViewProps } from '../FieldView';
import './ChatBox.scss';
import MessageComponentBox from './MessageComponent';
-import { ASSISTANT_ROLE, AssistantMessage, AI_Document, Citation, CHUNK_TYPE, RAGChunk, getChunkType, TEXT_TYPE, SimplifiedChunk } from './types';
+import { ASSISTANT_ROLE, AssistantMessage, AI_Document, Citation, CHUNK_TYPE, RAGChunk, getChunkType, TEXT_TYPE, SimplifiedChunk, ProcessingInfo } from './types';
import { Vectorstore } from './vectorstore/Vectorstore';
import { Agent } from './Agent';
import dotenv from 'dotenv';
@@ -150,26 +150,30 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
if (trimmedText) {
try {
textInput.value = '';
- this.history.push({ role: ASSISTANT_ROLE.USER, content: [{ index: 0, type: TEXT_TYPE.NORMAL, text: trimmedText, citation_ids: null }] });
+ this.history.push({ role: ASSISTANT_ROLE.USER, content: [{ index: 0, type: TEXT_TYPE.NORMAL, text: trimmedText, citation_ids: null }], processing_info: [] });
this.isLoading = true;
- this.current_message = { role: ASSISTANT_ROLE.ASSISTANT, content: [], thoughts: [], actions: [], citations: [] };
+ this.current_message = { role: ASSISTANT_ROLE.ASSISTANT, content: [], citations: [], processing_info: [] };
- const onUpdate = (update: AssistantMessage) => {
+ const onUpdate = (update: ProcessingInfo[]) => {
runInAction(() => {
- this.current_message = { ...update };
+ if (this.current_message) {
+ this.current_message = { ...this.current_message, processing_info: update };
+ }
});
};
- const finalMessage = await this.agent.askAgent(trimmedText, 20, onUpdate);
+ const finalMessage = await this.agent.askAgent(trimmedText, onUpdate);
runInAction(() => {
- this.history.push({ ...finalMessage });
- this.current_message = undefined;
- this.dataDoc.data = JSON.stringify(this.history);
+ if (this.current_message) {
+ this.history.push({ ...finalMessage });
+ this.current_message = undefined;
+ this.dataDoc.data = JSON.stringify(this.history);
+ }
});
} catch (err) {
console.error('Error:', err);
- this.history.push({ role: ASSISTANT_ROLE.ASSISTANT, content: [{ index: 0, type: TEXT_TYPE.NORMAL, text: 'Sorry, I encountered an error while processing your request.', citation_ids: null }] });
+ this.history.push({ role: ASSISTANT_ROLE.ASSISTANT, content: [{ index: 0, type: TEXT_TYPE.ERROR, text: 'Sorry, I encountered an error while processing your request.', citation_ids: null }], processing_info: [] });
} finally {
this.isLoading = false;
}
@@ -295,6 +299,7 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
this.history.push({
role: ASSISTANT_ROLE.ASSISTANT,
content: [{ index: 0, type: TEXT_TYPE.NORMAL, text: 'Welcome to the Document Analyser Assistant! Link a document or ask questions to get started.', citation_ids: null }],
+ processing_info: [],
});
});
}
diff --git a/src/client/views/nodes/ChatBox/MessageComponent.tsx b/src/client/views/nodes/ChatBox/MessageComponent.tsx
index e82dcd5f7..0b8fa6b96 100644
--- a/src/client/views/nodes/ChatBox/MessageComponent.tsx
+++ b/src/client/views/nodes/ChatBox/MessageComponent.tsx
@@ -1,6 +1,6 @@
import React from 'react';
import { observer } from 'mobx-react';
-import { AssistantMessage, Citation, MessageContent, TEXT_TYPE } from './types';
+import { AssistantMessage, Citation, MessageContent, PROCESSING_TYPE, ProcessingInfo, TEXT_TYPE } from './types';
import Markdown from 'react-markdown';
interface MessageComponentProps {
@@ -68,22 +68,24 @@ const MessageComponentBox: React.FC<MessageComponentProps> = function ({ message
}
};
+ console.log(message.processing_info);
+
return (
<div className={`message ${message.role}`}>
- {message.thoughts &&
- message.thoughts.map((thought, idx) => (
- <div key={idx} className="thought">
- <i>Thought: {thought}</i>
- </div>
- ))}
- {message.actions &&
- message.actions.map((action, idx) => (
- <div key={idx} className="action">
- <strong>Action:</strong> {action.action}
- <br />
- <strong>Input:</strong> {action.action_input}
- </div>
- ))}
+ {message.processing_info &&
+ (message.processing_info as ProcessingInfo[]).map(item =>
+ item.type === PROCESSING_TYPE.THOUGHT ? (
+ <div key={item.index} className="thought">
+ <strong>Thought:</strong> {item.content}
+ </div>
+ ) : item.type === PROCESSING_TYPE.ACTION ? (
+ <div key={item.index} className="action">
+ <strong>Action:</strong> {item.content}
+ </div>
+ ) : (
+ <div key={item.index} className="error"></div>
+ )
+ )}
<div>{message.content && message.content.map(messageFragment => <React.Fragment key={messageFragment.index}>{renderContent(messageFragment)}</React.Fragment>)}</div>
{message.follow_up_questions && message.follow_up_questions.length > 0 && (
<div className="follow-up-questions">
diff --git a/src/client/views/nodes/ChatBox/prompts.ts b/src/client/views/nodes/ChatBox/prompts.ts
index 5f4f79a7f..4a67ac1d1 100644
--- a/src/client/views/nodes/ChatBox/prompts.ts
+++ b/src/client/views/nodes/ChatBox/prompts.ts
@@ -16,7 +16,7 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
return `<system_message>
<task>
- You are an advanced AI assistant equipped with various tools to answer user queries accurately and efficiently. Your task is to provide a comprehensive response based on the user’s prompt using available tools, chat history, and provided information. Follow these guidelines meticulously to ensure the accuracy and structure of your response.
+ You are an advanced AI assistant equipped with various tools to answer user queries accurately and efficiently. Your task is to provide a comprehensive response based on the user's prompt using available tools, chat history, and provided information. Follow these guidelines meticulously to ensure the accuracy and structure of your response.
</task>
<critical_points>
@@ -42,7 +42,7 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<tag>&lt;citation&gt; - Provide citations for each grounded text, referencing the tool or chunk used.</tag>
</citations>
<follow_up_questions>
- <tag>&lt;question&gt; - Include exactly three follow-up questions from the user’s perspective within these tags.</tag>
+ <tag>&lt;question&gt; - Include exactly three follow-up questions from the user's perspective within these tags.</tag>
</follow_up_questions>
</answer>
</response_structure>
@@ -73,20 +73,20 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<step>Specify the correct type: "text", "image", "table", "csv", or "url".</step>
<step>For text-based information, include only the relevant subset of the original information that the &lt;grounded_text&gt; is based on.</step>
<step>For image, table, csv, or url citation types, leave the citation content empty.</step>
- <step>ALL CITATIONS MUST use the chunk_id field to reference the source, whether it’s from RAG, search + website scraping, data analysis, or any other tool.</step>
+ <step>ALL CITATIONS MUST use the chunk_id field to reference the source, whether it's from RAG, search + website scraping, data analysis, or any other tool.</step>
<step>One citation can be used for multiple &lt;grounded_text&gt; tags if they are based on the same tool or chunk information.</step>
<step>!!!DO NOT OVERCITE - only include citations for information that is directly relevant to the &lt;grounded_text&gt;.</step>
</citation_guidelines>
<operational_process>
- <step>Analyze the user’s query carefully.</step>
+ <step>Analyze the user's query carefully.</step>
<step>Determine whether a tool is required to answer the query accurately.</step>
<step>If a tool is necessary:</step>
<substeps>
<substep>Select the most appropriate tool.</substep>
<substep>Use the &lt;action&gt; tag to specify the tool.</substep>
<substep>End your response after the &lt;action&gt; tag and wait for action rules to be provided.</substep>
- <substep>Based on the action rules, provide the necessary tool parameters within the &lt;action_input&gt; tag.</substep>
+ <substep>Based on the action rules, provide the necessary tool parameters within the &lt;action_input&gt; tag, including a brief description of what you're doing with the action.</substep>
<substep>End your response again and wait for the observation from the tool.</substep>
</substeps>
<step>If no tool is needed, use the 'no_tool' action but still follow the same response structure.</step>
@@ -97,8 +97,8 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<final_answer_requirements>
<requirement>Your final &lt;answer&gt; tag must contain:</requirement>
<elements>
- <element>The complete answer to the user’s query, with grounded information wrapped in &lt;grounded_text&gt; tags and general information wrapped in &lt;normal_text&gt; tags.</element>
- <element>Exactly three follow-up questions written from the user’s perspective, enclosed within &lt;follow_up_questions&gt; tags.</element>
+ <element>The complete answer to the user's query, with grounded information wrapped in &lt;grounded_text&gt; tags and general information wrapped in &lt;normal_text&gt; tags.</element>
+ <element>Exactly three follow-up questions written from the user's perspective, enclosed within &lt;follow_up_questions&gt; tags.</element>
</elements>
</final_answer_requirements>
@@ -138,9 +138,12 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<stage number="4" role="assistant">
<action_input>
- <hypothetical_document_chunk>
- The user is asking for key moments and statements from official sources regarding the 2010 Vancouver Winter Olympics. Search the provided documents for any press releases or official statements that highlight significant events, achievements, or noteworthy aspects of the games.
- </hypothetical_document_chunk>
+ <action_input_description>Searching user documents for official statements and key moments of the 2010 Vancouver Winter Olympics.</action_input_description>
+ <inputs>
+ <hypothetical_document_chunk>
+ The user is asking for key moments and statements from official sources regarding the 2010 Vancouver Winter Olympics. Search the provided documents for any press releases or official statements that highlight significant events, achievements, or noteworthy aspects of the games.
+ </hypothetical_document_chunk>
+ </inputs>
</action_input>
</stage>
@@ -177,7 +180,10 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<stage number="8" role="assistant">
<action_input>
- <csv_file_name>2010_Vancouver_Olympics_Medal_Count.csv</csv_file_name>
+ <action_input_description>Analyzing the medal count data for the 2010 Vancouver Winter Olympics to compare country performances.</action_input_description>
+ <inputs>
+ <csv_file_name>2010_Vancouver_Olympics_Medal_Count.csv</csv_file_name>
+ </inputs>
</action_input>
</stage>
@@ -248,7 +254,10 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<stage number="4" role="assistant">
<action_input>
- <query>Best hiking trails Yosemite National Park</query>
+ <action_input_description>Searching the web for information about the best hiking trails in Yosemite National Park.</action_input_description>
+ <inputs>
+ <query>Best hiking trails Yosemite National Park</query>
+ </inputs>
</action_input>
</stage>
@@ -283,7 +292,10 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<stage number="8" role="assistant">
<action_input>
- <url>https://www.nps.gov/yose/planyourvisit/hiking.htm</url>
+ <action_input_description>Scraping information about hiking trails from the official Yosemite National Park website.</action_input_description>
+ <inputs>
+ <url>https://www.nps.gov/yose/planyourvisit/hiking.htm</url>
+ </inputs>
</action_input>
</stage>
@@ -309,7 +321,10 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<stage number="12" role="assistant">
<action_input>
- <url>https://www.alltrails.com/parks/us/california/yosemite-national-park</url>
+ <action_input_description>Scraping user reviews and ratings for Yosemite hiking trails from AllTrails.</action_input_description>
+ <inputs>
+ <url>https://www.alltrails.com/parks/us/california/yosemite-national-park</url>
+ </inputs>
</action_input>
</stage>
@@ -335,7 +350,10 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
<stage number="16" role="assistant">
<action_input>
- <url>https://www.yosemitehikes.com/</url>
+ <action_input_description>Gathering detailed trail descriptions and hiking information from YosemiteHikes.com.</action_input_description>
+ <inputs>
+ <url>https://www.yosemitehikes.com/</url>
+ </inputs>
</action_input>
</stage>
@@ -410,7 +428,7 @@ export function getReactPrompt(tools: Tool[], summaries: () => string, chatHisto
</chat_history>
<final_instruction>
- Now, process the user’s query and provide your response following the format and rules outlined above. Ensure your final answer is comprehensive, correctly cited, and entirely contained within the structured tags. Do not get stuck in infinite loops and keep responses concise, grounded, and most importantly, HELPFUL AND USEFUL!
+ Now, process the user's query and provide your response following the format and rules outlined above. Ensure your final answer is comprehensive, correctly cited, and entirely contained within the structured tags. Do not get stuck in infinite loops and keep responses concise, grounded, and most importantly, HELPFUL AND USEFUL!
</final_instruction>
</system_message>
`;
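The prompt examples now nest tool parameters under an <inputs> element alongside an <action_input_description>, which is why Agent.ts forwards stage[key].inputs to processAction. An illustrative sketch (not part of the change) of how that shape parses, assuming the same fast-xml-parser options Agent.ts already uses:

import { XMLParser } from 'fast-xml-parser';

const parser = new XMLParser({ ignoreAttributes: false, attributeNamePrefix: '@_' });
const stageXml = `<stage number="4" role="assistant">
  <action_input>
    <action_input_description>Searching the web for information about Yosemite hiking trails.</action_input_description>
    <inputs>
      <query>Best hiking trails Yosemite National Park</query>
    </inputs>
  </action_input>
</stage>`;

const parsed = parser.parse(stageXml);
// The description feeds the streamed ProcessingInfo entries, while only the nested
// inputs object is handed to the selected tool.
console.log(parsed.stage.action_input.inputs.query); // "Best hiking trails Yosemite National Park"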
diff --git a/src/client/views/nodes/ChatBox/types.ts b/src/client/views/nodes/ChatBox/types.ts
index b4e66bdbe..09f14f019 100644
--- a/src/client/views/nodes/ChatBox/types.ts
+++ b/src/client/views/nodes/ChatBox/types.ts
@@ -1,6 +1,8 @@
import { breadcrumbsClasses } from '@mui/material';
import { Doc } from '../../../../fields/Doc';
import { StrCast } from '../../../../fields/Types';
+import e from 'cors';
+import { index } from 'd3';
export enum ASSISTANT_ROLE {
USER = 'user',
@@ -21,6 +23,12 @@ export enum CHUNK_TYPE {
CSV = 'CSV',
}
+export enum PROCESSING_TYPE {
+ THOUGHT = 'thought',
+ ACTION = 'action',
+ //eventually migrate error to here
+}
+
export function getChunkType(type: string): CHUNK_TYPE {
switch (type.toLowerCase()) {
case 'text':
@@ -44,13 +52,18 @@ export function getChunkType(type: string): CHUNK_TYPE {
}
}
+export interface ProcessingInfo {
+ index: number;
+ type: PROCESSING_TYPE;
+ content: string;
+}
+
export interface AssistantMessage {
role: ASSISTANT_ROLE;
content: MessageContent[];
follow_up_questions?: string[];
- thoughts?: string[];
- actions?: { index: number; action: string; action_input: string }[];
citations?: Citation[];
+ processing_info: ProcessingInfo[];
}
export interface MessageContent {