From 4791cd23af08da70895204a3a7fbaf889d9af2d5 Mon Sep 17 00:00:00 2001
From: "A.J. Shulman"
Date: Sat, 7 Sep 2024 12:43:05 -0400
Subject: completely restructured, added comments, and significantly reduced
 the length of the prompt (~72% shorter and cheaper)

---
 src/client/views/Main.tsx | 2 +-
 src/client/views/nodes/ChatBox/Agent.ts | 254 -------
 src/client/views/nodes/ChatBox/ChatBox.scss | 279 --------
 src/client/views/nodes/ChatBox/ChatBox.tsx | 756 ---------------------
 .../views/nodes/ChatBox/MessageComponent.tsx | 105 ---
 src/client/views/nodes/ChatBox/ProgressBar.scss | 69 --
 src/client/views/nodes/ChatBox/ProgressBar.tsx | 13 -
 src/client/views/nodes/ChatBox/prompts.ts | 465 -------------
 .../nodes/ChatBox/response_parsers/AnswerParser.ts | 125 ----
 .../response_parsers/StreamedAnswerParser.ts | 73 --
 src/client/views/nodes/ChatBox/tools.ts | 26 -
 src/client/views/nodes/ChatBox/tools/BaseTool.ts | 24 -
 .../views/nodes/ChatBox/tools/CalculateTool.ts | 26 -
 .../views/nodes/ChatBox/tools/CreateCSVTool.ts | 51 --
 .../nodes/ChatBox/tools/CreateCollectionTool.ts | 36 -
 .../views/nodes/ChatBox/tools/DataAnalysisTool.ts | 59 --
 .../views/nodes/ChatBox/tools/GetDocsTool.ts | 29 -
 src/client/views/nodes/ChatBox/tools/NoTool.ts | 18 -
 src/client/views/nodes/ChatBox/tools/RAGTool.ts | 138 ----
 src/client/views/nodes/ChatBox/tools/SearchTool.ts | 54 --
 .../nodes/ChatBox/tools/WebsiteInfoScraperTool.ts | 43 --
 .../views/nodes/ChatBox/tools/WikipediaTool.ts | 37 -
 src/client/views/nodes/ChatBox/types.ts | 169 -----
 .../views/nodes/ChatBox/vectorstore/Vectorstore.ts | 258 -------
 .../views/nodes/chatbot/agentsystem/Agent.ts | 278 ++++++++
 .../views/nodes/chatbot/agentsystem/prompts.ts | 181 +++++
 .../nodes/chatbot/chatboxcomponents/ChatBox.scss | 279 ++++++++
 .../nodes/chatbot/chatboxcomponents/ChatBox.tsx | 745 ++++++++++++++++++++
 .../chatbot/chatboxcomponents/MessageComponent.tsx | 146 ++++
 .../chatbot/chatboxcomponents/ProgressBar.scss | 69 ++
 .../chatbot/chatboxcomponents/ProgressBar.tsx | 22 +
 .../nodes/chatbot/response_parsers/AnswerParser.ts | 125 ++++
 .../response_parsers/StreamedAnswerParser.ts | 73 ++
 src/client/views/nodes/chatbot/tools/BaseTool.ts | 24 +
 .../views/nodes/chatbot/tools/CalculateTool.ts | 26 +
 .../views/nodes/chatbot/tools/CreateCSVTool.ts | 51 ++
 .../nodes/chatbot/tools/CreateCollectionTool.ts | 36 +
 .../views/nodes/chatbot/tools/DataAnalysisTool.ts | 59 ++
 .../views/nodes/chatbot/tools/GetDocsTool.ts | 29 +
 src/client/views/nodes/chatbot/tools/NoTool.ts | 18 +
 src/client/views/nodes/chatbot/tools/RAGTool.ts | 138 ++++
 src/client/views/nodes/chatbot/tools/SearchTool.ts | 54 ++
 .../nodes/chatbot/tools/WebsiteInfoScraperTool.ts | 83 +++
 .../views/nodes/chatbot/tools/WikipediaTool.ts | 37 +
 src/client/views/nodes/chatbot/types/types.ts | 129 ++++
 .../views/nodes/chatbot/vectorstore/Vectorstore.ts | 258 +++
 src/server/ApiManagers/AssistantManager.ts | 2 +-
 47 files changed, 2862 insertions(+), 3109 deletions(-)
 delete mode 100644 src/client/views/nodes/ChatBox/Agent.ts
 delete mode 100644 src/client/views/nodes/ChatBox/ChatBox.scss
 delete mode 100644 src/client/views/nodes/ChatBox/ChatBox.tsx
 delete mode 100644 src/client/views/nodes/ChatBox/MessageComponent.tsx
 delete mode 100644 src/client/views/nodes/ChatBox/ProgressBar.scss
 delete mode 100644 src/client/views/nodes/ChatBox/ProgressBar.tsx
 delete mode 100644 src/client/views/nodes/ChatBox/prompts.ts
 delete mode 100644 src/client/views/nodes/ChatBox/response_parsers/AnswerParser.ts
 delete mode 100644 src/client/views/nodes/ChatBox/response_parsers/StreamedAnswerParser.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/BaseTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/CalculateTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/CreateCSVTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/CreateCollectionTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/DataAnalysisTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/GetDocsTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/NoTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/RAGTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/SearchTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/WebsiteInfoScraperTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/tools/WikipediaTool.ts
 delete mode 100644 src/client/views/nodes/ChatBox/types.ts
 delete mode 100644 src/client/views/nodes/ChatBox/vectorstore/Vectorstore.ts
 create mode 100644 src/client/views/nodes/chatbot/agentsystem/Agent.ts
 create mode 100644 src/client/views/nodes/chatbot/agentsystem/prompts.ts
 create mode 100644 src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss
 create mode 100644 src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx
 create mode 100644 src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx
 create mode 100644 src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss
 create mode 100644 src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx
 create mode 100644 src/client/views/nodes/chatbot/response_parsers/AnswerParser.ts
 create mode 100644 src/client/views/nodes/chatbot/response_parsers/StreamedAnswerParser.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/BaseTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/CalculateTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/CreateCSVTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/CreateCollectionTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/DataAnalysisTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/GetDocsTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/NoTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/RAGTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/SearchTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts
 create mode 100644 src/client/views/nodes/chatbot/tools/WikipediaTool.ts
 create mode 100644 src/client/views/nodes/chatbot/types/types.ts
 create mode 100644 src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts

diff --git a/src/client/views/Main.tsx b/src/client/views/Main.tsx
index f7cd0e925..e1f7dd233 100644
--- a/src/client/views/Main.tsx
+++ b/src/client/views/Main.tsx
@@ -27,7 +27,7 @@ import { CollectionSchemaView } from './collections/collectionSchema/CollectionS
 import { SchemaRowBox } from './collections/collectionSchema/SchemaRowBox';
 import './global/globalScripts';
 import { AudioBox } from './nodes/AudioBox';
-import { ChatBox } from './nodes/ChatBox/ChatBox';
+import { ChatBox } from './nodes/chatbot/chatboxcomponents/ChatBox';
 import { ComparisonBox } from './nodes/ComparisonBox';
 import { DataVizBox } from './nodes/DataVizBox/DataVizBox';
 import { DiagramBox } from './nodes/DiagramBox';
diff --git a/src/client/views/nodes/ChatBox/Agent.ts
b/src/client/views/nodes/ChatBox/Agent.ts deleted file mode 100644 index 9eb069c78..000000000 --- a/src/client/views/nodes/ChatBox/Agent.ts +++ /dev/null @@ -1,254 +0,0 @@ -import OpenAI from 'openai'; -import { Tool, AgentMessage, AssistantMessage, TEXT_TYPE, CHUNK_TYPE, ASSISTANT_ROLE, ProcessingInfo, PROCESSING_TYPE } from './types'; -import { getReactPrompt } from './prompts'; -import { XMLParser, XMLBuilder } from 'fast-xml-parser'; -import { Vectorstore } from './vectorstore/Vectorstore'; -import { ChatCompletionMessageParam } from 'openai/resources'; -import dotenv from 'dotenv'; -import { CalculateTool } from './tools/CalculateTool'; -import { RAGTool } from './tools/RAGTool'; -import { DataAnalysisTool } from './tools/DataAnalysisTool'; -import { WebsiteInfoScraperTool } from './tools/WebsiteInfoScraperTool'; -import { SearchTool } from './tools/SearchTool'; -import { NoTool } from './tools/NoTool'; -import { on } from 'events'; -import { v4 as uuidv4 } from 'uuid'; -import { AnswerParser } from './response_parsers/AnswerParser'; -import { StreamedAnswerParser } from './response_parsers/StreamedAnswerParser'; -import { CreateCSVTool } from './tools/CreateCSVTool'; - -dotenv.config(); - -export class Agent { - private client: OpenAI; - private tools: Record>; - private messages: AgentMessage[] = []; - private interMessages: AgentMessage[] = []; - private vectorstore: Vectorstore; - private _history: () => string; - private _summaries: () => string; - private _csvData: () => { filename: string; id: string; text: string }[]; - private actionNumber: number = 0; - private thoughtNumber: number = 0; - private processingNumber: number = 0; - private processingInfo: ProcessingInfo[] = []; - private streamedAnswerParser: StreamedAnswerParser = new StreamedAnswerParser(); - - constructor( - _vectorstore: Vectorstore, - summaries: () => string, - history: () => string, - csvData: () => { filename: string; id: string; text: string }[], - addLinkedUrlDoc: (url: string, id: string) => void, - createCSVInDash: (url: string, title: string, id: string, data: string) => void - ) { - this.client = new OpenAI({ apiKey: process.env.OPENAI_KEY, dangerouslyAllowBrowser: true }); - this.vectorstore = _vectorstore; - this._history = history; - this._summaries = summaries; - this._csvData = csvData; - this.tools = { - calculate: new CalculateTool(), - rag: new RAGTool(this.vectorstore), - dataAnalysis: new DataAnalysisTool(csvData), - websiteInfoScraper: new WebsiteInfoScraperTool(addLinkedUrlDoc), - searchTool: new SearchTool(addLinkedUrlDoc), - createCSV: new CreateCSVTool(createCSVInDash), - no_tool: new NoTool(), - }; - } - - async askAgent(question: string, onProcessingUpdate: (processingUpdate: ProcessingInfo[]) => void, onAnswerUpdate: (answerUpdate: string) => void, maxTurns: number = 30): Promise { - console.log(`Starting query: ${question}`); - this.messages.push({ role: 'user', content: question }); - const chatHistory = this._history(); - const systemPrompt = getReactPrompt(Object.values(this.tools), this._summaries, chatHistory); - this.interMessages = [{ role: 'system', content: systemPrompt }]; - this.interMessages.push({ role: 'user', content: `${question}` }); - const parser = new XMLParser({ - ignoreAttributes: false, - attributeNamePrefix: '@_', - textNodeName: '_text', - isArray: (name, jpath, isLeafNode, isAttribute) => { - // Convert tags with the same name to arrays - return ['query', 'url'].indexOf(name) !== -1; - }, - }); - const builder = new XMLBuilder({ ignoreAttributes: false, 
attributeNamePrefix: '@_' }); - - let currentAction: string | undefined; - - this.processingInfo = []; - - for (let i = 2; i < maxTurns; i += 2) { - console.log(this.interMessages); - console.log(`Turn ${i}/${maxTurns}`); - - const result = await this.execute(onProcessingUpdate, onAnswerUpdate); - this.interMessages.push({ role: 'assistant', content: result }); - - let parsedResult; - try { - parsedResult = parser.parse(result); - } catch (error) { - throw new Error(`Error parsing response: ${error}`); - } - - const stage = parsedResult.stage; - - if (!stage) { - throw new Error(`Error: No stage found in response`); - } - - for (const key in stage) { - if (key === 'thought') { - console.log(`Thought: ${stage[key]}`); - this.processingNumber++; - } else if (key === 'action') { - currentAction = stage[key] as string; - console.log(`Action: ${currentAction}`); - if (this.tools[currentAction]) { - const nextPrompt = [ - { - type: 'text', - text: `` + builder.build({ action_rules: this.tools[currentAction].getActionRule() }) + ``, - }, - ]; - this.interMessages.push({ role: 'user', content: nextPrompt }); - break; - } else { - console.log('Error: No valid action'); - this.interMessages.push({ role: 'user', content: `No valid action, try again.` }); - break; - } - } else if (key === 'action_input') { - const actionInput = stage[key]; - console.log(`Action input:`, actionInput.inputs); - if (currentAction) { - try { - // Parse the inputs - //const parsedInputs = this.parseActionInputs(actionInput.inputs); - //console.log(`Parsed inputs:`, parsedInputs); - const observation = await this.processAction(currentAction, actionInput.inputs); - const nextPrompt = [{ type: 'text', text: ` ` }, ...observation, { type: 'text', text: '' }]; - console.log(observation); - this.interMessages.push({ role: 'user', content: nextPrompt }); - this.processingNumber++; - break; - } catch (error) { - throw new Error(`Error processing action: ${error}`); - } - } else { - throw new Error('Error: Action input without a valid action'); - } - } else if (key === 'answer') { - console.log('Answer found. Ending query.'); - this.streamedAnswerParser.reset(); - const parsedAnswer = AnswerParser.parse(result, this.processingInfo); - return parsedAnswer; - } - } - } - throw new Error('Reached maximum turns. 
Ending query.'); - } - - private async execute(onProcessingUpdate: (processingUpdate: ProcessingInfo[]) => void, onAnswerUpdate: (answerUpdate: string) => void): Promise { - const stream = await this.client.chat.completions.create({ - model: 'gpt-4o', - messages: this.interMessages as ChatCompletionMessageParam[], - temperature: 0, - stream: true, - }); - - let fullResponse: string = ''; - let currentTag: string = ''; - let currentContent: string = ''; - let isInsideTag: boolean = false; - - for await (const chunk of stream) { - let content = chunk.choices[0]?.delta?.content || ''; - fullResponse += content; - - for (const char of content) { - if (currentTag === 'answer') { - currentContent += char; - //console.log(char); - const streamedAnswer = this.streamedAnswerParser.parse(char); - //console.log(streamedAnswer); - onAnswerUpdate(streamedAnswer); - continue; - } else if (char === '<') { - isInsideTag = true; - currentTag = ''; - currentContent = ''; - } else if (char === '>') { - isInsideTag = false; - if (currentTag.startsWith('/')) { - currentTag = ''; - } - } else if (isInsideTag) { - currentTag += char; - } else if (currentTag === 'thought' || currentTag === 'action_input_description') { - currentContent += char; - const current_info = this.processingInfo.find(info => info.index === this.processingNumber); - if (current_info) { - current_info.content = currentContent.trim(); - onProcessingUpdate(this.processingInfo); - } else { - this.processingInfo.push({ index: this.processingNumber, type: currentTag === 'thought' ? PROCESSING_TYPE.THOUGHT : PROCESSING_TYPE.ACTION, content: currentContent.trim() }); - onProcessingUpdate(this.processingInfo); - } - } - } - } - - return fullResponse; - } - - private async processAction(action: string, actionInput: any): Promise { - if (!(action in this.tools)) { - throw new Error(`Unknown action: ${action}`); - } - - const tool = this.tools[action]; - const args: Record = {}; - - // for (const paramName in tool.parameters) { - // if (actionInput[paramName] !== undefined) { - // if (Array.isArray(actionInput[paramName])) { - // // If the input is already an array, use it as is - // args[paramName] = actionInput[paramName]; - // } else if (typeof actionInput[paramName] === 'object' && actionInput[paramName] !== null) { - // // If the input is an object, check if it has multiple of the same tag - // const values = Object.values(actionInput[paramName]); - // if (values.length > 1) { - // // If there are multiple values, convert to an array - // args[paramName] = values; - // } else { - // // If there's only one value, use it directly - // args[paramName] = values[0]; - // } - // } else { - // // For single values, use them as is - // args[paramName] = actionInput[paramName]; - // } - // } else if (tool.parameters[paramName].required === 'true') { - // throw new Error(`Missing required parameter '${paramName}' for action '${action}'`); - // } - // } - - return await tool.execute(actionInput); - } - - private parseActionInputs(inputs: any): Record { - const parsedInputs: Record = {}; - for (const key in inputs) { - if (Array.isArray(inputs[key])) { - parsedInputs[key] = inputs[key].map((item: any) => item._text); - } else { - parsedInputs[key] = inputs[key]._text; - } - } - return parsedInputs; - } -} diff --git a/src/client/views/nodes/ChatBox/ChatBox.scss b/src/client/views/nodes/ChatBox/ChatBox.scss deleted file mode 100644 index 42f6a0d61..000000000 --- a/src/client/views/nodes/ChatBox/ChatBox.scss +++ /dev/null @@ -1,279 +0,0 @@ -@import 
url('https://fonts.googleapis.com/css2?family=Atkinson+Hyperlegible:ital,wght@0,400;0,700;1,400;1,700&display=swap'); - -$primary-color: #4a90e2; -$secondary-color: #f5f8fa; -$text-color: #333; -$light-text-color: #777; -$border-color: #e1e8ed; -$shadow-color: rgba(0, 0, 0, 0.1); -$transition: all 0.3s ease; -.chat-box { - display: flex; - flex-direction: column; - height: 100%; - background-color: #fff; - font-family: - 'Atkinson Hyperlegible', - -apple-system, - BlinkMacSystemFont, - 'Segoe UI', - Roboto, - Helvetica, - Arial, - sans-serif; - border-radius: 12px; - overflow: hidden; - box-shadow: 0 4px 12px $shadow-color; - position: relative; - - .chat-header { - background-color: $primary-color; - color: white; - padding: 15px; - text-align: center; - box-shadow: 0 2px 4px $shadow-color; - height: fit-content; - - h2 { - margin: 0; - font-size: 1.3em; - font-weight: 500; - } - } - - .chat-messages { - flex-grow: 1; - overflow-y: auto; - padding: 20px; - display: flex; - flex-direction: column; - gap: 10px; // Added to give space between elements - - &::-webkit-scrollbar { - width: 6px; - } - - &::-webkit-scrollbar-thumb { - background-color: $border-color; - border-radius: 3px; - } - } - - .chat-input { - display: flex; - padding: 20px; - border-top: 1px solid $border-color; - background-color: #fff; - - input { - flex-grow: 1; - padding: 12px 15px; - border: 1px solid $border-color; - border-radius: 24px; - font-size: 15px; - transition: $transition; - - &:focus { - outline: none; - border-color: $primary-color; - box-shadow: 0 0 0 2px rgba($primary-color, 0.2); - } - } - - .submit-button { - background-color: $primary-color; - color: white; - border: none; - border-radius: 50%; - width: 48px; - height: 48px; - margin-left: 10px; - cursor: pointer; - transition: $transition; - display: flex; - align-items: center; - justify-content: center; - - &:hover { - background-color: darken($primary-color, 10%); - } - - &:disabled { - background-color: $light-text-color; - cursor: not-allowed; - } - - .spinner { - height: 24px; - width: 24px; - border: 3px solid rgba(255, 255, 255, 0.3); - border-top: 3px solid #fff; - border-radius: 50%; - animation: spin 1s linear infinite; - display: flex; - align-items: center; - justify-content: center; - } - } - } - .citation-popup { - position: fixed; - bottom: 50px; - left: 50%; - transform: translateX(-50%); - background-color: rgba(0, 0, 0, 0.8); - color: white; - padding: 10px 20px; - border-radius: 10px; - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2); - z-index: 1000; - animation: fadeIn 0.3s ease-in-out; - - p { - margin: 0; - font-size: 14px; - } - - @keyframes fadeIn { - from { - opacity: 0; - } - to { - opacity: 1; - } - } - } -} - -.message { - max-width: 80%; - margin-bottom: 20px; - padding: 16px 20px; - border-radius: 18px; - font-size: 15px; - line-height: 1.5; - box-shadow: 0 2px 4px $shadow-color; - word-wrap: break-word; // To handle long words - - &.user { - align-self: flex-end; - background-color: $primary-color; - color: white; - border-bottom-right-radius: 4px; - } - - &.chatbot { - align-self: flex-start; - background-color: $secondary-color; - color: $text-color; - border-bottom-left-radius: 4px; - } - - .toggle-info { - background-color: transparent; - color: $primary-color; - border: 1px solid $primary-color; - width: 100%; - height: fit-content; - border-radius: 8px; - padding: 10px 16px; - font-size: 14px; - cursor: pointer; - transition: $transition; - margin-top: 10px; - - &:hover { - background-color: rgba($primary-color, 
0.1); - } - } -} - -.follow-up-questions { - margin-top: 15px; - - h4 { - font-size: 15px; - font-weight: 600; - margin-bottom: 10px; - } - - .questions-list { - display: flex; - flex-direction: column; - gap: 10px; - } - - .follow-up-button { - background-color: #fff; - color: $primary-color; - border: 1px solid $primary-color; - border-radius: 8px; - padding: 10px 16px; - font-size: 14px; - cursor: pointer; - transition: $transition; - text-align: left; - white-space: normal; - word-wrap: break-word; - width: 100%; - height: fit-content; - - &:hover { - background-color: $primary-color; - color: #fff; - } - } -} - -.citation-button { - display: inline-flex; - align-items: center; - justify-content: center; - width: 20px; - height: 20px; - border-radius: 50%; - background-color: rgba(0, 0, 0, 0.1); - color: $text-color; - font-size: 12px; - font-weight: bold; - margin-left: 5px; - cursor: pointer; - transition: $transition; - vertical-align: middle; - - &:hover { - background-color: rgba(0, 0, 0, 0.2); - } -} - -.uploading-overlay { - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-color: rgba(255, 255, 255, 0.8); - display: flex; - justify-content: center; - align-items: center; - z-index: 1000; -} - -@keyframes spin { - 0% { - transform: rotate(0deg); - } - 100% { - transform: rotate(360deg); - } -} - -@media (max-width: 768px) { - .chat-box { - border-radius: 0; - } - - .message { - max-width: 90%; - } -} diff --git a/src/client/views/nodes/ChatBox/ChatBox.tsx b/src/client/views/nodes/ChatBox/ChatBox.tsx deleted file mode 100644 index 98a2e6002..000000000 --- a/src/client/views/nodes/ChatBox/ChatBox.tsx +++ /dev/null @@ -1,756 +0,0 @@ -import { action, computed, makeObservable, observable, observe, reaction, runInAction, ObservableSet } from 'mobx'; -import { observer } from 'mobx-react'; -import OpenAI, { ClientOptions } from 'openai'; -import * as React from 'react'; -import { Doc, DocListCast } from '../../../../fields/Doc'; -import { CsvCast, DocCast, PDFCast, RTFCast, StrCast } from '../../../../fields/Types'; -import { DocumentType } from '../../../documents/DocumentTypes'; -import { Docs } from '../../../documents/Documents'; -import { LinkManager } from '../../../util/LinkManager'; -import { ViewBoxAnnotatableComponent } from '../../DocComponent'; -import { FieldView, FieldViewProps } from '../FieldView'; -import './ChatBox.scss'; -import MessageComponentBox from './MessageComponent'; -import { ASSISTANT_ROLE, AssistantMessage, Citation, CHUNK_TYPE, TEXT_TYPE, SimplifiedChunk, ProcessingInfo } from './types'; -import { Vectorstore } from './vectorstore/Vectorstore'; -import { Agent } from './Agent'; -import dotenv from 'dotenv'; -import { DocData, DocViews } from '../../../../fields/DocSymbols'; -import { DocumentManager } from '../../../util/DocumentManager'; -import { v4 as uuidv4 } from 'uuid'; -import { DocUtils } from '../../../documents/DocUtils'; -import { ClientUtils } from '../../../../ClientUtils'; -import { ProgressBar } from './ProgressBar'; -import { DocumentView } from '../DocumentView'; -import { Networking } from '../../../Network'; - -dotenv.config(); - -/** - * ChatBox is the main class responsible for managing the interaction between the user and the assistant, - * handling documents, and integrating with OpenAI for tasks such as document analysis, chat functionality, - * and vector store interactions. 
- */ -@observer -export class ChatBox extends ViewBoxAnnotatableComponent() { - // MobX observable properties to track UI state and data - @observable history: AssistantMessage[] = []; - @observable.deep current_message: AssistantMessage | undefined = undefined; - @observable isLoading: boolean = false; - @observable uploadProgress: number = 0; - @observable currentStep: string = ''; - @observable expandedScratchpadIndex: number | null = null; - @observable inputValue: string = ''; - @observable private linked_docs_to_add: ObservableSet = observable.set(); - @observable private linked_csv_files: { filename: string; id: string; text: string }[] = []; - @observable private isUploadingDocs: boolean = false; - @observable private citationPopup: { text: string; visible: boolean } = { text: '', visible: false }; - - // Private properties for managing OpenAI API, vector store, agent, and UI elements - private openai: OpenAI; - private vectorstore_id: string; - private vectorstore: Vectorstore; - private agent: Agent; - private _oldWheel: HTMLDivElement | null = null; - private messagesRef: React.RefObject; - - /** - * Static method that returns the layout string for the field. - * @param fieldKey Key to get the layout string. - */ - public static LayoutString(fieldKey: string) { - return FieldView.LayoutString(ChatBox, fieldKey); - } - - /** - * Constructor initializes the component, sets up OpenAI, vector store, and agent instances, - * and observes changes in the chat history to save the state in dataDoc. - * @param props The properties passed to the component. - */ - constructor(props: FieldViewProps) { - super(props); - makeObservable(this); // Enable MobX observables - - // Initialize OpenAI, vectorstore, and agent - this.openai = this.initializeOpenAI(); - if (StrCast(this.dataDoc.vectorstore_id) == '') { - console.log('new_id'); - this.vectorstore_id = uuidv4(); - this.dataDoc.vectorstore_id = this.vectorstore_id; - } else { - this.vectorstore_id = StrCast(this.dataDoc.vectorstore_id); - } - this.vectorstore = new Vectorstore(this.vectorstore_id, this.retrieveDocIds); - this.agent = new Agent(this.vectorstore, this.retrieveSummaries, this.retrieveFormattedHistory, this.retrieveCSVData, this.addLinkedUrlDoc, this.createCSVInDash); - this.messagesRef = React.createRef(); - - // Reaction to update dataDoc when chat history changes - reaction( - () => - this.history.map((msg: AssistantMessage) => ({ - role: msg.role, - content: msg.content, - follow_up_questions: msg.follow_up_questions, - citations: msg.citations, - })), - serializableHistory => { - this.dataDoc.data = JSON.stringify(serializableHistory); - } - ); - } - - /** - * Adds a document to the vectorstore for AI-based analysis. - * Handles the upload progress and errors during the process. - * @param newLinkedDoc The new document to add. - */ - @action - addDocToVectorstore = async (newLinkedDoc: Doc) => { - this.uploadProgress = 0; - this.currentStep = 'Initializing...'; - this.isUploadingDocs = true; - - try { - // Add the document to the vectorstore - await this.vectorstore.addAIDoc(newLinkedDoc, this.updateProgress); - } catch (error) { - console.error('Error uploading document:', error); - this.currentStep = 'Error during upload'; - } finally { - this.isUploadingDocs = false; - this.uploadProgress = 0; - this.currentStep = ''; - } - }; - - /** - * Updates the upload progress and the current step in the UI. - * @param progress The percentage of the progress. - * @param step The current step name. 
- */ - @action - updateProgress = (progress: number, step: string) => { - console.log('Progress:', progress, step); - this.uploadProgress = progress; - this.currentStep = step; - }; - - /** - * Adds a CSV file for analysis by sending it to OpenAI and generating a summary. - * @param newLinkedDoc The linked document representing the CSV file. - * @param id Optional ID for the document. - */ - @action - addCSVForAnalysis = async (newLinkedDoc: Doc, id?: string) => { - console.log('adding csv file for analysis'); - if (!newLinkedDoc.chunk_simpl) { - // Convert document text to CSV data - const csvData: string = StrCast(newLinkedDoc.text); - console.log('CSV Data:', csvData); - - // Generate a summary using OpenAI API - const completion = await this.openai.chat.completions.create({ - messages: [ - { - role: 'system', - content: - 'You are an AI assistant tasked with summarizing the content of a CSV file. You will be provided with the data from the CSV file and your goal is to generate a concise summary that captures the main themes, trends, and key points represented in the data.', - }, - { - role: 'user', - content: `Please provide a comprehensive summary of the CSV file based on the provided data. Ensure the summary highlights the most important information, patterns, and insights. Your response should be in paragraph form and be concise. - CSV Data: - ${csvData} - ********** - Summary:`, - }, - ], - model: 'gpt-3.5-turbo', - }); - - const csvId = id ?? uuidv4(); - - // Add CSV details to linked files - this.linked_csv_files.push({ - filename: CsvCast(newLinkedDoc.data).url.pathname, - id: csvId, - text: csvData, - }); - - // Add a chunk for the CSV and assign the summary - const chunkToAdd = { - chunkId: csvId, - chunkType: CHUNK_TYPE.CSV, - }; - newLinkedDoc.chunk_simpl = JSON.stringify({ chunks: [chunkToAdd] }); - newLinkedDoc.summary = completion.choices[0].message.content!; - } - }; - - /** - * Toggles the tool logs, expanding or collapsing the scratchpad at the given index. - * @param index Index of the tool log to toggle. - */ - @action - toggleToolLogs = (index: number) => { - this.expandedScratchpadIndex = this.expandedScratchpadIndex === index ? null : index; - }; - - /** - * Initializes the OpenAI API client using the API key from environment variables. - * @returns OpenAI client instance. - */ - initializeOpenAI() { - console.log(process.env.OPENAI_KEY); - const configuration: ClientOptions = { - apiKey: process.env.OPENAI_KEY, - dangerouslyAllowBrowser: true, - }; - return new OpenAI(configuration); - } - - /** - * Adds a scroll event listener to detect user scrolling and handle passive wheel events. - */ - addScrollListener = () => { - if (this.messagesRef.current) { - this.messagesRef.current.addEventListener('wheel', this.onPassiveWheel, { passive: false }); - } - }; - - /** - * Removes the scroll event listener from the chat messages container. - */ - removeScrollListener = () => { - if (this.messagesRef.current) { - this.messagesRef.current.removeEventListener('wheel', this.onPassiveWheel); - } - }; - - /** - * Scrolls the chat messages container to the bottom, ensuring the latest message is visible. - */ - scrollToBottom = () => { - if (this.messagesRef.current) { - this.messagesRef.current.scrollTop = this.messagesRef.current.scrollHeight; - } - }; - - /** - * Event handler for detecting wheel scrolling and stopping the event propagation. - * @param e The wheel event. 
- */ - onPassiveWheel = (e: WheelEvent) => { - if (this._props.isContentActive()) { - e.stopPropagation(); - } - }; - - /** - * Sends the user's input to OpenAI, displays the loading indicator, and updates the chat history. - * @param event The form submission event. - */ - @action - askGPT = async (event: React.FormEvent): Promise => { - event.preventDefault(); - this.inputValue = ''; - - // Extract the user's message - const textInput = event.currentTarget.elements.namedItem('messageInput') as HTMLInputElement; - const trimmedText = textInput.value.trim(); - - if (trimmedText) { - try { - textInput.value = ''; - // Add the user's message to the history - this.history.push({ - role: ASSISTANT_ROLE.USER, - content: [{ index: 0, type: TEXT_TYPE.NORMAL, text: trimmedText, citation_ids: null }], - processing_info: [], - }); - this.isLoading = true; - this.current_message = { - role: ASSISTANT_ROLE.ASSISTANT, - content: [], - citations: [], - processing_info: [], - }; - - // Define callbacks for real-time processing updates - const onProcessingUpdate = (processingUpdate: ProcessingInfo[]) => { - runInAction(() => { - if (this.current_message) { - this.current_message = { - ...this.current_message, - processing_info: processingUpdate, - }; - } - }); - this.scrollToBottom(); - }; - - const onAnswerUpdate = (answerUpdate: string) => { - runInAction(() => { - if (this.current_message) { - this.current_message = { - ...this.current_message, - content: [{ text: answerUpdate, type: TEXT_TYPE.NORMAL, index: 0, citation_ids: [] }], - }; - } - }); - }; - - // Send the user's question to the assistant and get the final message - const finalMessage = await this.agent.askAgent(trimmedText, onProcessingUpdate, onAnswerUpdate); - - // Update the history with the final assistant message - runInAction(() => { - if (this.current_message) { - this.history.push({ ...finalMessage }); - this.current_message = undefined; - this.dataDoc.data = JSON.stringify(this.history); - } - }); - } catch (err) { - console.error('Error:', err); - // Handle error in processing - this.history.push({ - role: ASSISTANT_ROLE.ASSISTANT, - content: [{ index: 0, type: TEXT_TYPE.ERROR, text: 'Sorry, I encountered an error while processing your request.', citation_ids: null }], - processing_info: [], - }); - } finally { - this.isLoading = false; - this.scrollToBottom(); - } - } - this.scrollToBottom(); - }; - - /** - * Updates the citations for a given message in the chat history. - * @param index The index of the message in the history. - * @param citations The list of citations to add to the message. - */ - @action - updateMessageCitations = (index: number, citations: Citation[]) => { - if (this.history[index]) { - this.history[index].citations = citations; - } - }; - - /** - * Adds a linked document from a URL for future reference and analysis. - * @param url The URL of the document to add. - * @param id The unique identifier for the document. 
- */ - @action - addLinkedUrlDoc = async (url: string, id: string) => { - const doc = Docs.Create.WebDocument(url, { data_useCors: true }); - console.log('Adding URL:', url); - - const linkDoc = Docs.Create.LinkDocument(this.Document, doc); - LinkManager.Instance.addLink(linkDoc); - let canDisplay; - - try { - // Fetch the URL content through the proxy - const { data } = await Networking.PostToServer('/proxyFetch', { url }); - - // Simulating header behavior since we can't fetch headers via proxy - const xFrameOptions = data.headers?.['x-frame-options']; - - if (xFrameOptions && xFrameOptions.toUpperCase() === 'SAMEORIGIN') { - console.log('URL cannot be displayed in an iframe:', url); - canDisplay = false; - } else { - console.log('URL can be displayed in an iframe:', url); - canDisplay = true; - } - } catch (error) { - console.error('Error fetching the URL from the server:', error); - } - - const chunkToAdd = { - chunkId: id, - chunkType: CHUNK_TYPE.URL, - url: url, - canDisplay: canDisplay, - }; - - doc.chunk_simpl = JSON.stringify({ chunks: [chunkToAdd] }); - }; - - /** - * Getter to retrieve the current user's name from the client utils. - */ - @computed - get userName() { - return ClientUtils.CurrentUserEmail; - } - - /** - * Creates a CSV document in the dashboard and adds it for analysis. - * @param url The URL of the CSV. - * @param title The title of the CSV document. - * @param id The unique ID for the document. - * @param data The CSV data content. - */ - @action - createCSVInDash = async (url: string, title: string, id: string, data: string) => { - console.log('Creating CSV in Dash:', url, title); - const doc = DocCast(await DocUtils.DocumentFromType('csv', url, { title: title, text: RTFCast(data) })); - - const linkDoc = Docs.Create.LinkDocument(this.Document, doc); - LinkManager.Instance.addLink(linkDoc); - - doc && this._props.addDocument?.(doc); - await DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {}); - - this.addCSVForAnalysis(doc, id); - }; - - /** - * Event handler to manage citations click in the message components. - * @param citation The citation object clicked by the user. - */ - @action - handleCitationClick = (citation: Citation) => { - console.log('Citation clicked:', citation); - const currentLinkedDocs: Doc[] = this.linkedDocs; - - const chunkId = citation.chunk_id; - - // Loop through the linked documents to find the matching chunk and handle its display - for (let doc of currentLinkedDocs) { - if (doc.chunk_simpl) { - const docChunkSimpl = JSON.parse(StrCast(doc.chunk_simpl)) as { chunks: SimplifiedChunk[] }; - const foundChunk = docChunkSimpl.chunks.find(chunk => chunk.chunkId === chunkId); - if (foundChunk) { - // Handle different types of chunks (image, text, table, etc.) 
- switch (foundChunk.chunkType) { - case CHUNK_TYPE.IMAGE: - case CHUNK_TYPE.TABLE: - const values = foundChunk.location?.replace(/[\[\]]/g, '').split(','); - - if (values?.length !== 4) { - console.error('Location string must contain exactly 4 numbers'); - return; - } - - const x1 = parseFloat(values[0]) * Doc.NativeWidth(doc); - const y1 = parseFloat(values[1]) * Doc.NativeHeight(doc) + foundChunk.startPage * Doc.NativeHeight(doc); - const x2 = parseFloat(values[2]) * Doc.NativeWidth(doc); - const y2 = parseFloat(values[3]) * Doc.NativeHeight(doc) + foundChunk.startPage * Doc.NativeHeight(doc); - - const annotationKey = Doc.LayoutFieldKey(doc) + '_annotations'; - - const existingDoc = DocListCast(doc[DocData][annotationKey]).find(d => d.citation_id === citation.citation_id); - const highlightDoc = existingDoc ?? this.createImageCitationHighlight(x1, y1, x2, y2, citation, annotationKey, doc); - - DocumentManager.Instance.showDocument(highlightDoc, { willZoomCentered: true }, () => {}); - break; - case CHUNK_TYPE.TEXT: - this.citationPopup = { text: citation.direct_text ?? 'No text available', visible: true }; - setTimeout(() => (this.citationPopup.visible = false), 3000); // Hide after 3 seconds - - DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => { - const firstView = Array.from(doc[DocViews])[0] as DocumentView; - firstView.ComponentView?.search?.(citation.direct_text ?? ''); - }); - break; - case CHUNK_TYPE.URL: - if (!foundChunk.canDisplay) { - window.open(StrCast(doc.displayUrl), '_blank'); - } else if (foundChunk.canDisplay) { - DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {}); - } - break; - case CHUNK_TYPE.CSV: - DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {}); - break; - default: - console.log('Chunk type not supported', foundChunk.chunkType); - break; - } - } - } - } - }; - - /** - * Creates an annotation highlight on a PDF document for image citations. - * @param x1 X-coordinate of the top-left corner of the highlight. - * @param y1 Y-coordinate of the top-left corner of the highlight. - * @param x2 X-coordinate of the bottom-right corner of the highlight. - * @param y2 Y-coordinate of the bottom-right corner of the highlight. - * @param citation The citation object to associate with the highlight. - * @param annotationKey The key used to store the annotation. - * @param pdfDoc The document where the highlight is created. - * @returns The highlighted document. - */ - createImageCitationHighlight = (x1: number, y1: number, x2: number, y2: number, citation: Citation, annotationKey: string, pdfDoc: Doc): Doc => { - const highlight_doc = Docs.Create.FreeformDocument([], { - x: x1, - y: y1, - _width: x2 - x1, - _height: y2 - y1, - backgroundColor: 'rgba(255, 255, 0, 0.5)', - }); - highlight_doc[DocData].citation_id = citation.citation_id; - Doc.AddDocToList(pdfDoc[DocData], annotationKey, highlight_doc); - highlight_doc.annotationOn = pdfDoc; - Doc.SetContainer(highlight_doc, pdfDoc); - return highlight_doc; - }; - - /** - * Lifecycle method that triggers when the component updates. - * Ensures the chat is scrolled to the bottom when new messages are added. - */ - componentDidUpdate() { - this.scrollToBottom(); - } - - /** - * Lifecycle method that triggers when the component mounts. - * Initializes scroll listeners, sets up document reactions, and loads chat history from dataDoc if available. 
- */ - componentDidMount() { - this._props.setContentViewBox?.(this); - if (this.dataDoc.data) { - try { - const storedHistory = JSON.parse(StrCast(this.dataDoc.data)); - runInAction(() => { - this.history.push( - ...storedHistory.map((msg: AssistantMessage) => ({ - role: msg.role, - content: msg.content, - follow_up_questions: msg.follow_up_questions, - citations: msg.citations, - })) - ); - }); - } catch (e) { - console.error('Failed to parse history from dataDoc:', e); - } - } else { - // Default welcome message - runInAction(() => { - this.history.push({ - role: ASSISTANT_ROLE.ASSISTANT, - content: [ - { - index: 0, - type: TEXT_TYPE.NORMAL, - text: `Hey, ${this.userName()}! Welcome to Your Friendly Assistant. Link a document or ask questions to get started.`, - citation_ids: null, - }, - ], - processing_info: [], - }); - }); - } - - // Set up reactions for linked documents - reaction( - () => { - const linkedDocs = LinkManager.Instance.getAllRelatedLinks(this.Document) - .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) - .map(d => DocCast(d?.annotationOn, d)) - .filter(d => d); - return linkedDocs; - }, - linked => linked.forEach(doc => this.linked_docs_to_add.add(doc)) - ); - - // Observe changes to linked documents and handle document addition - observe(this.linked_docs_to_add, change => { - if (change.type === 'add') { - if (PDFCast(change.newValue.data)) { - this.addDocToVectorstore(change.newValue); - } else if (CsvCast(change.newValue.data)) { - this.addCSVForAnalysis(change.newValue); - } - } else if (change.type === 'delete') { - console.log('Deleted docs: ', change.oldValue); - } - }); - this.addScrollListener(); - } - - /** - * Lifecycle method that triggers when the component unmounts. - * Removes scroll listeners to avoid memory leaks. - */ - componentWillUnmount() { - this.removeScrollListener(); - } - - /** - * Getter that retrieves all linked documents for the current document. - */ - @computed - get linkedDocs() { - return LinkManager.Instance.getAllRelatedLinks(this.Document) - .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) - .map(d => DocCast(d?.annotationOn, d)) - .filter(d => d); - } - - /** - * Getter that retrieves document IDs of linked documents that have AI-related content. - */ - @computed - get docIds() { - return LinkManager.Instance.getAllRelatedLinks(this.Document) - .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) - .map(d => DocCast(d?.annotationOn, d)) - .filter(d => d) - .filter(d => d.ai_doc_id) - .map(d => StrCast(d.ai_doc_id)); - } - - /** - * Getter that retrieves summaries of all linked documents. - */ - @computed - get summaries(): string { - return ( - LinkManager.Instance.getAllRelatedLinks(this.Document) - .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) - .map(d => DocCast(d?.annotationOn, d)) - .filter(d => d) - .filter(d => d.summary) - .map((doc, index) => { - if (PDFCast(doc.data)) { - return `${doc.summary}`; - } else if (CsvCast(doc.data)) { - return `${doc.summary}`; - } else { - return `${index + 1}) ${doc.summary}`; - } - }) - .join('\n') + '\n' - ); - } - - /** - * Getter that retrieves all linked CSV files for analysis. - */ - @computed - get linkedCSVs(): { filename: string; id: string; text: string }[] { - return this.linked_csv_files; - } - - /** - * Getter that formats the entire chat history as a string for the agent's system message. 
- */ - @computed - get formattedHistory(): string { - let history = '\n'; - for (const message of this.history) { - history += `<${message.role}>${message.content.map(content => content.text).join(' ')}`; - if (message.loop_summary) { - history += `${message.loop_summary}`; - } - history += `\n`; - } - history += ''; - return history; - } - - // Other helper methods for retrieving document data and processing - - retrieveSummaries = () => { - return this.summaries; - }; - - retrieveCSVData = () => { - return this.linkedCSVs; - }; - - retrieveFormattedHistory = () => { - return this.formattedHistory; - }; - - retrieveDocIds = () => { - return this.docIds; - }; - - /** - * Handles follow-up questions when the user clicks on them. - * Automatically sets the input value to the clicked follow-up question. - * @param question The follow-up question clicked by the user. - */ - @action - handleFollowUpClick = (question: string) => { - console.log('Follow-up question clicked:', question); - this.inputValue = question; - }; - - /** - * Renders the chat interface, including the message list, input field, and other UI elements. - */ - render() { - return ( -
- {this.isUploadingDocs && ( -
-
- -
{this.currentStep}
-
-
- )} -
-

{this.userName()}'s AI Assistant

-
-
- {this.history.map((message, index) => ( - - ))} - {this.current_message && ( - - )} -
-
- (this.inputValue = e.target.value)} /> - -
- {/* Popup for citation */} - {this.citationPopup.visible && ( -
-

- Text from your document: {this.citationPopup.text} -

-
- )} -
- ); - } -} - -/** - * Register the ChatBox component as the template for CHAT document types. - */ -Docs.Prototypes.TemplateMap.set(DocumentType.CHAT, { - layout: { view: ChatBox, dataField: 'data' }, - options: { acl: '', chat: '', chat_history: '', chat_thread_id: '', chat_assistant_id: '', chat_vector_store_id: '' }, -}); diff --git a/src/client/views/nodes/ChatBox/MessageComponent.tsx b/src/client/views/nodes/ChatBox/MessageComponent.tsx deleted file mode 100644 index 812e52ee0..000000000 --- a/src/client/views/nodes/ChatBox/MessageComponent.tsx +++ /dev/null @@ -1,105 +0,0 @@ -import React, { useState } from 'react'; -import { observer } from 'mobx-react'; -import { AssistantMessage, Citation, MessageContent, PROCESSING_TYPE, ProcessingInfo, TEXT_TYPE } from './types'; -import ReactMarkdown from 'react-markdown'; - -interface MessageComponentProps { - message: AssistantMessage; - index: number; - onFollowUpClick: (question: string) => void; - onCitationClick: (citation: Citation) => void; - updateMessageCitations: (index: number, citations: Citation[]) => void; -} - -const MessageComponentBox: React.FC = function ({ message, index, onFollowUpClick, onCitationClick, updateMessageCitations }) { - const [dropdownOpen, setDropdownOpen] = useState(false); - - const renderContent = (item: MessageContent) => { - const i = item.index; - //console.log('item', item, 'index', i); - if (item.type === TEXT_TYPE.GROUNDED) { - const citation_ids = item.citation_ids || []; - return ( - - {item.text} - {citation_ids.map((id, idx) => { - const citation = message.citations?.find(c => c.citation_id === id); - if (!citation) return null; - return ( - - ); - })} - - ); - } else if (item.type === TEXT_TYPE.NORMAL) { - return ( - - {item.text} - - ); - } else if ('query' in item) { - return ( - - {JSON.stringify(item.query)} - - ); - } else { - return ( - - {JSON.stringify(item)} - - ); - } - }; - - const hasProcessingInfo = message.processing_info && message.processing_info.length > 0; - - const renderProcessingInfo = (info: ProcessingInfo) => { - if (info.type === PROCESSING_TYPE.THOUGHT) { - return ( -
- Thought: {info.content} -
- ); - } else if (info.type === PROCESSING_TYPE.ACTION) { - return ( -
- Action: {info.content} -
- ); - } else { - return null; - } - }; - - return ( -
- {hasProcessingInfo && ( -
- - {dropdownOpen &&
{message.processing_info.map(renderProcessingInfo)}
} -
-
- )} -
{message.content && message.content.map(messageFragment => {renderContent(messageFragment)})}
- {message.follow_up_questions && message.follow_up_questions.length > 0 && ( -
-

Follow-up Questions:

-
- {message.follow_up_questions.map((question, idx) => ( - - ))} -
-
- )} -
- ); -}; - -export default observer(MessageComponentBox); diff --git a/src/client/views/nodes/ChatBox/ProgressBar.scss b/src/client/views/nodes/ChatBox/ProgressBar.scss deleted file mode 100644 index ff5be4a38..000000000 --- a/src/client/views/nodes/ChatBox/ProgressBar.scss +++ /dev/null @@ -1,69 +0,0 @@ -.spinner-container { - display: flex; - flex-direction: column; - justify-content: center; - align-items: center; - height: 100%; -} - -.spinner { - width: 60px; - height: 60px; - position: relative; - margin-bottom: 20px; // Space between spinner and text -} - -.double-bounce1, -.double-bounce2 { - width: 100%; - height: 100%; - border-radius: 50%; - background-color: #4a90e2; - opacity: 0.6; - position: absolute; - top: 0; - left: 0; - animation: bounce 2s infinite ease-in-out; -} - -.double-bounce2 { - animation-delay: -1s; -} - -@keyframes bounce { - 0%, - 100% { - transform: scale(0); - } - 50% { - transform: scale(1); - } -} - -.uploading-overlay { - position: absolute; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-color: rgba(255, 255, 255, 0.8); - display: flex; - align-items: center; - justify-content: center; - z-index: 1000; -} - -.progress-container { - display: flex; - flex-direction: column; - align-items: center; - text-align: center; -} - -.step-name { - font-size: 18px; - color: #333; - text-align: center; - width: 100%; - margin-top: -10px; // Adjust to move the text closer to the spinner -} diff --git a/src/client/views/nodes/ChatBox/ProgressBar.tsx b/src/client/views/nodes/ChatBox/ProgressBar.tsx deleted file mode 100644 index 0aa07213f..000000000 --- a/src/client/views/nodes/ChatBox/ProgressBar.tsx +++ /dev/null @@ -1,13 +0,0 @@ -import React from 'react'; -import './ProgressBar.scss'; - -export const ProgressBar: React.FC = () => { - return ( -
-
-
-
-
-
- ); -}; diff --git a/src/client/views/nodes/ChatBox/prompts.ts b/src/client/views/nodes/ChatBox/prompts.ts deleted file mode 100644 index 0a356189b..000000000 --- a/src/client/views/nodes/ChatBox/prompts.ts +++ /dev/null @@ -1,465 +0,0 @@ -// prompts.ts - -import { Tool } from './types'; - -export function getReactPrompt(tools: Tool[], summaries: () => string, chatHistory: string): string { - const toolDescriptions: string = tools - .map( - tool => ` - - ${tool.name} - ${tool.briefSummary} - - ` - ) - .join('\n'); - - return ` - - You are an advanced AI assistant equipped with various tools to answer user queries accurately and efficiently. Your task is to provide a comprehensive response based on the user's prompt using available tools, chat history, and provided information. Follow these guidelines meticulously to ensure the accuracy and structure of your response. - - - - **MOST IMPORTANT**: Always output responses within stage number tags, using the stage number and the system role as the root tag (e.g., , , etc.). This is crucial and should never be overlooked. - **STOP after every stage and wait for the system to provide the next input (e.g., action rules or observations). - Only output **ONE stage at a time** in your responses. Do not skip stages or provide multiple stages at once. Thus, you should only output even stage number root tags. - Always structure your responses using valid, well-formed XML with properly nested tags. - If a tool is needed, ALWAYS select the most appropriate tool based on the user's query. - If the query could relate to user documents or require external information (e.g., RAG, search + website scraping, data analysis), USE the appropriate tool to gather that information. - If there are no user docs or the user docs have not yielded helpful information, use the search tool to find websites followed by the website scraper tool to get useful infromation from one of those websites. You can use the website scraper (or even the search tool), multiple times to find information from multiple websites either from the same search or different searches. - Ensure at the end of every final answer, you provide exactly three follow-up questions from the user's perspective—from the perspective that they are asking the question. - Always follow the response structure provided in the instructions. - If a tool doesn't work—or yield helpful results—after two tries, EITHER use another tool or proceed with the response and ask the user for more information or clarification or let them know you cannot answer their question and why. DO NOT CONTINUE WITH THE SAME TOOL 3 TIMES. - Use multiple tools in conjunction with each other to provide a comprehensive answer to the user's query, if needed (i.e. for the prompt "create a CSV showing historical bird migration trends", you could use the search tool and the webscraper tool to get the info, and then use the create CSV tool to create the CSV) - - - - - When providing your final response, use the following structure: - - - <grounded_text> - Wrap text that is derived from tool-based or chunk-based information within these tags, ensuring proper citation. - <normal_text> - Wrap text that is not derived from tool-based or chunk-based information within these tags. - - <citation> - Provide citations for each grounded text, referencing the tool or chunk used. - - - <question> - Include exactly three follow-up questions from the user's perspective within these tags. 
- - - <loop_summary> - Provide a summary of the actions and tools used by the assistant throughout the interaction within these tags. - - - - - - Wrap all information derived from tools (e.g., RAG, search + website scraping, data analysis)—which will be provided in chunks—in <grounded_text> tags. - DO NOT PUT ANYTHING THAT IS NOT DIRECTLY DERIVED FROM TOOLS OR CHUNKS IN <grounded_text> TAGS. - Use a single <grounded_text> tag for sequential and closely related information that references the same citation. - If other citations are used sequentially, create new <grounded_text> tags. - Ensure each <grounded_text> tag has corresponding citations (up to three, and one is fine). Separate multiple citation indices with commas. - Grounded text can be as short as a few words or as long as several sentences. - Avoid overlapping or nesting <grounded_text> tags; use sequential tags instead. - Grounded text tags should always have a citation_index attribute that references a citation index number that the text is grounded in. - Content within the <grounded_text> tags should be in Markdown format. - - - - Wrap all text that is not derived from tools or chunks in <normal_text> tags (any text outputted in the answer that is not in a <grounded_text> tag should be within a normal text tag). - Ensure that these tags are used for your reasoning, background knowledge, or general information that does not require a citation. - Do not use <normal_text> tags for information that needs grounding or citation. - Anything that is in any user docs should be grounded text and cited, not normal text, even if it is background or general information. - Content within the <normal_text> tags should be in Markdown format. - - - - Create a unique citation for each distinct piece of information from tools or chunks that is used to support <grounded_text>. - Ensure each citation has a unique index number. - Specify the correct type: "text", "image", "table", "csv", or "url". - For text-based information, include only the relevant subset of the original information that the <grounded_text> is based on. - For image, table, csv, or url citation types, leave the citation content empty. - ALL CITATIONS MUST use the chunk_id field to reference the source, whether it's from RAG, search + website scraping, data analysis, or any other tool. - One citation can be used for multiple <grounded_text> tags if they are based on the same tool or chunk information. - !!!DO NOT OVERCITE - only include citations for information that is directly relevant to the <grounded_text>. - - - - Analyze the user's query carefully. - Determine whether a tool is required to answer the query accurately. - If a tool is necessary: - - Select the most appropriate tool. - Use the <action> tag to specify the tool. - End your response after the <action> tag and wait for action rules to be provided. - Based on the action rules, provide the necessary tool parameters within the <inputs>. The <inputs> tag should be within the <action_input> tag, and should follow an <action_description> tag that contains a brief description of what you're doing with the action. - For each input, you may provide as many different iterations of the same tag (i.e. to provide multiple inputs to the tool) as is specified in the input's max_input's field. - End your response again and wait for the observation from the tool. - - If no tool is needed, use the 'no_tool' action but still follow the same response structure. 
- If the query might relate to user documents or requires external information, **ALWAYS** use the appropriate tool to retrieve the information (either rag or dataAnalysis). - Once all observations are collected, or if no tool was needed, provide your comprehensive answer within the <answer> tag, using the <grounded_text> and <normal_text> tags as required. - - - - Your final <answer> tag must contain: - - The complete answer to the user's query, with grounded information wrapped in <grounded_text> tags and general information wrapped in <normal_text> tags. - Exactly three follow-up questions written from the user's perspective, enclosed within <follow_up_questions> tags. - A concise <loop_summary> that describes the actions and tools used throughout the interaction. - - - - - ${toolDescriptions} - If no external tool is required to answer the question, use the 'no_tool' action. However, if the query might relate to user documents or require external information, do not use 'no_tool'—instead, use the appropriate tool (RAG, search + website scraping, data analysis), even if unsure. - - - - ENSURE THAT YOU ONLY USE TOOLS THAT ANALYZE OR OTHERWISE USE USER DOCS IF THE QUERY APPLIES TO ONE OF THESE USER SUMMARIES (AT LEAST SOMEWHAT). IF THERE ARE NO SUMMARIES, THERE ARE NO USER DOCUMENTS. - - ${summaries()} - - - - - These examples are not, by any means, exhaustive in terms of how tools can be used in conjunction with one another. They simply are to provide you with examples of how to structure your outputs and use some of the tools in some contexts. - YOU ONLY OUTPUT THE ASSISTANT STAGES: - - - ***SYSTEM MESSAGE ELIDED*** - - - Could you provide a detailed overview of the 2010 Vancouver Winter Olympics's impact, including the overall summary of the games, key moments from official statements, and how the medal count compared across countries? - - - - - Since the user has Olympics related docs, I will use the RAG tool to find relevant information from the user's documents, specifically focusing on key moments and statements from an official press release. - - rag - - - - ***Action rules elided*** - - - - - Searching user documents for official statements and key moments of the 2010 Vancouver Winter Olympics. - - - The user is asking for key moments and statements from official sources regarding the 2010 Vancouver Winter Olympics. Search the provided documents for any press releases or official statements that highlight significant events, achievements, or noteworthy aspects of the games. - - - - - - - - PRESS RELEASE: 2010 VANCOUVER WINTER OLYMPICS - - Vancouver, BC - - - The 2010 Winter Olympics, officially known as the XXI Olympic Winter Games, took place in Vancouver, British Columbia, Canada, from February 12 to 28, 2010. It featured 86 events in 15 disciplines across 7 sports, with 2,566 athletes from 82 National Olympic Committees participating. This edition of the Winter Olympics was notable for being the first hosted by Canada since the 1988 Winter Olympics in Calgary. - - Overall, the 2010 Winter Olympics have been hailed as a monumental success, showcasing the best of Canadian culture and athletic prowess. The Opening Ceremony, held on February 12, 2010, was a spectacular celebration of the nation's history and diversity... - - Key Moments: - - The Canadian men's and women's ice hockey teams both claimed gold medals, an achievement that brought immense pride to the host nation. 
- - Tragedy struck early in the games with the death of Georgian luger Nodar Kumaritashvili during a training run. This incident led to increased scrutiny of safety measures across all events... - - ***More chunk content elided for brevity*** - - The games concluded on February 28, 2010, with a memorable Closing Ceremony that highlighted the achievements of the athletes and the spirit of unity that defined the 2010 Vancouver Winter Olympics. - - - - - - With information on key moments from the press release, I will now analyze the medal count data and economic impact data to provide a comprehensive overview of the 2010 Vancouver Winter Olympics's impact. - - dataAnalysis - - - - ***Action rules elided*** - - - - - Analyzing the medal count data and economic impact data for the 2010 Vancouver Winter Olympics to compare country performances and assess the games' overall impact. - - 2010_Vancouver_Olympics_Medal_Count.csv - 2010_Vancouver_Olympics_Economic_Impact.csv - - - - - - - Country,Gold,Silver,Bronze,Total - USA,9,15,13,37 - Germany,10,13,7,30 - Canada,14,7,5,26 - Norway,9,8,6,23 - Russia,3,5,7,15 - South Korea,6,6,2,14 - China,5,2,4,11 - Sweden,5,2,4,11 - France,2,3,6,11 - Austria,4,6,6,16 - - - Category,Value - Total Cost (CAD),7.7 billion - Revenue Generated (CAD),2.5 billion - Jobs Created,45000 - Tourism Increase (%),35 - Infrastructure Investment (CAD),1.9 billion - Post-Olympic Annual Tourism Revenue Increase (CAD),220 million - Venues Repurposed,8 - Carbon Footprint Offset (%),118 - - - - - - Now that I have gathered comprehensive information from the official press release, medal count data, and economic impact data, I will provide a detailed answer to the user's query, incorporating all aspects of the 2010 Vancouver Winter Olympics's impact. - - - - The 2010 Winter Olympics, officially known as the XXI Olympic Winter Games, took place in Vancouver, British Columbia, Canada, from February 12 to 28, 2010. It featured 86 events in 15 disciplines across 7 sports, with 2,566 athletes from 82 National Olympic Committees participating. - - - - The Opening Ceremony was one of the most spectacular in Olympic history, celebrating Canadian culture and heritage. A significant highlight for the host nation was the victory of the Canadian men's and women's ice hockey teams, both securing gold medals, which was a source of immense national pride. - - - - In terms of medal performance, the United States led the overall tally with 37 medals (9 gold, 15 silver, 13 bronze), followed by Germany with 30 medals (10 gold, 13 silver, 7 bronze). Canada, while placing third in total medals with 26, achieved a record number of 14 gold medals, the highest of any country in the 2010 games. Other notable performances came from Norway (23 medals), Russia (15 medals), and South Korea (14 medals). - - - - The economic impact of the 2010 Vancouver Winter Olympics was significant. The total cost of hosting the games was approximately 7.7 billion CAD, while generating revenue of 2.5 billion CAD. The games created about 45,000 jobs and led to a 35% increase in tourism during the event. The long-term benefits include an annual increase of 220 million CAD in tourism revenue and 1.9 billion CAD invested in infrastructure. - - - - From an environmental perspective, the Vancouver Olympics made efforts to be sustainable, with 118% of the carbon footprint offset and 8 venues repurposed after the games, demonstrating a commitment to leaving a positive legacy. 
- - - - However, the games were also marked by tragedy with the unfortunate death of Georgian luger Nodar Kumaritashvili during a training run, which deeply affected the Olympic community and led to increased scrutiny of safety measures across all events. - - - - Despite this tragedy, the 2010 Vancouver Winter Olympics were generally hailed as a monumental success, showcasing the best of Canadian culture and athletic prowess. The games concluded on February 28, 2010, with a memorable Closing Ceremony that highlighted the achievements of the athletes and the spirit of unity that defined these Olympics. - - - - - - - - - - - - - - How did the 2010 Vancouver Winter Olympics compare to previous Winter Olympics in terms of economic impact and sustainability efforts? - What specific infrastructure improvements were made in Vancouver as a result of hosting the 2010 Winter Olympics? - How did the performance of Canadian athletes in the 2010 Vancouver Winter Olympics compare to their performance in previous Winter Olympics? - - - The assistant used the RAG tool to gather information from the user's documents, focusing on key moments and official statements. It then used the dataAnalysis tool to analyze the medal count and economic impact data, providing a comprehensive overview of the 2010 Vancouver Winter Olympics's impact. - - - - - - - ***SYSTEM MESSAGE ELIDED*** - - - What are the best hiking trails in Yosemite National Park? And which are the safest? - - - - To provide comprehensive information about the best hiking trails in Yosemite National Park, I'll use the search tool to find relevant websites with hiking recommendations, safety information, and general park information. - - searchTool - - - ***Action rules elided*** - - - - Searching the web for information about the best and safest hiking trails in Yosemite National Park. - - Best hiking trails Yosemite National Park - Safest hiking trails Yosemite National Park - Information about hiking in Yosemite National Park - - - - - - - https://www.nps.gov/yose/planyourvisit/hiking.htm - Official National Park Service website with hiking information for Yosemite - - - https://www.alltrails.com/parks/us/california/yosemite-national-park - AllTrails page featuring user-rated hiking trails in Yosemite - - - https://www.yosemitehikes.com/ - Comprehensive guide to Yosemite hikes with detailed trail information - - - - - - I've found several relevant websites about hiking in Yosemite National Park. I'll start by scraping content from the official National Park Service website for the most reliable information, then move on to AllTrails for user ratings and reviews, and finally to YosemiteHikes.com for more detailed trail descriptions. - - websiteInfoScraper - - - ***Action rules elided*** - - - - Scraping information from three websites about hiking trails in Yosemite National Park. - - https://www.nps.gov/yose/planyourvisit/hiking.htm - https://www.alltrails.com/parks/us/california/yosemite-national-park - https://www.yosemitehikes.com/ - - - - - - - ***NPS general planning website image elided*** - - - ***AllTrails website image elided*** - - - ***YosemiteHikes.com website image elided*** - - - - - - The information gathered from the three websites provides a good overview of the best hiking trails and safety considerations in Yosemite National Park. However, I noticed that there's limited information about seasonal trail conditions and closures, which could be crucial for planning a safe hike. 
To address this, I'll scrape additional information from the Yosemite National Park's current conditions page. - - websiteInfoScraper - - - ***Action rules elided*** - - - - Scraping information about current trail conditions and closures in Yosemite National Park. - - https://www.nps.gov/yose/planyourvisit/conditions.htm - - - - - - - ***NPS conditions website image elided*** - - - - - - Now that I have additional information about current trail conditions and seasonal closures, I can provide a more comprehensive and up-to-date answer to the user's query about the best and safest hiking trails in Yosemite National Park. - - - - Yosemite National Park offers a wide range of hiking trails suitable for various skill levels. Some of the best hiking trails in Yosemite, based on the information gathered, include: - - 1. Mist Trail to Vernal Fall: A moderate 3-mile round-trip hike in Yosemite Valley, offering close-up views of waterfalls... ***REST ELIDED FOR BREVITY*** - - - ***CONTENT ELIDED FOR BREVITY*** - - - - ***CONTENT ELIDED FOR BREVITY*** - - - Regarding safety, some of the safest trails for hikers of all skill levels include: - - - 1. Lower Yosemite Fall Trail: An easy 1-mile loop in Yosemite Valley... ***REST ELIDED FOR BREVITY*** - - - - ***CONTENT ELIDED FOR BREVITY*** - - - To ensure a safe hiking experience in Yosemite, regardless of the trail you choose, follow these safety tips: - - - 1. Stay on marked trails at all times... - ***REST ELIDED FOR BREVITY*** - - - Remember that even the "safest" trails can pose risks if proper precautions are not taken. Always hike within your ability level and be prepared for the specific conditions of the trail you choose. It's essential to check for daily updates on trail conditions before starting your hike, as they can change rapidly, especially during spring and fall. - - - - https://www.nps.gov/yose/planyourvisit/hiking.htm - - - https://www.alltrails.com/parks/us/california/yosemite-national-park - - - https://www.yosemitehikes.com/ - - - https://www.nps.gov/yose/planyourvisit/conditions.htm - - - - What is the best time of year to hike in Yosemite National Park? - Are there any special permits required for hiking certain trails in Yosemite? - What wildlife might I encounter while hiking in Yosemite, and how should I prepare for potential encounters? - - - The assistant used the search tool to find relevant websites, then scraped information from those sites using the websiteInfoScraper tool. Additional information about current trail conditions was gathered from the NPS website to provide a comprehensive and safe hiking guide. - - - - - - - - ${chatHistory} - Use this history for context when appropriate. - - - - Now, process the user's query and provide your response following the format and rules outlined above. Ensure your final answer is comprehensive, correctly cited, and entirely contained within the structured tags. Do not get stuck in infinite loops and keep responses concise, grounded, and most importantly, HELPFUL AND USEFUL! - - -`; -} - -export function getSummarizedChunksPrompt(chunks: string): string { - return `Please provide a comprehensive summary of what you think the document from which these chunks originated. - Ensure the summary captures the main ideas and key points from all provided chunks. Be concise and brief and only provide the summary in paragraph form. 
- - Text chunks: - \`\`\` - ${chunks} - \`\`\``; -} - -export function getSummarizedSystemPrompt(): string { - return 'You are an AI assistant tasked with summarizing a document. You are provided with important chunks from the document and provide a summary, as best you can, of what the document will contain overall. Be concise and brief with your response.'; -} diff --git a/src/client/views/nodes/ChatBox/response_parsers/AnswerParser.ts b/src/client/views/nodes/ChatBox/response_parsers/AnswerParser.ts deleted file mode 100644 index 79b53b0a3..000000000 --- a/src/client/views/nodes/ChatBox/response_parsers/AnswerParser.ts +++ /dev/null @@ -1,125 +0,0 @@ -import { ASSISTANT_ROLE, AssistantMessage, Citation, CHUNK_TYPE, TEXT_TYPE, getChunkType, ProcessingInfo } from '../types'; -import { v4 as uuid } from 'uuid'; - -export class AnswerParser { - static parse(xml: string, processingInfo: ProcessingInfo[]): AssistantMessage { - const answerRegex = /([\s\S]*?)<\/answer>/; - const citationsRegex = /([\s\S]*?)<\/citations>/; - const citationRegex = /([\s\S]*?)<\/citation>/g; - const followUpQuestionsRegex = /([\s\S]*?)<\/follow_up_questions>/; - const questionRegex = /(.*?)<\/question>/g; - const groundedTextRegex = /([\s\S]*?)<\/grounded_text>/g; - const normalTextRegex = /([\s\S]*?)<\/normal_text>/g; - const loopSummaryRegex = /([\s\S]*?)<\/loop_summary>/; - - const answerMatch = answerRegex.exec(xml); - const citationsMatch = citationsRegex.exec(xml); - const followUpQuestionsMatch = followUpQuestionsRegex.exec(xml); - const loopSummaryMatch = loopSummaryRegex.exec(xml); - - if (!answerMatch) { - throw new Error('Invalid XML: Missing tag.'); - } - - let rawTextContent = answerMatch[1].trim(); - let content: AssistantMessage['content'] = []; - let citations: Citation[] = []; - let contentIndex = 0; - - // Remove citations and follow-up questions from rawTextContent - if (citationsMatch) { - rawTextContent = rawTextContent.replace(citationsMatch[0], '').trim(); - } - if (followUpQuestionsMatch) { - rawTextContent = rawTextContent.replace(followUpQuestionsMatch[0], '').trim(); - } - if (loopSummaryMatch) { - rawTextContent = rawTextContent.replace(loopSummaryMatch[0], '').trim(); - } - - // Parse citations - let citationMatch; - const citationMap = new Map(); - if (citationsMatch) { - const citationsContent = citationsMatch[1]; - while ((citationMatch = citationRegex.exec(citationsContent)) !== null) { - const [_, index, chunk_id, type, direct_text] = citationMatch; - const citation_id = uuid(); - citationMap.set(index, citation_id); - citations.push({ - direct_text: direct_text.trim(), - type: getChunkType(type), - chunk_id, - citation_id, - }); - } - } - - rawTextContent = rawTextContent.replace(normalTextRegex, '$1'); - - // Parse text content (normal and grounded) - let lastIndex = 0; - let match; - - while ((match = groundedTextRegex.exec(rawTextContent)) !== null) { - const [fullMatch, citationIndex, groundedText] = match; - - // Add normal text that is before the grounded text - if (match.index > lastIndex) { - const normalText = rawTextContent.slice(lastIndex, match.index).trim(); - if (normalText) { - content.push({ - index: contentIndex++, - type: TEXT_TYPE.NORMAL, - text: normalText, - citation_ids: null, - }); - } - } - - // Add grounded text - const citation_ids = citationIndex.split(',').map(index => citationMap.get(index) || ''); - content.push({ - index: contentIndex++, - type: TEXT_TYPE.GROUNDED, - text: groundedText.trim(), - citation_ids, - }); - - lastIndex = match.index + 
fullMatch.length; - } - - // Add any remaining normal text after the last grounded text - if (lastIndex < rawTextContent.length) { - const remainingText = rawTextContent.slice(lastIndex).trim(); - if (remainingText) { - content.push({ - index: contentIndex++, - type: TEXT_TYPE.NORMAL, - text: remainingText, - citation_ids: null, - }); - } - } - - let followUpQuestions: string[] = []; - if (followUpQuestionsMatch) { - const questionsText = followUpQuestionsMatch[1]; - let questionMatch; - while ((questionMatch = questionRegex.exec(questionsText)) !== null) { - followUpQuestions.push(questionMatch[1].trim()); - } - } - - const assistantResponse: AssistantMessage = { - role: ASSISTANT_ROLE.ASSISTANT, - content, - follow_up_questions: followUpQuestions, - citations, - processing_info: processingInfo, - loop_summary: loopSummaryMatch ? loopSummaryMatch[1].trim() : undefined, - }; - - return assistantResponse; - } -} diff --git a/src/client/views/nodes/ChatBox/response_parsers/StreamedAnswerParser.ts b/src/client/views/nodes/ChatBox/response_parsers/StreamedAnswerParser.ts deleted file mode 100644 index 3585cab4a..000000000 --- a/src/client/views/nodes/ChatBox/response_parsers/StreamedAnswerParser.ts +++ /dev/null @@ -1,73 +0,0 @@ -import { threadId } from 'worker_threads'; - -enum ParserState { - Outside, - InGroundedText, - InNormalText, -} - -export class StreamedAnswerParser { - private state: ParserState = ParserState.Outside; - private buffer: string = ''; - private result: string = ''; - private isStartOfLine: boolean = true; - - public parse(char: string): string { - switch (this.state) { - case ParserState.Outside: - if (char === '<') { - this.buffer = '<'; - } else if (char === '>') { - if (this.buffer.startsWith('') { - this.state = ParserState.Outside; - this.buffer = ''; - } else if (this.buffer.startsWith('') { - this.state = ParserState.Outside; - this.buffer = ''; - } else if (this.buffer.startsWith('<')) { - this.buffer += char; - } else { - this.processChar(char); - } - break; - } - - return this.result.trim(); - } - - private processChar(char: string): void { - if (this.isStartOfLine && char === ' ') { - // Skip leading spaces - return; - } - if (char === '\n') { - this.result += char; - this.isStartOfLine = true; - } else { - this.result += char; - this.isStartOfLine = false; - } - } - - public reset(): void { - this.state = ParserState.Outside; - this.buffer = ''; - this.result = ''; - this.isStartOfLine = true; - } -} diff --git a/src/client/views/nodes/ChatBox/tools.ts b/src/client/views/nodes/ChatBox/tools.ts deleted file mode 100644 index 4035280a8..000000000 --- a/src/client/views/nodes/ChatBox/tools.ts +++ /dev/null @@ -1,26 +0,0 @@ -import { DocCast } from '../../../../fields/Types'; -import { DocServer } from '../../../DocServer'; -import { Docs } from '../../../documents/Documents'; -import { DocUtils } from '../../../documents/DocUtils'; -import { TabDocView } from '../../collections/TabDocView'; -import { DocumentView } from '../DocumentView'; -import { OpenWhere } from '../OpenWhere'; - -export function retrieval(json: any): string { - return ''; -} - -export function create_collection(docView: DocumentView, document_ids: string[], title: string): string { - const docs = document_ids.map(doc_id => DocCast(DocServer.GetCachedRefField(doc_id))); - const collection = Docs.Create.FreeformDocument(docs, { title }); - docView._props.addDocTab(collection, OpenWhere.addRight); //in future, create popup prompting user where to add - return 'Collection created in Dash 
called ' + title; -} - -export function create_link(docView: DocumentView, document_ids: string[]): string { - //Make document_ids a size 2 array - const docs = document_ids.map(doc_id => DocCast(DocServer.GetCachedRefField(doc_id))); - const linkDoc = DocUtils.MakeLink(docs[0], docs[1], {})!; - DocumentView.linkCommonAncestor(linkDoc)?.ComponentView?.addDocument?.(linkDoc); - return 'Link created between ' + docs[0].title + ' and ' + docs[1].title; -} diff --git a/src/client/views/nodes/ChatBox/tools/BaseTool.ts b/src/client/views/nodes/ChatBox/tools/BaseTool.ts deleted file mode 100644 index 2e2267653..000000000 --- a/src/client/views/nodes/ChatBox/tools/BaseTool.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { Tool } from '../types'; - -export abstract class BaseTool = Record> implements Tool { - constructor( - public name: string, - public description: string, - public parameters: Record, - public citationRules: string, - public briefSummary: string - ) {} - - abstract execute(args: T): Promise; - - getActionRule(): Record { - return { - [this.name]: { - name: this.name, - citationRules: this.citationRules, - description: this.description, - parameters: this.parameters, - }, - }; - } -} diff --git a/src/client/views/nodes/ChatBox/tools/CalculateTool.ts b/src/client/views/nodes/ChatBox/tools/CalculateTool.ts deleted file mode 100644 index 74b7ca27b..000000000 --- a/src/client/views/nodes/ChatBox/tools/CalculateTool.ts +++ /dev/null @@ -1,26 +0,0 @@ -import { BaseTool } from './BaseTool'; - -export class CalculateTool extends BaseTool<{ expression: string }> { - constructor() { - super( - 'calculate', - 'Perform a calculation', - { - expression: { - type: 'string', - description: 'The mathematical expression to evaluate', - required: 'true', - max_inputs: '1', - }, - }, - 'Provide a mathematical expression to calculate that would work with JavaScript eval().', - 'Runs a calculation and returns the number - uses JavaScript so be sure to use floating point syntax if necessary' - ); - } - - async execute(args: { expression: string }): Promise { - // Note: Using eval() can be dangerous. Consider using a safer alternative. - const result = eval(args.expression); - return [{ type: 'text', text: result.toString() }]; - } -} diff --git a/src/client/views/nodes/ChatBox/tools/CreateCSVTool.ts b/src/client/views/nodes/ChatBox/tools/CreateCSVTool.ts deleted file mode 100644 index 55015846b..000000000 --- a/src/client/views/nodes/ChatBox/tools/CreateCSVTool.ts +++ /dev/null @@ -1,51 +0,0 @@ -import { BaseTool } from './BaseTool'; -import { Networking } from '../../../../Network'; - -export class CreateCSVTool extends BaseTool<{ csvData: string; filename: string }> { - private _handleCSVResult: (url: string, filename: string, id: string, data: string) => void; - - constructor(handleCSVResult: (url: string, title: string, id: string, data: string) => void) { - super( - 'createCSV', - 'Creates a CSV file from raw CSV data and saves it to the server', - { - type: 'object', - properties: { - csvData: { - type: 'string', - description: 'A string of comma-separated values representing the CSV data.', - }, - filename: { - type: 'string', - description: 'The base name of the CSV file to be created. Should end in ".csv".', - }, - }, - required: ['csvData', 'filename'], - }, - 'Provide a CSV string and a filename to create a CSV file.', - 'Creates a CSV file from the provided CSV string and saves it to the server with a unique identifier, returning the file URL and UUID.' 
- ); - this._handleCSVResult = handleCSVResult; - } - - async execute(args: { csvData: string; filename: string }): Promise { - try { - console.log('Creating CSV file:', args.filename, ' with data:', args.csvData); - // Post the raw CSV data to the createCSV endpoint on the server - const { fileUrl, id } = await Networking.PostToServer('/createCSV', { filename: args.filename, data: args.csvData }); - - // Handle the result by invoking the callback - this._handleCSVResult(fileUrl, args.filename, id, args.csvData); - - return [ - { - type: 'text', - text: `File successfully created: ${fileUrl}. \nNow a CSV file with this data and the name ${args.filename} is available as a user doc.`, - }, - ]; - } catch (error) { - console.error('Error creating CSV file:', error); - throw new Error('Failed to create CSV file.'); - } - } -} diff --git a/src/client/views/nodes/ChatBox/tools/CreateCollectionTool.ts b/src/client/views/nodes/ChatBox/tools/CreateCollectionTool.ts deleted file mode 100644 index 573428179..000000000 --- a/src/client/views/nodes/ChatBox/tools/CreateCollectionTool.ts +++ /dev/null @@ -1,36 +0,0 @@ -import { DocCast } from '../../../../../fields/Types'; -import { DocServer } from '../../../../DocServer'; -import { Docs } from '../../../../documents/Documents'; -import { DocumentView } from '../../DocumentView'; -import { OpenWhere } from '../../OpenWhere'; -import { BaseTool } from './BaseTool'; - -export class GetDocsContentTool extends BaseTool<{ title: string; document_ids: string[] }> { - private _docView: DocumentView; - constructor(docView: DocumentView) { - super( - 'retrieveDocs', - 'Retrieves the contents of all Documents that the user is interacting with in Dash ', - { - title: { - type: 'string', - description: 'the title of the collection that you will be making', - required: 'true', - max_inputs: '1', - }, - }, - 'Provide a mathematical expression to calculate that would work with JavaScript eval().', - 'Runs a calculation and returns the number - uses JavaScript so be sure to use floating point syntax if necessary' - ); - this._docView = docView; - } - - async execute(args: { title: string; document_ids: string[] }): Promise { - // Note: Using eval() can be dangerous. Consider using a safer alternative. 
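This warning is copied from CalculateTool.execute above, which passes the model-supplied expression straight to JavaScript's eval(). One eval-free direction, sketched here under the assumption that an expression-parsing library such as mathjs were added as a dependency (it is not part of this changeset):

    import { evaluate } from 'mathjs'; // assumed dependency, used only for illustration

    // Parses and evaluates arithmetic like "2 * (3 + 4.5)" without executing arbitrary JavaScript.
    async function safeCalculate(args: { expression: string }) {
        try {
            const result = evaluate(args.expression);
            return [{ type: 'text', text: String(result) }];
        } catch {
            return [{ type: 'text', text: `Could not evaluate expression: ${args.expression}` }];
        }
    }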
- const docs = args.document_ids.map(doc_id => DocCast(DocServer.GetCachedRefField(doc_id))); - const collection = Docs.Create.FreeformDocument(docs, { title: args.title }); - this._docView._props.addDocTab(collection, OpenWhere.addRight); //in future, create popup prompting user where to add - return [{ type: 'text', text: 'Collection created in Dash called ' + args.title }]; - } -} -//export function create_collection(docView: DocumentView, document_ids: string[], title: string): string {} diff --git a/src/client/views/nodes/ChatBox/tools/DataAnalysisTool.ts b/src/client/views/nodes/ChatBox/tools/DataAnalysisTool.ts deleted file mode 100644 index a12ee46e5..000000000 --- a/src/client/views/nodes/ChatBox/tools/DataAnalysisTool.ts +++ /dev/null @@ -1,59 +0,0 @@ -import { BaseTool } from './BaseTool'; - -export class DataAnalysisTool extends BaseTool<{ csv_file_name: string | string[] }> { - private csv_files_function: () => { filename: string; id: string; text: string }[]; - - constructor(csv_files: () => { filename: string; id: string; text: string }[]) { - super( - 'dataAnalysis', - 'Analyzes, and provides insights, from one or more CSV files', - { - csv_file_name: { - type: 'string', - description: 'Name(s) of the CSV file(s) to analyze', - required: 'true', - max_inputs: '3', - }, - }, - 'Provide the name(s) of up to 3 CSV files to analyze based on the user query and whichever available CSV files may be relevant.', - 'Provides the full CSV file text for your analysis based on the user query and the available CSV file(s). ' - ); - this.csv_files_function = csv_files; - } - - getFileContent(filename: string): string | undefined { - const files = this.csv_files_function(); - const file = files.find(f => f.filename === filename); - return file?.text; - } - - getFileID(filename: string): string | undefined { - const files = this.csv_files_function(); - const file = files.find(f => f.filename === filename); - return file?.id; - } - - async execute(args: { csv_file_name: string | string[] }): Promise { - const filenames = Array.isArray(args.csv_file_name) ? args.csv_file_name : [args.csv_file_name]; - const results = []; - - for (const filename of filenames) { - const fileContent = this.getFileContent(filename); - const fileID = this.getFileID(filename); - - if (fileContent && fileID) { - results.push({ - type: 'text', - text: `${fileContent}`, - }); - } else { - results.push({ - type: 'text', - text: `File not found: ${filename}`, - }); - } - } - - return results; - } -} diff --git a/src/client/views/nodes/ChatBox/tools/GetDocsTool.ts b/src/client/views/nodes/ChatBox/tools/GetDocsTool.ts deleted file mode 100644 index f970ca8ee..000000000 --- a/src/client/views/nodes/ChatBox/tools/GetDocsTool.ts +++ /dev/null @@ -1,29 +0,0 @@ -import { DocCast } from '../../../../../fields/Types'; -import { DocServer } from '../../../../DocServer'; -import { Docs } from '../../../../documents/Documents'; -import { DocumentView } from '../../DocumentView'; -import { OpenWhere } from '../../OpenWhere'; -import { BaseTool } from './BaseTool'; - -export class GetDocsTool extends BaseTool<{ title: string; document_ids: string[] }> { - private _docView: DocumentView; - constructor(docView: DocumentView) { - super( - 'retrieveDocs', - 'Retrieves the contents of all Documents that the user is interacting with in Dash', - {}, - 'No need to provide anything. 
Just run the tool and it will retrieve the contents of all Documents that the user is interacting with in Dash.', - 'Returns the the documents in Dash in JSON form. This will include the title of the document, the location in the FreeFormDocument, and the content of the document, any applicable data fields, the layout of the document, etc.' - ); - this._docView = docView; - } - - async execute(args: { title: string; document_ids: string[] }): Promise { - // Note: Using eval() can be dangerous. Consider using a safer alternative. - const docs = args.document_ids.map(doc_id => DocCast(DocServer.GetCachedRefField(doc_id))); - const collection = Docs.Create.FreeformDocument(docs, { title: args.title }); - this._docView._props.addDocTab(collection, OpenWhere.addRight); //in future, create popup prompting user where to add - return [{ type: 'text', text: 'Collection created in Dash called ' + args.title }]; - } -} -//export function create_collection(docView: DocumentView, document_ids: string[], title: string): string {} diff --git a/src/client/views/nodes/ChatBox/tools/NoTool.ts b/src/client/views/nodes/ChatBox/tools/NoTool.ts deleted file mode 100644 index 1f0830a77..000000000 --- a/src/client/views/nodes/ChatBox/tools/NoTool.ts +++ /dev/null @@ -1,18 +0,0 @@ -// tools/NoTool.ts -import { BaseTool } from './BaseTool'; - -export class NoTool extends BaseTool<{}> { - constructor() { - super( - 'no_tool', - 'Use this when no external tool or action is required to answer the question.', - {}, - 'When using the "no_tool" action, simply provide an empty element. The observation will always be "No tool used. Proceed with answering the question."', - 'Use when no external tool or action is required to answer the question.' - ); - } - - async execute(args: {}): Promise { - return [{ type: 'text', text: 'No tool used. Proceed with answering the question.' }]; - } -} diff --git a/src/client/views/nodes/ChatBox/tools/RAGTool.ts b/src/client/views/nodes/ChatBox/tools/RAGTool.ts deleted file mode 100644 index 544b9daba..000000000 --- a/src/client/views/nodes/ChatBox/tools/RAGTool.ts +++ /dev/null @@ -1,138 +0,0 @@ -import { BaseTool } from './BaseTool'; -import { Vectorstore } from '../vectorstore/Vectorstore'; -import { RAGChunk } from '../types'; -import * as fs from 'fs'; -import { Networking } from '../../../../Network'; -import { file } from 'jszip'; -import { ChatCompletion, ChatCompletionContentPart, ChatCompletionMessageParam } from 'openai/resources'; - -export class RAGTool extends BaseTool { - constructor(private vectorstore: Vectorstore) { - super( - 'rag', - 'Perform a RAG search on user documents', - { - hypothetical_document_chunk: { - type: 'string', - description: - "Detailed version of the prompt that is effectively a hypothetical document chunk that would be ideal to embed and compare to the vectors of real document chunks to fetch the most relevant document chunks to answer the user's query", - required: 'true', - }, - }, - ` - Your task is to provide a comprehensive response to the user's prompt based on the given chunks and chat history. Follow these structural guidelines meticulously: - - 1. Overall Structure: - - [Main content with grounded_text tags interspersed with normal plain text (information that is not derived from chunks' information)] - - [Individual citation tags] - - - [Three question tags] - - - - 2. Grounded Text Tag Structure: - - Basic format: - - [Your generated text based on information from a subset of a chunk (a citation's direct text)] - - - 3. 
Citation Tag Structure: - - [For text: relevant subset of original chunk] - [For image/table: leave empty] - - - 4. Detailed Grounded Text Guidelines: - a. Wrap all information derived from chunks in grounded_text tags. - b. DO NOT PUT ANYTHING THAT IS NOT DIRECTLY DERIVED FROM INFORMATION FROM CHUNKS (EITHER IMAGE, TABLE, OR TEXT) IN GROUNDED_TEXT TAGS. - c. Use a single grounded_text tag for suquential and closely related information that references the same citation. If other citations' information are used sequentially, create new grounded_text tags. - d. Ensure every grounded_text tag has up to a few corresponding citations (should not be more than 3 and only 1 is fine). Multiple citation indices should be separated by commas. - e. Grounded text can be as short as a few words or as long as several sentences. - f. Avoid overlapping or nesting grounded_text tags; instead, use sequential tags. - - 5. Detailed Citation Guidelines: - a. Create a unique citation for each distinct piece of information from the chunks that is used to support grounded_text. - b. ALL TEXT CITATIONS must have direct text in its element content (e.g. DIRECT TEXT HERE) that is a relevant SUBSET of the original text chunk that is being cited specifically. - c. DO NOT paraphrase or summarize the text; use the original text as much as possible. - d. DO NOT USE THE FULL TEXT CHUNK as the citation content; only use the relevant subset of the text that the grounded_text is base. AS SHORT AS POSSIBLE WHILE PROVIDING INFORMATION (ONE TO TWO SENTENCES USUALLY)! - e. Ensure each citation has a unique index number. - f. Specify the correct type: "text", "image", or "table". - g. For text chunks, the content of the citation should ALWAYS have the relevant subset of the original text that the grounded_text is based on. - h. For image/table chunks, leave the citation content empty. - i. One citation can be used for multiple grounded_text tags if they are based on the same chunk information. - j. !!!DO NOT OVERCITE - only include citations for information that is directly relevant to the grounded_text. - - 6. Structural Integrity Checks: - a. Ensure all opening tags have corresponding closing tags. - b. Verify that all grounded_text tags have valid citation_index attributes (they should be equal to the associated citation(s) index field—not their chunk_id field). - c. Check that all cited indices in grounded_text tags have corresponding citations. - - Example of grounded_text usage: - - - - Artificial Intelligence (AI) is revolutionizing various sectors, with healthcare experiencing significant transformations in areas such as diagnosis and treatment planning. - - - In the field of medical diagnosis, AI has shown remarkable capabilities, particularly in radiology. For instance, AI systems have drastically improved mammogram analysis, achieving 99% accuracy at a rate 30 times faster than human radiologists. - - - This advancement not only enhances the efficiency of healthcare systems but also significantly reduces the occurrence of false positives, leading to fewer unnecessary biopsies and reduced patient stress. - - - - Beyond diagnosis, AI is playing a crucial role in drug discovery and development. By analyzing vast amounts of genetic and molecular data, AI algorithms can identify potential drug candidates much faster than traditional methods. 
- - - This could potentially reduce the time and cost of bringing new medications to market, especially for rare diseases that have historically received less attention due to limited market potential. - - - [... rest of the content ...] - - - Artificial Intelligence is revolutionizing various industries, with healthcare being one of the most profoundly affected sectors. - AI has shown particular promise in the field of radiology, enhancing the accuracy and speed of image analysis. - According to recent studies, AI systems have achieved 99% accuracy in mammogram analysis, performing the task 30 times faster than human radiologists. - The improvement in mammogram accuracy has led to a significant reduction in false positives, decreasing the need for unnecessary biopsies and reducing patient anxiety. - AI is accelerating the drug discovery process by analyzing complex molecular and genetic data to identify potential drug candidates. - The use of AI in drug discovery could significantly reduce the time and cost associated with bringing new medications to market, particularly for rare diseases. - - - - How might AI-driven personalized medicine impact the cost and accessibility of healthcare in the future? - What measures can be taken to ensure that AI systems in healthcare are free from biases and equally effective for diverse populations? - How could the role of healthcare professionals evolve as AI becomes more integrated into medical practices? - - - `, - - `Performs a RAG (Retrieval-Augmented Generation) search on user documents and returns a - set of document chunks (either images or text) that can be used to provide a grounded response based on - user documents` - ); - } - - async execute(args: { hypothetical_document_chunk: string }): Promise { - const relevantChunks = await this.vectorstore.retrieve(args.hypothetical_document_chunk); - const formatted_chunks = await this.getFormattedChunks(relevantChunks); - return formatted_chunks; - } - - async getFormattedChunks(relevantChunks: RAGChunk[]): Promise { - try { - const { formattedChunks } = await Networking.PostToServer('/formatChunks', { relevantChunks }); - - if (!formattedChunks) { - throw new Error('Failed to format chunks'); - } - - return formattedChunks; - } catch (error) { - console.error('Error formatting chunks:', error); - throw error; - } - } -} diff --git a/src/client/views/nodes/ChatBox/tools/SearchTool.ts b/src/client/views/nodes/ChatBox/tools/SearchTool.ts deleted file mode 100644 index b926cbadc..000000000 --- a/src/client/views/nodes/ChatBox/tools/SearchTool.ts +++ /dev/null @@ -1,54 +0,0 @@ -import { max } from 'lodash'; -import { Networking } from '../../../../Network'; -import { BaseTool } from './BaseTool'; -import { v4 as uuidv4 } from 'uuid'; - -export class SearchTool extends BaseTool<{ query: string | string[] }> { - private _addLinkedUrlDoc: (url: string, id: string) => void; - private _max_results: number; - constructor(addLinkedUrlDoc: (url: string, id: string) => void, max_results: number = 5) { - super( - 'searchTool', - 'Search the web to find a wide range of websites related to a query or multiple queries', - { - query: { - type: 'string', - description: 'The search query or queries to use for finding websites', - required: 'true', - max_inputs: '3', - }, - }, - 'Provide up to 3 search queries to find a broad range of websites. This tool is intended to help you identify relevant websites, but not to be used for providing the final answer. 
Use this information to determine which specific website to investigate further.', - 'Returns a list of websites and their overviews based on the search queries, helping to identify which websites might contain relevant information.' - ); - this._addLinkedUrlDoc = addLinkedUrlDoc; - this._max_results = max_results; - } - - async execute(args: { query: string | string[] }): Promise { - const queries = Array.isArray(args.query) ? args.query : [args.query]; - const allResults = []; - - for (const query of queries) { - try { - const { results } = await Networking.PostToServer('/getWebSearchResults', { query, max_results: this._max_results }); - const data: { type: string; text: string }[] = results.map((result: { url: string; snippet: string }) => { - const id = uuidv4(); - return { - type: 'text', - text: ` - ${result.url} - ${result.snippet} - `, - }; - }); - allResults.push(...data); - } catch (error) { - console.log(error); - allResults.push({ type: 'text', text: `An error occurred while performing the web search for query: ${query}` }); - } - } - - return allResults; - } -} diff --git a/src/client/views/nodes/ChatBox/tools/WebsiteInfoScraperTool.ts b/src/client/views/nodes/ChatBox/tools/WebsiteInfoScraperTool.ts deleted file mode 100644 index 4588b5aec..000000000 --- a/src/client/views/nodes/ChatBox/tools/WebsiteInfoScraperTool.ts +++ /dev/null @@ -1,43 +0,0 @@ -import { Networking } from '../../../../Network'; -import { BaseTool } from './BaseTool'; -import { v4 as uuidv4 } from 'uuid'; - -export class WebsiteInfoScraperTool extends BaseTool<{ url: string | string[] }> { - private _addLinkedUrlDoc: (url: string, id: string) => void; - - constructor(addLinkedUrlDoc: (url: string, id: string) => void) { - super( - 'websiteInfoScraper', - 'Scrape detailed information from specific websites identified as relevant to the user query', - { - url: { - type: 'string', - description: 'The URL(s) of the website(s) to scrape', - required: 'true', - max_inputs: '3', - }, - }, - 'Provide up to 3 URLs of websites that you have identified as the most relevant from the previous search. This tool will provide the text content of those specific websites. When providing a final response to the user based on information from these chunks, ideally cite as many of the url chunks as possible (ground your infromation from multiple sources, if possible) in order to provide a well grounded result.', - 'Returns the text content of the webpages for analysis.' - ); - this._addLinkedUrlDoc = addLinkedUrlDoc; - } - - async execute(args: { url: string | string[] }): Promise { - const urls = Array.isArray(args.url) ? 
args.url : [args.url]; - const results = []; - - for (const url of urls) { - try { - const { website_plain_text } = await Networking.PostToServer('/scrapeWebsite', { url }); - const id = uuidv4(); - this._addLinkedUrlDoc(url, id); - results.push({ type: 'text', text: `\n${website_plain_text}\n\n` }); - } catch (error) { - results.push({ type: 'text', text: `An error occurred while scraping the website: ${url}` }); - } - } - - return results; - } -} diff --git a/src/client/views/nodes/ChatBox/tools/WikipediaTool.ts b/src/client/views/nodes/ChatBox/tools/WikipediaTool.ts deleted file mode 100644 index 143d91d80..000000000 --- a/src/client/views/nodes/ChatBox/tools/WikipediaTool.ts +++ /dev/null @@ -1,37 +0,0 @@ -import { title } from 'process'; -import { Networking } from '../../../../Network'; -import { BaseTool } from './BaseTool'; -import axios from 'axios'; -import { v4 as uuidv4 } from 'uuid'; - -export class WikipediaTool extends BaseTool<{ title: string }> { - private _addLinkedUrlDoc: (url: string, id: string) => void; - constructor(addLinkedUrlDoc: (url: string, id: string) => void) { - super( - 'wikipedia', - 'Search Wikipedia and return a summary', - { - title: { - type: 'string', - description: 'The title of the Wikipedia article to search', - required: true, - }, - }, - 'Provide simply the title you want to search on Wikipedia and nothing more. If re-using this tool, try a different title for different information.', - 'Returns a summary from searching an article title on Wikipedia' - ); - this._addLinkedUrlDoc = addLinkedUrlDoc; - } - - async execute(args: { title: string }): Promise { - try { - const { text } = await Networking.PostToServer('/getWikipediaSummary', { title: args.title }); - const id = uuidv4(); - const url = `https://en.wikipedia.org/wiki/${args.title.replace(/ /g, '_')}`; - this._addLinkedUrlDoc(url, id); - return [{ type: 'text', text: ` ${text} ` }]; - } catch (error) { - return [{ type: 'text', text: 'An error occurred while fetching the article.' 
}]; - } - } -} diff --git a/src/client/views/nodes/ChatBox/types.ts b/src/client/views/nodes/ChatBox/types.ts deleted file mode 100644 index a12c52592..000000000 --- a/src/client/views/nodes/ChatBox/types.ts +++ /dev/null @@ -1,169 +0,0 @@ -import { breadcrumbsClasses } from '@mui/material'; -import { Doc } from '../../../../fields/Doc'; -import { StrCast } from '../../../../fields/Types'; -import e from 'cors'; -import { index } from 'd3'; - -export enum ASSISTANT_ROLE { - USER = 'user', - ASSISTANT = 'assistant', -} - -export enum TEXT_TYPE { - NORMAL = 'normal', - GROUNDED = 'grounded', - ERROR = 'error', -} - -export enum CHUNK_TYPE { - TEXT = 'text', - IMAGE = 'image', - TABLE = 'table', - URL = 'url', - CSV = 'CSV', -} - -export enum PROCESSING_TYPE { - THOUGHT = 'thought', - ACTION = 'action', - //eventually migrate error to here -} - -export function getChunkType(type: string): CHUNK_TYPE { - switch (type.toLowerCase()) { - case 'text': - return CHUNK_TYPE.TEXT; - break; - case 'image': - return CHUNK_TYPE.IMAGE; - break; - case 'table': - return CHUNK_TYPE.TABLE; - break; - case 'CSV': - return CHUNK_TYPE.CSV; - break; - case 'url': - return CHUNK_TYPE.URL; - break; - default: - return CHUNK_TYPE.TEXT; - break; - } -} - -export interface ProcessingInfo { - index: number; - type: PROCESSING_TYPE; - content: string; -} - -export interface AssistantMessage { - role: ASSISTANT_ROLE; - content: MessageContent[]; - follow_up_questions?: string[]; - citations?: Citation[]; - processing_info: ProcessingInfo[]; - loop_summary?: string; -} - -export interface MessageContent { - index: number; - type: TEXT_TYPE; - text: string; - citation_ids: string[] | null; -} - -export interface Citation { - direct_text?: string; - type: CHUNK_TYPE; - chunk_id: string; - citation_id: string; - url?: string; -} - -export interface RAGChunk { - id: string; - values: number[]; - metadata: { - text: string; - type: CHUNK_TYPE; - original_document: string; - file_path: string; - doc_id: string; - location: string; - start_page: number; - end_page: number; - base64_data?: string | undefined; - page_width?: number | undefined; - page_height?: number | undefined; - }; -} - -export interface SimplifiedChunk { - chunkId: string; - startPage: number; - endPage: number; - location?: string; - chunkType: CHUNK_TYPE; - url?: string; - canDisplay?: boolean; -} - -export interface AI_Document { - purpose: string; - file_name: string; - num_pages: number; - summary: string; - chunks: RAGChunk[]; - type: string; -} - -export interface Tool = Record> { - name: string; - description: string; - parameters: Record; - citationRules: string; - briefSummary: string; - execute: (args: T) => Promise; - getActionRule: () => Record; -} - -export interface AgentMessage { - role: 'system' | 'user' | 'assistant'; - content: string | { type: string; text?: string; image_url?: { url: string } }[]; -} - -// export function convertToAIDocument(json: any): AI_Document { -// if (!json) { -// throw new Error('Invalid JSON object'); -// } - -// const chunks: Chunk[] = json.chunks.map((chunk: any) => ({ -// id: chunk.id, -// values: chunk.values, -// metadata: { -// text: chunk.metadata.text, -// type: chunk.metadata.type as CHUNK_TYPE, // Ensure type casting -// original_document: chunk.metadata.original_document, -// file_path: chunk.metadata.file_path, -// location: chunk.metadata.location, -// start_page: chunk.metadata.start_page, -// end_page: chunk.metadata.end_page, -// base64_data: chunk.metadata.base64_data, -// width: 
chunk.metadata.width, -// height: chunk.metadata.height, -// }, -// })); - -// const aiDocument: AI_Document = { -// purpose: json.purpose, -// file_name: json.file_name, -// num_pages: json.num_pages, -// summary: json.summary, -// chunks: chunks, -// type: json.type, -// }; - -// return aiDocument; -// } diff --git a/src/client/views/nodes/ChatBox/vectorstore/Vectorstore.ts b/src/client/views/nodes/ChatBox/vectorstore/Vectorstore.ts deleted file mode 100644 index cc3b1ccd5..000000000 --- a/src/client/views/nodes/ChatBox/vectorstore/Vectorstore.ts +++ /dev/null @@ -1,258 +0,0 @@ -import { Pinecone, Index, IndexList, PineconeRecord, RecordMetadata, QueryResponse } from '@pinecone-database/pinecone'; -import { CohereClient } from 'cohere-ai'; -import { EmbedResponse } from 'cohere-ai/api'; -import dotenv from 'dotenv'; -import { RAGChunk, AI_Document, CHUNK_TYPE } from '../types'; -import { Doc } from '../../../../../fields/Doc'; -import { CsvCast, PDFCast, StrCast } from '../../../../../fields/Types'; -import { Networking } from '../../../../Network'; - -dotenv.config(); - -/** - * The Vectorstore class integrates with Pinecone for vector-based document indexing and retrieval, - * and Cohere for text embedding. It handles AI document management, uploads, and query-based retrieval. - */ -export class Vectorstore { - private pinecone: Pinecone; // Pinecone client for managing the vector index. - private index!: Index; // The specific Pinecone index used for document chunks. - private cohere: CohereClient; // Cohere client for generating embeddings. - private indexName: string = 'pdf-chatbot'; // Default name for the index. - private _id: string; // Unique ID for the Vectorstore instance. - private _doc_ids: string[] = []; // List of document IDs handled by this instance. - - documents: AI_Document[] = []; // Store the documents indexed in the vectorstore. - - /** - * Constructor initializes the Pinecone and Cohere clients, sets up the document ID list, - * and initializes the Pinecone index. - * @param id The unique identifier for the vectorstore instance. - * @param doc_ids A function that returns a list of document IDs. - */ - constructor(id: string, doc_ids: () => string[]) { - const pineconeApiKey = process.env.PINECONE_API_KEY; - if (!pineconeApiKey) { - throw new Error('PINECONE_API_KEY is not defined.'); - } - - // Initialize Pinecone and Cohere clients with API keys from the environment. - this.pinecone = new Pinecone({ apiKey: pineconeApiKey }); - this.cohere = new CohereClient({ token: process.env.COHERE_API_KEY }); - this._id = id; - this._doc_ids = doc_ids(); - this.initializeIndex(); - } - - /** - * Initializes the Pinecone index by checking if it exists, and creating it if not. - * The index is set to use the cosine metric for vector similarity. - */ - private async initializeIndex() { - const indexList: IndexList = await this.pinecone.listIndexes(); - - // Check if the index already exists, otherwise create it. - if (!indexList.indexes?.some(index => index.name === this.indexName)) { - await this.pinecone.createIndex({ - name: this.indexName, - dimension: 1024, - metric: 'cosine', - spec: { - serverless: { - cloud: 'aws', - region: 'us-east-1', - }, - }, - }); - } - - // Set the index for future use. - this.index = this.pinecone.Index(this.indexName); - } - - /** - * Adds an AI document to the vectorstore. This method handles document chunking, uploading to the - * vectorstore, and updating the progress for long-running tasks like file uploads. 
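Concretely, the method posts the file path to /createDocument, polls /getProgress and /getResult until the background job completes, upserts the returned chunks into Pinecone, and writes the summary and chunk metadata back onto the Doc. A minimal usage sketch, assuming an async caller; the progress handler below is only an illustrative stand-in for whatever the ChatBox supplies:

    // Ingest a linked PDF/CSV Doc and surface coarse progress while the server processes it.
    await vectorstore.addAIDoc(pdfDoc, (progress, step) => console.log(`${step}: ${progress}%`));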
- * @param doc The document to be added to the vectorstore. - * @param progressCallback Callback to update the progress of the upload. - */ - async addAIDoc(doc: Doc, progressCallback: (progress: number, step: string) => void) { - console.log('Adding AI Document:', doc); - const ai_document_status: string = StrCast(doc.ai_document_status); - - // Skip if the document is already in progress or completed. - if (ai_document_status !== undefined && ai_document_status.trim() !== '' && ai_document_status !== '{}') { - if (ai_document_status === 'IN PROGRESS') { - console.log('Already in progress.'); - return; - } - if (!this._doc_ids.includes(StrCast(doc.ai_doc_id))) { - this._doc_ids.push(StrCast(doc.ai_doc_id)); - } - } else { - // Start processing the document. - doc.ai_document_status = 'PROGRESS'; - console.log(doc); - - // Get the local file path (CSV or PDF). - const local_file_path: string = CsvCast(doc.data)?.url?.pathname ?? PDFCast(doc.data)?.url?.pathname; - console.log('Local File Path:', local_file_path); - - if (local_file_path) { - console.log('Creating AI Document...'); - // Start the document creation process by sending the file to the server. - const { jobId } = await Networking.PostToServer('/createDocument', { file_path: local_file_path }); - - // Poll the server for progress updates. - let inProgress: boolean = true; - let result: any = null; - while (inProgress) { - // Polling interval for status updates. - await new Promise(resolve => setTimeout(resolve, 2000)); - - // Check if the job is completed. - const resultResponse = await Networking.FetchFromServer(`/getResult/${jobId}`); - const resultResponseJson = JSON.parse(resultResponse); - if (resultResponseJson.status === 'completed') { - console.log('Result here:', resultResponseJson); - result = resultResponseJson; - break; - } - - // Fetch progress information and update the progress callback. - const progressResponse = await Networking.FetchFromServer(`/getProgress/${jobId}`); - const progressResponseJson = JSON.parse(progressResponse); - if (progressResponseJson) { - const progress = progressResponseJson.progress; - const step = progressResponseJson.step; - progressCallback(progress, step); - } - } - - // Once completed, process the document and add it to the vectorstore. - console.log('Document JSON:', result); - this.documents.push(result); - await this.indexDocument(result); - console.log(`Document added: ${result.file_name}`); - - // Update document metadata such as summary, purpose, and vectorstore ID. - doc.summary = result.summary; - doc.ai_doc_id = result.doc_id; - this._doc_ids.push(result.doc_id); - doc.ai_purpose = result.purpose; - - if (!doc.vectorstore_id) { - doc.vectorstore_id = JSON.stringify([this._id]); - } else { - doc.vectorstore_id = JSON.stringify(JSON.parse(StrCast(doc.vectorstore_id)).concat([this._id])); - } - - if (!doc.chunk_simpl) { - doc.chunk_simpl = JSON.stringify({ chunks: [] }); - } - - // Process each chunk of the document and update the document's chunk_simpl field. - result.chunks.forEach((chunk: RAGChunk) => { - const chunkToAdd = { - chunkId: chunk.id, - startPage: chunk.metadata.start_page, - endPage: chunk.metadata.end_page, - location: chunk.metadata.location, - chunkType: chunk.metadata.type as CHUNK_TYPE, - text: chunk.metadata.text, - }; - const new_chunk_simpl = JSON.parse(StrCast(doc.chunk_simpl)); - new_chunk_simpl.chunks = new_chunk_simpl.chunks.concat(chunkToAdd); - doc.chunk_simpl = JSON.stringify(new_chunk_simpl); - }); - - // Mark the document status as completed. 
- doc.ai_document_status = 'COMPLETED'; - } - } - } - - /** - * Indexes the processed document by uploading the document's vector chunks to the Pinecone index. - * @param document The processed document containing its chunks and metadata. - */ - private async indexDocument(document: any) { - console.log('Uploading vectors to content namespace...'); - - // Prepare Pinecone records for each chunk in the document. - const pineconeRecords: PineconeRecord[] = (document.chunks as RAGChunk[]).map(chunk => ({ - id: chunk.id, - values: chunk.values, - metadata: { ...chunk.metadata } as RecordMetadata, - })); - - // Upload the records to Pinecone. - await this.index.upsert(pineconeRecords); - } - - /** - * Retrieves the top K document chunks relevant to the user's query. - * This involves embedding the query using Cohere, then querying Pinecone for matching vectors. - * @param query The search query string. - * @param topK The number of top results to return (default is 10). - * @returns A list of document chunks that match the query. - */ - async retrieve(query: string, topK: number = 10): Promise { - console.log(`Retrieving chunks for query: ${query}`); - try { - // Generate an embedding for the query using Cohere. - const queryEmbeddingResponse: EmbedResponse = await this.cohere.embed({ - texts: [query], - model: 'embed-english-v3.0', - inputType: 'search_query', - }); - - let queryEmbedding: number[]; - - // Extract the embedding from the response. - if (Array.isArray(queryEmbeddingResponse.embeddings)) { - queryEmbedding = queryEmbeddingResponse.embeddings[0]; - } else if (queryEmbeddingResponse.embeddings && 'embeddings' in queryEmbeddingResponse.embeddings) { - queryEmbedding = (queryEmbeddingResponse.embeddings as { embeddings: number[][] }).embeddings[0]; - } else { - throw new Error('Invalid embedding response format'); - } - - if (!Array.isArray(queryEmbedding)) { - throw new Error('Query embedding is not an array'); - } - - // Query the Pinecone index using the embedding and filter by document IDs. - const queryResponse: QueryResponse = await this.index.query({ - vector: queryEmbedding, - filter: { - doc_id: { $in: this._doc_ids }, - }, - topK, - includeValues: true, - includeMetadata: true, - }); - - // Map the results into RAGChunks and return them. 
- return queryResponse.matches.map( - match => - ({ - id: match.id, - values: match.values as number[], - metadata: match.metadata as { - text: string; - type: string; - original_document: string; - file_path: string; - doc_id: string; - location: string; - start_page: number; - end_page: number; - }, - }) as RAGChunk - ); - } catch (error) { - console.error(`Error retrieving chunks: ${error}`); - return []; - } - } -} diff --git a/src/client/views/nodes/chatbot/agentsystem/Agent.ts b/src/client/views/nodes/chatbot/agentsystem/Agent.ts new file mode 100644 index 000000000..180d05cf3 --- /dev/null +++ b/src/client/views/nodes/chatbot/agentsystem/Agent.ts @@ -0,0 +1,278 @@ +import OpenAI from 'openai'; +import { Tool, AgentMessage, AssistantMessage, TEXT_TYPE, CHUNK_TYPE, ASSISTANT_ROLE, ProcessingInfo, PROCESSING_TYPE } from '../types/types'; +import { getReactPrompt } from './prompts'; +import { XMLParser, XMLBuilder } from 'fast-xml-parser'; +import { Vectorstore } from '../vectorstore/Vectorstore'; +import { ChatCompletionMessageParam } from 'openai/resources'; +import dotenv from 'dotenv'; +import { CalculateTool } from '../tools/CalculateTool'; +import { RAGTool } from '../tools/RAGTool'; +import { DataAnalysisTool } from '../tools/DataAnalysisTool'; +import { WebsiteInfoScraperTool } from '../tools/WebsiteInfoScraperTool'; +import { SearchTool } from '../tools/SearchTool'; +import { NoTool } from '../tools/NoTool'; +import { v4 as uuidv4 } from 'uuid'; +import { AnswerParser } from '../response_parsers/AnswerParser'; +import { StreamedAnswerParser } from '../response_parsers/StreamedAnswerParser'; +import { CreateCSVTool } from '../tools/CreateCSVTool'; + +dotenv.config(); + +/** + * The Agent class handles the interaction between the assistant and the tools available, + * processes user queries, and manages the communication flow between the tools and OpenAI. + */ +export class Agent { + // Private properties + private client: OpenAI; + private tools: Record>; + private messages: AgentMessage[] = []; + private interMessages: AgentMessage[] = []; + private vectorstore: Vectorstore; + private _history: () => string; + private _summaries: () => string; + private _csvData: () => { filename: string; id: string; text: string }[]; + private actionNumber: number = 0; + private thoughtNumber: number = 0; + private processingNumber: number = 0; + private processingInfo: ProcessingInfo[] = []; + private streamedAnswerParser: StreamedAnswerParser = new StreamedAnswerParser(); + + /** + * The constructor initializes the agent with the vector store and toolset, and sets up the OpenAI client. + * @param _vectorstore Vector store instance for document storage and retrieval. + * @param summaries A function to retrieve document summaries. + * @param history A function to retrieve chat history. + * @param csvData A function to retrieve CSV data linked to the assistant. + * @param addLinkedUrlDoc A function to add a linked document from a URL. + * @param createCSVInDash A function to create a CSV document in the dashboard. 
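Put together, a caller (in practice the ChatBox component) might wire these parameters up roughly as follows; every identifier on the right-hand side below is an illustrative placeholder rather than an API defined in this changeset:

    // Construct the agent with accessors and callbacks supplied by the hosting view (placeholders).
    const agent = new Agent(
        vectorstore,                           // Vectorstore built for this chat session
        () => documentSummaries,               // summaries of linked user docs
        () => serializedChatHistory,           // prior turns as text
        () => linkedCsvFiles,                  // [{ filename, id, text }] for dataAnalysis/createCSV
        (url, id) => addLinkedUrlDoc(url, id), // called when a tool cites a web page
        (url, title, id, data) => createCSVInDash(url, title, id, data) // called when createCSV finishes
    );

    // Ask a question; stream processing steps and partial answer text back to the UI.
    const answer = await agent.askAgent(
        'Summarize the linked PDFs.',
        infos => renderProcessing(infos),
        partial => renderPartialAnswer(partial)
    );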
+ */ + constructor( + _vectorstore: Vectorstore, + summaries: () => string, + history: () => string, + csvData: () => { filename: string; id: string; text: string }[], + addLinkedUrlDoc: (url: string, id: string) => void, + createCSVInDash: (url: string, title: string, id: string, data: string) => void + ) { + // Initialize OpenAI client with API key from environment + this.client = new OpenAI({ apiKey: process.env.OPENAI_KEY, dangerouslyAllowBrowser: true }); + this.vectorstore = _vectorstore; + this._history = history; + this._summaries = summaries; + this._csvData = csvData; + + // Define available tools for the assistant + this.tools = { + calculate: new CalculateTool(), + rag: new RAGTool(this.vectorstore), + dataAnalysis: new DataAnalysisTool(csvData), + websiteInfoScraper: new WebsiteInfoScraperTool(addLinkedUrlDoc), + searchTool: new SearchTool(addLinkedUrlDoc), + createCSV: new CreateCSVTool(createCSVInDash), + no_tool: new NoTool(), + }; + } + + /** + * This method handles the conversation flow with the assistant, processes user queries, + * and manages the assistant's decision-making process, including tool actions. + * @param question The user's question. + * @param onProcessingUpdate Callback function for processing updates. + * @param onAnswerUpdate Callback function for answer updates. + * @param maxTurns The maximum number of turns to allow in the conversation. + * @returns The final response from the assistant. + */ + async askAgent(question: string, onProcessingUpdate: (processingUpdate: ProcessingInfo[]) => void, onAnswerUpdate: (answerUpdate: string) => void, maxTurns: number = 30): Promise { + console.log(`Starting query: ${question}`); + + // Push user's question to message history + this.messages.push({ role: 'user', content: question }); + + // Retrieve chat history and generate system prompt + const chatHistory = this._history(); + const systemPrompt = getReactPrompt(Object.values(this.tools), this._summaries, chatHistory); + + // Initialize intermediate messages + this.interMessages = [{ role: 'system', content: systemPrompt }]; + this.interMessages.push({ role: 'user', content: `${question}` }); + + // Setup XML parser and builder + const parser = new XMLParser({ + ignoreAttributes: false, + attributeNamePrefix: '@_', + textNodeName: '_text', + isArray: (name, jpath, isLeafNode, isAttribute) => ['query', 'url'].indexOf(name) !== -1, + }); + const builder = new XMLBuilder({ ignoreAttributes: false, attributeNamePrefix: '@_' }); + + let currentAction: string | undefined; + this.processingInfo = []; + + // Conversation loop (up to maxTurns) + for (let i = 2; i < maxTurns; i += 2) { + console.log(this.interMessages); + console.log(`Turn ${i}/${maxTurns}`); + + // Execute a step in the conversation and get the result + const result = await this.execute(onProcessingUpdate, onAnswerUpdate); + this.interMessages.push({ role: 'assistant', content: result }); + + let parsedResult; + try { + // Parse XML result from the assistant + parsedResult = parser.parse(result); + } catch (error) { + throw new Error(`Error parsing response: ${error}`); + } + + // Extract the stage from the parsed result + const stage = parsedResult.stage; + if (!stage) { + throw new Error(`Error: No stage found in response`); + } + + // Handle different stage elements (thoughts, actions, inputs, answers) + for (const key in stage) { + if (key === 'thought') { + // Handle assistant's thoughts + console.log(`Thought: ${stage[key]}`); + this.processingNumber++; + } else if (key === 'action') { + // 
Handle action stage + currentAction = stage[key] as string; + console.log(`Action: ${currentAction}`); + + if (this.tools[currentAction]) { + // Prepare the next action based on the current tool + const nextPrompt = [ + { + type: 'text', + text: `` + builder.build({ action_rules: this.tools[currentAction].getActionRule() }) + ``, + }, + ]; + this.interMessages.push({ role: 'user', content: nextPrompt }); + break; + } else { + // Handle error in case of an invalid action + console.log('Error: No valid action'); + this.interMessages.push({ role: 'user', content: `No valid action, try again.` }); + break; + } + } else if (key === 'action_input') { + // Handle action input stage + const actionInput = stage[key]; + console.log(`Action input:`, actionInput.inputs); + + if (currentAction) { + try { + // Process the action with its input + const observation = await this.processAction(currentAction, actionInput.inputs); + const nextPrompt = [{ type: 'text', text: ` ` }, ...observation, { type: 'text', text: '' }]; + console.log(observation); + this.interMessages.push({ role: 'user', content: nextPrompt }); + this.processingNumber++; + break; + } catch (error) { + throw new Error(`Error processing action: ${error}`); + } + } else { + throw new Error('Error: Action input without a valid action'); + } + } else if (key === 'answer') { + // If an answer is found, end the query + console.log('Answer found. Ending query.'); + this.streamedAnswerParser.reset(); + const parsedAnswer = AnswerParser.parse(result, this.processingInfo); + return parsedAnswer; + } + } + } + + throw new Error('Reached maximum turns. Ending query.'); + } + + /** + * Executes a step in the conversation, processing the assistant's response and parsing it in real-time. + * @param onProcessingUpdate Callback for processing updates. + * @param onAnswerUpdate Callback for answer updates. + * @returns The full response from the assistant. 
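+ *
+ * The stream is scanned character by character for tags such as <thought>, <action_input_description>,
+ * and <answer> so the UI can update before the full response arrives. A rough, abbreviated sketch of the
+ * stage shape this expects (prompts.ts defines the authoritative format):
+ * @example
+ * // <stage> <thought>...</thought> <action>rag</action> </stage>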
+ */ + private async execute(onProcessingUpdate: (processingUpdate: ProcessingInfo[]) => void, onAnswerUpdate: (answerUpdate: string) => void): Promise { + // Stream OpenAI response for real-time updates + const stream = await this.client.chat.completions.create({ + model: 'gpt-4o', + messages: this.interMessages as ChatCompletionMessageParam[], + temperature: 0, + stream: true, + }); + + let fullResponse: string = ''; + let currentTag: string = ''; + let currentContent: string = ''; + let isInsideTag: boolean = false; + + // Process each chunk of the streamed response + for await (const chunk of stream) { + let content = chunk.choices[0]?.delta?.content || ''; + fullResponse += content; + + // Parse the streamed content character by character + for (const char of content) { + if (currentTag === 'answer') { + // Handle answer parsing for real-time updates + currentContent += char; + const streamedAnswer = this.streamedAnswerParser.parse(char); + onAnswerUpdate(streamedAnswer); + continue; + } else if (char === '<') { + // Start of a new tag + isInsideTag = true; + currentTag = ''; + currentContent = ''; + } else if (char === '>') { + // End of the tag + isInsideTag = false; + if (currentTag.startsWith('/')) { + currentTag = ''; + } + } else if (isInsideTag) { + // Append characters to the tag name + currentTag += char; + } else if (currentTag === 'thought' || currentTag === 'action_input_description') { + // Handle processing information for thought or action input description + currentContent += char; + const current_info = this.processingInfo.find(info => info.index === this.processingNumber); + if (current_info) { + current_info.content = currentContent.trim(); + onProcessingUpdate(this.processingInfo); + } else { + this.processingInfo.push({ + index: this.processingNumber, + type: currentTag === 'thought' ? PROCESSING_TYPE.THOUGHT : PROCESSING_TYPE.ACTION, + content: currentContent.trim(), + }); + onProcessingUpdate(this.processingInfo); + } + } + } + } + + return fullResponse; + } + + /** + * Processes a specific action by invoking the appropriate tool with the provided inputs. + * @param action The action to perform. + * @param actionInput The inputs for the action. + * @returns The result of the action. + */ + private async processAction(action: string, actionInput: any): Promise { + if (!(action in this.tools)) { + throw new Error(`Unknown action: ${action}`); + } + + const tool = this.tools[action]; + return await tool.execute(actionInput); + } +} diff --git a/src/client/views/nodes/chatbot/agentsystem/prompts.ts b/src/client/views/nodes/chatbot/agentsystem/prompts.ts new file mode 100644 index 000000000..9daabc35f --- /dev/null +++ b/src/client/views/nodes/chatbot/agentsystem/prompts.ts @@ -0,0 +1,181 @@ +// prompts.ts + +import { Tool } from '../types/types'; + +export function getReactPrompt(tools: Tool[], summaries: () => string, chatHistory: string): string { + const toolDescriptions = tools + .map( + tool => ` + + ${tool.name} + ${tool.briefSummary} + ` + ) + .join('\n'); + + return ` + + You are an advanced AI assistant equipped with tools to answer user queries efficiently. You operate in a loop that is RIGIDLY structured and requires the use of specific tags and formats for your responses. Your goal is to provide accurate and well-structured answers to user queries. Below are the guidelines and information you can use to structure your approach to accomplishing this task. + + + + **STRUCTURE**: Always use the correct stage tags (e.g., ) for every response. 
Use only even-numbered stages for your responses. + **STOP after every stage and wait for input. Do not combine multiple stages in one response.** + If a tool is needed, select the most appropriate tool based on the query. + **If one tool does not yield satisfactory results or fails twice, try another tool that might work better for the query.** + Ensure that **ALL answers follow the answer structure**: grounded text wrapped in tags with corresponding citations, normal text in tags, and three follow-up questions at the end. + + + + + - All information derived from tools or user documents must be wrapped in these tags with proper citation. + - Use this tag for text not derived from tools or user documents. + + - Provide proper citations for each , referencing the tool or document chunk used. + + - Provide exactly three user-perspective follow-up questions. + - Summarize the actions and tools used in the conversation. + + + + + **Wrap ALL tool-based information** in tags and provide citations. + Use separate tags for distinct information or when switching to a different tool or document. + Ensure that **EVERY** tag includes a citation index referencing the source of the information. + Over-citing is discouraged—only cite the information that is directly relevant to the user's query. + + + + Wrap general information or reasoning **not derived from tools or documents** in tags. + Never put information derived from user documents or tools in tags—use for those. + + + + Carefully analyze the user query and determine if a tool is necessary to provide an accurate answer. + If a tool is needed, choose the most appropriate one and **stop after the action** to wait for system input. + If no tool is needed, use the 'no_tool' action but follow the structure. + When all observations are complete, format the final answer using and tags with appropriate citations. + Include exactly three follow-up questions from the user's perspective. + Provide a loop summary at the end of the conversation. + + + + ${toolDescriptions} + If no external tool is required, use 'no_tool', but if there might be relevant external information, use the appropriate tool. + + + + ${summaries()} + + + + ${chatHistory} + + + + + + Can you provide key moments from the 2022 World Cup and its impact on tourism in Qatar? + + + + + I will use the RAG tool to retrieve key moments from the user's World Cup documents. Afterward, I will use the website scraper tool to gather tourism impact data on Qatar. + + rag + + + + ***Action rules omitted*** + + + + + Searching user documents for key moments from the 2022 World Cup. + + Key moments from the 2022 World Cup. Goals, assists, big wins, big losses. + + + + + + + + The 2022 FIFA World Cup saw Argentina win, with Lionel Messi's performance being a key highlight. It was widely celebrated as a historical moment in sports. + + + + + + + With key moments from the World Cup retrieved, I will now use the website scraper tool to gather data on Qatar’s tourism impact during the World Cup. + + websiteInfoScraper + + + + ***Action rules omitted*** + + + + + Scraping websites for information about Qatar's tourism impact during the 2022 World Cup. + + Tourism impact of the 2022 World Cup in Qatar + + + + + + + + https://www.qatartourism.com/world-cup-impact + During the 2022 World Cup, Qatar saw a 40% increase in tourism, with over 1.5 million visitors attending. 
+ + + + + + + Now that I have gathered both key moments from the World Cup and tourism impact data from Qatar, I will summarize the information in my final response. + + + **The 2022 World Cup** saw Argentina crowned champions, with **Lionel Messi** leading his team to victory, marking a historic moment in sports. + **Qatar** experienced a **40% increase in tourism** during the World Cup, welcoming over **1.5 million visitors**, significantly boosting its economy. + Moments like **Messi’s triumph** often become ingrained in the legacy of World Cups, immortalizing these tournaments in both sports and cultural memory. The **long-term implications** of the World Cup on Qatar's **economy, tourism**, and **global image** remain important areas of interest as the country continues to build on the momentum generated by hosting this prestigious event. + + Key moments from the 2022 World Cup. + + + + What long-term effects has the World Cup had on Qatar's economy and infrastructure? + Can you compare Qatar's tourism numbers with previous World Cup hosts? + How has Qatar’s image on the global stage evolved post-World Cup? + + + The assistant first used the RAG tool to extract key moments from the user documents about the 2022 World Cup. Then, the assistant utilized the website scraping tool to gather data on Qatar's tourism impact. Both tools provided valuable information, and no additional tools were needed. + + + + + + + + Process the user's query according to these rules. Ensure your final answer is comprehensive, well-structured, and includes citations where appropriate. + +`; +} + +export function getSummarizedChunksPrompt(chunks: string): string { + return `Please provide a comprehensive summary of what you think the document from which these chunks originated. + Ensure the summary captures the main ideas and key points from all provided chunks. Be concise and brief and only provide the summary in paragraph form. + + Text chunks: + \`\`\` + ${chunks} + \`\`\``; +} + +export function getSummarizedSystemPrompt(): string { + return 'You are an AI assistant tasked with summarizing a document. You are provided with important chunks from the document and provide a summary, as best you can, of what the document will contain overall. 
Be concise and brief with your response.'; +} diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss new file mode 100644 index 000000000..42f6a0d61 --- /dev/null +++ b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss @@ -0,0 +1,279 @@ +@import url('https://fonts.googleapis.com/css2?family=Atkinson+Hyperlegible:ital,wght@0,400;0,700;1,400;1,700&display=swap'); + +$primary-color: #4a90e2; +$secondary-color: #f5f8fa; +$text-color: #333; +$light-text-color: #777; +$border-color: #e1e8ed; +$shadow-color: rgba(0, 0, 0, 0.1); +$transition: all 0.3s ease; +.chat-box { + display: flex; + flex-direction: column; + height: 100%; + background-color: #fff; + font-family: + 'Atkinson Hyperlegible', + -apple-system, + BlinkMacSystemFont, + 'Segoe UI', + Roboto, + Helvetica, + Arial, + sans-serif; + border-radius: 12px; + overflow: hidden; + box-shadow: 0 4px 12px $shadow-color; + position: relative; + + .chat-header { + background-color: $primary-color; + color: white; + padding: 15px; + text-align: center; + box-shadow: 0 2px 4px $shadow-color; + height: fit-content; + + h2 { + margin: 0; + font-size: 1.3em; + font-weight: 500; + } + } + + .chat-messages { + flex-grow: 1; + overflow-y: auto; + padding: 20px; + display: flex; + flex-direction: column; + gap: 10px; // Added to give space between elements + + &::-webkit-scrollbar { + width: 6px; + } + + &::-webkit-scrollbar-thumb { + background-color: $border-color; + border-radius: 3px; + } + } + + .chat-input { + display: flex; + padding: 20px; + border-top: 1px solid $border-color; + background-color: #fff; + + input { + flex-grow: 1; + padding: 12px 15px; + border: 1px solid $border-color; + border-radius: 24px; + font-size: 15px; + transition: $transition; + + &:focus { + outline: none; + border-color: $primary-color; + box-shadow: 0 0 0 2px rgba($primary-color, 0.2); + } + } + + .submit-button { + background-color: $primary-color; + color: white; + border: none; + border-radius: 50%; + width: 48px; + height: 48px; + margin-left: 10px; + cursor: pointer; + transition: $transition; + display: flex; + align-items: center; + justify-content: center; + + &:hover { + background-color: darken($primary-color, 10%); + } + + &:disabled { + background-color: $light-text-color; + cursor: not-allowed; + } + + .spinner { + height: 24px; + width: 24px; + border: 3px solid rgba(255, 255, 255, 0.3); + border-top: 3px solid #fff; + border-radius: 50%; + animation: spin 1s linear infinite; + display: flex; + align-items: center; + justify-content: center; + } + } + } + .citation-popup { + position: fixed; + bottom: 50px; + left: 50%; + transform: translateX(-50%); + background-color: rgba(0, 0, 0, 0.8); + color: white; + padding: 10px 20px; + border-radius: 10px; + box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2); + z-index: 1000; + animation: fadeIn 0.3s ease-in-out; + + p { + margin: 0; + font-size: 14px; + } + + @keyframes fadeIn { + from { + opacity: 0; + } + to { + opacity: 1; + } + } + } +} + +.message { + max-width: 80%; + margin-bottom: 20px; + padding: 16px 20px; + border-radius: 18px; + font-size: 15px; + line-height: 1.5; + box-shadow: 0 2px 4px $shadow-color; + word-wrap: break-word; // To handle long words + + &.user { + align-self: flex-end; + background-color: $primary-color; + color: white; + border-bottom-right-radius: 4px; + } + + &.chatbot { + align-self: flex-start; + background-color: $secondary-color; + color: $text-color; + border-bottom-left-radius: 4px; + 
} + + .toggle-info { + background-color: transparent; + color: $primary-color; + border: 1px solid $primary-color; + width: 100%; + height: fit-content; + border-radius: 8px; + padding: 10px 16px; + font-size: 14px; + cursor: pointer; + transition: $transition; + margin-top: 10px; + + &:hover { + background-color: rgba($primary-color, 0.1); + } + } +} + +.follow-up-questions { + margin-top: 15px; + + h4 { + font-size: 15px; + font-weight: 600; + margin-bottom: 10px; + } + + .questions-list { + display: flex; + flex-direction: column; + gap: 10px; + } + + .follow-up-button { + background-color: #fff; + color: $primary-color; + border: 1px solid $primary-color; + border-radius: 8px; + padding: 10px 16px; + font-size: 14px; + cursor: pointer; + transition: $transition; + text-align: left; + white-space: normal; + word-wrap: break-word; + width: 100%; + height: fit-content; + + &:hover { + background-color: $primary-color; + color: #fff; + } + } +} + +.citation-button { + display: inline-flex; + align-items: center; + justify-content: center; + width: 20px; + height: 20px; + border-radius: 50%; + background-color: rgba(0, 0, 0, 0.1); + color: $text-color; + font-size: 12px; + font-weight: bold; + margin-left: 5px; + cursor: pointer; + transition: $transition; + vertical-align: middle; + + &:hover { + background-color: rgba(0, 0, 0, 0.2); + } +} + +.uploading-overlay { + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: rgba(255, 255, 255, 0.8); + display: flex; + justify-content: center; + align-items: center; + z-index: 1000; +} + +@keyframes spin { + 0% { + transform: rotate(0deg); + } + 100% { + transform: rotate(360deg); + } +} + +@media (max-width: 768px) { + .chat-box { + border-radius: 0; + } + + .message { + max-width: 90%; + } +} diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx new file mode 100644 index 000000000..6dc691798 --- /dev/null +++ b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx @@ -0,0 +1,745 @@ +import { action, computed, makeObservable, observable, observe, reaction, runInAction, ObservableSet } from 'mobx'; +import { observer } from 'mobx-react'; +import OpenAI, { ClientOptions } from 'openai'; +import * as React from 'react'; +import { Doc, DocListCast } from '../../../../../fields/Doc'; +import { CsvCast, DocCast, PDFCast, RTFCast, StrCast } from '../../../../../fields/Types'; +import { DocumentType } from '../../../../documents/DocumentTypes'; +import { Docs } from '../../../../documents/Documents'; +import { LinkManager } from '../../../../util/LinkManager'; +import { ViewBoxAnnotatableComponent } from '../../../DocComponent'; +import { FieldView, FieldViewProps } from '../../FieldView'; +import './ChatBox.scss'; +import MessageComponentBox from './MessageComponent'; +import { ASSISTANT_ROLE, AssistantMessage, Citation, CHUNK_TYPE, TEXT_TYPE, SimplifiedChunk, ProcessingInfo } from '../types/types'; +import { Vectorstore } from '../vectorstore/Vectorstore'; +import { Agent } from '../agentsystem/Agent'; +import dotenv from 'dotenv'; +import { DocData, DocViews } from '../../../../../fields/DocSymbols'; +import { DocumentManager } from '../../../../util/DocumentManager'; +import { v4 as uuidv4 } from 'uuid'; +import { DocUtils } from '../../../../documents/DocUtils'; +import { ClientUtils } from '../../../../../ClientUtils'; +import { ProgressBar } from './ProgressBar'; +import { DocumentView } from '../../DocumentView'; +import { 
Networking } from '../../../../Network'; + +dotenv.config(); + +/** + * ChatBox is the main class responsible for managing the interaction between the user and the assistant, + * handling documents, and integrating with OpenAI for tasks such as document analysis, chat functionality, + * and vector store interactions. + */ +@observer +export class ChatBox extends ViewBoxAnnotatableComponent() { + // MobX observable properties to track UI state and data + @observable history: AssistantMessage[] = []; + @observable.deep current_message: AssistantMessage | undefined = undefined; + @observable isLoading: boolean = false; + @observable uploadProgress: number = 0; + @observable currentStep: string = ''; + @observable expandedScratchpadIndex: number | null = null; + @observable inputValue: string = ''; + @observable private linked_docs_to_add: ObservableSet = observable.set(); + @observable private linked_csv_files: { filename: string; id: string; text: string }[] = []; + @observable private isUploadingDocs: boolean = false; + @observable private citationPopup: { text: string; visible: boolean } = { text: '', visible: false }; + + // Private properties for managing OpenAI API, vector store, agent, and UI elements + private openai: OpenAI; + private vectorstore_id: string; + private vectorstore: Vectorstore; + private agent: Agent; + private _oldWheel: HTMLDivElement | null = null; + private messagesRef: React.RefObject; + + /** + * Static method that returns the layout string for the field. + * @param fieldKey Key to get the layout string. + */ + public static LayoutString(fieldKey: string) { + return FieldView.LayoutString(ChatBox, fieldKey); + } + + /** + * Constructor initializes the component, sets up OpenAI, vector store, and agent instances, + * and observes changes in the chat history to save the state in dataDoc. + * @param props The properties passed to the component. + */ + constructor(props: FieldViewProps) { + super(props); + makeObservable(this); // Enable MobX observables + + // Initialize OpenAI, vectorstore, and agent + this.openai = this.initializeOpenAI(); + if (StrCast(this.dataDoc.vectorstore_id) == '') { + this.vectorstore_id = uuidv4(); + this.dataDoc.vectorstore_id = this.vectorstore_id; + } else { + this.vectorstore_id = StrCast(this.dataDoc.vectorstore_id); + } + this.vectorstore = new Vectorstore(this.vectorstore_id, this.retrieveDocIds); + this.agent = new Agent(this.vectorstore, this.retrieveSummaries, this.retrieveFormattedHistory, this.retrieveCSVData, this.addLinkedUrlDoc, this.createCSVInDash); + this.messagesRef = React.createRef(); + + // Reaction to update dataDoc when chat history changes + reaction( + () => + this.history.map((msg: AssistantMessage) => ({ + role: msg.role, + content: msg.content, + follow_up_questions: msg.follow_up_questions, + citations: msg.citations, + })), + serializableHistory => { + this.dataDoc.data = JSON.stringify(serializableHistory); + } + ); + } + + /** + * Adds a document to the vectorstore for AI-based analysis. + * Handles the upload progress and errors during the process. + * @param newLinkedDoc The new document to add. 
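+ *
+ * @example
+ * // Sketch: invoked from the linked-document observer when a PDF is linked to the chat;
+ * // upload progress is surfaced through updateProgress. The variable name is illustrative.
+ * // await this.addDocToVectorstore(linkedPdfDoc);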
+ */ + @action + addDocToVectorstore = async (newLinkedDoc: Doc) => { + this.uploadProgress = 0; + this.currentStep = 'Initializing...'; + this.isUploadingDocs = true; + + try { + // Add the document to the vectorstore + await this.vectorstore.addAIDoc(newLinkedDoc, this.updateProgress); + } catch (error) { + console.error('Error uploading document:', error); + this.currentStep = 'Error during upload'; + } finally { + this.isUploadingDocs = false; + this.uploadProgress = 0; + this.currentStep = ''; + } + }; + + /** + * Updates the upload progress and the current step in the UI. + * @param progress The percentage of the progress. + * @param step The current step name. + */ + @action + updateProgress = (progress: number, step: string) => { + this.uploadProgress = progress; + this.currentStep = step; + }; + + /** + * Adds a CSV file for analysis by sending it to OpenAI and generating a summary. + * @param newLinkedDoc The linked document representing the CSV file. + * @param id Optional ID for the document. + */ + @action + addCSVForAnalysis = async (newLinkedDoc: Doc, id?: string) => { + if (!newLinkedDoc.chunk_simpl) { + // Convert document text to CSV data + const csvData: string = StrCast(newLinkedDoc.text); + + // Generate a summary using OpenAI API + const completion = await this.openai.chat.completions.create({ + messages: [ + { + role: 'system', + content: + 'You are an AI assistant tasked with summarizing the content of a CSV file. You will be provided with the data from the CSV file and your goal is to generate a concise summary that captures the main themes, trends, and key points represented in the data.', + }, + { + role: 'user', + content: `Please provide a comprehensive summary of the CSV file based on the provided data. Ensure the summary highlights the most important information, patterns, and insights. Your response should be in paragraph form and be concise. + CSV Data: + ${csvData} + ********** + Summary:`, + }, + ], + model: 'gpt-3.5-turbo', + }); + + const csvId = id ?? uuidv4(); + + // Add CSV details to linked files + this.linked_csv_files.push({ + filename: CsvCast(newLinkedDoc.data).url.pathname, + id: csvId, + text: csvData, + }); + + // Add a chunk for the CSV and assign the summary + const chunkToAdd = { + chunkId: csvId, + chunkType: CHUNK_TYPE.CSV, + }; + newLinkedDoc.chunk_simpl = JSON.stringify({ chunks: [chunkToAdd] }); + newLinkedDoc.summary = completion.choices[0].message.content!; + } + }; + + /** + * Toggles the tool logs, expanding or collapsing the scratchpad at the given index. + * @param index Index of the tool log to toggle. + */ + @action + toggleToolLogs = (index: number) => { + this.expandedScratchpadIndex = this.expandedScratchpadIndex === index ? null : index; + }; + + /** + * Initializes the OpenAI API client using the API key from environment variables. + * @returns OpenAI client instance. + */ + initializeOpenAI() { + const configuration: ClientOptions = { + apiKey: process.env.OPENAI_KEY, + dangerouslyAllowBrowser: true, + }; + return new OpenAI(configuration); + } + + /** + * Adds a scroll event listener to detect user scrolling and handle passive wheel events. + */ + addScrollListener = () => { + if (this.messagesRef.current) { + this.messagesRef.current.addEventListener('wheel', this.onPassiveWheel, { passive: false }); + } + }; + + /** + * Removes the scroll event listener from the chat messages container. 
+ */ + removeScrollListener = () => { + if (this.messagesRef.current) { + this.messagesRef.current.removeEventListener('wheel', this.onPassiveWheel); + } + }; + + /** + * Scrolls the chat messages container to the bottom, ensuring the latest message is visible. + */ + scrollToBottom = () => { + if (this.messagesRef.current) { + this.messagesRef.current.scrollTop = this.messagesRef.current.scrollHeight; + } + }; + + /** + * Event handler for detecting wheel scrolling and stopping the event propagation. + * @param e The wheel event. + */ + onPassiveWheel = (e: WheelEvent) => { + if (this._props.isContentActive()) { + e.stopPropagation(); + } + }; + + /** + * Sends the user's input to OpenAI, displays the loading indicator, and updates the chat history. + * @param event The form submission event. + */ + @action + askGPT = async (event: React.FormEvent): Promise => { + event.preventDefault(); + this.inputValue = ''; + + // Extract the user's message + const textInput = event.currentTarget.elements.namedItem('messageInput') as HTMLInputElement; + const trimmedText = textInput.value.trim(); + + if (trimmedText) { + try { + textInput.value = ''; + // Add the user's message to the history + this.history.push({ + role: ASSISTANT_ROLE.USER, + content: [{ index: 0, type: TEXT_TYPE.NORMAL, text: trimmedText, citation_ids: null }], + processing_info: [], + }); + this.isLoading = true; + this.current_message = { + role: ASSISTANT_ROLE.ASSISTANT, + content: [], + citations: [], + processing_info: [], + }; + + // Define callbacks for real-time processing updates + const onProcessingUpdate = (processingUpdate: ProcessingInfo[]) => { + runInAction(() => { + if (this.current_message) { + this.current_message = { + ...this.current_message, + processing_info: processingUpdate, + }; + } + }); + this.scrollToBottom(); + }; + + const onAnswerUpdate = (answerUpdate: string) => { + runInAction(() => { + if (this.current_message) { + this.current_message = { + ...this.current_message, + content: [{ text: answerUpdate, type: TEXT_TYPE.NORMAL, index: 0, citation_ids: [] }], + }; + } + }); + }; + + // Send the user's question to the assistant and get the final message + const finalMessage = await this.agent.askAgent(trimmedText, onProcessingUpdate, onAnswerUpdate); + + // Update the history with the final assistant message + runInAction(() => { + if (this.current_message) { + this.history.push({ ...finalMessage }); + this.current_message = undefined; + this.dataDoc.data = JSON.stringify(this.history); + } + }); + } catch (err) { + console.error('Error:', err); + // Handle error in processing + this.history.push({ + role: ASSISTANT_ROLE.ASSISTANT, + content: [{ index: 0, type: TEXT_TYPE.ERROR, text: 'Sorry, I encountered an error while processing your request.', citation_ids: null }], + processing_info: [], + }); + } finally { + this.isLoading = false; + this.scrollToBottom(); + } + } + this.scrollToBottom(); + }; + + /** + * Updates the citations for a given message in the chat history. + * @param index The index of the message in the history. + * @param citations The list of citations to add to the message. + */ + @action + updateMessageCitations = (index: number, citations: Citation[]) => { + if (this.history[index]) { + this.history[index].citations = citations; + } + }; + + /** + * Adds a linked document from a URL for future reference and analysis. + * @param url The URL of the document to add. + * @param id The unique identifier for the document. 
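+ *
+ * @example
+ * // Sketch of how the search/scraper tools register a web source (URL and id are illustrative):
+ * // await this.addLinkedUrlDoc('https://example.com/article', uuidv4());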
+ */ + @action + addLinkedUrlDoc = async (url: string, id: string) => { + const doc = Docs.Create.WebDocument(url, { data_useCors: true }); + + const linkDoc = Docs.Create.LinkDocument(this.Document, doc); + LinkManager.Instance.addLink(linkDoc); + let canDisplay; + + try { + // Fetch the URL content through the proxy + const { data } = await Networking.PostToServer('/proxyFetch', { url }); + + // Simulating header behavior since we can't fetch headers via proxy + const xFrameOptions = data.headers?.['x-frame-options']; + + if (xFrameOptions && xFrameOptions.toUpperCase() === 'SAMEORIGIN') { + canDisplay = false; + } else { + canDisplay = true; + } + } catch (error) { + console.error('Error fetching the URL from the server:', error); + } + + const chunkToAdd = { + chunkId: id, + chunkType: CHUNK_TYPE.URL, + url: url, + canDisplay: canDisplay, + }; + + doc.chunk_simpl = JSON.stringify({ chunks: [chunkToAdd] }); + }; + + /** + * Getter to retrieve the current user's name from the client utils. + */ + @computed + get userName() { + return ClientUtils.CurrentUserEmail; + } + + /** + * Creates a CSV document in the dashboard and adds it for analysis. + * @param url The URL of the CSV. + * @param title The title of the CSV document. + * @param id The unique ID for the document. + * @param data The CSV data content. + */ + @action + createCSVInDash = async (url: string, title: string, id: string, data: string) => { + const doc = DocCast(await DocUtils.DocumentFromType('csv', url, { title: title, text: RTFCast(data) })); + + const linkDoc = Docs.Create.LinkDocument(this.Document, doc); + LinkManager.Instance.addLink(linkDoc); + + doc && this._props.addDocument?.(doc); + await DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {}); + + this.addCSVForAnalysis(doc, id); + }; + + /** + * Event handler to manage citations click in the message components. + * @param citation The citation object clicked by the user. + */ + @action + handleCitationClick = (citation: Citation) => { + const currentLinkedDocs: Doc[] = this.linkedDocs; + + const chunkId = citation.chunk_id; + + // Loop through the linked documents to find the matching chunk and handle its display + for (let doc of currentLinkedDocs) { + if (doc.chunk_simpl) { + const docChunkSimpl = JSON.parse(StrCast(doc.chunk_simpl)) as { chunks: SimplifiedChunk[] }; + const foundChunk = docChunkSimpl.chunks.find(chunk => chunk.chunkId === chunkId); + if (foundChunk) { + // Handle different types of chunks (image, text, table, etc.) + switch (foundChunk.chunkType) { + case CHUNK_TYPE.IMAGE: + case CHUNK_TYPE.TABLE: + const values = foundChunk.location?.replace(/[\[\]]/g, '').split(','); + + if (values?.length !== 4) { + console.error('Location string must contain exactly 4 numbers'); + return; + } + + const x1 = parseFloat(values[0]) * Doc.NativeWidth(doc); + const y1 = parseFloat(values[1]) * Doc.NativeHeight(doc) + foundChunk.startPage * Doc.NativeHeight(doc); + const x2 = parseFloat(values[2]) * Doc.NativeWidth(doc); + const y2 = parseFloat(values[3]) * Doc.NativeHeight(doc) + foundChunk.startPage * Doc.NativeHeight(doc); + + const annotationKey = Doc.LayoutFieldKey(doc) + '_annotations'; + + const existingDoc = DocListCast(doc[DocData][annotationKey]).find(d => d.citation_id === citation.citation_id); + const highlightDoc = existingDoc ?? 
this.createImageCitationHighlight(x1, y1, x2, y2, citation, annotationKey, doc); + + DocumentManager.Instance.showDocument(highlightDoc, { willZoomCentered: true }, () => {}); + break; + case CHUNK_TYPE.TEXT: + this.citationPopup = { text: citation.direct_text ?? 'No text available', visible: true }; + setTimeout(() => (this.citationPopup.visible = false), 3000); // Hide after 3 seconds + + DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => { + const firstView = Array.from(doc[DocViews])[0] as DocumentView; + firstView.ComponentView?.search?.(citation.direct_text ?? ''); + }); + break; + case CHUNK_TYPE.URL: + if (!foundChunk.canDisplay) { + window.open(StrCast(doc.displayUrl), '_blank'); + } else if (foundChunk.canDisplay) { + DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {}); + } + break; + case CHUNK_TYPE.CSV: + DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {}); + break; + default: + console.error('Chunk type not recognized:', foundChunk.chunkType); + break; + } + } + } + } + }; + + /** + * Creates an annotation highlight on a PDF document for image citations. + * @param x1 X-coordinate of the top-left corner of the highlight. + * @param y1 Y-coordinate of the top-left corner of the highlight. + * @param x2 X-coordinate of the bottom-right corner of the highlight. + * @param y2 Y-coordinate of the bottom-right corner of the highlight. + * @param citation The citation object to associate with the highlight. + * @param annotationKey The key used to store the annotation. + * @param pdfDoc The document where the highlight is created. + * @returns The highlighted document. + */ + createImageCitationHighlight = (x1: number, y1: number, x2: number, y2: number, citation: Citation, annotationKey: string, pdfDoc: Doc): Doc => { + const highlight_doc = Docs.Create.FreeformDocument([], { + x: x1, + y: y1, + _width: x2 - x1, + _height: y2 - y1, + backgroundColor: 'rgba(255, 255, 0, 0.5)', + }); + highlight_doc[DocData].citation_id = citation.citation_id; + Doc.AddDocToList(pdfDoc[DocData], annotationKey, highlight_doc); + highlight_doc.annotationOn = pdfDoc; + Doc.SetContainer(highlight_doc, pdfDoc); + return highlight_doc; + }; + + /** + * Lifecycle method that triggers when the component updates. + * Ensures the chat is scrolled to the bottom when new messages are added. + */ + componentDidUpdate() { + this.scrollToBottom(); + } + + /** + * Lifecycle method that triggers when the component mounts. + * Initializes scroll listeners, sets up document reactions, and loads chat history from dataDoc if available. + */ + componentDidMount() { + this._props.setContentViewBox?.(this); + if (this.dataDoc.data) { + try { + const storedHistory = JSON.parse(StrCast(this.dataDoc.data)); + runInAction(() => { + this.history.push( + ...storedHistory.map((msg: AssistantMessage) => ({ + role: msg.role, + content: msg.content, + follow_up_questions: msg.follow_up_questions, + citations: msg.citations, + })) + ); + }); + } catch (e) { + console.error('Failed to parse history from dataDoc:', e); + } + } else { + // Default welcome message + runInAction(() => { + this.history.push({ + role: ASSISTANT_ROLE.ASSISTANT, + content: [ + { + index: 0, + type: TEXT_TYPE.NORMAL, + text: `Hey, ${this.userName()}! Welcome to Your Friendly Assistant. 
Link a document or ask questions to get started.`, + citation_ids: null, + }, + ], + processing_info: [], + }); + }); + } + + // Set up reactions for linked documents + reaction( + () => { + const linkedDocs = LinkManager.Instance.getAllRelatedLinks(this.Document) + .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) + .map(d => DocCast(d?.annotationOn, d)) + .filter(d => d); + return linkedDocs; + }, + linked => linked.forEach(doc => this.linked_docs_to_add.add(doc)) + ); + + // Observe changes to linked documents and handle document addition + observe(this.linked_docs_to_add, change => { + if (change.type === 'add') { + if (PDFCast(change.newValue.data)) { + this.addDocToVectorstore(change.newValue); + } else if (CsvCast(change.newValue.data)) { + this.addCSVForAnalysis(change.newValue); + } + } else if (change.type === 'delete') { + // Handle document removal + } + }); + this.addScrollListener(); + } + + /** + * Lifecycle method that triggers when the component unmounts. + * Removes scroll listeners to avoid memory leaks. + */ + componentWillUnmount() { + this.removeScrollListener(); + } + + /** + * Getter that retrieves all linked documents for the current document. + */ + @computed + get linkedDocs() { + return LinkManager.Instance.getAllRelatedLinks(this.Document) + .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) + .map(d => DocCast(d?.annotationOn, d)) + .filter(d => d); + } + + /** + * Getter that retrieves document IDs of linked documents that have AI-related content. + */ + @computed + get docIds() { + return LinkManager.Instance.getAllRelatedLinks(this.Document) + .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) + .map(d => DocCast(d?.annotationOn, d)) + .filter(d => d) + .filter(d => d.ai_doc_id) + .map(d => StrCast(d.ai_doc_id)); + } + + /** + * Getter that retrieves summaries of all linked documents. + */ + @computed + get summaries(): string { + return ( + LinkManager.Instance.getAllRelatedLinks(this.Document) + .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document))) + .map(d => DocCast(d?.annotationOn, d)) + .filter(d => d) + .filter(d => d.summary) + .map((doc, index) => { + if (PDFCast(doc.data)) { + return `${doc.summary}`; + } else if (CsvCast(doc.data)) { + return `${doc.summary}`; + } else { + return `${index + 1}) ${doc.summary}`; + } + }) + .join('\n') + '\n' + ); + } + + /** + * Getter that retrieves all linked CSV files for analysis. + */ + @computed + get linkedCSVs(): { filename: string; id: string; text: string }[] { + return this.linked_csv_files; + } + + /** + * Getter that formats the entire chat history as a string for the agent's system message. + */ + @computed + get formattedHistory(): string { + let history = '\n'; + for (const message of this.history) { + history += `<${message.role}>${message.content.map(content => content.text).join(' ')}`; + if (message.loop_summary) { + history += `${message.loop_summary}`; + } + history += `\n`; + } + history += ''; + return history; + } + + // Other helper methods for retrieving document data and processing + + retrieveSummaries = () => { + return this.summaries; + }; + + retrieveCSVData = () => { + return this.linkedCSVs; + }; + + retrieveFormattedHistory = () => { + return this.formattedHistory; + }; + + retrieveDocIds = () => { + return this.docIds; + }; + + /** + * Handles follow-up questions when the user clicks on them. + * Automatically sets the input value to the clicked follow-up question. 
+ * @param question The follow-up question clicked by the user. + */ + @action + handleFollowUpClick = (question: string) => { + this.inputValue = question; + }; + + /** + * Renders the chat interface, including the message list, input field, and other UI elements. + */ + render() { + return ( +
+ {this.isUploadingDocs && ( +
+
+ +
{this.currentStep}
+
+
+ )} +
+

{this.userName()}'s AI Assistant

+
+
+ {this.history.map((message, index) => ( + + ))} + {this.current_message && ( + + )} +
+
+ (this.inputValue = e.target.value)} /> + +
+ {/* Popup for citation */} + {this.citationPopup.visible && ( +
+

+ Text from your document: {this.citationPopup.text} +

+
+ )} +
+ ); + } +} + +/** + * Register the ChatBox component as the template for CHAT document types. + */ +Docs.Prototypes.TemplateMap.set(DocumentType.CHAT, { + layout: { view: ChatBox, dataField: 'data' }, + options: { acl: '', chat: '', chat_history: '', chat_thread_id: '', chat_assistant_id: '', chat_vector_store_id: '' }, +}); diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx b/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx new file mode 100644 index 000000000..801becb64 --- /dev/null +++ b/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx @@ -0,0 +1,146 @@ +import React, { useState } from 'react'; +import { observer } from 'mobx-react'; +import { AssistantMessage, Citation, MessageContent, PROCESSING_TYPE, ProcessingInfo, TEXT_TYPE } from '../types/types'; +import ReactMarkdown from 'react-markdown'; + +/** + * Props for the MessageComponentBox. + * @interface MessageComponentProps + * @property {AssistantMessage} message - The message data to display. + * @property {number} index - The index of the message. + * @property {Function} onFollowUpClick - Callback to handle follow-up question clicks. + * @property {Function} onCitationClick - Callback to handle citation clicks. + * @property {Function} updateMessageCitations - Function to update message citations. + */ +interface MessageComponentProps { + message: AssistantMessage; + index: number; + onFollowUpClick: (question: string) => void; + onCitationClick: (citation: Citation) => void; + updateMessageCitations: (index: number, citations: Citation[]) => void; +} + +/** + * MessageComponentBox displays the content of an AssistantMessage including text, citations, + * processing information, and follow-up questions. + * @param {MessageComponentProps} props - The props for the component. + */ +const MessageComponentBox: React.FC = ({ message, index, onFollowUpClick, onCitationClick, updateMessageCitations }) => { + // State for managing whether the dropdown is open or closed for processing info + const [dropdownOpen, setDropdownOpen] = useState(false); + + /** + * Renders the content of the message based on the type (e.g., grounded text, normal text). + * @param {MessageContent} item - The content item to render. + * @returns {JSX.Element} JSX element rendering the content. + */ + const renderContent = (item: MessageContent) => { + const i = item.index; + + // Handle grounded text with citations + if (item.type === TEXT_TYPE.GROUNDED) { + const citation_ids = item.citation_ids || []; + return ( + + {item.text} + {citation_ids.map((id, idx) => { + const citation = message.citations?.find(c => c.citation_id === id); + if (!citation) return null; + return ( + + ); + })} + + ); + } + + // Handle normal text + else if (item.type === TEXT_TYPE.NORMAL) { + return ( + + {item.text} + + ); + } + + // Handle query type content + else if ('query' in item) { + return ( + + {JSON.stringify(item.query)} + + ); + } + + // Fallback for any other content type + else { + return ( + + {JSON.stringify(item)} + + ); + } + }; + + // Check if the message contains processing information (thoughts/actions) + const hasProcessingInfo = message.processing_info && message.processing_info.length > 0; + + /** + * Renders processing information such as thoughts or actions during message handling. + * @param {ProcessingInfo} info - The processing information to render. + * @returns {JSX.Element | null} JSX element rendering the processing info or null. 
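+ *
+ * @example
+ * // A ProcessingInfo entry as produced by the agent loop (values are illustrative):
+ * // { index: 0, type: PROCESSING_TYPE.THOUGHT, content: 'Searching user documents for key moments.' }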
+ */ + const renderProcessingInfo = (info: ProcessingInfo) => { + if (info.type === PROCESSING_TYPE.THOUGHT) { + return ( +
+ Thought: {info.content} +
+ ); + } else if (info.type === PROCESSING_TYPE.ACTION) { + return ( +
+ Action: {info.content} +
+ ); + } + return null; + }; + + return ( +
+ {/* Processing Information Dropdown */} + {hasProcessingInfo && ( +
+ + {dropdownOpen &&
{message.processing_info.map(renderProcessingInfo)}
} +
+
+ )} + + {/* Message Content */} +
{message.content && message.content.map(messageFragment => {renderContent(messageFragment)})}
+ + {/* Follow-up Questions Section */} + {message.follow_up_questions && message.follow_up_questions.length > 0 && ( +
+

Follow-up Questions:

+
+ {message.follow_up_questions.map((question, idx) => ( + + ))} +
+
+ )} +
+ ); +}; + +// Export the observer-wrapped component to allow MobX to react to state changes +export default observer(MessageComponentBox); diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss b/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss new file mode 100644 index 000000000..ff5be4a38 --- /dev/null +++ b/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss @@ -0,0 +1,69 @@ +.spinner-container { + display: flex; + flex-direction: column; + justify-content: center; + align-items: center; + height: 100%; +} + +.spinner { + width: 60px; + height: 60px; + position: relative; + margin-bottom: 20px; // Space between spinner and text +} + +.double-bounce1, +.double-bounce2 { + width: 100%; + height: 100%; + border-radius: 50%; + background-color: #4a90e2; + opacity: 0.6; + position: absolute; + top: 0; + left: 0; + animation: bounce 2s infinite ease-in-out; +} + +.double-bounce2 { + animation-delay: -1s; +} + +@keyframes bounce { + 0%, + 100% { + transform: scale(0); + } + 50% { + transform: scale(1); + } +} + +.uploading-overlay { + position: absolute; + top: 0; + left: 0; + right: 0; + bottom: 0; + background-color: rgba(255, 255, 255, 0.8); + display: flex; + align-items: center; + justify-content: center; + z-index: 1000; +} + +.progress-container { + display: flex; + flex-direction: column; + align-items: center; + text-align: center; +} + +.step-name { + font-size: 18px; + color: #333; + text-align: center; + width: 100%; + margin-top: -10px; // Adjust to move the text closer to the spinner +} diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx b/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx new file mode 100644 index 000000000..b9fd08742 --- /dev/null +++ b/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx @@ -0,0 +1,22 @@ +import React from 'react'; +import './ProgressBar.scss'; + +/** + * ProgressBar is a functional React component that displays a loading spinner + * to indicate progress or ongoing processing. It uses two bouncing elements + * to create a smooth animation that represents an active state. + * + * The animation consists of two divs (`double-bounce1` and `double-bounce2`), + * each of which will bounce in and out of view, creating a pulsating effect. + */ +export const ProgressBar: React.FC = () => { + return ( +
+ <div className="spinner-container">
+ {/* Spinner div containing two bouncing elements */}
+ <div className="spinner">
+ <div className="double-bounce1"></div> {/* First bouncing element */}
+ <div className="double-bounce2"></div> {/* Second bouncing element */}
+ </div>
+ </div>
+
+ ); +}; diff --git a/src/client/views/nodes/chatbot/response_parsers/AnswerParser.ts b/src/client/views/nodes/chatbot/response_parsers/AnswerParser.ts new file mode 100644 index 000000000..3b4fdb6f5 --- /dev/null +++ b/src/client/views/nodes/chatbot/response_parsers/AnswerParser.ts @@ -0,0 +1,125 @@ +import { ASSISTANT_ROLE, AssistantMessage, Citation, CHUNK_TYPE, TEXT_TYPE, getChunkType, ProcessingInfo } from '../types/types'; +import { v4 as uuid } from 'uuid'; + +export class AnswerParser { + static parse(xml: string, processingInfo: ProcessingInfo[]): AssistantMessage { + const answerRegex = /([\s\S]*?)<\/answer>/; + const citationsRegex = /([\s\S]*?)<\/citations>/; + const citationRegex = /([\s\S]*?)<\/citation>/g; + const followUpQuestionsRegex = /([\s\S]*?)<\/follow_up_questions>/; + const questionRegex = /(.*?)<\/question>/g; + const groundedTextRegex = /([\s\S]*?)<\/grounded_text>/g; + const normalTextRegex = /([\s\S]*?)<\/normal_text>/g; + const loopSummaryRegex = /([\s\S]*?)<\/loop_summary>/; + + const answerMatch = answerRegex.exec(xml); + const citationsMatch = citationsRegex.exec(xml); + const followUpQuestionsMatch = followUpQuestionsRegex.exec(xml); + const loopSummaryMatch = loopSummaryRegex.exec(xml); + + if (!answerMatch) { + throw new Error('Invalid XML: Missing tag.'); + } + + let rawTextContent = answerMatch[1].trim(); + let content: AssistantMessage['content'] = []; + let citations: Citation[] = []; + let contentIndex = 0; + + // Remove citations and follow-up questions from rawTextContent + if (citationsMatch) { + rawTextContent = rawTextContent.replace(citationsMatch[0], '').trim(); + } + if (followUpQuestionsMatch) { + rawTextContent = rawTextContent.replace(followUpQuestionsMatch[0], '').trim(); + } + if (loopSummaryMatch) { + rawTextContent = rawTextContent.replace(loopSummaryMatch[0], '').trim(); + } + + // Parse citations + let citationMatch; + const citationMap = new Map(); + if (citationsMatch) { + const citationsContent = citationsMatch[1]; + while ((citationMatch = citationRegex.exec(citationsContent)) !== null) { + const [_, index, chunk_id, type, direct_text] = citationMatch; + const citation_id = uuid(); + citationMap.set(index, citation_id); + citations.push({ + direct_text: direct_text.trim(), + type: getChunkType(type), + chunk_id, + citation_id, + }); + } + } + + rawTextContent = rawTextContent.replace(normalTextRegex, '$1'); + + // Parse text content (normal and grounded) + let lastIndex = 0; + let match; + + while ((match = groundedTextRegex.exec(rawTextContent)) !== null) { + const [fullMatch, citationIndex, groundedText] = match; + + // Add normal text that is before the grounded text + if (match.index > lastIndex) { + const normalText = rawTextContent.slice(lastIndex, match.index).trim(); + if (normalText) { + content.push({ + index: contentIndex++, + type: TEXT_TYPE.NORMAL, + text: normalText, + citation_ids: null, + }); + } + } + + // Add grounded text + const citation_ids = citationIndex.split(',').map(index => citationMap.get(index) || ''); + content.push({ + index: contentIndex++, + type: TEXT_TYPE.GROUNDED, + text: groundedText.trim(), + citation_ids, + }); + + lastIndex = match.index + fullMatch.length; + } + + // Add any remaining normal text after the last grounded text + if (lastIndex < rawTextContent.length) { + const remainingText = rawTextContent.slice(lastIndex).trim(); + if (remainingText) { + content.push({ + index: contentIndex++, + type: TEXT_TYPE.NORMAL, + text: remainingText, + citation_ids: null, + }); + } + } + + let 
followUpQuestions: string[] = []; + if (followUpQuestionsMatch) { + const questionsText = followUpQuestionsMatch[1]; + let questionMatch; + while ((questionMatch = questionRegex.exec(questionsText)) !== null) { + followUpQuestions.push(questionMatch[1].trim()); + } + } + + const assistantResponse: AssistantMessage = { + role: ASSISTANT_ROLE.ASSISTANT, + content, + follow_up_questions: followUpQuestions, + citations, + processing_info: processingInfo, + loop_summary: loopSummaryMatch ? loopSummaryMatch[1].trim() : undefined, + }; + + return assistantResponse; + } +} diff --git a/src/client/views/nodes/chatbot/response_parsers/StreamedAnswerParser.ts b/src/client/views/nodes/chatbot/response_parsers/StreamedAnswerParser.ts new file mode 100644 index 000000000..3585cab4a --- /dev/null +++ b/src/client/views/nodes/chatbot/response_parsers/StreamedAnswerParser.ts @@ -0,0 +1,73 @@ +import { threadId } from 'worker_threads'; + +enum ParserState { + Outside, + InGroundedText, + InNormalText, +} + +export class StreamedAnswerParser { + private state: ParserState = ParserState.Outside; + private buffer: string = ''; + private result: string = ''; + private isStartOfLine: boolean = true; + + public parse(char: string): string { + switch (this.state) { + case ParserState.Outside: + if (char === '<') { + this.buffer = '<'; + } else if (char === '>') { + if (this.buffer.startsWith('') { + this.state = ParserState.Outside; + this.buffer = ''; + } else if (this.buffer.startsWith('') { + this.state = ParserState.Outside; + this.buffer = ''; + } else if (this.buffer.startsWith('<')) { + this.buffer += char; + } else { + this.processChar(char); + } + break; + } + + return this.result.trim(); + } + + private processChar(char: string): void { + if (this.isStartOfLine && char === ' ') { + // Skip leading spaces + return; + } + if (char === '\n') { + this.result += char; + this.isStartOfLine = true; + } else { + this.result += char; + this.isStartOfLine = false; + } + } + + public reset(): void { + this.state = ParserState.Outside; + this.buffer = ''; + this.result = ''; + this.isStartOfLine = true; + } +} diff --git a/src/client/views/nodes/chatbot/tools/BaseTool.ts b/src/client/views/nodes/chatbot/tools/BaseTool.ts new file mode 100644 index 000000000..b57f1c8e4 --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/BaseTool.ts @@ -0,0 +1,24 @@ +import { Tool } from '../types/types'; + +export abstract class BaseTool = Record> implements Tool { + constructor( + public name: string, + public description: string, + public parameters: Record, + public citationRules: string, + public briefSummary: string + ) {} + + abstract execute(args: T): Promise; + + getActionRule(): Record { + return { + [this.name]: { + name: this.name, + citationRules: this.citationRules, + description: this.description, + parameters: this.parameters, + }, + }; + } +} diff --git a/src/client/views/nodes/chatbot/tools/CalculateTool.ts b/src/client/views/nodes/chatbot/tools/CalculateTool.ts new file mode 100644 index 000000000..74b7ca27b --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/CalculateTool.ts @@ -0,0 +1,26 @@ +import { BaseTool } from './BaseTool'; + +export class CalculateTool extends BaseTool<{ expression: string }> { + constructor() { + super( + 'calculate', + 'Perform a calculation', + { + expression: { + type: 'string', + description: 'The mathematical expression to evaluate', + required: 'true', + max_inputs: '1', + }, + }, + 'Provide a mathematical expression to calculate that would work with JavaScript eval().', + 
'Runs a calculation and returns the number - uses JavaScript so be sure to use floating point syntax if necessary' + ); + } + + async execute(args: { expression: string }): Promise { + // Note: Using eval() can be dangerous. Consider using a safer alternative. + const result = eval(args.expression); + return [{ type: 'text', text: result.toString() }]; + } +} diff --git a/src/client/views/nodes/chatbot/tools/CreateCSVTool.ts b/src/client/views/nodes/chatbot/tools/CreateCSVTool.ts new file mode 100644 index 000000000..55015846b --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/CreateCSVTool.ts @@ -0,0 +1,51 @@ +import { BaseTool } from './BaseTool'; +import { Networking } from '../../../../Network'; + +export class CreateCSVTool extends BaseTool<{ csvData: string; filename: string }> { + private _handleCSVResult: (url: string, filename: string, id: string, data: string) => void; + + constructor(handleCSVResult: (url: string, title: string, id: string, data: string) => void) { + super( + 'createCSV', + 'Creates a CSV file from raw CSV data and saves it to the server', + { + type: 'object', + properties: { + csvData: { + type: 'string', + description: 'A string of comma-separated values representing the CSV data.', + }, + filename: { + type: 'string', + description: 'The base name of the CSV file to be created. Should end in ".csv".', + }, + }, + required: ['csvData', 'filename'], + }, + 'Provide a CSV string and a filename to create a CSV file.', + 'Creates a CSV file from the provided CSV string and saves it to the server with a unique identifier, returning the file URL and UUID.' + ); + this._handleCSVResult = handleCSVResult; + } + + async execute(args: { csvData: string; filename: string }): Promise { + try { + console.log('Creating CSV file:', args.filename, ' with data:', args.csvData); + // Post the raw CSV data to the createCSV endpoint on the server + const { fileUrl, id } = await Networking.PostToServer('/createCSV', { filename: args.filename, data: args.csvData }); + + // Handle the result by invoking the callback + this._handleCSVResult(fileUrl, args.filename, id, args.csvData); + + return [ + { + type: 'text', + text: `File successfully created: ${fileUrl}. 
\nNow a CSV file with this data and the name ${args.filename} is available as a user doc.`, }, ]; } catch (error) { console.error('Error creating CSV file:', error); throw new Error('Failed to create CSV file.'); } } }
diff --git a/src/client/views/nodes/chatbot/tools/CreateCollectionTool.ts b/src/client/views/nodes/chatbot/tools/CreateCollectionTool.ts new file mode 100644 index 000000000..573428179 --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/CreateCollectionTool.ts @@ -0,0 +1,36 @@
+import { DocCast } from '../../../../../fields/Types';
+import { DocServer } from '../../../../DocServer';
+import { Docs } from '../../../../documents/Documents';
+import { DocumentView } from '../../DocumentView';
+import { OpenWhere } from '../../OpenWhere';
+import { BaseTool } from './BaseTool';
+
+export class GetDocsContentTool extends BaseTool<{ title: string; document_ids: string[] }> {
+ private _docView: DocumentView;
+ constructor(docView: DocumentView) {
+ super(
+ 'createCollection',
+ 'Creates a new collection in Dash containing the specified documents',
+ {
+ title: {
+ type: 'string',
+ description: 'The title of the collection to create',
+ required: 'true',
+ max_inputs: '1',
+ },
+ document_ids: {
+ type: 'string',
+ description: 'The ids of the documents to include in the collection',
+ required: 'true',
+ },
+ },
+ 'Provide a title for the new collection along with the ids of the documents it should contain.',
+ 'Creates a freeform collection in Dash from the given documents and returns a confirmation containing the collection title'
+ );
+ this._docView = docView;
+ }
+
+ async execute(args: { title: string; document_ids: string[] }): Promise {
+ // Resolve each document id to its cached Doc and wrap the results in a new freeform collection.
+ const docs = args.document_ids.map(doc_id => DocCast(DocServer.GetCachedRefField(doc_id)));
+ const collection = Docs.Create.FreeformDocument(docs, { title: args.title });
+ this._docView._props.addDocTab(collection, OpenWhere.addRight); // in future, create a popup prompting the user where to add the collection
+ return [{ type: 'text', text: 'Collection created in Dash called ' + args.title }];
+ }
+}
+//export function create_collection(docView: DocumentView, document_ids: string[], title: string): string {}
diff --git a/src/client/views/nodes/chatbot/tools/DataAnalysisTool.ts b/src/client/views/nodes/chatbot/tools/DataAnalysisTool.ts new file mode 100644 index 000000000..a12ee46e5 --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/DataAnalysisTool.ts @@ -0,0 +1,59 @@
+import { BaseTool } from './BaseTool';
+
+export class DataAnalysisTool extends BaseTool<{ csv_file_name: string | string[] }> {
+ private csv_files_function: () => { filename: string; id: string; text: string }[];
+
+ constructor(csv_files: () => { filename: string; id: string; text: string }[]) {
+ super(
+ 'dataAnalysis',
+ 'Analyzes and provides insights from one or more CSV files',
+ {
+ csv_file_name: {
+ type: 'string',
+ description: 'Name(s) of the CSV file(s) to analyze',
+ required: 'true',
+ max_inputs: '3',
+ },
+ },
+ 'Provide the name(s) of up to 3 CSV files to analyze based on the user query and whichever available CSV files may be relevant.',
+ 'Provides the full CSV file text for your analysis based on the user query and the available CSV file(s).
' + ); + this.csv_files_function = csv_files; + } + + getFileContent(filename: string): string | undefined { + const files = this.csv_files_function(); + const file = files.find(f => f.filename === filename); + return file?.text; + } + + getFileID(filename: string): string | undefined { + const files = this.csv_files_function(); + const file = files.find(f => f.filename === filename); + return file?.id; + } + + async execute(args: { csv_file_name: string | string[] }): Promise { + const filenames = Array.isArray(args.csv_file_name) ? args.csv_file_name : [args.csv_file_name]; + const results = []; + + for (const filename of filenames) { + const fileContent = this.getFileContent(filename); + const fileID = this.getFileID(filename); + + if (fileContent && fileID) { + results.push({ + type: 'text', + text: `${fileContent}`, + }); + } else { + results.push({ + type: 'text', + text: `File not found: ${filename}`, + }); + } + } + + return results; + } +} diff --git a/src/client/views/nodes/chatbot/tools/GetDocsTool.ts b/src/client/views/nodes/chatbot/tools/GetDocsTool.ts new file mode 100644 index 000000000..f970ca8ee --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/GetDocsTool.ts @@ -0,0 +1,29 @@ +import { DocCast } from '../../../../../fields/Types'; +import { DocServer } from '../../../../DocServer'; +import { Docs } from '../../../../documents/Documents'; +import { DocumentView } from '../../DocumentView'; +import { OpenWhere } from '../../OpenWhere'; +import { BaseTool } from './BaseTool'; + +export class GetDocsTool extends BaseTool<{ title: string; document_ids: string[] }> { + private _docView: DocumentView; + constructor(docView: DocumentView) { + super( + 'retrieveDocs', + 'Retrieves the contents of all Documents that the user is interacting with in Dash', + {}, + 'No need to provide anything. Just run the tool and it will retrieve the contents of all Documents that the user is interacting with in Dash.', + 'Returns the the documents in Dash in JSON form. This will include the title of the document, the location in the FreeFormDocument, and the content of the document, any applicable data fields, the layout of the document, etc.' + ); + this._docView = docView; + } + + async execute(args: { title: string; document_ids: string[] }): Promise { + // Note: Using eval() can be dangerous. Consider using a safer alternative. + const docs = args.document_ids.map(doc_id => DocCast(DocServer.GetCachedRefField(doc_id))); + const collection = Docs.Create.FreeformDocument(docs, { title: args.title }); + this._docView._props.addDocTab(collection, OpenWhere.addRight); //in future, create popup prompting user where to add + return [{ type: 'text', text: 'Collection created in Dash called ' + args.title }]; + } +} +//export function create_collection(docView: DocumentView, document_ids: string[], title: string): string {} diff --git a/src/client/views/nodes/chatbot/tools/NoTool.ts b/src/client/views/nodes/chatbot/tools/NoTool.ts new file mode 100644 index 000000000..1f0830a77 --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/NoTool.ts @@ -0,0 +1,18 @@ +// tools/NoTool.ts +import { BaseTool } from './BaseTool'; + +export class NoTool extends BaseTool<{}> { + constructor() { + super( + 'no_tool', + 'Use this when no external tool or action is required to answer the question.', + {}, + 'When using the "no_tool" action, simply provide an empty element. The observation will always be "No tool used. 
Proceed with answering the question."', + 'Use when no external tool or action is required to answer the question.' + ); + } + + async execute(args: {}): Promise { + return [{ type: 'text', text: 'No tool used. Proceed with answering the question.' }]; + } +} diff --git a/src/client/views/nodes/chatbot/tools/RAGTool.ts b/src/client/views/nodes/chatbot/tools/RAGTool.ts new file mode 100644 index 000000000..c24306dcd --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/RAGTool.ts @@ -0,0 +1,138 @@ +import { BaseTool } from './BaseTool'; +import { Vectorstore } from '../vectorstore/Vectorstore'; +import { RAGChunk } from '../types/types'; +import * as fs from 'fs'; +import { Networking } from '../../../../Network'; +import { file } from 'jszip'; +import { ChatCompletion, ChatCompletionContentPart, ChatCompletionMessageParam } from 'openai/resources'; + +export class RAGTool extends BaseTool { + constructor(private vectorstore: Vectorstore) { + super( + 'rag', + 'Perform a RAG search on user documents', + { + hypothetical_document_chunk: { + type: 'string', + description: + "Detailed version of the prompt that is effectively a hypothetical document chunk that would be ideal to embed and compare to the vectors of real document chunks to fetch the most relevant document chunks to answer the user's query", + required: 'true', + }, + }, + ` + Your task is to provide a comprehensive response to the user's prompt based on the given chunks and chat history. Follow these structural guidelines meticulously: + + 1. Overall Structure: + + [Main content with grounded_text tags interspersed with normal plain text (information that is not derived from chunks' information)] + + [Individual citation tags] + + + [Three question tags] + + + + 2. Grounded Text Tag Structure: + - Basic format: + + [Your generated text based on information from a subset of a chunk (a citation's direct text)] + + + 3. Citation Tag Structure: + + [For text: relevant subset of original chunk] + [For image/table: leave empty] + + + 4. Detailed Grounded Text Guidelines: + a. Wrap all information derived from chunks in grounded_text tags. + b. DO NOT PUT ANYTHING THAT IS NOT DIRECTLY DERIVED FROM INFORMATION FROM CHUNKS (EITHER IMAGE, TABLE, OR TEXT) IN GROUNDED_TEXT TAGS. + c. Use a single grounded_text tag for suquential and closely related information that references the same citation. If other citations' information are used sequentially, create new grounded_text tags. + d. Ensure every grounded_text tag has up to a few corresponding citations (should not be more than 3 and only 1 is fine). Multiple citation indices should be separated by commas. + e. Grounded text can be as short as a few words or as long as several sentences. + f. Avoid overlapping or nesting grounded_text tags; instead, use sequential tags. + + 5. Detailed Citation Guidelines: + a. Create a unique citation for each distinct piece of information from the chunks that is used to support grounded_text. + b. ALL TEXT CITATIONS must have direct text in its element content (e.g. DIRECT TEXT HERE) that is a relevant SUBSET of the original text chunk that is being cited specifically. + c. DO NOT paraphrase or summarize the text; use the original text as much as possible. + d. DO NOT USE THE FULL TEXT CHUNK as the citation content; only use the relevant subset of the text that the grounded_text is base. AS SHORT AS POSSIBLE WHILE PROVIDING INFORMATION (ONE TO TWO SENTENCES USUALLY)! + e. Ensure each citation has a unique index number. + f. 
Specify the correct type: "text", "image", or "table". + g. For text chunks, the content of the citation should ALWAYS have the relevant subset of the original text that the grounded_text is based on. + h. For image/table chunks, leave the citation content empty. + i. One citation can be used for multiple grounded_text tags if they are based on the same chunk information. + j. !!!DO NOT OVERCITE - only include citations for information that is directly relevant to the grounded_text. + + 6. Structural Integrity Checks: + a. Ensure all opening tags have corresponding closing tags. + b. Verify that all grounded_text tags have valid citation_index attributes (they should be equal to the associated citation(s) index field—not their chunk_id field). + c. Check that all cited indices in grounded_text tags have corresponding citations. + + Example of grounded_text usage: + + + + Artificial Intelligence (AI) is revolutionizing various sectors, with healthcare experiencing significant transformations in areas such as diagnosis and treatment planning. + + + In the field of medical diagnosis, AI has shown remarkable capabilities, particularly in radiology. For instance, AI systems have drastically improved mammogram analysis, achieving 99% accuracy at a rate 30 times faster than human radiologists. + + + This advancement not only enhances the efficiency of healthcare systems but also significantly reduces the occurrence of false positives, leading to fewer unnecessary biopsies and reduced patient stress. + + + + Beyond diagnosis, AI is playing a crucial role in drug discovery and development. By analyzing vast amounts of genetic and molecular data, AI algorithms can identify potential drug candidates much faster than traditional methods. + + + This could potentially reduce the time and cost of bringing new medications to market, especially for rare diseases that have historically received less attention due to limited market potential. + + + [... rest of the content ...] + + + Artificial Intelligence is revolutionizing various industries, with healthcare being one of the most profoundly affected sectors. + AI has shown particular promise in the field of radiology, enhancing the accuracy and speed of image analysis. + According to recent studies, AI systems have achieved 99% accuracy in mammogram analysis, performing the task 30 times faster than human radiologists. + The improvement in mammogram accuracy has led to a significant reduction in false positives, decreasing the need for unnecessary biopsies and reducing patient anxiety. + AI is accelerating the drug discovery process by analyzing complex molecular and genetic data to identify potential drug candidates. + The use of AI in drug discovery could significantly reduce the time and cost associated with bringing new medications to market, particularly for rare diseases. + + + + How might AI-driven personalized medicine impact the cost and accessibility of healthcare in the future? + What measures can be taken to ensure that AI systems in healthcare are free from biases and equally effective for diverse populations? + How could the role of healthcare professionals evolve as AI becomes more integrated into medical practices? 
+ + + `, + + `Performs a RAG (Retrieval-Augmented Generation) search on user documents and returns a + set of document chunks (either images or text) that can be used to provide a grounded response based on + user documents` + ); + } + + async execute(args: { hypothetical_document_chunk: string }): Promise { + const relevantChunks = await this.vectorstore.retrieve(args.hypothetical_document_chunk); + const formatted_chunks = await this.getFormattedChunks(relevantChunks); + return formatted_chunks; + } + + async getFormattedChunks(relevantChunks: RAGChunk[]): Promise { + try { + const { formattedChunks } = await Networking.PostToServer('/formatChunks', { relevantChunks }); + + if (!formattedChunks) { + throw new Error('Failed to format chunks'); + } + + return formattedChunks; + } catch (error) { + console.error('Error formatting chunks:', error); + throw error; + } + } +} diff --git a/src/client/views/nodes/chatbot/tools/SearchTool.ts b/src/client/views/nodes/chatbot/tools/SearchTool.ts new file mode 100644 index 000000000..b926cbadc --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/SearchTool.ts @@ -0,0 +1,54 @@ +import { max } from 'lodash'; +import { Networking } from '../../../../Network'; +import { BaseTool } from './BaseTool'; +import { v4 as uuidv4 } from 'uuid'; + +export class SearchTool extends BaseTool<{ query: string | string[] }> { + private _addLinkedUrlDoc: (url: string, id: string) => void; + private _max_results: number; + constructor(addLinkedUrlDoc: (url: string, id: string) => void, max_results: number = 5) { + super( + 'searchTool', + 'Search the web to find a wide range of websites related to a query or multiple queries', + { + query: { + type: 'string', + description: 'The search query or queries to use for finding websites', + required: 'true', + max_inputs: '3', + }, + }, + 'Provide up to 3 search queries to find a broad range of websites. This tool is intended to help you identify relevant websites, but not to be used for providing the final answer. Use this information to determine which specific website to investigate further.', + 'Returns a list of websites and their overviews based on the search queries, helping to identify which websites might contain relevant information.' + ); + this._addLinkedUrlDoc = addLinkedUrlDoc; + this._max_results = max_results; + } + + async execute(args: { query: string | string[] }): Promise { + const queries = Array.isArray(args.query) ? 
args.query : [args.query]; + const allResults = []; + + for (const query of queries) { + try { + const { results } = await Networking.PostToServer('/getWebSearchResults', { query, max_results: this._max_results }); + const data: { type: string; text: string }[] = results.map((result: { url: string; snippet: string }) => { + const id = uuidv4(); + return { + type: 'text', + text: ` + ${result.url} + ${result.snippet} + `, + }; + }); + allResults.push(...data); + } catch (error) { + console.log(error); + allResults.push({ type: 'text', text: `An error occurred while performing the web search for query: ${query}` }); + } + } + + return allResults; + } +} diff --git a/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts b/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts new file mode 100644 index 000000000..2118218f6 --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts @@ -0,0 +1,83 @@ +import { Networking } from '../../../../Network'; +import { BaseTool } from './BaseTool'; +import { v4 as uuidv4 } from 'uuid'; + +export class WebsiteInfoScraperTool extends BaseTool<{ url: string | string[] }> { + private _addLinkedUrlDoc: (url: string, id: string) => void; + + constructor(addLinkedUrlDoc: (url: string, id: string) => void) { + super( + 'websiteInfoScraper', + 'Scrape detailed information from specific websites relevant to the user query', + { + url: { + type: 'string', + description: 'The URL(s) of the website(s) to scrape', + required: true, + max_inputs: 3, + }, + }, + ` + Your task is to provide a comprehensive response to the user's prompt using the content scraped from relevant websites. Ensure you follow these guidelines for structuring your response: + + 1. Grounded Text Tag Structure: + - Wrap all text derived from the scraped website(s) in tags. + - **Do not include non-sourced information** in tags. + - Use a single tag for content derived from a single website. If citing multiple websites, create new tags for each. + - Ensure each tag has a citation index corresponding to the scraped URL. + + 2. Citation Tag Structure: + - Create a tag for each distinct piece of information used from the website(s). + - Each tag must reference a URL chunk using the chunk_id attribute. + - For URL-based citations, leave the citation content empty, but reference the chunk_id and type as 'url'. + + 3. Structural Integrity Checks: + - Ensure all opening and closing tags are matched properly. + - Verify that all citation_index attributes in tags correspond to valid citations. + - Do not over-cite—cite only the most relevant parts of the websites. + + Example Usage: + + + + Based on data from the World Bank, economic growth has stabilized in recent years, following a surge in investments. + + + According to information retrieved from the International Monetary Fund, the inflation rate has been gradually decreasing since 2020. + + + + + + + + + What are the long-term economic impacts of increased investments on GDP? + How might inflation trends affect future monetary policy? + Are there additional factors that could influence economic growth beyond investments and inflation? + + + `, + 'Returns the text content of the webpages for further analysis and grounding.' + ); + this._addLinkedUrlDoc = addLinkedUrlDoc; + } + + async execute(args: { url: string | string[] }): Promise { + const urls = Array.isArray(args.url) ? 
args.url : [args.url]; + const results = []; + + for (const url of urls) { + try { + const { website_plain_text } = await Networking.PostToServer('/scrapeWebsite', { url }); + const id = uuidv4(); + this._addLinkedUrlDoc(url, id); + results.push({ type: 'text', text: `\n${website_plain_text}\n\n` }); + } catch (error) { + results.push({ type: 'text', text: `An error occurred while scraping the website: ${url}` }); + } + } + + return results; + } +} diff --git a/src/client/views/nodes/chatbot/tools/WikipediaTool.ts b/src/client/views/nodes/chatbot/tools/WikipediaTool.ts new file mode 100644 index 000000000..143d91d80 --- /dev/null +++ b/src/client/views/nodes/chatbot/tools/WikipediaTool.ts @@ -0,0 +1,37 @@ +import { title } from 'process'; +import { Networking } from '../../../../Network'; +import { BaseTool } from './BaseTool'; +import axios from 'axios'; +import { v4 as uuidv4 } from 'uuid'; + +export class WikipediaTool extends BaseTool<{ title: string }> { + private _addLinkedUrlDoc: (url: string, id: string) => void; + constructor(addLinkedUrlDoc: (url: string, id: string) => void) { + super( + 'wikipedia', + 'Search Wikipedia and return a summary', + { + title: { + type: 'string', + description: 'The title of the Wikipedia article to search', + required: true, + }, + }, + 'Provide simply the title you want to search on Wikipedia and nothing more. If re-using this tool, try a different title for different information.', + 'Returns a summary from searching an article title on Wikipedia' + ); + this._addLinkedUrlDoc = addLinkedUrlDoc; + } + + async execute(args: { title: string }): Promise { + try { + const { text } = await Networking.PostToServer('/getWikipediaSummary', { title: args.title }); + const id = uuidv4(); + const url = `https://en.wikipedia.org/wiki/${args.title.replace(/ /g, '_')}`; + this._addLinkedUrlDoc(url, id); + return [{ type: 'text', text: ` ${text} ` }]; + } catch (error) { + return [{ type: 'text', text: 'An error occurred while fetching the article.' 
}]; + } + } +} diff --git a/src/client/views/nodes/chatbot/types/types.ts b/src/client/views/nodes/chatbot/types/types.ts new file mode 100644 index 000000000..f5d14ad6a --- /dev/null +++ b/src/client/views/nodes/chatbot/types/types.ts @@ -0,0 +1,129 @@ +export enum ASSISTANT_ROLE { + USER = 'user', + ASSISTANT = 'assistant', +} + +export enum TEXT_TYPE { + NORMAL = 'normal', + GROUNDED = 'grounded', + ERROR = 'error', +} + +export enum CHUNK_TYPE { + TEXT = 'text', + IMAGE = 'image', + TABLE = 'table', + URL = 'url', + CSV = 'CSV', +} + +export enum PROCESSING_TYPE { + THOUGHT = 'thought', + ACTION = 'action', + //eventually migrate error to here +} + +export function getChunkType(type: string): CHUNK_TYPE { + switch (type.toLowerCase()) { + case 'text': + return CHUNK_TYPE.TEXT; + break; + case 'image': + return CHUNK_TYPE.IMAGE; + break; + case 'table': + return CHUNK_TYPE.TABLE; + break; + case 'CSV': + return CHUNK_TYPE.CSV; + break; + case 'url': + return CHUNK_TYPE.URL; + break; + default: + return CHUNK_TYPE.TEXT; + break; + } +} + +export interface ProcessingInfo { + index: number; + type: PROCESSING_TYPE; + content: string; +} + +export interface AssistantMessage { + role: ASSISTANT_ROLE; + content: MessageContent[]; + follow_up_questions?: string[]; + citations?: Citation[]; + processing_info: ProcessingInfo[]; + loop_summary?: string; +} + +export interface MessageContent { + index: number; + type: TEXT_TYPE; + text: string; + citation_ids: string[] | null; +} + +export interface Citation { + direct_text?: string; + type: CHUNK_TYPE; + chunk_id: string; + citation_id: string; + url?: string; +} + +export interface RAGChunk { + id: string; + values: number[]; + metadata: { + text: string; + type: CHUNK_TYPE; + original_document: string; + file_path: string; + doc_id: string; + location: string; + start_page: number; + end_page: number; + base64_data?: string | undefined; + page_width?: number | undefined; + page_height?: number | undefined; + }; +} + +export interface SimplifiedChunk { + chunkId: string; + startPage: number; + endPage: number; + location?: string; + chunkType: CHUNK_TYPE; + url?: string; + canDisplay?: boolean; +} + +export interface AI_Document { + purpose: string; + file_name: string; + num_pages: number; + summary: string; + chunks: RAGChunk[]; + type: string; +} + +export interface Tool = Record> { + name: string; + description: string; + parameters: Record; + citationRules: string; + briefSummary: string; + execute: (args: T) => Promise; + getActionRule: () => Record; +} + +export interface AgentMessage { + role: 'system' | 'user' | 'assistant'; + content: string | { type: string; text?: string; image_url?: { url: string } }[]; +} diff --git a/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts b/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts new file mode 100644 index 000000000..07a2b73bc --- /dev/null +++ b/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts @@ -0,0 +1,258 @@ +import { Pinecone, Index, IndexList, PineconeRecord, RecordMetadata, QueryResponse } from '@pinecone-database/pinecone'; +import { CohereClient } from 'cohere-ai'; +import { EmbedResponse } from 'cohere-ai/api'; +import dotenv from 'dotenv'; +import { RAGChunk, AI_Document, CHUNK_TYPE } from '../types/types'; +import { Doc } from '../../../../../fields/Doc'; +import { CsvCast, PDFCast, StrCast } from '../../../../../fields/Types'; +import { Networking } from '../../../../Network'; + +dotenv.config(); + +/** + * The Vectorstore class integrates with Pinecone 
for vector-based document indexing and retrieval, + * and Cohere for text embedding. It handles AI document management, uploads, and query-based retrieval. + */ +export class Vectorstore { + private pinecone: Pinecone; // Pinecone client for managing the vector index. + private index!: Index; // The specific Pinecone index used for document chunks. + private cohere: CohereClient; // Cohere client for generating embeddings. + private indexName: string = 'pdf-chatbot'; // Default name for the index. + private _id: string; // Unique ID for the Vectorstore instance. + private _doc_ids: string[] = []; // List of document IDs handled by this instance. + + documents: AI_Document[] = []; // Store the documents indexed in the vectorstore. + + /** + * Constructor initializes the Pinecone and Cohere clients, sets up the document ID list, + * and initializes the Pinecone index. + * @param id The unique identifier for the vectorstore instance. + * @param doc_ids A function that returns a list of document IDs. + */ + constructor(id: string, doc_ids: () => string[]) { + const pineconeApiKey = process.env.PINECONE_API_KEY; + if (!pineconeApiKey) { + throw new Error('PINECONE_API_KEY is not defined.'); + } + + // Initialize Pinecone and Cohere clients with API keys from the environment. + this.pinecone = new Pinecone({ apiKey: pineconeApiKey }); + this.cohere = new CohereClient({ token: process.env.COHERE_API_KEY }); + this._id = id; + this._doc_ids = doc_ids(); + this.initializeIndex(); + } + + /** + * Initializes the Pinecone index by checking if it exists, and creating it if not. + * The index is set to use the cosine metric for vector similarity. + */ + private async initializeIndex() { + const indexList: IndexList = await this.pinecone.listIndexes(); + + // Check if the index already exists, otherwise create it. + if (!indexList.indexes?.some(index => index.name === this.indexName)) { + await this.pinecone.createIndex({ + name: this.indexName, + dimension: 1024, + metric: 'cosine', + spec: { + serverless: { + cloud: 'aws', + region: 'us-east-1', + }, + }, + }); + } + + // Set the index for future use. + this.index = this.pinecone.Index(this.indexName); + } + + /** + * Adds an AI document to the vectorstore. This method handles document chunking, uploading to the + * vectorstore, and updating the progress for long-running tasks like file uploads. + * @param doc The document to be added to the vectorstore. + * @param progressCallback Callback to update the progress of the upload. + */ + async addAIDoc(doc: Doc, progressCallback: (progress: number, step: string) => void) { + console.log('Adding AI Document:', doc); + const ai_document_status: string = StrCast(doc.ai_document_status); + + // Skip if the document is already in progress or completed. + if (ai_document_status !== undefined && ai_document_status.trim() !== '' && ai_document_status !== '{}') { + if (ai_document_status === 'IN PROGRESS') { + console.log('Already in progress.'); + return; + } + if (!this._doc_ids.includes(StrCast(doc.ai_doc_id))) { + this._doc_ids.push(StrCast(doc.ai_doc_id)); + } + } else { + // Start processing the document. + doc.ai_document_status = 'PROGRESS'; + console.log(doc); + + // Get the local file path (CSV or PDF). + const local_file_path: string = CsvCast(doc.data)?.url?.pathname ?? PDFCast(doc.data)?.url?.pathname; + console.log('Local File Path:', local_file_path); + + if (local_file_path) { + console.log('Creating AI Document...'); + // Start the document creation process by sending the file to the server. 
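+ // The server replies with a job id for the long-running extraction. The loop below polls
+ // /getResult/:jobId until the job reports status 'completed', and polls /getProgress/:jobId
+ // for { progress, step } updates, which are forwarded to progressCallback.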
+ const { jobId } = await Networking.PostToServer('/createDocument', { file_path: local_file_path }); + + // Poll the server for progress updates. + let inProgress: boolean = true; + let result: any = null; + while (inProgress) { + // Polling interval for status updates. + await new Promise(resolve => setTimeout(resolve, 2000)); + + // Check if the job is completed. + const resultResponse = await Networking.FetchFromServer(`/getResult/${jobId}`); + const resultResponseJson = JSON.parse(resultResponse); + if (resultResponseJson.status === 'completed') { + console.log('Result here:', resultResponseJson); + result = resultResponseJson; + break; + } + + // Fetch progress information and update the progress callback. + const progressResponse = await Networking.FetchFromServer(`/getProgress/${jobId}`); + const progressResponseJson = JSON.parse(progressResponse); + if (progressResponseJson) { + const progress = progressResponseJson.progress; + const step = progressResponseJson.step; + progressCallback(progress, step); + } + } + + // Once completed, process the document and add it to the vectorstore. + console.log('Document JSON:', result); + this.documents.push(result); + await this.indexDocument(result); + console.log(`Document added: ${result.file_name}`); + + // Update document metadata such as summary, purpose, and vectorstore ID. + doc.summary = result.summary; + doc.ai_doc_id = result.doc_id; + this._doc_ids.push(result.doc_id); + doc.ai_purpose = result.purpose; + + if (!doc.vectorstore_id) { + doc.vectorstore_id = JSON.stringify([this._id]); + } else { + doc.vectorstore_id = JSON.stringify(JSON.parse(StrCast(doc.vectorstore_id)).concat([this._id])); + } + + if (!doc.chunk_simpl) { + doc.chunk_simpl = JSON.stringify({ chunks: [] }); + } + + // Process each chunk of the document and update the document's chunk_simpl field. + result.chunks.forEach((chunk: RAGChunk) => { + const chunkToAdd = { + chunkId: chunk.id, + startPage: chunk.metadata.start_page, + endPage: chunk.metadata.end_page, + location: chunk.metadata.location, + chunkType: chunk.metadata.type as CHUNK_TYPE, + text: chunk.metadata.text, + }; + const new_chunk_simpl = JSON.parse(StrCast(doc.chunk_simpl)); + new_chunk_simpl.chunks = new_chunk_simpl.chunks.concat(chunkToAdd); + doc.chunk_simpl = JSON.stringify(new_chunk_simpl); + }); + + // Mark the document status as completed. + doc.ai_document_status = 'COMPLETED'; + } + } + } + + /** + * Indexes the processed document by uploading the document's vector chunks to the Pinecone index. + * @param document The processed document containing its chunks and metadata. + */ + private async indexDocument(document: any) { + console.log('Uploading vectors to content namespace...'); + + // Prepare Pinecone records for each chunk in the document. + const pineconeRecords: PineconeRecord[] = (document.chunks as RAGChunk[]).map(chunk => ({ + id: chunk.id, + values: chunk.values, + metadata: { ...chunk.metadata } as RecordMetadata, + })); + + // Upload the records to Pinecone. + await this.index.upsert(pineconeRecords); + } + + /** + * Retrieves the top K document chunks relevant to the user's query. + * This involves embedding the query using Cohere, then querying Pinecone for matching vectors. + * @param query The search query string. + * @param topK The number of top results to return (default is 10). + * @returns A list of document chunks that match the query. 
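+ * @example
+ * // Illustrative usage only; the query text below is invented for demonstration.
+ * const chunks = await vectorstore.retrieve('mammogram analysis accuracy', 5);
+ * const texts = chunks.map(chunk => chunk.metadata.text);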
+ */ + async retrieve(query: string, topK: number = 10): Promise { + console.log(`Retrieving chunks for query: ${query}`); + try { + // Generate an embedding for the query using Cohere. + const queryEmbeddingResponse: EmbedResponse = await this.cohere.embed({ + texts: [query], + model: 'embed-english-v3.0', + inputType: 'search_query', + }); + + let queryEmbedding: number[]; + + // Extract the embedding from the response. + if (Array.isArray(queryEmbeddingResponse.embeddings)) { + queryEmbedding = queryEmbeddingResponse.embeddings[0]; + } else if (queryEmbeddingResponse.embeddings && 'embeddings' in queryEmbeddingResponse.embeddings) { + queryEmbedding = (queryEmbeddingResponse.embeddings as { embeddings: number[][] }).embeddings[0]; + } else { + throw new Error('Invalid embedding response format'); + } + + if (!Array.isArray(queryEmbedding)) { + throw new Error('Query embedding is not an array'); + } + + // Query the Pinecone index using the embedding and filter by document IDs. + const queryResponse: QueryResponse = await this.index.query({ + vector: queryEmbedding, + filter: { + doc_id: { $in: this._doc_ids }, + }, + topK, + includeValues: true, + includeMetadata: true, + }); + + // Map the results into RAGChunks and return them. + return queryResponse.matches.map( + match => + ({ + id: match.id, + values: match.values as number[], + metadata: match.metadata as { + text: string; + type: string; + original_document: string; + file_path: string; + doc_id: string; + location: string; + start_page: number; + end_page: number; + }, + }) as RAGChunk + ); + } catch (error) { + console.error(`Error retrieving chunks: ${error}`); + return []; + } + } +} diff --git a/src/server/ApiManagers/AssistantManager.ts b/src/server/ApiManagers/AssistantManager.ts index 58d35ce64..88a81c5de 100644 --- a/src/server/ApiManagers/AssistantManager.ts +++ b/src/server/ApiManagers/AssistantManager.ts @@ -8,7 +8,7 @@ import { filesDirectory, publicDirectory } from '../SocketData'; import { Method } from '../RouteManager'; import ApiManager, { Registration } from './ApiManager'; import axios from 'axios'; -import { RAGChunk } from '../../client/views/nodes/ChatBox/types'; +import { RAGChunk } from '../../client/views/nodes/chatbot/types/types'; import { UnstructuredClient } from 'unstructured-client'; import { PartitionResponse } from 'unstructured-client/sdk/models/operations'; import { ChunkingStrategy, Strategy } from 'unstructured-client/sdk/models/shared'; -- cgit v1.2.3-70-g09d2