aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--SUMMARY.md45
-rw-r--r--src/client/documents/Documents.ts1
-rw-r--r--src/client/views/DictationButton.scss73
-rw-r--r--src/client/views/DictationButton.tsx25
-rw-r--r--src/client/views/nodes/PDFBox.scss23
-rw-r--r--src/client/views/nodes/PDFBox.tsx12
-rw-r--r--src/client/views/nodes/WebBox.scss24
-rw-r--r--src/client/views/nodes/WebBox.tsx92
-rw-r--r--src/client/views/nodes/WebBoxRenderer.js103
-rw-r--r--src/client/views/nodes/chatbot/agentsystem/Agent.ts382
-rw-r--r--src/client/views/nodes/chatbot/agentsystem/prompts.ts8
-rw-r--r--src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss1000
-rw-r--r--src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx925
-rw-r--r--src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx16
-rw-r--r--src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss69
-rw-r--r--src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx30
-rw-r--r--src/client/views/nodes/chatbot/guides/guide.md647
-rw-r--r--src/client/views/nodes/chatbot/tools/CodebaseSummarySearchTool.ts75
-rw-r--r--src/client/views/nodes/chatbot/tools/CreateAnyDocTool.ts158
-rw-r--r--src/client/views/nodes/chatbot/tools/CreateDocumentTool.ts497
-rw-r--r--src/client/views/nodes/chatbot/tools/CreateLinksTool.ts68
-rw-r--r--src/client/views/nodes/chatbot/tools/CreateNewTool.ts599
-rw-r--r--src/client/views/nodes/chatbot/tools/CreateTextDocumentTool.ts57
-rw-r--r--src/client/views/nodes/chatbot/tools/DocumentMetadataTool.ts853
-rw-r--r--src/client/views/nodes/chatbot/tools/FileContentTool.ts78
-rw-r--r--src/client/views/nodes/chatbot/tools/FileNamesTool.ts34
-rw-r--r--src/client/views/nodes/chatbot/tools/RAGTool.ts13
-rw-r--r--src/client/views/nodes/chatbot/tools/SearchTool.ts34
-rw-r--r--src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts126
-rw-r--r--src/client/views/nodes/chatbot/tools/WikipediaTool.ts2
-rw-r--r--src/client/views/nodes/chatbot/tools/dynamic/InspirationalQuotesTool.ts39
-rw-r--r--src/client/views/nodes/chatbot/types/tool_types.ts26
-rw-r--r--src/client/views/nodes/chatbot/types/types.ts5
-rw-r--r--src/client/views/nodes/chatbot/utils/AgentDocumentManager.ts1113
-rw-r--r--src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts662
-rw-r--r--src/client/views/pdf/PDFViewer.tsx572
-rw-r--r--src/server/ApiManagers/AssistantManager.ts508
-rw-r--r--src/server/api/dynamicTools.ts130
-rw-r--r--src/server/chunker/pdf_chunker.py187
-rw-r--r--src/server/index.ts1
-rw-r--r--src/server/server_Initialization.ts5
-rw-r--r--summarize_dash_ts.py248
-rw-r--r--test_dynamic_tools.js44
44 files changed, 8139 insertions, 1472 deletions
diff --git a/.gitignore b/.gitignore
index 7353bc7e0..8ffa03e80 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,4 +26,4 @@ packages/*/dist
/src/server/flashcard/venv
src/server/ApiManagers/temp_data.txt
/src/server/flashcard/venv
-/src/server/flashcard/venv
+/src/server/flashcard/venv \ No newline at end of file
diff --git a/SUMMARY.md b/SUMMARY.md
new file mode 100644
index 000000000..d8fece079
--- /dev/null
+++ b/SUMMARY.md
@@ -0,0 +1,45 @@
+# Simplified Chunks Implementation Summary
+
+## Problem
+
+- Inconsistency in creating and handling simplified chunks across different document types
+- Simplified chunks were being managed in Vectorstore.ts instead of AgentDocumentManager.ts
+- Different handling for different document types (PDFs, audio, video)
+- Some document types didn't have simplified chunks at all
+
+## Solution
+
+1. Created standardized methods in `AgentDocumentManager.ts` to handle simplified chunks consistently:
+
+ - `addSimplifiedChunks`: Adds simplified chunks to a document based on its type
+ - `getSimplifiedChunks`: Retrieves all simplified chunks from a document
+ - `getSimplifiedChunkById`: Gets a specific chunk by its ID
+ - `getOriginalSegments`: Retrieves original media segments for audio/video documents
+
+2. Updated `Vectorstore.ts` to use the new AgentDocumentManager methods:
+
+ - Replaced direct chunk_simpl handling for audio/video files
+ - Replaced separate chunk handling for PDF documents
+ - Added support for determining document type based on file extension
+
+3. Updated ChatBox components to use the new AgentDocumentManager methods:
+ - `handleCitationClick`: Now uses docManager.getSimplifiedChunkById
+ - `getDirectMatchingSegmentStart`: Now uses docManager.getOriginalSegments
+
+## Benefits
+
+1. Consistent simplified chunk creation across all document types
+2. Central management of chunks in AgentDocumentManager
+3. Better type safety and error handling
+4. Improved code maintainability
+5. Consistent approach to accessing chunks when citations are clicked
+
+## Document Types Supported
+
+- PDFs: startPage, endPage, location metadata
+- Audio: start_time, end_time, indexes metadata
+- Video: start_time, end_time, indexes metadata
+- CSV: rowStart, rowEnd, colStart, colEnd metadata
+- Default/Text: basic metadata only
+
+All document types now store consistent chunk IDs that match the ones used in the vector store.
diff --git a/src/client/documents/Documents.ts b/src/client/documents/Documents.ts
index 1ca298dd7..e1cd62602 100644
--- a/src/client/documents/Documents.ts
+++ b/src/client/documents/Documents.ts
@@ -274,6 +274,7 @@ export class DocumentOptions {
_layout_reflowHorizontal?: BOOLt = new BoolInfo('permit horizontal resizing with content reflow');
_layout_noSidebar?: BOOLt = new BoolInfo('whether to display the sidebar toggle button');
layout_boxShadow?: string; // box-shadow css string OR "standard" to use dash standard box shadow
+ _iframe_sandbox?: STRt = new StrInfo('sandbox attributes for iframes in web documents (e.g., allow-scripts, allow-same-origin)');
layout_maxShown?: NUMt = new NumInfo('maximum number of children to display at one time (see multicolumnview)');
_layout_columnWidth?: NUMt = new NumInfo('width of table column', false);
_layout_columnCount?: NUMt = new NumInfo('number of columns in a masonry view');
diff --git a/src/client/views/DictationButton.scss b/src/client/views/DictationButton.scss
new file mode 100644
index 000000000..ac8740c0f
--- /dev/null
+++ b/src/client/views/DictationButton.scss
@@ -0,0 +1,73 @@
+.dictation-button {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 48px;
+ height: 48px;
+ min-width: 48px;
+ border-radius: 50%;
+ border: none;
+ background-color: #487af0;
+ color: white;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ box-shadow: 0 2px 8px rgba(72, 122, 240, 0.3);
+ padding: 0;
+ margin-left: 5px;
+ position: relative;
+ overflow: hidden;
+
+ &::before {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: linear-gradient(135deg, transparent, rgba(255, 255, 255, 0.3));
+ opacity: 0;
+ transition: opacity 0.25s ease;
+ }
+
+ &:hover {
+ background-color: #3b6cd7; /* Slightly darker blue */
+ box-shadow: 0 3px 10px rgba(72, 122, 240, 0.4);
+
+ &::before {
+ opacity: 1;
+ }
+
+ svg {
+ transform: scale(1.1);
+ }
+ }
+
+ &:active {
+ background-color: #3463cc; /* Even darker for active state */
+ box-shadow: 0 2px 6px rgba(72, 122, 240, 0.3);
+ }
+
+ &.recording {
+ background-color: #ef4444;
+ color: white;
+ animation: pulse 1.5s infinite;
+ }
+
+ svg {
+ width: 22px;
+ height: 22px;
+ transition: transform 0.2s ease;
+ }
+}
+
+@keyframes pulse {
+ 0% {
+ box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.5);
+ }
+ 70% {
+ box-shadow: 0 0 0 10px rgba(239, 68, 68, 0);
+ }
+ 100% {
+ box-shadow: 0 0 0 0 rgba(239, 68, 68, 0);
+ }
+}
diff --git a/src/client/views/DictationButton.tsx b/src/client/views/DictationButton.tsx
index 0ce586df4..fc3165f67 100644
--- a/src/client/views/DictationButton.tsx
+++ b/src/client/views/DictationButton.tsx
@@ -1,8 +1,7 @@
-import { IconButton, Type } from '@dash/components';
-import { action, makeObservable, observable } from 'mobx';
+import { makeObservable, observable, action } from 'mobx';
import { observer } from 'mobx-react';
import * as React from 'react';
-import { BiMicrophone } from 'react-icons/bi';
+import './DictationButton.scss';
import { DictationManager } from '../util/DictationManager';
import { SnappingManager } from '../util/SnappingManager';
@@ -10,9 +9,11 @@ export interface DictationButtonProps {
setInput: (val: string) => void;
inputRef?: HTMLInputElement | null | undefined;
}
+
@observer
export class DictationButton extends React.Component<DictationButtonProps> {
@observable private _isRecording = false;
+
constructor(props: DictationButtonProps) {
super(props);
makeObservable(this);
@@ -25,11 +26,9 @@ export class DictationButton extends React.Component<DictationButtonProps> {
render() {
return (
- <IconButton
- type={Type.TERT}
- color={this._isRecording ? '#2bcaff' : SnappingManager.userVariantColor}
- tooltip="Record"
- icon={<BiMicrophone size="16px" />}
+ <button
+ className={`dictation-button ${this._isRecording ? 'recording' : ''}`}
+ title="Record"
onClick={action(() => {
if (!this._isRecording) {
this._isRecording = true;
@@ -50,8 +49,14 @@ export class DictationButton extends React.Component<DictationButtonProps> {
} else {
this.stopDictation();
}
- })}
- />
+ })}>
+ <svg xmlns="http://www.w3.org/2000/svg" width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
+ <path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"></path>
+ <path d="M19 10v2a7 7 0 0 1-14 0v-2"></path>
+ <line x1="12" y1="19" x2="12" y2="23"></line>
+ <line x1="8" y1="23" x2="16" y2="23"></line>
+ </svg>
+ </button>
);
}
}
diff --git a/src/client/views/nodes/PDFBox.scss b/src/client/views/nodes/PDFBox.scss
index eaea272dc..44013a96d 100644
--- a/src/client/views/nodes/PDFBox.scss
+++ b/src/client/views/nodes/PDFBox.scss
@@ -344,3 +344,26 @@
font-size: 30px;
}
}
+
+.pdfBox-fuzzy {
+ border: none;
+ background-color: #4a4a4a;
+ color: white;
+ padding: 0 8px;
+ height: 24px;
+ cursor: pointer;
+ margin-right: 4px;
+ border-radius: 3px;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+
+ &.active {
+ background-color: #3498db;
+ color: white;
+ }
+
+ &:hover {
+ background-color: #2980b9;
+ }
+}
diff --git a/src/client/views/nodes/PDFBox.tsx b/src/client/views/nodes/PDFBox.tsx
index 55e6d5596..4ecbd65b6 100644
--- a/src/client/views/nodes/PDFBox.tsx
+++ b/src/client/views/nodes/PDFBox.tsx
@@ -53,6 +53,7 @@ export class PDFBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
private _sidebarRef = React.createRef<SidebarAnnos>();
@observable private _searching: boolean = false;
+ @observable private _fuzzySearchEnabled: boolean = true;
@observable private _pdf: Opt<Pdfjs.PDFDocumentProxy> = undefined;
@observable private _pageControls = false;
@@ -272,6 +273,14 @@ export class PDFBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
!this.Document._layout_fitWidth && (this.Document._height = NumCast(this.Document._width) * (p.height / p.width));
};
+ @action
+ toggleFuzzySearch = () => {
+ this._fuzzySearchEnabled = !this._fuzzySearchEnabled;
+ this._pdfViewer?.toggleFuzzySearch();
+ // Clear existing search results when switching modes
+ this.search('', false, true);
+ };
+
override search = action((searchString: string, bwd?: boolean, clear: boolean = false) => {
if (!this._searching && !clear) {
this._searching = true;
@@ -412,6 +421,9 @@ export class PDFBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
<button type="button" className="pdfBox-search" title="Search" onClick={e => this.search(this._searchString, e.shiftKey)}>
<FontAwesomeIcon icon="search" size="sm" />
</button>
+ <button type="button" className={`pdfBox-fuzzy ${this._fuzzySearchEnabled ? 'active' : ''}`} title={`${this._fuzzySearchEnabled ? 'Disable' : 'Enable'} Fuzzy Search`} onClick={this.toggleFuzzySearch}>
+ <FontAwesomeIcon icon="magic" size="sm" />
+ </button>
<button type="button" className="pdfBox-prevIcon" title="Previous Annotation" onClick={this.prevAnnotation}>
<FontAwesomeIcon icon="arrow-up" size="lg" />
</button>
diff --git a/src/client/views/nodes/WebBox.scss b/src/client/views/nodes/WebBox.scss
index 05d5babf9..77d7716f4 100644
--- a/src/client/views/nodes/WebBox.scss
+++ b/src/client/views/nodes/WebBox.scss
@@ -256,13 +256,37 @@
width: 100%;
height: 100%;
position: absolute;
+ pointer-events: all;
.indicator {
position: absolute;
+ transition: background-color 0.2s ease;
+ border-radius: 2px;
&.active {
background-color: rgba(0, 0, 0, 0.1);
+ box-shadow: 0 0 2px rgba(0, 0, 0, 0.2);
}
}
}
+
+ // Add styles to hide font errors and improve user experience
+ .font-error-hidden {
+ font-family:
+ system-ui,
+ -apple-system,
+ BlinkMacSystemFont,
+ 'Segoe UI',
+ Roboto,
+ Arial,
+ sans-serif !important;
+ }
+
+ // Change iframe behavior when resource loading errors occur
+ iframe.webBox-iframe {
+ &.loading-error {
+ // Make full content accessible when external resources fail
+ pointer-events: all !important;
+ }
+ }
}
diff --git a/src/client/views/nodes/WebBox.tsx b/src/client/views/nodes/WebBox.tsx
index 838dbea9d..1e158f484 100644
--- a/src/client/views/nodes/WebBox.tsx
+++ b/src/client/views/nodes/WebBox.tsx
@@ -505,6 +505,98 @@ export class WebBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
this._scrollHeight = this._iframe?.contentDocument?.body?.scrollHeight ?? 0;
this.addWebStyleSheetRule(this.addWebStyleSheet(this._iframe?.contentDocument), '::selection', { color: 'white', background: 'orange' }, '');
+ // Add error handler to suppress font CORS errors
+ if (this._iframe?.contentWindow) {
+ try {
+ // Track if any resource errors occurred
+ let hasResourceErrors = false;
+
+ // Override the console.error to filter out font CORS errors
+ const win = this._iframe.contentWindow as Window & { console: Console };
+ const originalConsoleError = win.console.error;
+ win.console.error = (...args: unknown[]) => {
+ const errorMsg = args.map(arg => String(arg)).join(' ');
+ if (errorMsg.includes('Access to font') && errorMsg.includes('has been blocked by CORS policy')) {
+ // Mark that we have font errors
+ hasResourceErrors = true;
+ // Ignore font CORS errors
+ return;
+ }
+ // Also catch other resource loading errors
+ if (errorMsg.includes('ERR_FAILED') || errorMsg.includes('ERR_BLOCKED_BY_CLIENT')) {
+ hasResourceErrors = true;
+ }
+ originalConsoleError.apply(win.console, args);
+ };
+
+ // Listen for resource loading errors
+ this._iframe.contentWindow.addEventListener(
+ 'error',
+ (e: Event) => {
+ const target = e.target as HTMLElement;
+ if (target instanceof HTMLElement) {
+ // If it's a resource that failed to load
+ if (target.tagName === 'LINK' || target.tagName === 'IMG' || target.tagName === 'SCRIPT') {
+ hasResourceErrors = true;
+ // Apply error class after a short delay to allow initial content to load
+ setTimeout(() => {
+ if (this._iframe && hasResourceErrors) {
+ this._iframe.classList.add('loading-error');
+ }
+ }, 1000);
+ }
+ }
+ },
+ true
+ );
+
+ // Add fallback CSS for fonts that fail to load
+ const style = this._iframe.contentDocument?.createElement('style');
+ if (style) {
+ style.textContent = `
+ @font-face {
+ font-family: 'CORS-fallback-serif';
+ src: local('Times New Roman'), local('Georgia'), serif;
+ }
+ @font-face {
+ font-family: 'CORS-fallback-sans';
+ src: local('Arial'), local('Helvetica'), sans-serif;
+ }
+ /* Fallback for all fonts that fail to load */
+ @font-face {
+ font-display: swap !important;
+ }
+
+ /* Add a script to find and fix elements with failed fonts */
+ @font-face {
+ font-family: '__failed_font__';
+ src: local('Arial');
+ unicode-range: U+0000;
+ }
+ `;
+ this._iframe.contentDocument?.head.appendChild(style);
+
+ // Add a script to detect and fix font loading issues
+ const script = this._iframe.contentDocument?.createElement('script');
+ if (script) {
+ script.textContent = `
+ // Fix font loading issues with fallbacks
+ setTimeout(function() {
+ document.querySelectorAll('*').forEach(function(el) {
+ if (window.getComputedStyle(el).fontFamily.includes('__failed_font__')) {
+ el.classList.add('font-error-hidden');
+ }
+ });
+ }, 1000);
+ `;
+ this._iframe.contentDocument?.head.appendChild(script);
+ }
+ }
+ } catch (e) {
+ console.log('Error setting up font error handling:', e);
+ }
+ }
+
let href: Opt<string>;
try {
href = iframe?.contentWindow?.location.href;
diff --git a/src/client/views/nodes/WebBoxRenderer.js b/src/client/views/nodes/WebBoxRenderer.js
index ef465c453..31e0ef5e4 100644
--- a/src/client/views/nodes/WebBoxRenderer.js
+++ b/src/client/views/nodes/WebBoxRenderer.js
@@ -146,6 +146,29 @@ const ForeignHtmlRenderer = function (styleSheets) {
};
/**
+ * Extracts font-face URLs from CSS rules
+ * @param {String} cssRuleStr
+ * @returns {String[]}
+ */
+ const getFontFaceUrlsFromCss = function (cssRuleStr) {
+ const fontFaceUrls = [];
+ // Find @font-face blocks
+ const fontFaceBlocks = cssRuleStr.match(/@font-face\s*{[^}]*}/g) || [];
+
+ fontFaceBlocks.forEach(block => {
+ // Extract URLs from src properties
+ const urls = block.match(/src\s*:\s*[^;]*/g) || [];
+ urls.forEach(srcDeclaration => {
+ // Find all url() references in the src declaration
+ const fontUrls = getUrlsFromCssString(srcDeclaration);
+ fontFaceUrls.push(...fontUrls);
+ });
+ });
+
+ return fontFaceUrls;
+ };
+
+ /**
*
* @param {String} html
* @returns {String[]}
@@ -159,6 +182,61 @@ const ForeignHtmlRenderer = function (styleSheets) {
};
/**
+ * Create a fallback font-face rule for handling CORS errors
+ * @returns {String}
+ */
+ const createFallbackFontFaceRules = function () {
+ return `
+ @font-face {
+ font-family: 'CORS-fallback-serif';
+ src: local('Times New Roman'), local('Georgia'), serif;
+ }
+ @font-face {
+ font-family: 'CORS-fallback-sans';
+ src: local('Arial'), local('Helvetica'), sans-serif;
+ }
+ /* Add fallback font handling */
+ [data-font-error] {
+ font-family: 'CORS-fallback-sans', sans-serif !important;
+ }
+ [data-font-error="serif"] {
+ font-family: 'CORS-fallback-serif', serif !important;
+ }
+ `;
+ };
+
+ /**
+ * Clean up and optimize CSS for better rendering
+ * @param {String} cssStyles
+ * @returns {String}
+ */
+ const optimizeCssForRendering = function (cssStyles) {
+ // Add fallback font-face rules
+ const enhanced = cssStyles + createFallbackFontFaceRules();
+
+ // Replace problematic font-face declarations with proxied versions
+ let optimized = enhanced.replace(/(url\(['"]?)(https?:\/\/[^)'"]+)(['"]?\))/gi, (match, prefix, url, suffix) => {
+ // If it's a font file, proxy it
+ if (url.match(/\.(woff2?|ttf|eot|otf)(\?.*)?$/i)) {
+ return `${prefix}${CorsProxy(url)}${suffix}`;
+ }
+ return match;
+ });
+
+ // Add error handling for fonts
+ optimized += `
+ /* Suppress font CORS errors in console */
+ @supports (font-display: swap) {
+ @font-face {
+ font-display: swap !important;
+ }
+ }
+ `;
+
+ return optimized;
+ };
+
+ /**
*
* @param {String} contentHtml
* @param {Number} width
@@ -175,6 +253,7 @@ const ForeignHtmlRenderer = function (styleSheets) {
// copy styles
let cssStyles = '';
const urlsFoundInCss = [];
+ const fontUrlsInCss = [];
for (let i = 0; i < styleSheets.length; i += 1) {
try {
@@ -182,6 +261,7 @@ const ForeignHtmlRenderer = function (styleSheets) {
for (let j = 0; j < rules.length; j += 1) {
const cssRuleStr = rules[j].cssText;
urlsFoundInCss.push(...getUrlsFromCssString(cssRuleStr));
+ fontUrlsInCss.push(...getFontFaceUrlsFromCss(cssRuleStr));
cssStyles += cssRuleStr;
}
} catch (e) {
@@ -189,6 +269,9 @@ const ForeignHtmlRenderer = function (styleSheets) {
}
}
+ // Optimize and enhance CSS
+ cssStyles = optimizeCssForRendering(cssStyles);
+
// const fetchedResourcesFromStylesheets = await getMultipleResourcesAsBase64(webUrl, urlsFoundInCss);
// for (let i = 0; i < fetchedResourcesFromStylesheets.length; i++) {
// const r = fetchedResourcesFromStylesheets[i];
@@ -203,6 +286,26 @@ const ForeignHtmlRenderer = function (styleSheets) {
.replace(/<div class="mediaset"><\/div>/g, '') // when scripting isn't available (ie, rendering web pages here), <noscript> tags should become <div>'s. But for Brown CS, there's a layout problem if you leave the empty <mediaset> tag
.replace(/<link[^>]*>/g, '') // don't need to keep any linked style sheets because we've already processed all style sheets above
.replace(/srcset="([^ "]*)[^"]*"/g, 'src="$1"'); // instead of converting each item in the srcset to a data url, just convert the first one and use that
+
+ // Add script to handle font loading errors
+ contentHtml += `
+ <script>
+ // Handle font loading errors with fallbacks
+ document.addEventListener('DOMContentLoaded', function() {
+ // Mark elements with font issues
+ document.querySelectorAll('*').forEach(function(el) {
+ const style = window.getComputedStyle(el);
+ const fontFamily = style.getPropertyValue('font-family');
+ if (fontFamily && !fontFamily.includes('serif') && !fontFamily.includes('sans')) {
+ el.setAttribute('data-font-error', 'sans');
+ } else if (fontFamily && fontFamily.includes('serif')) {
+ el.setAttribute('data-font-error', 'serif');
+ }
+ });
+ });
+ </script>
+ `;
+
const urlsFoundInHtml = getImageUrlsFromFromHtml(contentHtml).filter(url => !url.startsWith('data:'));
return getMultipleResourcesAsBase64(webUrl, urlsFoundInHtml).then(fetchedResources => {
for (let i = 0; i < fetchedResources.length; i += 1) {
diff --git a/src/client/views/nodes/chatbot/agentsystem/Agent.ts b/src/client/views/nodes/chatbot/agentsystem/Agent.ts
index 8075cab5f..361c5eb2b 100644
--- a/src/client/views/nodes/chatbot/agentsystem/Agent.ts
+++ b/src/client/views/nodes/chatbot/agentsystem/Agent.ts
@@ -7,8 +7,8 @@ import { AnswerParser } from '../response_parsers/AnswerParser';
import { StreamedAnswerParser } from '../response_parsers/StreamedAnswerParser';
import { BaseTool } from '../tools/BaseTool';
import { CalculateTool } from '../tools/CalculateTool';
-import { CreateDocTool } from '../tools/CreateDocumentTool';
import { DataAnalysisTool } from '../tools/DataAnalysisTool';
+import { DocumentMetadataTool } from '../tools/DocumentMetadataTool';
import { NoTool } from '../tools/NoTool';
import { SearchTool } from '../tools/SearchTool';
import { Parameter, ParametersType, TypeMap } from '../types/tool_types';
@@ -17,11 +17,17 @@ import { Vectorstore } from '../vectorstore/Vectorstore';
import { getReactPrompt } from './prompts';
import { ChatCompletionMessageParam } from 'openai/resources';
import { Doc } from '../../../../../fields/Doc';
-import { parsedDoc } from '../chatboxcomponents/ChatBox';
+import { ChatBox, parsedDoc } from '../chatboxcomponents/ChatBox';
import { WebsiteInfoScraperTool } from '../tools/WebsiteInfoScraperTool';
import { Upload } from '../../../../../server/SharedMediaTypes';
import { RAGTool } from '../tools/RAGTool';
-import { GPTTutorialTool } from '../tools/TutorialTool';
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
+import { CreateLinksTool } from '../tools/CreateLinksTool';
+import { CodebaseSummarySearchTool } from '../tools/CodebaseSummarySearchTool';
+import { FileContentTool } from '../tools/FileContentTool';
+import { FileNamesTool } from '../tools/FileNamesTool';
+import { CreateNewTool } from '../tools/CreateNewTool';
+//import { CreateTextDocTool } from '../tools/CreateTextDocumentTool';
dotenv.config();
@@ -36,7 +42,6 @@ export class Agent {
private interMessages: AgentMessage[] = [];
private vectorstore: Vectorstore;
private _history: () => string;
- private _summaries: () => string;
private _csvData: () => { filename: string; id: string; text: string }[];
private actionNumber: number = 0;
private thoughtNumber: number = 0;
@@ -44,47 +49,203 @@ export class Agent {
private processingInfo: ProcessingInfo[] = [];
private streamedAnswerParser: StreamedAnswerParser = new StreamedAnswerParser();
private tools: Record<string, BaseTool<ReadonlyArray<Parameter>>>;
- private Document: Doc;
+ private _docManager: AgentDocumentManager;
+ // Dynamic tool registry for tools created at runtime
+ private dynamicToolRegistry: Map<string, BaseTool<ReadonlyArray<Parameter>>> = new Map();
+ // Callback for notifying when tools are created and need reload
+ private onToolCreatedCallback?: (toolName: string) => void;
+ // Storage for deferred tool saving
+ private pendingToolSave?: { toolName: string; completeToolCode: string };
/**
* The constructor initializes the agent with the vector store and toolset, and sets up the OpenAI client.
* @param _vectorstore Vector store instance for document storage and retrieval.
- * @param summaries A function to retrieve document summaries.
+ * @param summaries A function to retrieve document summaries (deprecated, now using docManager directly).
* @param history A function to retrieve chat history.
* @param csvData A function to retrieve CSV data linked to the assistant.
- * @param addLinkedUrlDoc A function to add a linked document from a URL.
+ * @param getLinkedUrlDocId A function to get document IDs from URLs.
+ * @param createImage A function to create images in the dashboard.
* @param createCSVInDash A function to create a CSV document in the dashboard.
+ * @param docManager The document manager instance.
*/
constructor(
_vectorstore: Vectorstore,
- summaries: () => string,
history: () => string,
csvData: () => { filename: string; id: string; text: string }[],
- addLinkedUrlDoc: (url: string, id: string) => void,
createImage: (result: Upload.FileInformation & Upload.InspectionResults, options: DocumentOptions) => void,
- addLinkedDoc: (doc: parsedDoc) => Doc | undefined,
createCSVInDash: (url: string, title: string, id: string, data: string) => void,
- document: Doc
+ docManager: AgentDocumentManager
) {
// Initialize OpenAI client with API key from environment
this.client = new OpenAI({ apiKey: process.env.OPENAI_KEY, dangerouslyAllowBrowser: true });
this.vectorstore = _vectorstore;
this._history = history;
- this._summaries = summaries;
this._csvData = csvData;
- this.Document = document;
+ this._docManager = docManager;
+
+ // Initialize dynamic tool registry
+ this.dynamicToolRegistry = new Map();
// Define available tools for the assistant
this.tools = {
calculate: new CalculateTool(),
rag: new RAGTool(this.vectorstore),
dataAnalysis: new DataAnalysisTool(csvData),
- websiteInfoScraper: new WebsiteInfoScraperTool(addLinkedUrlDoc),
- searchTool: new SearchTool(addLinkedUrlDoc),
+ websiteInfoScraper: new WebsiteInfoScraperTool(this._docManager),
+ searchTool: new SearchTool(this._docManager),
noTool: new NoTool(),
- createDoc: new CreateDocTool(addLinkedDoc),
- generateTutorialNode: new GPTTutorialTool(addLinkedDoc),
+ //imageCreationTool: new ImageCreationTool(createImage),
+ documentMetadata: new DocumentMetadataTool(this._docManager),
+ createLinks: new CreateLinksTool(this._docManager),
+ codebaseSummarySearch: new CodebaseSummarySearchTool(this.vectorstore),
+ fileContent: new FileContentTool(this.vectorstore),
+ fileNames: new FileNamesTool(this.vectorstore),
};
+
+ // Add the createNewTool after other tools are defined
+ this.tools.createNewTool = new CreateNewTool(this.dynamicToolRegistry, this.tools, this);
+
+ // Load existing dynamic tools
+ this.loadExistingDynamicTools();
+ }
+
+ /**
+ * Loads every dynamic tool that the server reports via /getDynamicTools.
+ * • Uses dynamic `import()` so webpack/vite will code-split each tool automatically.
+ * • Registers the tool in `dynamicToolRegistry` under the name it advertises via
+ * `toolInfo.name`; also registers the legacy camel-case key if different.
+ */
+ private async loadExistingDynamicTools(): Promise<void> {
+ try {
+ console.log('Loading dynamic tools from server…');
+ const toolFiles = await this.fetchDynamicToolList();
+
+ let loaded = 0;
+ for (const { name: className, path } of toolFiles) {
+ // Legacy key (e.g., CharacterCountTool → characterCountTool)
+ const legacyKey = className.replace(/^[A-Z]/, m => m.toLowerCase());
+
+ // Skip if we already have the legacy key
+ if (this.dynamicToolRegistry.has(legacyKey)) continue;
+
+ try {
+ // @vite-ignore keeps Vite/Webpack from trying to statically analyse the variable part
+ const ToolClass = require(`../tools/${path}`)[className];
+
+ if (!ToolClass || !(ToolClass.prototype instanceof BaseTool)) {
+ console.warn(`File ${path} does not export a valid BaseTool subclass`);
+ continue;
+ }
+
+ const instance: BaseTool<ReadonlyArray<Parameter>> = new ToolClass();
+
+ // Prefer the tool’s self-declared name (matches <action> tag)
+ const key = (instance.name || '').trim() || legacyKey;
+
+ // Check for duplicates
+ if (this.dynamicToolRegistry.has(key)) {
+ console.warn(`Dynamic tool key '${key}' already registered – keeping existing instance`);
+ continue;
+ }
+
+ // ✅ register under the preferred key
+ this.dynamicToolRegistry.set(key, instance);
+
+ // optional: also register the legacy key for safety
+ if (key !== legacyKey && !this.dynamicToolRegistry.has(legacyKey)) {
+ this.dynamicToolRegistry.set(legacyKey, instance);
+ }
+
+ loaded++;
+ console.info(`✓ Loaded dynamic tool '${key}' from '${path}'`);
+ } catch (err) {
+ console.error(`✗ Failed to load '${path}':`, err);
+ }
+ }
+
+ console.log(`Dynamic-tool load complete – ${loaded}/${toolFiles.length} added`);
+ } catch (err) {
+ console.error('Dynamic-tool bootstrap failed:', err);
+ }
+ }
+
+ /**
+ * Manually registers a dynamic tool instance (called by CreateNewTool)
+ */
+ public registerDynamicTool(toolName: string, toolInstance: BaseTool<ReadonlyArray<Parameter>>): void {
+ this.dynamicToolRegistry.set(toolName, toolInstance);
+ console.log(`Manually registered dynamic tool: ${toolName}`);
+ }
+
+ /**
+ * Notifies that a tool has been created and saved to disk (called by CreateNewTool)
+ */
+ public notifyToolCreated(toolName: string, completeToolCode: string): void {
+ // Store the tool data for deferred saving
+ this.pendingToolSave = { toolName, completeToolCode };
+
+ if (this.onToolCreatedCallback) {
+ this.onToolCreatedCallback(toolName);
+ }
+ }
+
+ /**
+ * Performs the deferred tool save operation (called after user confirmation)
+ */
+ public async performDeferredToolSave(): Promise<boolean> {
+ if (!this.pendingToolSave) {
+ console.warn('No pending tool save operation');
+ return false;
+ }
+
+ const { toolName, completeToolCode } = this.pendingToolSave;
+
+ try {
+ // Get the CreateNewTool instance to perform the save
+ const createNewTool = this.tools.createNewTool as any;
+ if (createNewTool && typeof createNewTool.saveToolToServerDeferred === 'function') {
+ const success = await createNewTool.saveToolToServerDeferred(toolName, completeToolCode);
+
+ if (success) {
+ console.log(`Tool ${toolName} saved to server successfully via deferred save.`);
+ // Clear the pending save
+ this.pendingToolSave = undefined;
+ return true;
+ } else {
+ console.warn(`Tool ${toolName} could not be saved to server via deferred save.`);
+ return false;
+ }
+ } else {
+ console.error('CreateNewTool instance not available for deferred save');
+ return false;
+ }
+ } catch (error) {
+ console.error(`Error performing deferred tool save for ${toolName}:`, error);
+ return false;
+ }
+ }
+
+ /**
+ * Sets the callback for when tools are created
+ */
+ public setToolCreatedCallback(callback: (toolName: string) => void): void {
+ this.onToolCreatedCallback = callback;
+ }
+
+ /**
+ * Public method to reload dynamic tools (called when new tools are created)
+ */
+ public reloadDynamicTools(): void {
+ console.log('Reloading dynamic tools...');
+ this.loadExistingDynamicTools();
+ }
+
+ private async fetchDynamicToolList(): Promise<{ name: string; path: string }[]> {
+ const res = await fetch('/getDynamicTools');
+ if (!res.ok) throw new Error(`Failed to fetch dynamic tool list – ${res.statusText}`);
+ const json = await res.json();
+ console.log('Dynamic tools fetched:', json.tools);
+ return json.tools ?? [];
}
/**
@@ -96,13 +257,20 @@ export class Agent {
* @param maxTurns The maximum number of turns to allow in the conversation.
* @returns The final response from the assistant.
*/
- async askAgent(question: string, onProcessingUpdate: (processingUpdate: ProcessingInfo[]) => void, onAnswerUpdate: (answerUpdate: string) => void, maxTurns: number = 30): Promise<AssistantMessage> {
+ async askAgent(question: string, onProcessingUpdate: (processingUpdate: ProcessingInfo[]) => void, onAnswerUpdate: (answerUpdate: string) => void, maxTurns: number = 50): Promise<AssistantMessage> {
console.log(`Starting query: ${question}`);
const MAX_QUERY_LENGTH = 1000; // adjust the limit as needed
// Check if the question exceeds the maximum length
if (question.length > MAX_QUERY_LENGTH) {
- return { role: ASSISTANT_ROLE.ASSISTANT, content: [{ text: 'User query too long. Please shorten your question and try again.', index: 0, type: TEXT_TYPE.NORMAL, citation_ids: null }], processing_info: [] };
+ const errorText = `Your query is too long (${question.length} characters). Please shorten it to ${MAX_QUERY_LENGTH} characters or less and try again.`;
+ console.warn(errorText); // Log the specific reason
+ return {
+ role: ASSISTANT_ROLE.ASSISTANT,
+ // Use ERROR type for clarity in the UI if handled differently
+ content: [{ text: errorText, index: 0, type: TEXT_TYPE.ERROR, citation_ids: null }],
+ processing_info: [],
+ };
}
const sanitizedQuestion = escape(question); // Sanitized user input
@@ -110,20 +278,8 @@ export class Agent {
// Push sanitized user's question to message history
this.messages.push({ role: 'user', content: sanitizedQuestion });
- // Retrieve chat history and generate system prompt
- const chatHistory = this._history();
- let systemPrompt = getReactPrompt(Object.values(this.tools), this._summaries, chatHistory);
-
- // If this is a Dash documentation assistant chat, modify the system prompt
- if (this.Document?.is_dash_doc_assistant) {
- systemPrompt = systemPrompt.replace(
- '<task>',
- `<task>
- IMPORTANT: You are specifically focused on helping users with questions about Dash documentation and usage. When users ask questions, interpret them in the context of Dash documentation and features, even if they don't explicitly mention Dash. For example, if a user asks "How do I create a document?", interpret this as "How do I create a document in Dash?" and provide relevant Dash-specific guidance.
-
- For any questions about Dash features, functionality, or usage, you should use the generateTutorialNode tool to create a tutorial document that explains the concept in detail. This tool will help create well-formatted, interactive tutorials that guide users through Dash features.`
- );
- }
+ // Get system prompt with all tools (static + dynamic)
+ const systemPrompt = this.getSystemPromptWithAllTools();
// Initialize intermediate messages
this.interMessages = [{ role: 'system', content: systemPrompt }];
@@ -186,22 +342,25 @@ export class Agent {
currentAction = stage[key] as string;
console.log(`Action: ${currentAction}`);
- if (this.tools[currentAction]) {
+ // Check both static tools and dynamic registry
+ const tool = this.tools[currentAction] || this.dynamicToolRegistry.get(currentAction);
+
+ if (tool) {
// Prepare the next action based on the current tool
const nextPrompt = [
{
type: 'text',
- text: `<stage number="${i + 1}" role="user">` + builder.build({ action_rules: this.tools[currentAction].getActionRule() }) + `</stage>`,
+ text: `<stage number="${i + 1}" role="user">` + builder.build({ action_rules: tool.getActionRule() }) + `</stage>`,
} as Observation,
];
this.interMessages.push({ role: 'user', content: nextPrompt });
break;
} else {
// Handle error in case of an invalid action
- console.log('Error: No valid action');
+ console.log(`Error: Action "${currentAction}" is not a valid tool`);
this.interMessages.push({
role: 'user',
- content: `<stage number="${i + 1}" role="system-error-reporter">No valid action, try again.</stage>`,
+ content: `<stage number="${i + 1}" role="system-error-reporter">Action "${currentAction}" is not a valid tool, try again.</stage>`,
});
break;
}
@@ -220,9 +379,25 @@ export class Agent {
console.log(observation);
this.interMessages.push({ role: 'user', content: nextPrompt });
this.processingNumber++;
+ console.log(`Tool ${currentAction} executed successfully. Observations:`, observation);
+
break;
} catch (error) {
- throw new Error(`Error processing action: ${error}`);
+ console.error(`Error during execution of tool '${currentAction}':`, error);
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ // Return an error observation formatted for the LLM loop
+ return {
+ role: ASSISTANT_ROLE.USER,
+ content: [
+ {
+ type: TEXT_TYPE.ERROR,
+ text: `<observation><error tool="${currentAction}">Execution failed: ${escape(errorMessage)}</error></observation>`,
+ index: 0,
+ citation_ids: null,
+ },
+ ],
+ processing_info: [],
+ };
}
} else {
throw new Error('Error: Action input without a valid action');
@@ -353,8 +528,8 @@ export class Agent {
throw new Error('Action must be a non-empty string');
}
- // Optional: Check if the action is among allowed actions
- const allowedActions = Object.keys(this.tools);
+ // Optional: Check if the action is among allowed actions (including dynamic tools)
+ const allowedActions = [...Object.keys(this.tools), ...Array.from(this.dynamicToolRegistry.keys())];
if (!allowedActions.includes(stage.action)) {
throw new Error(`Action "${stage.action}" is not a valid tool`);
}
@@ -459,15 +634,90 @@ export class Agent {
* @throws An error if the action is unknown, if required parameters are missing, or if input types don't match the expected parameter types.
*/
private async processAction(action: string, actionInput: ParametersType<ReadonlyArray<Parameter>>): Promise<Observation[]> {
- // Check if the action exists in the tools list
- if (!(action in this.tools)) {
+ // Check if the action exists in the tools list or dynamic registry
+ if (!(action in this.tools) && !this.dynamicToolRegistry.has(action)) {
throw new Error(`Unknown action: ${action}`);
}
console.log(actionInput);
- for (const param of this.tools[action].parameterRules) {
+ // Determine which tool to use - either from static tools or dynamic registry
+ const tool = this.tools[action] || this.dynamicToolRegistry.get(action);
+
+ // Special handling for documentMetadata tool with numeric or boolean fieldValue
+ if (action === 'documentMetadata') {
+ // Handle single field edit
+ if ('fieldValue' in actionInput) {
+ if (typeof actionInput.fieldValue === 'number' || typeof actionInput.fieldValue === 'boolean') {
+ // Convert number or boolean to string to pass validation
+ actionInput.fieldValue = String(actionInput.fieldValue);
+ }
+ }
+
+ // Handle fieldEdits parameter (for multiple field edits)
+ if ('fieldEdits' in actionInput && actionInput.fieldEdits) {
+ try {
+ // If it's already an array, stringify it to ensure it passes validation
+ if (Array.isArray(actionInput.fieldEdits)) {
+ actionInput.fieldEdits = JSON.stringify(actionInput.fieldEdits);
+ }
+ // If it's an object but not an array, it might be a single edit - convert to array and stringify
+ else if (typeof actionInput.fieldEdits === 'object') {
+ actionInput.fieldEdits = JSON.stringify([actionInput.fieldEdits]);
+ }
+ // Otherwise, ensure it's a string for the validator
+ else if (typeof actionInput.fieldEdits !== 'string') {
+ actionInput.fieldEdits = String(actionInput.fieldEdits);
+ }
+ } catch (error) {
+ console.error('Error processing fieldEdits:', error);
+ // Don't fail validation here, let the tool handle it
+ }
+ }
+ }
+
+ // Special handling for createNewTool with parsed XML toolCode
+ if (action === 'createNewTool') {
+ if ('toolCode' in actionInput && typeof actionInput.toolCode === 'object' && actionInput.toolCode !== null) {
+ try {
+ // Convert the parsed XML object back to a string
+ const extractText = (obj: any): string => {
+ if (typeof obj === 'string') {
+ return obj;
+ } else if (obj && typeof obj === 'object') {
+ if (obj._text) {
+ return obj._text;
+ }
+ // Recursively extract text from all properties
+ let text = '';
+ for (const key in obj) {
+ if (key !== '_text') {
+ const value = obj[key];
+ if (typeof value === 'string') {
+ text += value + '\n';
+ } else if (value && typeof value === 'object') {
+ text += extractText(value) + '\n';
+ }
+ }
+ }
+ return text;
+ }
+ return '';
+ };
+
+ const reconstructedCode = extractText(actionInput.toolCode);
+ actionInput.toolCode = reconstructedCode;
+ } catch (error) {
+ console.error('Error processing toolCode:', error);
+ // Convert to string as fallback
+ actionInput.toolCode = String(actionInput.toolCode);
+ }
+ }
+ }
+
+ // Check parameter requirements and types for the tool
+ for (const param of tool.parameterRules) {
// Check if the parameter is required and missing in the input
- if (param.required && !(param.name in actionInput) && !this.tools[action].inputValidator(actionInput)) {
+ if (param.required && !(param.name in actionInput) && !tool.inputValidator(actionInput)) {
throw new Error(`Missing required parameter: ${param.name}`);
}
@@ -485,8 +735,48 @@ export class Agent {
}
}
- const tool = this.tools[action];
-
+ // Execute the tool with the validated inputs
return await tool.execute(actionInput);
}
+
+ /**
+ * Gets a combined list of all tools, both static and dynamic
+ * @returns An array of all available tool instances
+ */
+ private getAllTools(): BaseTool<ReadonlyArray<Parameter>>[] {
+ // Combine static and dynamic tools
+ return [...Object.values(this.tools), ...Array.from(this.dynamicToolRegistry.values())];
+ }
+
+ /**
+ * Overridden method to get the React prompt with all tools (static + dynamic)
+ */
+ private getSystemPromptWithAllTools(): string {
+ const allTools = this.getAllTools();
+ const docSummaries = () => JSON.stringify(this._docManager.listDocs);
+ const chatHistory = this._history();
+
+ return getReactPrompt(allTools, docSummaries, chatHistory);
+ }
+
+ /**
+ * Reinitializes the DocumentMetadataTool with a direct reference to the ChatBox instance.
+ * This ensures that the tool can properly access the ChatBox document and find related documents.
+ *
+ * @param chatBox The ChatBox instance to pass to the DocumentMetadataTool
+ */
+ public reinitializeDocumentMetadataTool(): void {
+ if (this.tools && this.tools.documentMetadata) {
+ this.tools.documentMetadata = new DocumentMetadataTool(this._docManager);
+ console.log('Agent: Reinitialized DocumentMetadataTool with ChatBox instance');
+ } else {
+ console.warn('Agent: Could not reinitialize DocumentMetadataTool - tool not found');
+ }
+ }
+}
+
+// Forward declaration to avoid circular import
+interface AgentLike {
+ registerDynamicTool(toolName: string, toolInstance: BaseTool<ReadonlyArray<Parameter>>): void;
+ notifyToolCreated(toolName: string, completeToolCode: string): void;
}
diff --git a/src/client/views/nodes/chatbot/agentsystem/prompts.ts b/src/client/views/nodes/chatbot/agentsystem/prompts.ts
index dda6d44ef..fcb4ab450 100644
--- a/src/client/views/nodes/chatbot/agentsystem/prompts.ts
+++ b/src/client/views/nodes/chatbot/agentsystem/prompts.ts
@@ -103,9 +103,9 @@ export function getReactPrompt(tools: BaseTool<ReadonlyArray<Parameter>>[], summ
<note>If no external tool is required, use 'no_tool', but if there might be relevant external information, use the appropriate tool.</note>
</tools>
- <summaries>
+ <available_documents>
${summaries()}
- </summaries>
+ </available_documents>
<chat_history>
${chatHistory}
@@ -210,7 +210,7 @@ export function getReactPrompt(tools: BaseTool<ReadonlyArray<Parameter>>[], summ
<answer>
<grounded_text citation_index="1">**The 2022 World Cup** saw Argentina crowned champions, with **Lionel Messi** leading his team to victory, marking a historic moment in sports.</grounded_text>
<grounded_text citation_index="2">**Qatar** experienced a **40% increase in tourism** during the World Cup, welcoming over **1.5 million visitors**, significantly boosting its economy.</grounded_text>
- <normal_text>Moments like **Messi’s triumph** often become ingrained in the legacy of World Cups, immortalizing these tournaments in both sports and cultural memory. The **long-term implications** of the World Cup on Qatar's **economy, tourism**, and **global image** remain important areas of interest as the country continues to build on the momentum generated by hosting this prestigious event.</normal_text>
+ <normal_text>Moments like **Messi's triumph** often become ingrained in the legacy of World Cups, immortalizing these tournaments in both sports and cultural memory. The **long-term implications** of the World Cup on Qatar's **economy, tourism**, and **global image** remain important areas of interest as the country continues to build on the momentum generated by hosting this prestigious event.</normal_text>
<citations>
<citation index="1" chunk_id="1234" type="text">Key moments from the 2022 World Cup.</citation>
<citation index="2" chunk_id="5678" type="url"></citation>
@@ -218,7 +218,7 @@ export function getReactPrompt(tools: BaseTool<ReadonlyArray<Parameter>>[], summ
<follow_up_questions>
<question>What long-term effects has the World Cup had on Qatar's economy and infrastructure?</question>
<question>Can you compare Qatar's tourism numbers with previous World Cup hosts?</question>
- <question>How has Qatar’s image on the global stage evolved post-World Cup?</question>
+ <question>How has Qatar's image on the global stage evolved post-World Cup?</question>
</follow_up_questions>
<loop_summary>
The assistant first used the RAG tool to extract key moments from the user documents about the 2022 World Cup. Then, the assistant utilized the website scraping tool to gather data on Qatar's tourism impact. Both tools provided valuable information, and no additional tools were needed.
diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss
index 3d27fa887..8e00cbdb7 100644
--- a/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss
+++ b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.scss
@@ -1,240 +1,653 @@
@use 'sass:color';
-@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&display=swap');
-
-$primary-color: #3f51b5;
-$secondary-color: #f0f0f0;
-$text-color: #2e2e2e;
-$light-text-color: #6d6d6d;
-$border-color: #dcdcdc;
-$shadow-color: rgba(0, 0, 0, 0.1);
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
+
+// Dash color palette - updated to use Dash's blue colors
+$primary-color: #487af0; // Dash blue
+$primary-light: #e6f0fc;
+$secondary-color: #f7f7f9;
+$accent-color: #b5d9f3; // Light blue accent
+$bg-color: #ffffff;
+$text-color: #111827;
+$light-text-color: #6b7280;
+$border-color: #e5e7eb;
+$shadow-color: rgba(0, 0, 0, 0.06);
$transition: all 0.2s ease-in-out;
+// Font size variables
+$font-size-small: 13px;
+$font-size-normal: 14px;
+$font-size-large: 16px;
+$font-size-xlarge: 18px;
+
.chat-box {
display: flex;
flex-direction: column;
height: 100%;
- background-color: #fff;
+ width: 100%;
+ background-color: $bg-color;
font-family: 'Inter', sans-serif;
- border-radius: 8px;
+ border-radius: 12px;
overflow: hidden;
- box-shadow: 0 2px 8px $shadow-color;
+ box-shadow: 0 4px 20px $shadow-color;
position: relative;
+ transition:
+ box-shadow 0.3s cubic-bezier(0.25, 0.46, 0.45, 0.94),
+ transform 0.3s cubic-bezier(0.25, 0.46, 0.45, 0.94);
+
+ &:hover {
+ box-shadow: 0 8px 30px rgba($primary-color, 0.1);
+ }
.chat-header {
- background-color: $primary-color;
- color: #fff;
- padding: 16px;
- text-align: center;
- box-shadow: 0 1px 4px $shadow-color;
+ background: $primary-color;
+ color: white;
+ padding: 14px 20px;
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
+ z-index: 10;
+ position: relative;
h2 {
margin: 0;
- font-size: 1.5em;
- font-weight: 500;
+ font-size: 1.25rem;
+ font-weight: 600;
+ letter-spacing: 0.01em;
+ flex: 1;
+ text-align: center;
+ }
+
+ .font-size-control {
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ background-color: rgba(255, 255, 255, 0.15);
+ color: white;
+ border-radius: 6px;
+ padding: 6px;
+ cursor: pointer;
+ transition: background-color 0.2s ease;
+
+ &:hover {
+ background-color: rgba(255, 255, 255, 0.25);
+ }
+
+ svg {
+ width: 20px;
+ height: 20px;
+ }
+ }
+
+ .font-size-modal {
+ position: absolute;
+ top: 100%;
+ right: 10px;
+ background-color: white;
+ border-radius: 8px;
+ box-shadow: 0 4px 20px rgba(0, 0, 0, 0.15);
+ padding: 12px;
+ width: 180px;
+ z-index: 100;
+ transform-origin: top right;
+ animation: scaleIn 0.2s forwards;
+
+ .font-size-option {
+ display: flex;
+ align-items: center;
+ padding: 8px 12px;
+ border-radius: 6px;
+ cursor: pointer;
+ transition: background-color 0.2s ease;
+ color: $text-color;
+ margin-bottom: 4px;
+
+ &:last-child {
+ margin-bottom: 0;
+ }
+
+ &:hover {
+ background-color: $primary-light;
+ }
+
+ &.active {
+ background-color: $primary-light;
+ color: $primary-color;
+ font-weight: 500;
+ }
+
+ .option-label {
+ flex: 1;
+ }
+
+ .size-preview {
+ font-size: 10px;
+ opacity: 0.7;
+
+ &.small {
+ font-size: 11px;
+ }
+ &.normal {
+ font-size: 14px;
+ }
+ &.large {
+ font-size: 16px;
+ }
+ &.xlarge {
+ font-size: 18px;
+ }
+ }
+ }
}
}
.chat-messages {
flex-grow: 1;
overflow-y: auto;
- padding: 16px;
+ padding: 20px;
display: flex;
flex-direction: column;
- gap: 12px;
+ gap: 16px;
+ background-color: #f9fafb;
+ background-image: radial-gradient(#e5e7eb 1px, transparent 1px), radial-gradient(#e5e7eb 1px, transparent 1px);
+ background-size: 40px 40px;
+ background-position:
+ 0 0,
+ 20px 20px;
+ background-attachment: local;
+ scroll-behavior: smooth;
&::-webkit-scrollbar {
- width: 8px;
+ width: 6px;
+ }
+
+ &::-webkit-scrollbar-track {
+ background: transparent;
}
&::-webkit-scrollbar-thumb {
- background-color: rgba(0, 0, 0, 0.1);
- border-radius: 4px;
+ background-color: rgba($primary-color, 0.2);
+ border-radius: 10px;
+
+ &:hover {
+ background-color: rgba($primary-color, 0.3);
+ }
}
}
.chat-input {
display: flex;
- padding: 12px;
+ padding: 16px 20px;
border-top: 1px solid $border-color;
- background-color: #fff;
+ background-color: white;
+ box-shadow: 0 -4px 20px rgba(0, 0, 0, 0.05);
+ position: relative;
+ align-items: center;
+ gap: 12px;
+ z-index: 5;
+ transition: padding 0.2s ease;
- input {
+ &::before {
+ content: '';
+ position: absolute;
+ top: -5px;
+ left: 0;
+ right: 0;
+ height: 5px;
+ background: linear-gradient(to top, rgba(0, 0, 0, 0.06), transparent);
+ pointer-events: none;
+ }
+
+ .input-container {
+ position: relative;
flex-grow: 1;
- padding: 12px 16px;
- border: 1px solid $border-color;
+ display: flex;
+ align-items: center;
border-radius: 24px;
- font-size: 15px;
- transition: $transition;
+ background-color: #f9fafb;
+ border: 1px solid $border-color;
+ box-shadow: 0 2px 5px rgba(0, 0, 0, 0.03);
+ transition: all 0.25s ease;
+ overflow: hidden;
- &:focus {
- outline: none;
+ &:focus-within {
border-color: $primary-color;
- box-shadow: 0 0 0 2px color.adjust($primary-color, $alpha: -0.8);
+ box-shadow: 0 0 0 3px rgba($primary-color, 0.15);
+ background-color: white;
+ transform: translateY(-1px);
}
- &:disabled {
- background-color: $secondary-color;
- cursor: not-allowed;
+ input {
+ flex-grow: 1;
+ padding: 14px 18px;
+ border: none;
+ background: transparent;
+ font-size: 14px;
+ transition: all 0.25s ease;
+ width: 100%;
+
+ &:focus {
+ outline: none;
+ }
+
+ &:disabled {
+ background-color: #f3f4f6;
+ cursor: not-allowed;
+ }
+
+ &::placeholder {
+ color: #9ca3af;
+ }
}
}
.submit-button {
- background-color: $primary-color;
+ background: $primary-color;
color: white;
border: none;
border-radius: 50%;
width: 48px;
height: 48px;
- margin-left: 10px;
+ min-width: 48px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
- transition: $transition;
+ transition: all 0.2s ease;
+ box-shadow: 0 2px 8px rgba($primary-color, 0.3);
+ position: relative;
+ overflow: hidden;
&:hover {
- background-color: color.adjust($primary-color, $lightness: -10%);
+ background-color: #3b6cd7; /* Slightly darker blue */
+ box-shadow: 0 3px 10px rgba($primary-color, 0.4);
+ }
+
+ &:active {
+ background-color: #3463cc; /* Even darker for active state */
+ box-shadow: 0 2px 6px rgba($primary-color, 0.3);
}
&:disabled {
- background-color: color.adjust($primary-color, $lightness: 20%);
+ background: #9ca3af;
+ box-shadow: none;
cursor: not-allowed;
}
+ svg {
+ width: 20px;
+ height: 20px;
+ }
+
.spinner {
width: 20px;
height: 20px;
border: 3px solid rgba(255, 255, 255, 0.3);
border-top: 3px solid #fff;
border-radius: 50%;
- animation: spin 0.6s linear infinite;
+ animation: spin 0.8s cubic-bezier(0.34, 0.61, 0.71, 0.97) infinite;
}
}
}
.citation-popup {
position: fixed;
- bottom: 50px;
+ top: 50%;
left: 50%;
- transform: translateX(-50%);
- background-color: rgba(0, 0, 0, 0.8);
- color: white;
- padding: 10px 20px;
- border-radius: 10px;
- box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
+ transform: translate(-50%, -50%);
+ width: 90%;
+ max-width: 500px;
+ max-height: 300px;
+ border-radius: 8px;
+ background-color: white;
+ box-shadow: 0 8px 25px rgba(0, 0, 0, 0.25);
z-index: 1000;
- animation: fadeIn 0.3s ease-in-out;
+ display: flex;
+ flex-direction: column;
+ overflow: hidden;
+ animation: popup-fade-in 0.3s ease-out;
+ }
- p {
- margin: 0;
- font-size: 14px;
+ .citation-popup-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 12px 15px;
+ background-color: #f5f5f5;
+ border-bottom: 1px solid #ddd;
+ }
+
+ .citation-content {
+ padding: 15px;
+ overflow-y: auto;
+ max-height: 240px;
+ }
+
+ .citation-close-button {
+ background: none;
+ border: none;
+ cursor: pointer;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 32px;
+ height: 32px;
+ border-radius: 50%;
+ color: #666;
+ transition: background-color 0.2s;
+
+ &:hover {
+ background-color: #ddd;
}
- @keyframes fadeIn {
- from {
- opacity: 0;
- }
- to {
- opacity: 1;
- }
+ svg {
+ width: 20px;
+ height: 20px;
+ stroke-width: 2.5;
}
}
}
+@keyframes pulse {
+ 0% {
+ box-shadow: 0 0 0 0 rgba(239, 68, 68, 0.5);
+ }
+ 70% {
+ box-shadow: 0 0 0 10px rgba(239, 68, 68, 0);
+ }
+ 100% {
+ box-shadow: 0 0 0 0 rgba(239, 68, 68, 0);
+ }
+}
+
+// Font size modifiers
+.font-size-small {
+ .message-content,
+ .chat-input input,
+ .follow-up-button,
+ .follow-up-questions h4,
+ .processing-info,
+ .processing-info .dropdown-item,
+ .toggle-info {
+ font-size: $font-size-small !important;
+ }
+}
+
+.font-size-normal {
+ .message-content,
+ .chat-input input,
+ .follow-up-button,
+ .follow-up-questions h4,
+ .processing-info,
+ .processing-info .dropdown-item,
+ .toggle-info {
+ font-size: $font-size-normal !important;
+ }
+}
+
+.font-size-large {
+ .message-content,
+ .chat-input input,
+ .follow-up-button,
+ .follow-up-questions h4,
+ .processing-info,
+ .processing-info .dropdown-item,
+ .toggle-info {
+ font-size: $font-size-large !important;
+ }
+}
+
+.font-size-xlarge {
+ .message-content,
+ .chat-input input,
+ .follow-up-button,
+ .follow-up-questions h4,
+ .processing-info,
+ .processing-info .dropdown-item,
+ .toggle-info {
+ font-size: $font-size-xlarge !important;
+ }
+}
+
.message {
- max-width: 75%;
- padding: 12px 16px;
- border-radius: 12px;
- font-size: 15px;
+ max-width: 80%;
+ padding: 16px;
+ border-radius: 16px;
+ font-size: 14px;
line-height: 1.6;
- box-shadow: 0 1px 3px $shadow-color;
+ box-shadow: 0 2px 8px $shadow-color;
word-wrap: break-word;
display: flex;
flex-direction: column;
+ position: relative;
+ transition:
+ transform 0.3s cubic-bezier(0.25, 0.46, 0.45, 0.94),
+ box-shadow 0.3s cubic-bezier(0.25, 0.46, 0.45, 0.94);
+
+ &:hover {
+ box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
+ }
&.user {
align-self: flex-end;
- background-color: $primary-color;
- color: #fff;
+ background: $primary-color;
+ color: white;
border-bottom-right-radius: 4px;
+ transform-origin: bottom right;
+ animation: messageInUser 0.3s forwards;
+
+ strong {
+ color: rgba(255, 255, 255, 0.9);
+ }
}
&.assistant {
align-self: flex-start;
- background-color: $secondary-color;
+ background-color: white;
color: $text-color;
border-bottom-left-radius: 4px;
+ border: 1px solid $border-color;
+ transform-origin: bottom left;
+ animation: messageInAssistant 0.3s forwards;
+
+ .message-content {
+ p,
+ li,
+ a {
+ margin: 8px 0;
+
+ &:first-child {
+ margin-top: 0;
+ }
+
+ &:last-child {
+ margin-bottom: 0;
+ }
+ }
+
+ pre {
+ background-color: #f3f4f6;
+ padding: 12px;
+ border-radius: 8px;
+ overflow-x: auto;
+ font-size: 13px;
+ border: 1px solid $border-color;
+ }
+
+ code {
+ background-color: #f3f4f6;
+ padding: 2px 5px;
+ border-radius: 4px;
+ font-size: 13px;
+ font-family: monospace;
+ }
+ }
+ }
+
+ @keyframes messageInUser {
+ 0% {
+ opacity: 0;
+ transform: translateY(10px) scale(0.95);
+ }
+ 100% {
+ opacity: 1;
+ transform: translateY(0) scale(1);
+ }
+ }
+
+ @keyframes messageInAssistant {
+ 0% {
+ opacity: 0;
+ transform: translateY(10px) scale(0.95);
+ }
+ 100% {
+ opacity: 1;
+ transform: translateY(0) scale(1);
+ }
+ }
+
+ .processing-info {
+ margin: 0 0 12px 0;
+ padding: 12px 16px;
+ background-color: #f3f4f6;
+ border-radius: 10px;
+ font-size: 14px;
+ transform-origin: top center;
+ animation: fadeInExpand 0.3s forwards;
+
+ .dropdown-item {
+ margin-bottom: 8px;
+ padding-bottom: 8px;
+ border-bottom: 1px dashed #e5e7eb;
+
+ &:last-child {
+ margin-bottom: 0;
+ padding-bottom: 0;
+ border-bottom: none;
+ }
+
+ strong {
+ color: $primary-color;
+ font-weight: 600;
+ }
+ }
+
+ .info-content {
+ margin-top: 12px;
+ max-height: 200px;
+ overflow-y: auto;
+ padding-right: 8px;
+
+ &::-webkit-scrollbar {
+ width: 4px;
+ }
+
+ &::-webkit-scrollbar-thumb {
+ background-color: rgba($primary-color, 0.1);
+ border-radius: 8px;
+ }
+ }
}
.toggle-info {
- margin-top: 10px;
- background-color: transparent;
+ background-color: rgba($primary-color, 0.05);
color: $primary-color;
- border: 1px solid $primary-color;
+ border: 1px solid rgba($primary-color, 0.3);
border-radius: 8px;
padding: 8px 12px;
- font-size: 14px;
+ font-size: 13px;
+ font-weight: 500;
cursor: pointer;
- transition: $transition;
- margin-bottom: 16px;
+ transition: all 0.2s ease;
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ width: 100%;
&:hover {
- background-color: color.adjust($primary-color, $alpha: -0.9);
+ background-color: rgba($primary-color, 0.1);
+ border-color: rgba($primary-color, 0.4);
}
- }
- .processing-info {
- margin-bottom: 12px;
- padding: 10px 15px;
- background-color: #f9f9f9;
- border-radius: 8px;
- box-shadow: 0 1px 3px $shadow-color;
- font-size: 14px;
+ &:active {
+ background-color: rgba($primary-color, 0.15);
+ }
- .processing-item {
- margin-bottom: 5px;
- font-size: 14px;
- color: $light-text-color;
+ &:focus {
+ outline: none;
+ box-shadow: 0 0 0 2px rgba($primary-color, 0.2);
}
}
.message-content {
background-color: inherit;
- padding: 10px;
+ padding: 0;
border-radius: 8px;
- font-size: 15px;
- line-height: 1.5;
+ font-size: 14px;
+ line-height: 1.6;
+ color: inherit;
.citation-button {
display: inline-flex;
align-items: center;
justify-content: center;
- width: 22px;
- height: 22px;
+ width: 16px;
+ height: 16px;
border-radius: 50%;
- background-color: rgba(0, 0, 0, 0.1);
- color: $text-color;
- font-size: 12px;
- font-weight: bold;
- margin-left: 5px;
+ background-color: rgba($primary-color, 0.1);
+ color: $primary-color;
+ font-size: 10px;
+ font-weight: 600;
+ margin-left: 3px;
cursor: pointer;
+ transition: all 0.25s cubic-bezier(0.25, 0.46, 0.45, 0.94);
+ border: 1px solid rgba($primary-color, 0.2);
+ vertical-align: super;
+
+ &:hover {
+ background-color: $primary-color;
+ color: white;
+ transform: scale(1.1);
+ box-shadow: 0 2px 6px rgba($primary-color, 0.4);
+ }
+
+ &:active {
+ transform: scale(0.95);
+ }
+ }
+
+ a {
+ color: $primary-color;
+ text-decoration: none;
transition: $transition;
+ border-bottom: 1px dashed rgba($primary-color, 0.3);
&:hover {
- background-color: color.adjust($primary-color, $alpha: -0.8);
- color: #fff;
+ border-bottom: 1px solid $primary-color;
}
}
}
}
.follow-up-questions {
- margin-top: 12px;
+ margin-top: 14px;
+ background-color: rgba($primary-color, 0.05);
+ padding: 14px;
+ border-radius: 10px;
+ border: 1px solid rgba($primary-color, 0.1);
+ animation: fadeInUp 0.4s forwards;
+ transition: box-shadow 0.2s ease;
+
+ &:hover {
+ box-shadow: 0 4px 12px rgba($primary-color, 0.08);
+ }
h4 {
- font-size: 15px;
+ font-size: 13px;
font-weight: 600;
- margin-bottom: 8px;
+ margin: 0 0 10px 0;
+ color: $primary-color;
+ letter-spacing: 0.02em;
}
.questions-list {
@@ -244,34 +657,110 @@ $transition: all 0.2s ease-in-out;
}
.follow-up-button {
- background-color: #fff;
- color: $primary-color;
- border: 1px solid $primary-color;
+ background-color: white;
+ color: $text-color;
+ border: 1px solid rgba($primary-color, 0.2);
border-radius: 8px;
padding: 10px 14px;
- font-size: 14px;
+ font-size: 13px;
+ font-weight: 500;
cursor: pointer;
- transition: $transition;
+ transition: all 0.2s ease;
text-align: left;
+ box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
+ position: relative;
+ overflow: hidden;
+ text-transform: none !important; /* Force no text transform */
&:hover {
- background-color: $primary-color;
- color: #fff;
+ background-color: $primary-light;
+ border-color: rgba($primary-color, 0.3);
+ box-shadow: 0 2px 4px rgba($primary-color, 0.1);
+ }
+
+ &:active {
+ background-color: darken($primary-light, 3%);
}
}
}
+@keyframes fadeInUp {
+ 0% {
+ opacity: 0;
+ transform: translateY(10px);
+ }
+ 100% {
+ opacity: 1;
+ transform: translateY(0);
+ }
+}
+
+@keyframes fadeInExpand {
+ 0% {
+ opacity: 0;
+ transform: scaleY(0.9);
+ }
+ 100% {
+ opacity: 1;
+ transform: scaleY(1);
+ }
+}
+
.uploading-overlay {
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
- background-color: rgba(255, 255, 255, 0.8);
+ background-color: rgba(255, 255, 255, 0.92);
display: flex;
justify-content: center;
align-items: center;
z-index: 1000;
+ backdrop-filter: blur(4px);
+ animation: fadeIn 0.3s ease;
+
+ .progress-container {
+ width: 80%;
+ max-width: 400px;
+ background-color: white;
+ padding: 24px;
+ border-radius: 12px;
+ box-shadow: 0 10px 40px rgba($primary-color, 0.2);
+ animation: scaleIn 0.3s cubic-bezier(0.34, 1.56, 0.64, 1);
+
+ .progress-bar-wrapper {
+ height: 8px;
+ background-color: #f3f4f6;
+ border-radius: 4px;
+ margin-bottom: 16px;
+ overflow: hidden;
+
+ .progress-bar {
+ height: 100%;
+ background: linear-gradient(90deg, $primary-color, $accent-color);
+ border-radius: 4px;
+ transition: width 0.4s cubic-bezier(0.25, 0.46, 0.45, 0.94);
+ }
+ }
+
+ .progress-details {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+
+ .progress-percentage {
+ font-weight: 600;
+ color: $primary-color;
+ font-size: 16px;
+ }
+
+ .step-name {
+ color: $light-text-color;
+ font-size: 14px;
+ }
+ }
+ }
}
@keyframes spin {
@@ -283,12 +772,301 @@ $transition: all 0.2s ease-in-out;
}
}
+@keyframes scaleIn {
+ 0% {
+ transform: scale(0.9);
+ opacity: 0;
+ }
+ 100% {
+ transform: scale(1);
+ opacity: 1;
+ }
+}
+
+@keyframes popup-slide-up {
+ from {
+ opacity: 0;
+ transform: translate(-50%, 20px);
+ }
+ to {
+ opacity: 1;
+ transform: translate(-50%, 0);
+ }
+}
+
+@keyframes popup-fade-in {
+ from {
+ opacity: 0;
+ transform: translate(-50%, -45%);
+ }
+ to {
+ opacity: 1;
+ transform: translate(-50%, -50%);
+ }
+}
+
@media (max-width: 768px) {
.chat-box {
border-radius: 0;
}
.message {
- max-width: 90%;
+ max-width: 88%; // slightly narrower bubbles on tablet-width screens
+ padding: 14px;
+ }
+
+ .chat-input {
+ padding: 12px; // tighter input padding below 768px
+ }
+}
+
+// Responsive scaling: tighten spacing and type for narrow phone screens (<=480px)
+@media (max-width: 480px) {
+ .chat-box .chat-input input {
+ font-size: 13px;
+ padding: 12px 14px;
+ }
+
+ .message {
+ max-width: 95%; // bubbles use nearly the full width on phones
+ padding: 12px;
+ font-size: 13px;
+ }
+
+ .follow-up-questions {
+ padding: 12px;
+ }
+}
+
+// Dark mode support: overrides applied when an ancestor element carries the .dark-mode class
+.dark-mode .chat-box {
+ background-color: #1f2937; // dark panel background
+
+ .chat-header {
+ background: $primary-color;
+
+ .font-size-control {
+ background-color: rgba(255, 255, 255, 0.2); // translucent control over the colored header
+
+ &:hover {
+ background-color: rgba(255, 255, 255, 0.3);
+ }
+ }
+
+ .font-size-modal {
+ background-color: #1f2937;
+ border: 1px solid #374151;
+
+ .font-size-option {
+ color: #f9fafb; // near-white text for options
+
+ &:hover {
+ background-color: #2d3748;
+ }
+
+ &.active {
+ background-color: rgba($primary-color, 0.2); // tinted highlight for the selected size
+ }
+ }
+ }
+ }
+
+ .chat-messages {
+ background-color: #111827; // darkest surface for the scroll area
+ background-image: radial-gradient(#374151 1px, transparent 1px), radial-gradient(#374151 1px, transparent 1px); // dark dot-grid texture
+ }
+
+ .chat-input {
+ background-color: #1f2937;
+ border-top-color: #374151;
+
+ .input-container {
+ background-color: #374151;
+ border-color: #4b5563;
+
+ &:focus-within {
+ background-color: #2d3748; // darken slightly when the inner input has focus
+ border-color: $primary-color;
+ }
+
+ input {
+ color: white;
+
+ &::placeholder {
+ color: #9ca3af; // muted gray placeholder
+ }
+ }
+ }
+ }
+
+ .message {
+ &.assistant {
+ background-color: #1f2937; // assistant bubbles get the dark card treatment
+ border-color: #374151;
+ color: #f9fafb;
+
+ .message-content {
+ pre,
+ code {
+ background-color: #111827; // code blocks sit on the darkest surface
+ border-color: #374151;
+ }
+ }
+ }
+
+ .processing-info {
+ background-color: #111827;
+
+ .dropdown-item {
+ border-color: #374151;
+ }
+ }
+ }
+
+ .follow-up-questions {
+ background-color: rgba($primary-color, 0.1); // subtle brand tint instead of flat gray
+ border-color: rgba($primary-color, 0.2);
+
+ .follow-up-button {
+ background-color: #1f2937;
+ color: #f9fafb;
+ border-color: #4b5563;
+
+ &:hover {
+ background-color: #2d3748;
+ }
+ }
+ }
+
+ .uploading-overlay {
+ background-color: rgba(31, 41, 55, 0.9); // dark translucent scrim (rgb of #1f2937)
+
+ .progress-container {
+ background-color: #1f2937;
+
+ .progress-bar-wrapper {
+ background-color: #111827;
+ }
+ }
+ }
+}
+
+/* Tool Reload Modal Styles — full-viewport scrim plus centered confirmation dialog */
+.tool-reload-modal-overlay {
+ position: fixed;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background-color: rgba(0, 0, 0, 0.5); // half-opacity black scrim
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ z-index: 10000; // above the uploading overlay (z-index 1000)
+ backdrop-filter: blur(4px); // blur the content behind the modal
+}
+
+.tool-reload-modal { // dialog card; sections below supply their own padding, hence padding: 0 here
+ background: white;
+ border-radius: 12px;
+ padding: 0;
+ min-width: 400px;
+ max-width: 500px;
+ box-shadow:
+ 0 20px 25px -5px rgba(0, 0, 0, 0.1),
+ 0 10px 10px -5px rgba(0, 0, 0, 0.04); // layered soft drop shadow
+ border: 1px solid #e2e8f0;
+ animation: modalSlideIn 0.3s ease-out;
+}
+
+@keyframes modalSlideIn { // fade in while scaling up from 95% and dropping 20px into place
+ from {
+ opacity: 0;
+ transform: scale(0.95) translateY(-20px);
+ }
+ to {
+ opacity: 1;
+ transform: scale(1) translateY(0);
+ }
+}
+
+.tool-reload-modal-header { // title row, separated from the body by a hairline border
+ padding: 24px 24px 16px 24px;
+ border-bottom: 1px solid #e2e8f0;
+
+ h3 {
+ margin: 0;
+ font-size: 18px;
+ font-weight: 600;
+ color: #1a202c;
+ display: flex;
+ align-items: center;
+
+ &::before {
+ content: '🛠️'; // decorative wrench emoji prefixed to the heading text
+ margin-right: 8px;
+ font-size: 20px;
+ }
+ }
+}
+
+.tool-reload-modal-content { // body copy area of the dialog
+ padding: 20px 24px;
+
+ p {
+ margin: 0 0 12px 0;
+ line-height: 1.5;
+ color: #4a5568;
+
+ &:last-child {
+ margin-bottom: 0; // no trailing gap before the action row
+ }
+
+ strong {
+ color: #2d3748; // emphasized text is a shade darker
+ font-weight: 600;
+ }
+ }
+}
+
+.tool-reload-modal-actions { // right-aligned button row at the bottom of the dialog
+ padding: 16px 24px 24px 24px;
+ display: flex;
+ gap: 12px;
+ justify-content: flex-end;
+
+ button {
+ padding: 10px 20px;
+ border-radius: 6px;
+ font-weight: 500;
+ font-size: 14px;
+ cursor: pointer;
+ transition: all 0.2s ease;
+ border: none;
+
+ &.primary { // filled blue call-to-action
+ background: #3182ce;
+ color: white;
+
+ &:hover {
+ background: #2c5aa0;
+ transform: translateY(-1px); // slight lift on hover
+ }
+
+ &:active {
+ transform: translateY(0); // settle back down while pressed
+ }
+ }
+
+ &.secondary { // outlined neutral cancel/dismiss button
+ background: #f7fafc;
+ color: #4a5568;
+ border: 1px solid #e2e8f0;
+
+ &:hover {
+ background: #edf2f7;
+ border-color: #cbd5e0;
+ }
+ }
}
}
diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx
index fc51ca5f9..db01b7c88 100644
--- a/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx
+++ b/src/client/views/nodes/chatbot/chatboxcomponents/ChatBox.tsx
@@ -15,13 +15,15 @@ import * as React from 'react';
import { v4 as uuidv4 } from 'uuid';
import { ClientUtils, OmitKeys } from '../../../../../ClientUtils';
import { Doc, DocListCast, Opt } from '../../../../../fields/Doc';
-import { DocData, DocViews } from '../../../../../fields/DocSymbols';
+import { DocData, DocLayout, DocViews } from '../../../../../fields/DocSymbols';
+import { Id } from '../../../../../fields/FieldSymbols';
import { RichTextField } from '../../../../../fields/RichTextField';
import { ScriptField } from '../../../../../fields/ScriptField';
-import { CsvCast, DocCast, NumCast, PDFCast, RTFCast, StrCast } from '../../../../../fields/Types';
+import { CsvCast, DocCast, NumCast, PDFCast, RTFCast, StrCast, VideoCast, AudioCast } from '../../../../../fields/Types';
import { DocUtils } from '../../../../documents/DocUtils';
import { CollectionViewType, DocumentType } from '../../../../documents/DocumentTypes';
import { Docs, DocumentOptions } from '../../../../documents/Documents';
+import { DocServer } from '../../../../DocServer';
import { DocumentManager } from '../../../../util/DocumentManager';
import { ImageUtils } from '../../../../util/Import & Export/ImageUtils';
import { LinkManager } from '../../../../util/LinkManager';
@@ -35,18 +37,26 @@ import { PDFBox } from '../../PDFBox';
import { ScriptingBox } from '../../ScriptingBox';
import { VideoBox } from '../../VideoBox';
import { Agent } from '../agentsystem/Agent';
-import { supportedDocTypes } from '../tools/CreateDocumentTool';
+import { supportedDocTypes } from '../types/tool_types';
import { ASSISTANT_ROLE, AssistantMessage, CHUNK_TYPE, Citation, ProcessingInfo, SimplifiedChunk, TEXT_TYPE } from '../types/types';
import { Vectorstore } from '../vectorstore/Vectorstore';
import './ChatBox.scss';
import MessageComponentBox from './MessageComponent';
-import { ProgressBar } from './ProgressBar';
import { OpenWhere } from '../../OpenWhere';
import { Upload } from '../../../../../server/SharedMediaTypes';
+import { DocumentMetadataTool } from '../tools/DocumentMetadataTool';
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
dotenv.config();
-export type parsedDocData = { doc_type: string; data: unknown };
+export type parsedDocData = {
+ doc_type: string;
+ data: unknown;
+ _disable_resource_loading?: boolean;
+ _sandbox_iframe?: boolean;
+ _iframe_sandbox?: string;
+ data_useCors?: boolean;
+};
export type parsedDoc = DocumentOptions & parsedDocData;
/**
* ChatBox is the main class responsible for managing the interaction between the user and the assistant,
@@ -67,14 +77,18 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
@observable private _linked_csv_files: { filename: string; id: string; text: string }[] = [];
@observable private _isUploadingDocs: boolean = false;
@observable private _citationPopup: { text: string; visible: boolean } = { text: '', visible: false };
+ @observable private _isFontSizeModalOpen: boolean = false;
+ @observable private _fontSize: 'small' | 'normal' | 'large' | 'xlarge' = 'normal';
+ @observable private _toolReloadModal: { visible: boolean; toolName: string } = { visible: false, toolName: '' };
// Private properties for managing OpenAI API, vector store, agent, and UI elements
- private openai: OpenAI;
+ private openai!: OpenAI; // Using definite assignment assertion
private vectorstore_id: string;
private vectorstore: Vectorstore;
private agent: Agent;
private messagesRef: React.RefObject<HTMLDivElement>;
private _textInputRef: HTMLInputElement | undefined | null;
+ private docManager: AgentDocumentManager;
/**
* Static method that returns the layout string for the field.
@@ -95,19 +109,28 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
*/
constructor(props: FieldViewProps) {
super(props);
- makeObservable(this); // Enable MobX observables
+ makeObservable(this);
- // Initialize OpenAI, vectorstore, and agent
- this.openai = this.initializeOpenAI();
- if (StrCast(this.dataDoc.vectorstore_id) == '') {
- this.vectorstore_id = uuidv4();
- this.dataDoc.vectorstore_id = this.vectorstore_id;
- } else {
- this.vectorstore_id = StrCast(this.dataDoc.vectorstore_id);
- }
- this.vectorstore = new Vectorstore(this.vectorstore_id, this.retrieveDocIds);
- this.agent = new Agent(this.vectorstore, this.retrieveSummaries, this.retrieveFormattedHistory, this.retrieveCSVData, this.addLinkedUrlDoc, this.createImageInDash, this.createDocInDash, this.createCSVInDash, this.Document);
- this.messagesRef = React.createRef<HTMLDivElement>();
+ this.messagesRef = React.createRef();
+ this.docManager = new AgentDocumentManager(this);
+
+ // Initialize OpenAI client
+ this.initializeOpenAI();
+
+ // Create a unique vectorstore ID for this ChatBox
+ this.vectorstore_id = uuidv4();
+
+ // Initialize vectorstore with the document manager
+ this.vectorstore = new Vectorstore(this.vectorstore_id, this.docManager);
+
+ // Create an agent with the vectorstore
+ this.agent = new Agent(this.vectorstore, this.retrieveFormattedHistory.bind(this), this.retrieveCSVData.bind(this), this.createImageInDash.bind(this), this.createCSVInDash.bind(this), this.docManager);
+
+ // Set up the tool created callback
+ this.agent.setToolCreatedCallback(this.handleToolCreated);
+
+ // Add event listeners
+ this.addScrollListener();
// Reaction to update dataDoc when chat history changes
reaction(
@@ -122,6 +145,9 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
this.dataDoc.data = JSON.stringify(serializableHistory);
}
);
+
+ // Initialize font size from saved preference
+ this.initFontSize();
}
/**
@@ -131,22 +157,53 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
*/
@action
addDocToVectorstore = async (newLinkedDoc: Doc) => {
- this._uploadProgress = 0;
- this._currentStep = 'Initializing...';
- this._isUploadingDocs = true;
-
try {
- // Add the document to the vectorstore
+ const isAudioOrVideo = VideoCast(newLinkedDoc.data)?.url?.pathname || AudioCast(newLinkedDoc.data)?.url?.pathname;
+
+ // Set UI state to show the processing overlay
+ runInAction(() => {
+ this._isUploadingDocs = true;
+ this._uploadProgress = 0;
+ this._currentStep = isAudioOrVideo ? 'Preparing media file...' : 'Processing document...';
+ });
+
+ // Process the document first to ensure it has a valid ID
+ await this.docManager.processDocument(newLinkedDoc);
+
+ // Add the document to the vectorstore which will also register chunks
await this.vectorstore.addAIDoc(newLinkedDoc, this.updateProgress);
- } catch (error) {
- console.error('Error uploading document:', error);
- this._currentStep = 'Error during upload';
- } finally {
+
+ // Give a slight delay to show the completion message
+ if (this._uploadProgress === 100) {
+ await new Promise(resolve => setTimeout(resolve, 1000));
+ }
+
+ // Reset UI state
+ runInAction(() => {
+ this._isUploadingDocs = false;
+ this._uploadProgress = 0;
+ this._currentStep = '';
+ });
+
+ return true;
+ } catch (err) {
+ console.error('Error adding document to vectorstore:', err);
+
+ // Show error in UI
+ runInAction(() => {
+ this._currentStep = `Error: ${err instanceof Error ? err.message : 'Failed to process document'}`;
+ });
+
+ await new Promise(resolve => setTimeout(resolve, 2000));
+
+ // Reset UI state
runInAction(() => {
this._isUploadingDocs = false;
this._uploadProgress = 0;
this._currentStep = '';
});
+
+ return false;
}
};
@@ -157,10 +214,18 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
*/
@action
updateProgress = (progress: number, step: string) => {
- this._uploadProgress = progress;
+ // Ensure progress is within expected bounds
+ const validProgress = Math.min(Math.max(0, progress), 100);
+ this._uploadProgress = validProgress;
this._currentStep = step;
+
+ // Force UI update
+ if (process.env.NODE_ENV !== 'production') {
+ console.log(`Progress: ${validProgress}%, Step: ${step}`);
+ }
};
+ //TODO: Update for new chunk_simpl on agentDocument
/**
* Adds a CSV file for analysis by sending it to OpenAI and generating a summary.
* @param newLinkedDoc The linked document representing the CSV file.
@@ -229,7 +294,7 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
apiKey: process.env.OPENAI_KEY,
dangerouslyAllowBrowser: true,
};
- return new OpenAI(configuration);
+ this.openai = new OpenAI(configuration);
}
/**
@@ -354,27 +419,6 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
};
/**
- * Adds a linked document from a URL for future reference and analysis.
- * @param url The URL of the document to add.
- * @param id The unique identifier for the document.
- */
- @action
- addLinkedUrlDoc = async (url: string, id: string) => {
- const doc = Docs.Create.WebDocument(url, { data_useCors: true });
-
- const linkDoc = Docs.Create.LinkDocument(this.Document, doc);
- LinkManager.Instance.addLink(linkDoc);
-
- const chunkToAdd = {
- chunkId: id,
- chunkType: CHUNK_TYPE.URL,
- url: url,
- };
-
- doc.chunk_simpl = JSON.stringify({ chunks: [chunkToAdd] });
- };
-
- /**
* Getter to retrieve the current user's name from the client utils.
*/
@computed
@@ -427,20 +471,32 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
private createCollectionWithChildren = (data: parsedDoc[], insideCol: boolean): Opt<Doc>[] => data.map(doc => this.whichDoc(doc, insideCol));
@action
- whichDoc = (doc: parsedDoc, insideCol: boolean): Opt<Doc> => {
+ public whichDoc = (doc: parsedDoc, insideCol: boolean): Opt<Doc> => {
const options = OmitKeys(doc, ['doct_type', 'data']).omit as DocumentOptions;
const data = (doc as parsedDocData).data;
const ndoc = (() => {
switch (doc.doc_type) {
default:
- case supportedDocTypes.text: return Docs.Create.TextDocument(doc.text as string, options);
+ case supportedDocTypes.note: return Docs.Create.TextDocument(data as string, options);
case supportedDocTypes.comparison: return this.createComparison(JSON.parse(data as string) as parsedDoc[], options);
case supportedDocTypes.flashcard: return this.createFlashcard(JSON.parse(data as string) as parsedDoc[], options);
case supportedDocTypes.deck: return this.createDeck(JSON.parse(data as string) as parsedDoc[], options);
case supportedDocTypes.image: return Docs.Create.ImageDocument(data as string, options);
case supportedDocTypes.equation: return Docs.Create.EquationDocument(data as string, options);
case supportedDocTypes.notetaking: return Docs.Create.NoteTakingDocument([], options);
- case supportedDocTypes.web: return Docs.Create.WebDocument(data as string, { ...options, data_useCors: true });
+ case supportedDocTypes.web:
+ // Create web document with enhanced safety options
+ const webOptions = {
+ ...options,
+ data_useCors: true
+ };
+
+ // If iframe_sandbox was passed from AgentDocumentManager, add it to the options
+ if ('_iframe_sandbox' in options) {
+ (webOptions as any)._iframe_sandbox = options._iframe_sandbox;
+ }
+
+ return Docs.Create.WebDocument(data as string, webOptions);
case supportedDocTypes.dataviz: return Docs.Create.DataVizDocument('/users/rz/Downloads/addresses.csv', options);
case supportedDocTypes.pdf: return Docs.Create.PdfDocument(data as string, options);
case supportedDocTypes.video: return Docs.Create.VideoDocument(data as string, options);
@@ -497,28 +553,6 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
};
/**
- * Creates a document in the dashboard.
- *
- * @param {string} doc_type - The type of document to create.
- * @param {string} data - The data used to generate the document.
- * @param {DocumentOptions} options - Configuration options for the document.
- * @returns {Promise<void>} A promise that resolves once the document is created and displayed.
- */
- @action
- createDocInDash = (pdoc: parsedDoc) => {
- const linkAndShowDoc = (doc: Opt<Doc>) => {
- if (doc) {
- LinkManager.Instance.addLink(Docs.Create.LinkDocument(this.Document, doc));
- this._props.addDocument?.(doc);
- DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {});
- }
- };
- const doc = this.whichDoc(pdoc, false);
- if (doc) linkAndShowDoc(doc);
- return doc;
- };
-
- /**
* Creates a deck of flashcards.
*
* @param {any} data - The data used to generate the flashcards. Can be a string or an object.
@@ -591,83 +625,137 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
*/
@action
handleCitationClick = async (citation: Citation) => {
- const currentLinkedDocs: Doc[] = this.linkedDocs;
- const chunkId = citation.chunk_id;
+ try {
+ // Extract values from MobX proxy object if needed
+ const chunkId = typeof citation.chunk_id === 'object' ? (citation.chunk_id as any).toString() : citation.chunk_id;
- for (const doc of currentLinkedDocs) {
- if (doc.chunk_simpl) {
- const docChunkSimpl = JSON.parse(StrCast(doc.chunk_simpl)) as { chunks: SimplifiedChunk[] };
- const foundChunk = docChunkSimpl.chunks.find(chunk => chunk.chunkId === chunkId);
+ // For debugging
+ console.log('Citation clicked:', {
+ chunkId,
+ citation: JSON.stringify(citation, null, 2),
+ });
- if (foundChunk) {
- // Handle media chunks specifically
+ // Get the simplified chunk using the document manager
+ const { foundChunk, doc, dataDoc } = this.docManager.getSimplifiedChunkById(chunkId);
+ console.log('doc: ', doc);
+ console.log('dataDoc: ', dataDoc);
+ if (!foundChunk || !doc) {
+ if (doc) {
+ console.warn(`Chunk not found in document, ${doc.id}, for chunk ID: ${chunkId}`);
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {});
+ } else {
+ console.warn(`Chunk not found for chunk ID: ${chunkId}`);
+ }
+ return;
+ }
- if (doc.ai_type == 'video' || doc.ai_type == 'audio') {
- const directMatchSegmentStart = this.getDirectMatchingSegmentStart(doc, citation.direct_text || '', foundChunk.indexes || []);
+ console.log(`Found chunk in document:`, foundChunk);
- if (directMatchSegmentStart) {
- // Navigate to the segment's start time in the media player
- await this.goToMediaTimestamp(doc, directMatchSegmentStart, doc.ai_type);
- } else {
- console.error('No direct matching segment found for the citation.');
- }
- } else {
- // Handle other chunk types as before
- this.handleOtherChunkTypes(foundChunk, citation, doc);
- }
+ // Handle different chunk types
+ if (foundChunk.chunkType === CHUNK_TYPE.AUDIO || foundChunk.chunkType === CHUNK_TYPE.VIDEO) {
+ const directMatchSegmentStart = this.getDirectMatchingSegmentStart(doc, citation.direct_text || '', foundChunk.indexes || []);
+ if (directMatchSegmentStart) {
+ await this.goToMediaTimestamp(doc, directMatchSegmentStart, foundChunk.chunkType);
+ } else {
+ console.error('No direct matching segment found for the citation.');
}
+ } else if (foundChunk.chunkType === CHUNK_TYPE.TABLE || foundChunk.chunkType === CHUNK_TYPE.IMAGE) {
+ console.log('here: ', foundChunk);
+ this.handleOtherChunkTypes(foundChunk as SimplifiedChunk, citation, doc);
+ } else {
+ if (doc.type === 'web') {
+ DocumentManager.Instance.showDocument(doc, { openLocation: OpenWhere.addRight }, () => {});
+ return;
+ }
+ this.handleOtherChunkTypes(foundChunk, citation, doc, dataDoc);
+ // Show the chunk text in citation popup
+ let chunkText = citation.direct_text || 'Text content not available';
+ this.showCitationPopup(chunkText);
+
+ // Also navigate to the document
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {});
}
+ } catch (error) {
+ console.error('Error handling citation click:', error);
}
};
+ /**
+ * Finds a matching segment in a document based on text content.
+ * @param doc The document to search in
+ * @param citationText The text to find in the document
+ * @param indexesOfSegments Optional indexes of segments to search in
+ * @returns The starting timestamp of the matching segment, or -1 if not found
+ */
getDirectMatchingSegmentStart = (doc: Doc, citationText: string, indexesOfSegments: string[]): number => {
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const originalSegments = JSON.parse(StrCast(doc.original_segments!)).map((segment: any, index: number) => ({
- index: index.toString(),
- text: segment.text,
- start: segment.start,
- end: segment.end,
- }));
-
- if (!Array.isArray(originalSegments) || originalSegments.length === 0 || !Array.isArray(indexesOfSegments)) {
- return 0;
+ if (!doc || !citationText) return -1;
+
+ // Get original segments using document manager
+ const original_segments = this.docManager.getOriginalSegments(doc);
+
+ if (!original_segments || !Array.isArray(original_segments) || original_segments.length === 0) {
+ return -1;
}
- // Create itemsToSearch array based on indexesOfSegments
- const itemsToSearch = indexesOfSegments.map((indexStr: string) => {
- const index = parseInt(indexStr, 10);
- const segment = originalSegments[index];
- return { text: segment.text, start: segment.start };
- });
+ let segments = original_segments;
+
+ // If specific indexes are provided, filter segments by those indexes
+ if (indexesOfSegments && indexesOfSegments.length > 0) {
+ segments = original_segments.filter((segment: any) => indexesOfSegments.includes(segment.index));
+ }
+
+ // If no segments match the indexes, use all segments
+ if (segments.length === 0) {
+ segments = original_segments;
+ }
- console.log('Constructed itemsToSearch:', itemsToSearch);
+ // First try to find an exact match
+ const exactMatch = segments.find((segment: any) => segment.text && segment.text.includes(citationText));
- // Helper function to calculate word overlap score
+ if (exactMatch) {
+ return exactMatch.start;
+ }
+
+ // If no exact match, find segment with best word overlap
const calculateWordOverlap = (text1: string, text2: string): number => {
- const words1 = new Set(text1.toLowerCase().split(/\W+/));
- const words2 = new Set(text2.toLowerCase().split(/\W+/));
- const intersection = new Set([...words1].filter(word => words2.has(word)));
- return intersection.size / Math.max(words1.size, words2.size); // Jaccard similarity
+ if (!text1 || !text2) return 0;
+
+ const words1 = text1.toLowerCase().split(/\s+/);
+ const words2 = text2.toLowerCase().split(/\s+/);
+ const wordSet1 = new Set(words1);
+
+ let overlap = 0;
+ for (const word of words2) {
+ if (wordSet1.has(word)) {
+ overlap++;
+ }
+ }
+
+ // Return percentage of overlap relative to the shorter text
+ return overlap / Math.min(words1.length, words2.length);
};
- // Search for the best matching segment
- let bestMatchStart = 0;
- let bestScore = 0;
-
- console.log(`Searching for best match for query: "${citationText}"`);
- itemsToSearch.forEach(item => {
- const score = calculateWordOverlap(citationText, item.text);
- console.log(`Comparing query to segment: "${item.text}" | Score: ${score}`);
- if (score > bestScore) {
- bestScore = score;
- bestMatchStart = item.start;
+ // Find segment with highest word overlap
+ let bestMatch = null;
+ let highestOverlap = 0;
+
+ for (const segment of segments) {
+ if (!segment.text) continue;
+
+ const overlap = calculateWordOverlap(segment.text, citationText);
+ if (overlap > highestOverlap) {
+ highestOverlap = overlap;
+ bestMatch = segment;
}
- });
+ }
- console.log('Best match found with score:', bestScore, '| Start time:', bestMatchStart);
+ // Only return matches with significant overlap (more than 30%)
+ if (bestMatch && highestOverlap > 0.3) {
+ return bestMatch.start;
+ }
- // Return the start time of the best match
- return bestMatchStart;
+ // If no good match found, return the start of the first segment as fallback
+ return segments.length > 0 ? segments[0].start : -1;
};
/**
@@ -701,7 +789,7 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
* @param citation The citation object.
* @param doc The document containing the chunk.
*/
- handleOtherChunkTypes = (foundChunk: SimplifiedChunk, citation: Citation, doc: Doc) => {
+ handleOtherChunkTypes = (foundChunk: SimplifiedChunk, citation: Citation, doc: Doc, dataDoc?: Doc) => {
switch (foundChunk.chunkType) {
case CHUNK_TYPE.IMAGE:
case CHUNK_TYPE.TABLE:
@@ -716,6 +804,7 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {});
return;
}
+
const x1 = parseFloat(values[0]) * Doc.NativeWidth(doc);
const y1 = parseFloat(values[1]) * Doc.NativeHeight(doc) + foundChunk.startPage * Doc.NativeHeight(doc);
const x2 = parseFloat(values[2]) * Doc.NativeWidth(doc);
@@ -723,31 +812,181 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
const annotationKey = '$' + Doc.LayoutDataKey(doc) + '_annotations';
- const existingDoc = DocListCast(doc[annotationKey]).find(d => d.citation_id === citation.citation_id);
+ const existingDoc = DocListCast(doc[DocData][annotationKey]).find(d => d.citation_id === citation.citation_id);
+ if (existingDoc) {
+ existingDoc.x = x1;
+ existingDoc.y = y1;
+ existingDoc._width = x2 - x1;
+ existingDoc._height = y2 - y1;
+ }
const highlightDoc = existingDoc ?? this.createImageCitationHighlight(x1, y1, x2, y2, citation, annotationKey, doc);
- DocumentManager.Instance.showDocument(highlightDoc, { willZoomCentered: true }, () => {});
+ //doc.layout_scroll = y1;
+ doc._layout_curPage = foundChunk.startPage + 1;
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {});
+ //DocumentManager.Instance.showDocument(highlightDoc, { willZoomCentered: true }, () => {});
}
break;
case CHUNK_TYPE.TEXT:
this._citationPopup = { text: citation.direct_text ?? 'No text available', visible: true };
- setTimeout(() => (this._citationPopup.visible = false), 3000);
+ this.startCitationPopupTimer();
- DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {
- const firstView = Array.from(doc[DocViews])[0] as DocumentView;
- (firstView.ComponentView as PDFBox)?.gotoPage?.(foundChunk.startPage ?? 0);
- (firstView.ComponentView as PDFBox)?.search?.(citation.direct_text ?? '');
- });
+ // Check if the document is a PDF (has a PDF viewer component)
+ const isPDF = PDFCast(dataDoc!.data) !== null || dataDoc!.type === DocumentType.PDF;
+
+ // First ensure document is fully visible before trying to access its views
+ this.ensureDocumentIsVisible(dataDoc!, isPDF, citation, foundChunk, doc);
break;
case CHUNK_TYPE.CSV:
case CHUNK_TYPE.URL:
- DocumentManager.Instance.showDocument(doc, { willZoomCentered: true });
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {
+ console.log(`Showing web document in viewer with URL: ${foundChunk.url}`);
+ });
break;
default:
console.error('Unhandled chunk type:', foundChunk.chunkType);
break;
}
};
+
+ /**
+ * Ensures a document is fully visible and rendered before performing actions on it.
+ * @param doc The document to ensure is visible
+ * @param isPDF Whether this is a PDF document
+ * @param citation The citation information
+ * @param foundChunk The chunk information
+ * @param layoutDoc Optional already-rendered layout doc used as a fallback view when doc has none
+ */
+ ensureDocumentIsVisible = (doc: Doc, isPDF: boolean, citation: Citation, foundChunk: SimplifiedChunk, layoutDoc?: Doc) => {
+ try {
+ // First, check if the document already has views and is rendered
+ const hasViews = doc[DocViews] && doc[DocViews].size > 0;
+
+ console.log(`Document ${doc.id}: Current state - hasViews: ${hasViews}, isPDF: ${isPDF}`);
+
+ if (hasViews) {
+ // Document is already rendered, proceed with accessing its view
+ this.processPDFDocumentView(doc, isPDF, citation, foundChunk);
+ return;
+ } else if (layoutDoc) {
+ this.processPDFDocumentView(layoutDoc, isPDF, citation, foundChunk); // fall back to the supplied layout doc's view
+ return;
+ }
+
+ // If document is not rendered yet, show it and wait for it to be ready
+ console.log(`Document ${doc.id} needs to be shown first`);
+
+ // Force document to be rendered by using willZoomCentered: true
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {
+ // Wait a bit for the document to be fully rendered (longer than our previous attempts)
+ setTimeout(() => {
+ // Now manually check if document view exists and is valid; starts retry chain at attempt 1
+ this.verifyAndProcessDocumentView(doc, isPDF, citation, foundChunk, 1);
+ }, 800); // Increased initial delay
+ });
+ } catch (error) {
+ console.error('Error ensuring document visibility:', error);
+ // Show the document anyway as a fallback
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true });
+ }
+ };
+
+ /**
+ * Verifies a view for `doc` exists, retrying up to 5 attempts with exponential backoff (capped at 2s) before processing it.
+ */
+ verifyAndProcessDocumentView = (doc: Doc, isPDF: boolean, citation: Citation, foundChunk: SimplifiedChunk, attempt: number) => {
+ // Diagnostic info
+ console.log(`Verify attempt ${attempt}: Document views for ${doc.id}:`, doc[DocViews] ? `Found ${doc[DocViews].size} views` : 'No views');
+
+ // Double-check document exists in current document system
+ const docExists = DocServer.GetCachedRefField(doc[Id]) !== undefined;
+ if (!docExists) {
+ console.warn(`Document ${doc.id} no longer exists in document system`);
+ return;
+ }
+
+ try {
+ if (!doc[DocViews] || doc[DocViews].size === 0) {
+ if (attempt >= 5) {
+ console.error(`Maximum verification attempts (${attempt}) reached for document ${doc.id}`);
+
+ // Last resort: force re-creation of the document view
+ if (isPDF) {
+ console.log('Forcing document recreation as last resort');
+ DocumentManager.Instance.showDocument(doc, {
+ willZoomCentered: true,
+ });
+ }
+ return;
+ }
+
+ // Let's try explicitly requesting the document be shown again
+ if (attempt > 2) {
+ console.log(`Attempt ${attempt}: Re-requesting document be shown`);
+ DocumentManager.Instance.showDocument(doc, {
+ willZoomCentered: true,
+ openLocation: attempt % 2 === 0 ? OpenWhere.addRight : undefined, // alternate placement on retries
+ });
+ }
+
+ // Use exponential backoff for retries
+ const nextDelay = Math.min(2000, 500 * Math.pow(1.5, attempt)); // 500ms * 1.5^attempt, capped at 2s
+ console.log(`Scheduling retry ${attempt + 1} in ${nextDelay}ms`);
+
+ setTimeout(() => {
+ this.verifyAndProcessDocumentView(doc, isPDF, citation, foundChunk, attempt + 1);
+ }, nextDelay);
+ return;
+ }
+
+ this.processPDFDocumentView(doc, isPDF, citation, foundChunk);
+ } catch (error) {
+ console.error(`Error on verification attempt ${attempt}:`, error);
+ if (attempt < 5) {
+ setTimeout(
+ () => {
+ this.verifyAndProcessDocumentView(doc, isPDF, citation, foundChunk, attempt + 1);
+ },
+ 500 * Math.pow(1.5, attempt) // NOTE(review): same backoff but missing the 2000ms cap used above — confirm intent
+ );
+ }
+ }
+ };
+
+ /**
+ * Processes a document view once it is known to exist: grabs the first rendered view and, for PDFs with citation text, delegates to ensureFuzzySearchAndExecute (defined elsewhere) to search the cited page.
+ */
+ processPDFDocumentView = (doc: Doc, isPDF: boolean, citation: Citation, foundChunk: SimplifiedChunk) => {
+ try {
+ const views = Array.from(doc[DocViews] || []); // DocViews may be undefined if the doc never rendered
+ if (!views.length) {
+ console.warn('No document views found in document that should have views');
+ return;
+ }
+
+ const firstView = views[0] as DocumentView;
+ if (!firstView) {
+ console.warn('First view is invalid');
+ return;
+ }
+
+ console.log(`Successfully found document view for ${doc.id}:`, firstView.ComponentView ? `Component: ${firstView.ComponentView.constructor.name}` : 'No component view');
+
+ if (!firstView.ComponentView) {
+ console.warn('Component view not available');
+ return;
+ }
+
+ // For PDF documents, perform fuzzy search
+ if (isPDF && firstView.ComponentView && citation.direct_text) {
+ const pdfComponent = firstView.ComponentView as PDFBox; // NOTE(review): unchecked cast — assumes a PDF doc's view is a PDFBox
+ this.ensureFuzzySearchAndExecute(pdfComponent, citation.direct_text.trim(), foundChunk.startPage);
+ }
+ } catch (error) {
+ console.error('Error processing PDF document view:', error);
+ }
+ };
+
/**
* Creates an annotation highlight on a PDF document for image citations.
* @param x1 X-coordinate of the top-left corner of the highlight.
@@ -767,7 +1006,8 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
_height: y2 - y1,
backgroundColor: 'rgba(255, 255, 0, 0.5)',
});
- highlight_doc.$citation_id = citation.citation_id;
+ highlight_doc[DocData].citation_id = citation.citation_id;
+ highlight_doc.freeform_scale = 1;
Doc.AddDocToList(pdfDoc[DocData], annotationKey, highlight_doc);
highlight_doc.annotationOn = pdfDoc;
Doc.SetContainer(highlight_doc, pdfDoc);
@@ -849,6 +1089,16 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
}
});
this.addScrollListener();
+
+ // Initialize the document manager by finding existing documents
+ this.docManager.initializeFindDocsFreeform();
+
+ // If there are stored doc IDs in our list of docs to add, process them
+ if (this._linked_docs_to_add.size > 0) {
+ this._linked_docs_to_add.forEach(async doc => {
+ await this.docManager.processDocument(doc);
+ });
+ }
}
/**
@@ -860,58 +1110,6 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
}
/**
- * Getter that retrieves all linked documents for the current document.
- */
- @computed
- get linkedDocs() {
- return LinkManager.Instance.getAllRelatedLinks(this.Document)
- .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document)))
- .map(d => DocCast(d?.annotationOn, d))
- .filter(d => d)
- .map(d => d!);
- }
-
- /**
- * Getter that retrieves document IDs of linked documents that have AI-related content.
- */
- @computed
- get docIds() {
- return LinkManager.Instance.getAllRelatedLinks(this.Document)
- .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document)))
- .map(d => DocCast(d?.annotationOn, d))
- .filter(d => d)
- .map(d => d!)
- .filter(d => {
- console.log(d.ai_doc_id);
- return d.ai_doc_id;
- })
- .map(d => StrCast(d.ai_doc_id));
- }
-
- /**
- * Getter that retrieves summaries of all linked documents.
- */
- @computed
- get summaries(): string {
- return (
- LinkManager.Instance.getAllRelatedLinks(this.Document)
- .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.Document)))
- .map(d => DocCast(d?.annotationOn, d))
- .filter(d => d?.summary)
- .map((doc, index) => {
- if (PDFCast(doc?.data)) {
- return `<summary file_name="${PDFCast(doc!.data)!.url.pathname}" applicable_tools=["rag"]>${doc!.summary}</summary>`;
- } else if (CsvCast(doc?.data)) {
- return `<summary file_name="${CsvCast(doc!.data)!.url.pathname}" applicable_tools=["dataAnalysis"]>${doc!.summary}</summary>`;
- } else {
- return `${index + 1}) ${doc?.summary}`;
- }
- })
- .join('\n') + '\n'
- );
- }
-
- /**
* Getter that retrieves all linked CSV files for analysis.
*/
@computed get linkedCSVs(): { filename: string; id: string; text: string }[] {
@@ -936,22 +1134,14 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
// Other helper methods for retrieving document data and processing
- retrieveSummaries = () => {
- return this.summaries;
- };
-
retrieveCSVData = () => {
return this.linkedCSVs;
};
- retrieveFormattedHistory = () => {
+ retrieveFormattedHistory = (): string => {
return this.formattedHistory;
};
- retrieveDocIds = () => {
- return this.docIds;
- };
-
/**
* Handles follow-up questions when the user clicks on them.
* Automatically sets the input value to the clicked follow-up question.
@@ -962,23 +1152,273 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
this._inputValue = question;
};
+ /**
+ * Handles tool creation notification and shows the reload modal
+ * @param toolName The name of the tool that was created
+ */
+ @action
+ handleToolCreated = (toolName: string) => {
+ this._toolReloadModal = {
+ visible: true,
+ toolName: toolName,
+ };
+ };
+
+ /**
+ * Closes the tool reload modal
+ */
+ @action
+ closeToolReloadModal = () => {
+ this._toolReloadModal = {
+ visible: false,
+ toolName: '',
+ };
+ };
+
+ /**
+ * Handles the reload confirmation and triggers page reload
+ */
+ @action
+ handleReloadConfirmation = async () => {
+ // Close the modal first
+ this.closeToolReloadModal();
+
+ try {
+ // Perform the deferred tool save operation
+ const saveSuccess = await this.agent.performDeferredToolSave();
+
+ if (saveSuccess) {
+ console.log('Tool saved successfully, proceeding with reload...');
+ } else {
+ console.warn('Tool save failed, but proceeding with reload anyway...');
+ }
+ } catch (error) {
+ console.error('Error during deferred tool save:', error);
+ }
+
+ // Trigger page reload to rebuild webpack and load the new tool
+ setTimeout(() => {
+ window.location.reload();
+ }, 100);
+ };
+
_dictation: DictationButton | null = null;
+
+ /**
+ * Toggles the font size modal visibility
+ */
+ @action
+ toggleFontSizeModal = () => {
+ this._isFontSizeModalOpen = !this._isFontSizeModalOpen;
+ };
+
+ /**
+ * Changes the font size and applies it to the chat interface
+ * @param size The new font size to apply
+ */
+ @action
+ changeFontSize = (size: 'small' | 'normal' | 'large' | 'xlarge') => {
+ this._fontSize = size;
+ this._isFontSizeModalOpen = false;
+
+ // Save preference to localStorage if needed
+ if (typeof window !== 'undefined') {
+ localStorage.setItem('chatbox-font-size', size);
+ }
+ };
+
+ /**
+ * Initializes font size from saved preference
+ */
+ initFontSize = () => {
+ if (typeof window !== 'undefined') {
+ const savedSize = localStorage.getItem('chatbox-font-size');
+ if (savedSize && ['small', 'normal', 'large', 'xlarge'].includes(savedSize)) {
+ this._fontSize = savedSize as 'small' | 'normal' | 'large' | 'xlarge';
+ }
+ }
+ };
+
+ /**
+ * Renders a font size icon SVG
+ */
+ renderFontSizeIcon = () => (
+ <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" strokeLinejoin="round">
+ <polyline points="4 7 4 4 20 4 20 7"></polyline>
+ <line x1="9" y1="20" x2="15" y2="20"></line>
+ <line x1="12" y1="4" x2="12" y2="20"></line>
+ </svg>
+ );
+
+ /**
+ * Shows the citation popup with the given text.
+ * @param text The text to display in the popup.
+ */
+ @action
+ showCitationPopup = (text: string) => {
+ this._citationPopup = {
+ text: text || 'No text available',
+ visible: true,
+ };
+ this.startCitationPopupTimer();
+ };
+
+ /**
+ * Closes the citation popup.
+ */
+ @action
+ closeCitationPopup = () => {
+ this._citationPopup.visible = false;
+ };
+
/**
- * Renders the chat interface, including the message list, input field, and other UI elements.
+ * Starts the auto-close timer for the citation popup.
+ */
+ startCitationPopupTimer = () => {
+ // Auto-close the popup after 5 seconds
+ setTimeout(() => this.closeCitationPopup(), 5000);
+ };
+
+ /**
+ * Retry PDF search with exponential backoff
+ */
+ retryPdfSearch = (doc: Doc, citation: Citation, foundChunk: SimplifiedChunk, isPDF: boolean, attempt: number) => {
+ if (attempt > 5) {
+ console.error('Maximum retry attempts reached for PDF search');
+ return;
+ }
+
+ const delay = Math.min(2000, 500 * Math.pow(1.5, attempt)); // Exponential backoff with max delay of 2 seconds
+
+ setTimeout(() => {
+ try {
+ if (!doc[DocViews] || doc[DocViews].size === 0) {
+ this.retryPdfSearch(doc, citation, foundChunk, isPDF, attempt + 1);
+ return;
+ }
+
+ const views = Array.from(doc[DocViews]);
+ if (!views.length) {
+ this.retryPdfSearch(doc, citation, foundChunk, isPDF, attempt + 1);
+ return;
+ }
+
+ const firstView = views[0] as DocumentView;
+ if (!firstView || !firstView.ComponentView) {
+ this.retryPdfSearch(doc, citation, foundChunk, isPDF, attempt + 1);
+ return;
+ }
+
+ const pdfComponent = firstView.ComponentView as PDFBox;
+ if (isPDF && pdfComponent && citation.direct_text) {
+ console.log(`PDF component found on attempt ${attempt}, executing search...`);
+ this.ensureFuzzySearchAndExecute(pdfComponent, citation.direct_text.trim(), foundChunk.startPage);
+ }
+ } catch (error) {
+ console.error(`Error on retry attempt ${attempt}:`, error);
+ this.retryPdfSearch(doc, citation, foundChunk, isPDF, attempt + 1);
+ }
+ }, delay);
+ };
+
+ /**
+ * Ensures fuzzy search is enabled in PDFBox and performs a search
+ * @param pdfComponent The PDFBox component
+ * @param searchText The text to search for
+ * @param startPage Optional page to navigate to before searching
+ */
+ private ensureFuzzySearchAndExecute = (pdfComponent: PDFBox, searchText: string, startPage?: number) => {
+ if (!pdfComponent) {
+ console.warn('PDF component is undefined, cannot perform search');
+ return;
+ }
+
+ if (!searchText?.trim()) {
+ console.warn('Search text is empty, skipping search');
+ return;
+ }
+
+ try {
+ // Check if the component has required methods
+ if (typeof pdfComponent.gotoPage !== 'function' || typeof pdfComponent.toggleFuzzySearch !== 'function' || typeof pdfComponent.search !== 'function') {
+ console.warn('PDF component missing required methods');
+ return;
+ }
+
+ // Navigate to the page if specified
+ if (typeof startPage === 'number') {
+ pdfComponent.gotoPage(startPage + 1);
+ }
+
+ // Always try to enable fuzzy search
+ try {
+ // PDFBox.tsx toggles fuzzy search state internally
+ // We'll call it once to make sure it's enabled
+ pdfComponent.toggleFuzzySearch();
+ } catch (toggleError) {
+ console.warn('Error toggling fuzzy search:', toggleError);
+ }
+
+ // Add a sufficient delay to ensure PDF is fully loaded before searching
+ setTimeout(() => {
+ try {
+ console.log('Performing fuzzy search for text:', searchText);
+ pdfComponent.search(searchText);
+ } catch (searchError) {
+ console.error('Error performing search:', searchError);
+ }
+ }, 1000); // Increased delay for better reliability
+ } catch (error) {
+ console.error('Error in fuzzy search setup:', error);
+ }
+ };
+
+ /**
+ * Main render method for the ChatBox
*/
render() {
+ const fontSizeClass = `font-size-${this._fontSize}`;
+
return (
- <div className="chat-box">
+ <div className={`chat-box ${fontSizeClass}`}>
{this._isUploadingDocs && (
<div className="uploading-overlay">
<div className="progress-container">
- <ProgressBar />
- <div className="step-name">{this._currentStep}</div>
+ <div className="progress-bar-wrapper">
+ <div className="progress-bar" style={{ width: `${this._uploadProgress}%` }} />
+ </div>
+ <div className="progress-details">
+ <div className="progress-percentage">{Math.round(this._uploadProgress)}%</div>
+ <div className="step-name">{this._currentStep}</div>
+ </div>
</div>
</div>
)}
<div className="chat-header">
- <h2>{this.dataDoc.is_dash_doc_assistant ? 'Dash Help Assistant' : `${this.userName()}'s AI Assistant`}</h2>
+ <h2>{this.userName()}&apos;s AI Assistant</h2>
+ <div className="font-size-control" onClick={this.toggleFontSizeModal}>
+ {this.renderFontSizeIcon()}
+ </div>
+ {this._isFontSizeModalOpen && (
+ <div className="font-size-modal">
+ <div className={`font-size-option ${this._fontSize === 'small' ? 'active' : ''}`} onClick={() => this.changeFontSize('small')}>
+ <span className="option-label">Small</span>
+ <span className="size-preview small">Aa</span>
+ </div>
+ <div className={`font-size-option ${this._fontSize === 'normal' ? 'active' : ''}`} onClick={() => this.changeFontSize('normal')}>
+ <span className="option-label">Normal</span>
+ <span className="size-preview normal">Aa</span>
+ </div>
+ <div className={`font-size-option ${this._fontSize === 'large' ? 'active' : ''}`} onClick={() => this.changeFontSize('large')}>
+ <span className="option-label">Large</span>
+ <span className="size-preview large">Aa</span>
+ </div>
+ <div className={`font-size-option ${this._fontSize === 'xlarge' ? 'active' : ''}`} onClick={() => this.changeFontSize('xlarge')}>
+ <span className="option-label">Extra Large</span>
+ <span className="size-preview xlarge">Aa</span>
+ </div>
+ </div>
+ )}
</div>
<div className="chat-messages" ref={this.messagesRef}>
{this._history.map((message, index) => (
@@ -990,18 +1430,20 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
</div>
<form onSubmit={this.askGPT} className="chat-input">
- <input
- ref={r => {
- this._textInputRef = r;
- }}
- type="text"
- name="messageInput"
- autoComplete="off"
- placeholder="Type your message here..."
- value={this._inputValue}
- onChange={action(e => (this._inputValue = e.target.value))}
- disabled={this._isLoading}
- />
+ <div className="input-container">
+ <input
+ ref={r => {
+ this._textInputRef = r;
+ }}
+ type="text"
+ name="messageInput"
+ autoComplete="off"
+ placeholder="Type your message here..."
+ value={this._inputValue}
+ onChange={action(e => (this._inputValue = e.target.value))}
+ disabled={this._isLoading}
+ />
+ </div>
<button className="submit-button" onClick={() => this._dictation?.stopDictation()} type="submit" disabled={this._isLoading || !this._inputValue.trim()}>
{this._isLoading ? (
<div className="spinner"></div>
@@ -1023,9 +1465,42 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
{/* Popup for citation */}
{this._citationPopup.visible && (
<div className="citation-popup">
- <p>
- <strong>Text from your document: </strong> {this._citationPopup.text}
- </p>
+ <div className="citation-popup-header">
+ <strong>Text from your document</strong>
+ <button className="citation-close-button" onClick={this.closeCitationPopup}>
+ <svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2.5">
+ <line x1="18" y1="6" x2="6" y2="18"></line>
+ <line x1="6" y1="6" x2="18" y2="18"></line>
+ </svg>
+ </button>
+ </div>
+ <div className="citation-content">{this._citationPopup.text}</div>
+ </div>
+ )}
+
+ {/* Tool Reload Modal */}
+ {this._toolReloadModal.visible && (
+ <div className="tool-reload-modal-overlay">
+ <div className="tool-reload-modal">
+ <div className="tool-reload-modal-header">
+ <h3>Tool Created Successfully!</h3>
+ </div>
+ <div className="tool-reload-modal-content">
+ <p>
+ The tool <strong>{this._toolReloadModal.toolName}</strong> has been created and saved successfully.
+ </p>
+ <p>To make the tool available for future use, the page needs to be reloaded to rebuild the application bundle.</p>
+ <p>Click &quot;Reload Page&quot; to complete the tool installation.</p>
+ </div>
+ <div className="tool-reload-modal-actions">
+ <button className="reload-button primary" onClick={this.handleReloadConfirmation}>
+ Reload Page
+ </button>
+ <button className="close-button secondary" onClick={this.closeToolReloadModal}>
+ Later
+ </button>
+ </div>
+ </div>
</div>
)}
</div>
@@ -1038,5 +1513,5 @@ export class ChatBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
*/
Docs.Prototypes.TemplateMap.set(DocumentType.CHAT, {
layout: { view: ChatBox, dataField: 'data' },
- options: { acl: '', _layout_fitWidth: true, chat: '', chat_history: '', chat_thread_id: '', chat_assistant_id: '', chat_vector_store_id: '' },
+ options: { acl: '', _layout_fitWidth: true, chat: '', chat_history: '', chat_thread_id: '', chat_assistant_id: '', chat_vector_store_id: '', _forceActive: true },
});
diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx b/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx
index 4f1d68973..c7699b57f 100644
--- a/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx
+++ b/src/client/views/nodes/chatbot/chatboxcomponents/MessageComponent.tsx
@@ -86,7 +86,6 @@ const MessageComponentBox: React.FC<MessageComponentProps> = ({ message, onFollo
}
// Handle query type content
- // bcz: What triggers this section? Where is 'query' added to item? Why isn't it a field?
else if ('query' in item) {
return (
<span key={i} className="query-text">
@@ -99,7 +98,7 @@ const MessageComponentBox: React.FC<MessageComponentProps> = ({ message, onFollo
else {
return (
<span key={i}>
- <ReactMarkdown>{item.text /* JSON.stringify(item)*/}</ReactMarkdown>
+ <ReactMarkdown>{item.text}</ReactMarkdown>
</span>
);
}
@@ -130,6 +129,18 @@ const MessageComponentBox: React.FC<MessageComponentProps> = ({ message, onFollo
return null;
};
+ /**
+ * Formats the follow-up question text to ensure proper capitalization
+ * @param {string} question - The original question text
+ * @returns {string} The formatted question
+ */
+ const formatFollowUpQuestion = (question: string) => {
+ // Capitalize the first letter and lowercase the remainder of the question
+ if (!question) return '';
+ const formattedQuestion = question.charAt(0).toUpperCase() + question.slice(1).toLowerCase();
+ return formattedQuestion;
+ };
+
return (
<div className={`message ${message.role}`}>
{/* Processing Information Dropdown */}
@@ -139,7 +150,6 @@ const MessageComponentBox: React.FC<MessageComponentProps> = ({ message, onFollo
{dropdownOpen ? 'Hide Agent Thoughts/Actions' : 'Show Agent Thoughts/Actions'}
</button>
{dropdownOpen && <div className="info-content">{message.processing_info.map(renderProcessingInfo)}</div>}
- <br />
</div>
)}
diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss b/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss
deleted file mode 100644
index ff5be4a38..000000000
--- a/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.scss
+++ /dev/null
@@ -1,69 +0,0 @@
-.spinner-container {
- display: flex;
- flex-direction: column;
- justify-content: center;
- align-items: center;
- height: 100%;
-}
-
-.spinner {
- width: 60px;
- height: 60px;
- position: relative;
- margin-bottom: 20px; // Space between spinner and text
-}
-
-.double-bounce1,
-.double-bounce2 {
- width: 100%;
- height: 100%;
- border-radius: 50%;
- background-color: #4a90e2;
- opacity: 0.6;
- position: absolute;
- top: 0;
- left: 0;
- animation: bounce 2s infinite ease-in-out;
-}
-
-.double-bounce2 {
- animation-delay: -1s;
-}
-
-@keyframes bounce {
- 0%,
- 100% {
- transform: scale(0);
- }
- 50% {
- transform: scale(1);
- }
-}
-
-.uploading-overlay {
- position: absolute;
- top: 0;
- left: 0;
- right: 0;
- bottom: 0;
- background-color: rgba(255, 255, 255, 0.8);
- display: flex;
- align-items: center;
- justify-content: center;
- z-index: 1000;
-}
-
-.progress-container {
- display: flex;
- flex-direction: column;
- align-items: center;
- text-align: center;
-}
-
-.step-name {
- font-size: 18px;
- color: #333;
- text-align: center;
- width: 100%;
- margin-top: -10px; // Adjust to move the text closer to the spinner
-}
diff --git a/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx b/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx
deleted file mode 100644
index 240862f8b..000000000
--- a/src/client/views/nodes/chatbot/chatboxcomponents/ProgressBar.tsx
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * @file ProgressBar.tsx
- * @description This file defines the ProgressBar component, which displays a loading spinner
- * to indicate progress during ongoing tasks or processing. The animation consists of two
- * bouncing elements that create a pulsating effect, providing a visual cue for active progress.
- * The component is styled using the accompanying `ProgressBar.scss` for smooth animation.
- */
-
-import React from 'react';
-import './ProgressBar.scss';
-
-/**
- * ProgressBar is a functional React component that displays a loading spinner
- * to indicate progress or ongoing processing. It uses two bouncing elements
- * to create a smooth animation that represents an active state.
- *
- * The animation consists of two divs (`double-bounce1` and `double-bounce2`),
- * each of which will bounce in and out of view, creating a pulsating effect.
- */
-export const ProgressBar: React.FC = () => {
- return (
- <div className="spinner-container">
- {/* Spinner div containing two bouncing elements */}
- <div className="spinner">
- <div className="double-bounce1"></div> {/* First bouncing element */}
- <div className="double-bounce2"></div> {/* Second bouncing element */}
- </div>
- </div>
- );
-};
diff --git a/src/client/views/nodes/chatbot/guides/guide.md b/src/client/views/nodes/chatbot/guides/guide.md
new file mode 100644
index 000000000..2af76490d
--- /dev/null
+++ b/src/client/views/nodes/chatbot/guides/guide.md
@@ -0,0 +1,647 @@
+# Dash Agent Tool Development Guide
+
+**Table of Contents**
+
+1. [Introduction: The Role and Potential of Tools](#1-introduction-the-role-and-potential-of-tools)
+ - Beyond Information Retrieval: Action and Creation
+ - The Agent as an Extension of the User within Dash
+2. [Core Agent Architecture Deep Dive](#2-core-agent-architecture-deep-dive)
+ - The ReAct-Inspired Interaction Loop: Rationale and Flow
+ - XML Structure: Why XML? Parsing and LLM Guidance
+ - Stages (`<stage>`) and Roles (`role="..."`): Enforcing Order
+ - Message Management (`messages`, `interMessages`): Building Context
+ - State Handling: Agent's Internal State vs. Tool Statelessness
+ - Key Components Revisited (`Agent.ts`, `prompts.ts`, `BaseTool.ts`, Parsers)
+ - Role of `prompts.ts`: Template and Dynamic Content Injection
+ - Limits and Safeguards (`maxTurns`)
+3. [Anatomy of a Dash Agent Tool (Detailed Breakdown)](#3-anatomy-of-a-dash-agent-tool-detailed-breakdown)
+ - The `BaseTool` Abstract Class: Foundation and Contract
+ - `ToolInfo`: Defining Identity and LLM Instructions
+ - `name`: Uniqueness and LLM Invocation Trigger
+ - `description`: The LLM's Primary Guide - _Dynamically Injected into Prompt_
+ - `parameterRules`: The Input Contract (In Depth)
+ - `citationRules`: Controlling Grounding in the Final Answer
+ - The `execute` Method: Heart of the Tool
+ - Asynchronous Nature (`async/await`)
+ - Receiving Arguments (`args: ParametersType<P>`)
+ - Performing the Core Logic (API calls, Dash functions)
+ - Returning `Observation[]`: The Output Contract (In Depth)
+ - The `inputValidator` Method: Handling Edge Cases
+4. [The Agent-Tool Interaction Flow (Annotated XML Trace)](#4-the-agent-tool-interaction-flow-annotated-xml-trace)
+ - Detailed Step-by-Step with `Agent.ts` actions highlighted
+5. [Step-by-Step Guide: Creating a New Tool](#5-step-by-step-guide-creating-a-new-tool)
+ - Step 1: Define Goal, Scope, Inputs, Outputs, Dash Interactions, Side Effects
+ - Step 2: Create the Tool Class File (Directory Structure)
+ - Step 3: Define Parameters (`parameterRules`) - Type Handling, Arrays
+ - Step 4: Define Tool Information (`ToolInfo`) - Crafting the _Crucial_ `description`
+ - Step 5: Implement `execute` - Defensive Coding, Using Injected Functions, Error Handling Pattern
+ - Step 6: Format Output (`Observation[]`) - Chunk Structure, `chunk_type`, IDs
+ - Step 7: Register Tool in `Agent.ts` - _This makes the tool available to the prompt_
+ - Step 8: Verify Prompt Integration (No Manual Editing Needed)
+ - Step 9: Testing Your Tool - Strategies and What to Look For
+6. [Deep Dive: Advanced Concepts & Patterns](#6-deep-dive-advanced-concepts--patterns)
+ - Handling Complex Data Types (Arrays, Objects) in Parameters/Observations
+ - Binary Data Handling (e.g., Base64 in Chunks)
+ - Managing Long-Running Tasks (Beyond simple `await`)
+ - Tools Needing Dash Context (Passing `this` vs. specific functions)
+ - The Role of `chunk_id` and `chunk_type`
+7. [Best Practices and Advanced Considerations](#7-best-practices-and-advanced-considerations)
+ - Error Handling & Reporting (Specific Error Chunks)
+ - Security Considerations (Input Sanitization, API Key Management, Output Filtering)
+ - Performance Optimization (Minimize `execute` workload)
+ - Idempotency: Designing for Retries
+ - Tool Granularity: Single Responsibility Principle
+ - Context Window Management (Concise Descriptions are Key)
+ - User Experience (Tool output clarity)
+ - Maintainability and Code Comments
+8. [Debugging Strategies](#8-debugging-strategies)
+ - Console Logging within `execute`
+ - Inspecting `interMessages` in `Agent.ts`
+ - Testing Tool Logic Standalone
+ - Analyzing LLM Failures (Incorrect tool choice -> Check `description`, bad parameters)
+9. [Example: `CreateDashNoteTool`](#9-example-createdashnotetool)
+10. [Glossary of Key Terms](#10-glossary-of-key-terms)
+11. [Conclusion](#11-conclusion)
+
+---
+
+## 1. Introduction: The Role and Potential of Tools
+
+Welcome, Dash team member! This guide will walk you through creating new tools for the Dash Agent. The Agent is designed to interact with users, understand their queries, and leverage specialized **Tools** to perform actions or retrieve information that the core Large Language Model (LLM) cannot do on its own.
+
+Tools extend the Agent's capabilities beyond simple conversation. They allow the Agent to:
+
+- Interact with external APIs (e.g., web search, calculators, image generation).
+- Access and process data specific to the user's Dash environment (e.g., querying document metadata, analyzing linked CSVs).
+- Perform actions within Dash (e.g., creating new documents, adding links, modifying metadata).
+
+By building new tools, you directly enhance the Agent's utility and integration within the Dash ecosystem.
+
+### Beyond Information Retrieval: Action and Creation
+
+While tools like `RAGTool` and `SearchTool` retrieve information, others _act_. `CalculateTool` performs computations, `ImageCreationTool` generates content, and importantly, tools like `DocumentMetadataTool` and your custom tools can **modify the Dash environment**, creating documents, adding links, or changing properties.
+
+### The Agent as an Extension of the User within Dash
+
+Think of the Agent, equipped with tools, as an intelligent assistant that can perform tasks _on behalf of the user_ directly within their Dash workspace. This deep integration is a key differentiator.
+
+---
+
+## 2. Core Agent Architecture Deep Dive
+
+Understanding the "why" behind the architecture helps in tool development.
+
+### The ReAct-Inspired Interaction Loop: Rationale and Flow
+
+The Agent operates based on a loop inspired by the ReAct (Reason + Act) framework. The LLM alternates between:
+
+- **Reasoning (`<thought>`):** Analyzing the query and deciding the next step.
+- **Acting (`<action>`, `<action_input>`):** Selecting and preparing to use a tool, or formulating a final answer (`<answer>`).
+- **Observing (`<observation>`):** Receiving the results from a tool execution.
+
+This structure (Reason -> Act -> Observe -> Reason...) forces the LLM to break down complex tasks into manageable steps, making the process more reliable and auditable than letting the LLM generate a monolithic plan upfront.
+
+### XML Structure: Why XML? Parsing and LLM Guidance
+
+- **Why XML?** LLMs are generally adept at generating well-formed XML. XML's explicit start/end tags make parsing by `Agent.ts` (using libraries like `fast-xml-parser`) more robust and less prone to LLM "hallucinations" breaking the structure compared to formats like JSON in some complex scenarios.
+- **LLM Guidance:** The strict XML schema defined in the system prompt provides clear guardrails for the LLM's output, constraining it to valid actions and formats.
+
+### Stages (`<stage>`) and Roles (`role="..."`): Enforcing Order
+
+The `<stage number="...">` ensures sequential processing. The `role` attribute indicates the source (e.g., `user`, `assistant`) and dictates control flow. `Agent.ts` _waits_ for a `user` (or `system-error-reporter`) stage after sending an `assistant` stage, enforcing the turn-based nature. The LLM is explicitly told only to generate `assistant` stages.
+
+### Message Management (`messages`, `interMessages`): Building Context
+
+- `messages`: The user-facing chat history (persisted in the Dash Doc `data` field).
+- `interMessages`: The **internal, complete context** sent to the LLM for each turn. It includes the system prompt, user queries, _all intermediate thoughts, actions, rules, inputs, and observations_. This ensures the LLM has the full history of the current reasoning chain. It grows with each step in the loop.
+
+### State Handling: Agent's Internal State vs. Tool Statelessness
+
+- `Agent.ts` manages the conversational state (`interMessages`, current turn number, `processingInfo`, etc.).
+- **Tools should be designed to be stateless.** They receive inputs via `args`, perform their action, and return results. Any persistent state relevant to the user's work should reside within Dash Docs/Fobs, accessible perhaps via tools like `DocumentMetadataTool` or specific functions passed to the tool.
+
+### Key Components Revisited (`Agent.ts`, `prompts.ts`, `BaseTool.ts`, Parsers)
+
+- `Agent.ts`: The central controller. Parses XML, validates actions, manages the loop, calls `tool.execute`, formats `Observation`s. Handles the streaming updates for the _final_ answer via `StreamedAnswerParser`. Holds the registry of available tools (`this.tools`).
+- `prompts.ts` (`getReactPrompt`): Generates the system prompt for the LLM. It acts as a **template** defining the Agent's overall task, rules, and the required XML structure. Crucially, it **dynamically injects the list of available tools** (including their names and descriptions) based on the tools registered in the `Agent.ts` instance at runtime. **_You do not manually add tool descriptions here._**
+- `BaseTool.ts`: The abstract class defining the _interface_ all tools must adhere to. Contains properties like `name` and `description` used by `getReactPrompt`.
+- Parsers (`AnswerParser`, `StreamedAnswerParser`): Handle the final `<answer>` tag, extracting structured content, citations, etc., for UI display (`ChatBox.tsx`).
+
+### Limits and Safeguards (`maxTurns`)
+
+`Agent.ts` includes a `maxTurns` limit (default 30) to prevent infinite loops if the LLM gets stuck or fails to reach an `<answer>` stage.
+
+---
+
+## 3. Anatomy of a Dash Agent Tool (Detailed Breakdown)
+
+All tools inherit from the abstract class `BaseTool`.
+
+### The `BaseTool` Abstract Class: Foundation and Contract
+
+- Located in `src/client/views/nodes/chatbot/tools/BaseTool.ts`.
+- Generic `BaseTool<P extends ReadonlyArray<Parameter>>`: `P` represents the specific, readonly array of `Parameter` definitions for _your_ tool, ensuring type safety for the `args` in `execute`.
+- Defines the public properties (`name`, `description`, `parameterRules`, `citationRules`) and the abstract `execute` method that all tools must implement.
+
+### `ToolInfo`: Defining Identity and LLM Instructions
+
+- A configuration object (`{ name: string; description: string; parameterRules: P; citationRules: string; }`) passed to the `BaseTool` constructor.
+- `name: string`:
+ - The **unique identifier** for your tool (e.g., `dictionaryLookup`, `createDashNote`).
+ - Must match the key used when registering the tool in `Agent.ts`'s `this.tools` map.
+ - This is the string the LLM will output in the `<action>` tag to invoke your tool.
+ - Keep it concise and descriptive (camelCase recommended).
+- `description: string`: The LLM's Primary Guide - _Dynamically Injected into Prompt_.
+ - This text is extracted from your `ToolInfo` object when `getReactPrompt` is called.
+ - It's **the text the LLM sees** to understand your tool's purpose and when to use it.
+ - **Crafting this is critical.** Make it extremely clear, concise, and accurate. Explicitly state:
+ - What the tool _does_.
+ - What _inputs_ it needs (briefly).
+ - What _output_ it provides.
+ - _Crucially_, under what circumstances the Agent should _choose_ this tool over others. (e.g., "Use this tool to create _new_ Dash notes, not for editing existing ones.")
+- `parameterRules: P` (where `P extends ReadonlyArray<Parameter>`):
+ - The readonly array defining the **exact inputs** your `execute` method expects.
+ - Each element is a `Parameter` object (`{ name: string; type: 'string' | ... ; required: boolean; description: string; max_inputs?: number }`):
+ - `name`: Name of the parameter (e.g., `wordToDefine`, `noteContent`). Used as the key in the `args` object passed to `execute`.
+ - `type`: `'string' | 'number' | 'boolean' | 'string[]' | 'number[]' | 'boolean[]'`. `Agent.ts` uses this for basic validation and parsing (specifically for arrays).
+ - `required`: `true` if the LLM _must_ provide this parameter for the tool to function. `Agent.ts` checks this before calling `execute` (unless `inputValidator` overrides).
+ - `description`: Explanation of the parameter _for the LLM_. Guides the LLM on _what value_ to provide. Be specific (e.g., "The exact URL to scrape", "A search query suitable for web search").
+ - `max_inputs?`: Optional. For array types (`string[]`, etc.), suggests a limit to the LLM on the number of items to provide.
+- `citationRules: string`:
+ - Instructions for the LLM on how to construct the `<citations>` block within the final `<answer>` tag _when information obtained from this specific tool is used_.
+ - Directly influences the grounding and verifiability of the Agent's final response.
+ - Be explicit about the format: "Cite using `<citation index="..." chunk_id="..." type="your_chunk_type">Optional text snippet</citation>`".
+ - Specify what goes into `chunk_id` (e.g., "Use the ID provided in the observation chunk"), `type` (a constant string representing your tool's output type), and whether the text snippet should be included (often empty for URLs, calculations).
+ - If no citation is appropriate (e.g., calculator, ephemeral action), state clearly: "No citation needed for this tool's output."
+
+### The `execute` Method: Heart of the Tool
+
+- `abstract execute(args: ParametersType<P>): Promise<Observation[]>;`
+- **Asynchronous Nature (`async/await`):** Must be `async` because tool actions often involve I/O (network requests, database access via Dash functions, filesystem). Must return a `Promise`.
+- **Receiving Arguments (`args: ParametersType<P>`):**
+ - Receives a single argument `args`. This object's keys are your defined parameter names (from `parameterRules`), and the values are those provided by the LLM in the `<action_input><inputs>` block.
+ - The type `ParametersType<P>` infers the structure of `args` based on your specific `parameterRules` definition (`P`), providing TypeScript type safety.
+ - `Agent.ts` performs basic validation (required fields) and type coercion (for arrays) before calling `execute`. However, **always perform defensive checks** within `execute` (e.g., check if required args are truly present and not empty strings, check types if crucial).
+- **Performing the Core Logic:** This is where your tool does its work. Examples:
+ - Call external APIs (using `axios`, `fetch`, or specific SDKs).
+ - Call Dash functions passed via the constructor (e.g., `this._createDocInDash(...)`, `this._addLinkedUrlDoc(...)`).
+ - Perform calculations or data transformations.
+ - Interact with other backend systems if necessary.
+- **Returning `Observation[]`: The Output Contract (In Depth)**
+ - The method **must** resolve to an array of `Observation` objects. Even if there's only one piece of output, return it in an array: `[observation]`.
+ - Each `Observation` object usually has the structure `{ type: 'text', text: string }`. Other types might be possible but `text` is standard for LLM interaction.
+ - The `text` string **must** contain the tool's output formatted within one or more `<chunk>` tags. This is how the Agent passes structured results back to the LLM.
+ - Format: `<chunk chunk_id="UNIQUE_ID" chunk_type="YOUR_TYPE">OUTPUT_DATA</chunk>`
+ - `chunk_id`: A unique identifier for this specific piece of output (use `uuidv4()`). Essential for linking citations back to observations.
+ - `chunk_type`: A string literal describing the _semantic type_ of the data (e.g., `'search_result_url'`, `'calculation_result'`, `'note_creation_status'`, `'error'`, `'metadata_info'`). Helps the LLM interpret the result. Choose consistent and descriptive names.
+ - `OUTPUT_DATA`: The actual result from your tool. Can be simple text, JSON stringified data, etc. Keep it concise if possible for the LLM context.
+ - **Return errors** using the same format, but with `chunk_type="error"` and a descriptive error message inside the chunk tag. This allows the Agent loop to continue gracefully and potentially inform the LLM or user.
+
+### The `inputValidator` Method: Handling Edge Cases
+
+- `inputValidator(inputParam: ParametersType<readonly Parameter[]>) { return false; }` (Default implementation in `BaseTool`).
+- Override this method _only_ if your tool needs complex input validation logic beyond simple `required` checks (e.g., dependencies between parameters).
+- If you override it to return `true`, `Agent.ts` will skip its standard check for missing _required_ parameters. Your `execute` method becomes fully responsible for validating the `args` object.
+- Use case example: `DocumentMetadataTool` uses it to allow either `fieldName`/`fieldValue` OR `fieldEdits` to be provided for the "edit" action.
+
+---
+
+## 4. The Agent-Tool Interaction Flow (Annotated XML Trace)
+
+Let's trace the `dictionaryLookup` example with `Agent.ts` actions:
+
+1. **User Input:** User types "What is hypermedia?" and submits.
+
+ - `// ChatBox.tsx calls agent.askAgent("What is hypermedia?")`
+ - `// Agent.ts adds stage 1 to interMessages:`
+
+ ```xml
+ <stage number="1" role="user">
+ <query>What is hypermedia?</query>
+ </stage>
+ ```
+
+ - `// Agent.ts calls LLM with interMessages.`
+
+2. **LLM Thought & Action:** LLM processes the query and system prompt (which includes the dynamically generated description for `dictionaryLookup`).
+
+ - `// LLM responds with stage 2:`
+
+ ```xml
+ <stage number="2" role="assistant">
+ <thought>The user is asking for a definition. The dictionaryLookup tool is appropriate for this.</thought>
+ <action>dictionaryLookup</action>
+ </stage>
+ ```
+
+ - `// Agent.ts parses stage 2. Finds <action>dictionaryLookup</action>.`
+ - `// Agent.ts retrieves the dictionaryTool instance: tool = this.tools['dictionaryLookup'].`
+ - `// Agent.ts gets parameter rules: rules = tool.getActionRule().`
+
+3. **Agent Provides Rules:** `Agent.ts` formats the rules into XML.
+
+ - `// Agent.ts adds stage 3 to interMessages:`
+
+ ```xml
+ <stage number="3" role="user">
+ <action_rules>
+ <tool>dictionaryLookup</tool>
+ <description>Looks up the definition of a given English word.</description>
+ <citationRules>Cite the definition using the provided chunk_id and type="dictionary_definition". Leave citation content empty.</citationRules>
+ <parameters>
+ <word>
+ <type>string</type>
+ <description>The word to define.</description>
+ <required>true</required>
+ </word>
+ </parameters>
+ </action_rules>
+ </stage>
+ ```
+
+ - `// Agent.ts calls LLM with updated interMessages.`
+
+4. **LLM Provides Inputs:** LLM uses the rules to formulate the required inputs.
+
+ - `// LLM responds with stage 4:`
+
+ ```xml
+ <stage number="4" role="assistant">
+ <action_input>
+ <action_input_description>Looking up the definition for the word 'hypermedia'.</action_input_description>
+ <inputs>
+ <word>hypermedia</word>
+ </inputs>
+ </action_input>
+ </stage>
+ ```
+
+ - `// Agent.ts parses stage 4. Finds <action_input>. Extracts inputs: { word: 'hypermedia' }.`
+ - `// Agent.ts validates required params (finds 'word'). Checks tool.inputValidator (returns false). OK.`
+ - `// Agent.ts calls: const observations = await dictionaryTool.execute({ word: 'hypermedia' });`
+
+5. **Tool Execution -> Observation:** The `dictionaryTool.execute` method runs, calls an API, gets the definition, and formats the `Observation`.
+
+ - `// dictionaryTool.execute resolves Promise with:`
+
+ ```javascript
+ [{ type: 'text', text: '<chunk chunk_id="dict-abc-123" chunk_type="dictionary_definition">Hypermedia is an extension of hypertext allowing...</chunk>' }];
+ ```
+
+ - `// Agent.ts receives the resolved Observation[].`
+
+6. **Agent Provides Observation:** `Agent.ts` formats the observation(s) into the next stage.
+
+ - `// Agent.ts adds stage 5 to interMessages:`
+
+ ```xml
+ <stage number="5" role="user">
+ <observation>
+ <chunk chunk_id="dict-abc-123" chunk_type="dictionary_definition">Hypermedia is an extension of hypertext allowing...</chunk>
+ </observation>
+ </stage>
+ ```
+
+ - `// Agent.ts calls LLM with updated interMessages.`
+
+7. **LLM Generates Final Answer:** LLM uses the full context (query, thoughts, actions, observation) and the `citationRules` from the `dictionaryLookup` tool.
+ - `// LLM responds with stage 6:`
+ ```xml
+ <stage number="6" role="assistant">
+ <thought>I have received the definition from the dictionary tool. I will now format the final answer according to the specified structure, including the citation.</thought>
+ <answer>
+ <grounded_text citation_index="1">Hypermedia is an extension of hypertext allowing graphics, audio, video, plain text and hyperlinks to intertwine to create a generally non-linear medium of information.</grounded_text>
+ <citations>
+ <citation index="1" chunk_id="dict-abc-123" type="dictionary_definition"></citation>
+ </citations>
+ <follow_up_questions>
+ <question>Can you give an example of a hypermedia system?</question>
+ <question>How does hypermedia differ from multimedia?</question>
+ <question>Who coined the term hypermedia?</question>
+ </follow_up_questions>
+ <loop_summary>Used dictionaryLookup tool to define 'hypermedia'.</loop_summary>
+ </answer>
+ </stage>
+ ```
+ - `// Agent.ts parses stage 6. Finds <answer>. Calls AnswerParser.`
+    - `// Agent.ts updates ChatBox UI (_history.push(...)). Loop ends.`
+
+---
+
+## 5. Step-by-Step Guide: Creating a New Tool
+
+Let's use the example of creating a `CreateDashNoteTool`.
+
+### Step 1: Define Goal, Scope, Inputs, Outputs, Dash Interactions, Side Effects
+
+- **Goal:** Allow Agent to create a new text note (`DocumentType.TEXT` or equivalent) in Dash.
+- **Scope:** Creates a _simple_ note with title and text content. Does not handle complex formatting, linking (beyond default linking to ChatBox if handled by the creation function), or specific placement beyond a potential default offset.
+- **Inputs:** `noteTitle` (string, required), `noteContent` (string, required).
+- **Outputs (Observation):** Confirmation message with new note's Dash Document ID, or an error message.
+- **Dash Interactions:** Calls a function capable of creating Dash documents (e.g., `createDocInDash` passed via constructor).
+- **Side Effects:** A new Dash text document is created in the user's space and potentially linked to the ChatBox.
+
+### Step 2: Create the Tool Class File (Directory Structure)
+
+- Create file: `src/client/views/nodes/chatbot/tools/CreateDashNoteTool.ts`
+- Ensure it's within the `tools` subdirectory.
+
+### Step 3: Define Parameters (`parameterRules`) - Type Handling, Arrays
+
+- Use `as const` for the array to allow TypeScript to infer literal types, which aids `ParametersType`.
+- Define `noteTitle` and `noteContent` as required strings.
+
+```typescript
+import { Parameter } from '../types/tool_types';
+
+const createDashNoteToolParams = [
+ {
+ name: 'noteTitle',
+ type: 'string',
+ required: true,
+ description: 'The title for the new Dash note document. Cannot be empty.',
+ },
+ {
+ name: 'noteContent',
+ type: 'string',
+ required: true,
+ description: 'The text content for the new Dash note. Can be an empty string.', // Specify if empty content is allowed
+ },
+] as const; // Use 'as const' for precise typing
+
+// Infer the type for args object in execute
+type CreateDashNoteToolParamsType = typeof createDashNoteToolParams;
+```
+
+### Step 4: Define Tool Information (`ToolInfo`) - Crafting the _Crucial_ `description`
+
+- This object's `description` is key for the LLM.
+
+```typescript
+import { ToolInfo, ParametersType } from '../types/tool_types';
+
+// Assuming createDashNoteToolParams and CreateDashNoteToolParamsType are defined above
+
+const createDashNoteToolInfo: ToolInfo<CreateDashNoteToolParamsType> = {
+ name: 'createDashNote', // Must match registration key in Agent.ts
+ description:
+ 'Creates a *new*, simple text note document within the current Dash view. Requires a title and text content. The note will be linked to the ChatBox and placed nearby with default dimensions. Use this when the user asks to create a new note, save information, or write something down persistently in Dash.',
+ parameterRules: createDashNoteToolParams,
+ citationRules: 'This tool creates a document. The observation confirms success and provides the new document ID. No citation is typically needed in the final answer unless confirming the action.',
+};
+```
+
+### Step 5: Implement `execute` - Defensive Coding, Using Injected Functions, Error Handling Pattern
+
+- Implement the `execute` method within your class.
+- Wrap logic in `try...catch`.
+- Validate inputs defensively.
+- Check injected dependencies (`this._createDocInDash`).
+- Call the Dash function.
+- Handle the return value.
+- Format success or error `Observation`.
+
+```typescript
+import { BaseTool } from './BaseTool';
+import { Observation } from '../types/types';
+import { supportedDocTypes } from '../types/tool_types';
+import { parsedDoc } from '../chatboxcomponents/ChatBox'; // May need adjustment based on actual path
+import { Doc } from '../../../../../../fields/Doc'; // Adjust path as needed
+import { v4 as uuidv4 } from 'uuid';
+import { RTFCast } from '../../../../../../fields/Types'; // Adjust path as needed
+
+// Assuming createDashNoteToolParams, CreateDashNoteToolParamsType, createDashNoteToolInfo are defined above
+
+export class CreateDashNoteTool extends BaseTool<CreateDashNoteToolParamsType> {
+ // Dependency: Function to create a document in Dash
+ private _createDocInDash: (doc: parsedDoc) => Doc | undefined;
+
+ // Constructor to inject dependencies
+ constructor(createDocInDash: (doc: parsedDoc) => Doc | undefined) {
+ super(createDashNoteToolInfo);
+ if (typeof createDocInDash !== 'function') {
+ console.error('CreateDashNoteTool Error: createDocInDash function dependency not provided during instantiation!');
+ // Consider throwing an error or setting a flag to prevent execution
+ }
+ this._createDocInDash = createDocInDash;
+ }
+
+ async execute(args: ParametersType<CreateDashNoteToolParamsType>): Promise<Observation[]> {
+ const chunkId = uuidv4(); // Unique ID for this observation
+ const { noteTitle, noteContent } = args;
+
+ // --- Input Validation ---
+ if (typeof noteTitle !== 'string' || !noteTitle.trim()) {
+ return [{ type: 'text', text: `<chunk chunk_id="${chunkId}" chunk_type="error">Invalid input: Note title must be a non-empty string.</chunk>` }];
+ }
+ if (typeof noteContent !== 'string') {
+ // Assuming empty content IS allowed based on description
+ // If not allowed, return error here.
+ return [{ type: 'text', text: `<chunk chunk_id="${chunkId}" chunk_type="error">Invalid input: Note content must be a string.</chunk>` }];
+ }
+ if (!this._createDocInDash) {
+ return [{ type: 'text', text: `<chunk chunk_id="${chunkId}" chunk_type="error">Tool Configuration Error: Document creation function not available.</chunk>` }];
+ }
+ // --- End Validation ---
+
+ try {
+ const trimmedTitle = noteTitle.trim();
+
+ // Prepare the document object for the creation function
+ const noteDoc: parsedDoc = {
+ doc_type: supportedDocTypes.note, // Use the correct type for a text note
+ title: trimmedTitle,
+ data: RTFCast(noteContent) as unknown as string, // Ensure data is correctly formatted if needed
+ // Example default properties:
+ _width: 300,
+ _layout_fitWidth: false,
+ _layout_autoHeight: true,
+ backgroundColor: '#FFFFE0', // Light yellow background
+ // Add x, y coordinates if desired, potentially relative to ChatBox if context is available
+ };
+
+ console.log(`CreateDashNoteTool: Attempting to create doc:`, { title: noteDoc.title, type: noteDoc.doc_type }); // Avoid logging full content
+
+ // Call the injected Dash function
+ const createdDoc = this._createDocInDash(noteDoc);
+
+ // Check the result
+ if (createdDoc && createdDoc.id) {
+ const successMessage = `Successfully created note titled "${trimmedTitle}" with ID: ${createdDoc.id}. It has been added to your current view.`;
+ console.log(`CreateDashNoteTool: Success - ${successMessage}`);
+ // Return observation confirming success
+ return [{ type: 'text', text: `<chunk chunk_id="${chunkId}" chunk_type="note_creation_status">${successMessage}</chunk>` }];
+ } else {
+ console.error('CreateDashNoteTool Error: _createDocInDash returned undefined or document without an ID.');
+ throw new Error('Dash document creation failed or did not return a valid document ID.');
+ }
+ } catch (error) {
+ console.error(`CreateDashNoteTool: Error creating note titled "${noteTitle.trim()}":`, error);
+ const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred during note creation.';
+ // Return observation indicating error
+ return [{ type: 'text', text: `<chunk chunk_id="${chunkId}" chunk_type="error">Error creating note: ${errorMessage}</chunk>` }];
+ }
+ }
+}
+```
+
+### Step 6: Format Output (`Observation[]`) - Chunk Structure, `chunk_type`, IDs
+
+- Ensure the `text` field within the returned `Observation` contains `<chunk chunk_id="..." chunk_type="...">...</chunk>`.
+- Use a specific `chunk_type` (e.g., `note_creation_status`, `error`).
+- Generate a unique `chunk_id` using `uuidv4()`.
+- The text inside the chunk should be informative for the LLM and potentially for debugging.
+
+### Step 7: Register Tool in `Agent.ts` - _This makes the tool available to the prompt_
+
+- Import your tool class at the top of `Agent.ts`:
+ ```typescript
+ import { CreateDashNoteTool } from '../tools/CreateDashNoteTool';
+ ```
+- In the `Agent` constructor, instantiate your tool within the `this.tools = { ... };` block. Ensure the key matches `ToolInfo.name` and pass any required dependencies (like the `createDocInDash` function).
+
+ ```typescript
+ constructor(
+ _vectorstore: Vectorstore,
+ summaries: () => string,
+ history: () => string,
+ csvData: () => { filename: string; id: string; text: string }[],
+ addLinkedUrlDoc: (url: string, id: string) => void,
+ createImage: (result: any, options: any) => void, // Use specific types if known
+ createDocInDashFunc: (doc: parsedDoc) => Doc | undefined, // Renamed for clarity
+ createCSVInDash: (url: string, title: string, id: string, data: string) => void
+ ) {
+ // ... existing initializations (OpenAI client, vectorstore, etc.) ...
+ this.vectorstore = _vectorstore;
+ this._summaries = summaries;
+ this._history = history;
+ this._csvData = csvData;
+
+ this.tools = {
+ calculate: new CalculateTool(),
+ rag: new RAGTool(this.vectorstore),
+ dataAnalysis: new DataAnalysisTool(csvData),
+ websiteInfoScraper: new WebsiteInfoScraperTool(addLinkedUrlDoc),
+ searchTool: new SearchTool(addLinkedUrlDoc),
+ noTool: new NoTool(),
+ imageCreationTool: new ImageCreationTool(createImage),
+ documentMetadata: new DocumentMetadataTool(this), // Pass ChatBox instance if needed by tool
+ // Register the new tool here:
+ createDashNote: new CreateDashNoteTool(createDocInDashFunc), // Pass the required function
+ };
+ // ... rest of constructor
+ }
+ ```
+
+- **Verify Dependencies:** Ensure that the `createDocInDashFunc` parameter (or however you name it) is actually being passed into the `Agent` constructor when it's instantiated (likely within `ChatBox.tsx`). Trace the dependency chain.
+
+### Step 8: Verify Prompt Integration (No Manual Editing Needed)
+
+- **No manual changes are needed in `prompts.ts`**. The `getReactPrompt` function dynamically builds the `<tools>` section from `this.tools`.
+- **Verify (Recommended):** Temporarily add `console.log(systemPrompt)` in `Agent.ts` right after `const systemPrompt = getReactPrompt(...)` within the `askAgent` method. Run a query. Examine the console output to confirm the system prompt includes your tool's `<title>` and `<description>` within the `<tools>` block. Remove the log afterward.
+
+### Step 9: Testing Your Tool - Strategies and What to Look For
+
+- **Functional Tests:** Use specific prompts like "Create a note called 'Ideas' with content 'Test 1'." Check the Dash UI for the note and the chat for the success message/ID.
+- **Edge Case Tests:** Test empty titles (should fail validation), empty content (should succeed if allowed), titles/content with special characters or excessive length.
+- **LLM Interaction Tests:** Use less direct prompts like "Save this thought: Remember to buy milk." Does the LLM correctly identify the need for your tool and extract/request the title and content?
+- **Failure Tests:** If possible, simulate failure in the dependency (`createDocInDash`) to ensure the `error` chunk is returned correctly.
+- **Console/Debugging:** Use `console.log` within `execute` and inspect `interMessages` in `Agent.ts` to trace the flow and identify issues.
+
+---
+
+## 6. Deep Dive: Advanced Concepts & Patterns
+
+### Handling Complex Data Types (Arrays, Objects) in Parameters/Observations
+
+- **Parameters:** For complex inputs, define the parameter `type` as `string` in `parameterRules`. In the `description`, instruct the LLM to provide a **valid JSON string**. Inside your `execute` method, use `JSON.parse()` within a `try...catch` block to parse this string. Handle potential parsing errors gracefully (return an `error` chunk).
+- **Observations:** To return structured data, `JSON.stringify` your object/array and embed this string _inside_ the `<chunk>` tag. Use a specific `chunk_type` (e.g., `json_data_analysis`). The LLM might need guidance (via prompt engineering) on how to interpret and use this JSON data effectively in its final response.
+
+### Binary Data Handling (e.g., Base64 in Chunks)
+
+- **Avoid large binary data in observations.** Context windows are limited.
+- **Preferred:** Save binary data server-side (e.g., using `DashUploadUtils` or similar) or reference existing Dash media docs. Return a **reference** (URL, Doc ID, file path accessible by Dash) within the `<chunk>`.
+- **If absolutely necessary:** For small images/data needed _directly_ by the LLM, Base64 encode it inside the chunk: `<chunk chunk_id="..." chunk_type="base64_image_png">BASE64_STRING</chunk>`.
+
+### Managing Long-Running Tasks (Beyond simple `await`)
+
+- The agent's `askAgent` loop `await`s `tool.execute()`. Tasks taking more than ~5-10 seconds degrade user experience. Very long tasks risk timeouts.
+- **Limitation:** The current architecture doesn't have built-in support for asynchronous background jobs with status polling.
+- **Possible (Complex) Workaround:**
+ 1. Tool `execute` initiates a long-running _external_ process (like the Python PDF chunker) or backend job.
+ 2. `execute` _immediately_ returns an `Observation` like `<chunk chunk_type="task_initiated" job_id="JOB123">Processing started. Use status check tool with ID JOB123.</chunk>`.
+ 3. Requires a _separate_ `StatusCheckTool` that takes a `job_id` and queries the external process/backend for status.
+ 4. This adds significant complexity to the agent's reasoning flow. Use only if absolutely necessary.
+
+### Tools Needing Dash Context (Passing `this` vs. specific functions)
+
+- **Specific Functions (Preferred):** Pass only necessary functions from `ChatBox`/Dash utilities. Promotes modularity and testability. Requires updating constructors if needs change.
+- **`ChatBox` Instance (`this`) (As in `DocumentMetadataTool`):** Provides broad access to `ChatBox` state (`Document`, `layoutDoc`, computed properties) and methods. Easier for tools with complex Dash interactions but increases coupling and makes testing harder.
+- **Decision:** Start with specific functions. Escalate to passing `this` only if the tool's requirements become extensive and unmanageable via individual function injection.
+
+### The Role of `chunk_id` and `chunk_type`
+
+- `chunk_id` (e.g., `uuidv4()`): **Traceability & Citation.** Uniquely identifies a piece of data returned by a tool. Allows the final `<answer>`'s `<citation>` tag to precisely reference the source observation via this ID. Essential for debugging and grounding.
+- `chunk_type`: **Semantic Meaning.** Tells the LLM _what kind_ of information the chunk contains (e.g., `url`, `calculation_result`, `error`, `note_creation_status`). Guides the LLM in processing the observation and formatting the final answer appropriately. Use consistent and descriptive type names.
+
+---
+
+## 7. Best Practices and Advanced Considerations
+
+- **Error Handling & Reporting:** Return errors in structured `<chunk chunk_type="error">...</chunk>` format. Include context in the message (e.g., "API call failed for URL: [url]", "Invalid value for parameter: [param_name]").
+- **Security:**
+ - **Input Sanitization:** **Crucial.** If tool inputs influence API calls, file paths, database queries, etc., validate and sanitize them rigorously. Do not trust LLM output implicitly.
+ - **API Keys:** Use server-side environment variables (`process.env`) for keys used in backend routes called by tools. Avoid exposing keys directly in client-side tool code if possible.
+ - **Output Filtering:** Be mindful of sensitive data. Don't leak PII or internal details in observations or error messages.
+- **Performance Optimization:** Keep `execute` logic efficient. Minimize blocking operations. Use asynchronous patterns correctly.
+- **Idempotency:** Design tools (especially those causing side effects like creation/modification) to be safe if run multiple times with the same input, if possible.
+- **Tool Granularity (SRP):** Aim for tools that do one thing well. Complex workflows can be achieved by the LLM chaining multiple focused tools.
+- **Context Window Management:** Write concise but clear tool `description`s. Keep `Observation` data relevant and succinct.
+- **User Experience:** Tool output (via observations) influences the final answer. Ensure returned data is clear and `citationRules` guide the LLM to produce understandable results.
+- **Maintainability:** Use clear code, comments for complex logic, TypeScript types, and follow project conventions.
+
+---
+
+## 8. Debugging Strategies
+
+1. **`console.log`:** Liberally use `console.log` inside your tool's `execute` method to inspect `args`, intermediate variables, API responses, and the `Observation[]` object just before returning.
+2. **Inspect `interMessages`:** Temporarily modify `Agent.ts` (e.g., in the `askAgent` `while` loop) to `console.log(JSON.stringify(this.interMessages, null, 2))` before each LLM call. This shows the exact XML context the LLM sees and its raw XML response. Pinpoint where the conversation deviates or breaks.
+3. **Test Standalone:** Create a simple test script (`.ts` file run with `ts-node` or similar). Import your tool. Create mock objects/functions for its dependencies (e.g., `const mockCreateDoc = (doc) => ({ id: 'mock-doc-123', ...doc });`). Instantiate your tool with mocks: `const tool = new YourTool(mockCreateDoc);`. Call `await tool.execute(testArgs);` and assert the output. This isolates tool logic.
+4. **Analyzing LLM Failures:** Use the `interMessages` log:
+ - **Wrong Tool Chosen:** LLM's `<thought>` selects the wrong tool, or uses `<action>noTool</action>` inappropriately. -> **Refine your tool's `description`** in `ToolInfo` for clarity and better differentiation.
+ - **Missing/Incorrect Parameters:** LLM fails to provide required parameters in `<inputs>`, or provides wrong values. -> **Refine parameter `description`s** in `parameterRules`. Check the `<action_input>` stage in the log.
+ - **Ignoring Observation/Bad Answer:** LLM gets the correct `<observation>` but generates a poor `<answer>` (ignores data, bad citation). -> Check `chunk_type`, data format inside the chunk, and **clarify `citationRules`**. Simplify observation data if needed.
+ - **XML Formatting Errors:** LLM returns malformed XML. -> This might require adjusting the system prompt's structure rules or adding more robust parsing/error handling in `Agent.ts`.
+
+---
+
+## 9. Example: `CreateDashNoteTool`
+
+The code provided in Step 5 serves as a practical example, demonstrating dependency injection, input validation, calling a Dash function, and formatting success/error observations within the required `<chunk>` structure. Ensure the dependency (`createDocInDashFunc`) is correctly passed during `Agent` instantiation in `ChatBox.tsx`.
+
+---
+
+## 10. Glossary of Key Terms
+
+- **Agent (`Agent.ts`):** The orchestrator class managing the LLM interaction loop and tool usage.
+- **Tool (`BaseTool.ts`):** A class extending `BaseTool` to provide specific functionality (API calls, Dash actions).
+- **LLM (Large Language Model):** The AI model providing reasoning and text generation (e.g., GPT-4o).
+- **ReAct Loop:** The core interaction pattern: Reason -> Act -> Observe.
+- **XML Structure:** The tag-based format (`<stage>`, `<thought>`, etc.) for LLM communication.
+- **`interMessages`:** The internal, complete conversational context sent to the LLM.
+- **`ToolInfo`:** Configuration object (`name`, `description`, `parameterRules`, `citationRules`) defining a tool. **Source of dynamic prompt content for the tool list.**
+- **`parameterRules`:** Array defining a tool's expected input parameters.
+- **`citationRules`:** Instructions for the LLM on citing a tool's output.
+- **`execute`:** The primary asynchronous method within a tool containing its core logic.
+- **`Observation`:** The structured object (`{ type: 'text', text: '<chunk>...' }`) returned by `execute`.
+- **`<chunk>`:** The required XML-like wrapper within an Observation's `text`, containing `chunk_id` and `chunk_type`.
+- **`chunk_type`:** Semantic identifier for the data type within a `<chunk>`.
+- **System Prompt (`getReactPrompt`):** The foundational instructions for the LLM, acting as a **template dynamically populated** with registered tool descriptions.
+- **Dash Functions:** Capabilities from the Dash environment (e.g., `createDocInDash`) injected into tools.
+- **Stateless Tool:** A tool whose output depends solely on current inputs, not past interactions.
+
+---
+
+## 11. Conclusion
+
+This guide provides a detailed framework for extending the Dash Agent with custom tools. By adhering to the `BaseTool` structure, understanding the agent's interaction flow, crafting clear `ToolInfo` descriptions, implementing robust `execute` methods, and correctly registering your tool in `Agent.ts`, you can build powerful integrations that leverage both AI and the unique capabilities of the Dash hypermedia environment. Remember that testing and careful consideration of dependencies, errors, and security are crucial for creating reliable tools.
diff --git a/src/client/views/nodes/chatbot/tools/CodebaseSummarySearchTool.ts b/src/client/views/nodes/chatbot/tools/CodebaseSummarySearchTool.ts
new file mode 100644
index 000000000..5fdc52375
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/CodebaseSummarySearchTool.ts
@@ -0,0 +1,75 @@
+import { Observation } from '../types/types';
+import { ParametersType, ToolInfo } from '../types/tool_types';
+import { Vectorstore } from '../vectorstore/Vectorstore';
+import { BaseTool } from './BaseTool';
+
+const codebaseSummarySearchToolParams = [
+ {
+ name: 'query',
+ type: 'string[]',
+ description: 'HIGHLY detailed (MANY SENTENCES) descriptions of the code files you want to find in the codebase.',
+ required: true,
+ },
+ {
+ name: 'top_k',
+ type: 'number',
+ description: 'Number of top matching files to return. Default is 5.',
+ required: false,
+ },
+] as const;
+
+type CodebaseSummarySearchToolParamsType = typeof codebaseSummarySearchToolParams;
+
+const codebaseSummarySearchToolInfo: ToolInfo<CodebaseSummarySearchToolParamsType> = {
+ name: 'codebaseSummarySearch',
+ description: 'Searches the Dash codebase for files that match a semantic query. Returns a list of the most relevant files with their summaries to help understand the codebase structure.',
+ citationRules: `When using the CodebaseSummarySearchTool:
+1. Present results clearly, showing filepaths and their summaries
+2. Use the file summaries to provide context about the codebase organization
+3. The results can be used to identify relevant files for deeper inspection`,
+ parameterRules: codebaseSummarySearchToolParams,
+};
+
+export class CodebaseSummarySearchTool extends BaseTool<CodebaseSummarySearchToolParamsType> {
+ constructor(private vectorstore: Vectorstore) {
+ super(codebaseSummarySearchToolInfo);
+ }
+
+ async execute(args: ParametersType<CodebaseSummarySearchToolParamsType>): Promise<Observation[]> {
+ console.log(`Executing codebase summary search with query: "${args.query}"`);
+
+ // Use the vectorstore's searchFileSummaries method
+ const topK = args.top_k || 5;
+ const results: { filepath: string; summary: string; score?: number | undefined }[] = [];
+ for (const query of args.query) {
+ const result = await this.vectorstore.searchFileSummaries(query, topK);
+ results.push(...result);
+ }
+
+ if (results.length === 0) {
+ return [
+ {
+ type: 'text',
+ text: `No files matching the query "${args.query}" were found in the codebase.`,
+ },
+ ];
+ }
+
+ // Format results as observations
+ const formattedResults: Observation[] = [
+ {
+ type: 'text',
+ text: `Found ${results.length} file(s) matching the query "${args.query}":\n\n`,
+ },
+ ];
+
+ results.forEach((result, index) => {
+ formattedResults.push({
+ type: 'text',
+ text: `File #${index + 1}: ${result.filepath}\n` + `Relevance Score: ${result.score?.toFixed(4) || 'N/A'}\n` + `Summary: ${result.summary}\n\n`,
+ });
+ });
+
+ return formattedResults;
+ }
+}
diff --git a/src/client/views/nodes/chatbot/tools/CreateAnyDocTool.ts b/src/client/views/nodes/chatbot/tools/CreateAnyDocTool.ts
deleted file mode 100644
index 754d230c8..000000000
--- a/src/client/views/nodes/chatbot/tools/CreateAnyDocTool.ts
+++ /dev/null
@@ -1,158 +0,0 @@
-import { toLower } from 'lodash';
-import { Doc } from '../../../../../fields/Doc';
-import { Id } from '../../../../../fields/FieldSymbols';
-import { DocumentOptions } from '../../../../documents/Documents';
-import { parsedDoc } from '../chatboxcomponents/ChatBox';
-import { ParametersType, ToolInfo } from '../types/tool_types';
-import { Observation } from '../types/types';
-import { BaseTool } from './BaseTool';
-import { supportedDocTypes } from './CreateDocumentTool';
-
-const standardOptions = ['title', 'backgroundColor'];
-/**
- * Description of document options and data field for each type.
- */
-const documentTypesInfo: { [key in supportedDocTypes]: { options: string[]; dataDescription: string } } = {
- [supportedDocTypes.flashcard]: {
- options: [...standardOptions, 'fontColor', 'text_align'],
- dataDescription: 'an array of two strings. the first string contains a question, and the second string contains an answer',
- },
- [supportedDocTypes.text]: {
- options: [...standardOptions, 'fontColor', 'text_align'],
- dataDescription: 'The text content of the document.',
- },
- [supportedDocTypes.html]: {
- options: [],
- dataDescription: 'The HTML-formatted text content of the document.',
- },
- [supportedDocTypes.equation]: {
- options: [...standardOptions, 'fontColor'],
- dataDescription: 'The equation content as a string.',
- },
- [supportedDocTypes.functionplot]: {
- options: [...standardOptions, 'function_definition'],
- dataDescription: 'The function definition(s) for plotting. Provide as a string or array of function definitions.',
- },
- [supportedDocTypes.dataviz]: {
- options: [...standardOptions, 'chartType'],
- dataDescription: 'A string of comma-separated values representing the CSV data.',
- },
- [supportedDocTypes.notetaking]: {
- options: standardOptions,
- dataDescription: 'The initial content or structure for note-taking.',
- },
- [supportedDocTypes.rtf]: {
- options: standardOptions,
- dataDescription: 'The rich text content in RTF format.',
- },
- [supportedDocTypes.image]: {
- options: standardOptions,
- dataDescription: 'The image content as an image file URL.',
- },
- [supportedDocTypes.pdf]: {
- options: standardOptions,
- dataDescription: 'the pdf content as a PDF file url.',
- },
- [supportedDocTypes.audio]: {
- options: standardOptions,
- dataDescription: 'The audio content as a file url.',
- },
- [supportedDocTypes.video]: {
- options: standardOptions,
- dataDescription: 'The video content as a file url.',
- },
- [supportedDocTypes.message]: {
- options: standardOptions,
- dataDescription: 'The message content of the document.',
- },
- [supportedDocTypes.diagram]: {
- options: ['title', 'backgroundColor'],
- dataDescription: 'diagram content as a text string in Mermaid format.',
- },
- [supportedDocTypes.script]: {
- options: ['title', 'backgroundColor'],
- dataDescription: 'The compilable JavaScript code. Use this for creating scripts.',
- },
-};
-
-const createAnyDocumentToolParams = [
- {
- name: 'document_type',
- type: 'string',
- description: `The type of the document to create. Supported types are: ${Object.values(supportedDocTypes).join(', ')}`,
- required: true,
- },
- {
- name: 'data',
- type: 'string',
- description: 'The content or data of the document. The exact format depends on the document type.',
- required: true,
- },
- {
- name: 'options',
- type: 'string',
- required: false,
- description: `A JSON string representing the document options. Available options depend on the document type. For example:
- ${Object.entries(documentTypesInfo).map( ([doc_type, info]) => `
-- For '${doc_type}' documents, options include: ${info.options.join(', ')}`)
- .join('\n')}`, // prettier-ignore
- },
-] as const;
-
-type CreateAnyDocumentToolParamsType = typeof createAnyDocumentToolParams;
-
-const createAnyDocToolInfo: ToolInfo<CreateAnyDocumentToolParamsType> = {
- name: 'createAnyDocument',
- description:
- `Creates any type of document with the provided options and data.
- Supported document types are: ${Object.values(supportedDocTypes).join(', ')}.
- dataviz is a csv table tool, so for CSVs, use dataviz. Here are the options for each type:
- <supported_document_types>` +
- Object.entries(documentTypesInfo)
- .map(
- ([doc_type, info]) =>
- `<document_type name="${doc_type}">
- <data_description>${info.dataDescription}</data_description>
- <options>` +
- info.options.map(option => `<option>${option}</option>`).join('\n') +
- `</options>
- </document_type>`
- )
- .join('\n') +
- `</supported_document_types>`,
- parameterRules: createAnyDocumentToolParams,
- citationRules: 'No citation needed.',
-};
-
-export class CreateAnyDocumentTool extends BaseTool<CreateAnyDocumentToolParamsType> {
- private _addLinkedDoc: (doc: parsedDoc) => Doc | undefined;
-
- constructor(addLinkedDoc: (doc: parsedDoc) => Doc | undefined) {
- super(createAnyDocToolInfo);
- this._addLinkedDoc = addLinkedDoc;
- }
-
- async execute(args: ParametersType<CreateAnyDocumentToolParamsType>): Promise<Observation[]> {
- try {
- const documentType = toLower(args.document_type) as unknown as supportedDocTypes;
- const info = documentTypesInfo[documentType];
-
- if (info === undefined) {
- throw new Error(`Unsupported document type: ${documentType}. Supported types are: ${Object.values(supportedDocTypes).join(', ')}.`);
- }
-
- if (!args.data) {
- throw new Error(`Data is required for ${documentType} documents. ${info.dataDescription}`);
- }
-
- const options: DocumentOptions = !args.options ? {} : JSON.parse(args.options);
-
- // Call the function to add the linked document (add default title that can be overriden if set in options)
- const doc = this._addLinkedDoc({ doc_type: documentType, data: args.data, title: `New ${documentType.charAt(0).toUpperCase() + documentType.slice(1)} Document`, ...options });
-
- return [{ type: 'text', text: `Created ${documentType} document with ID ${doc?.[Id]}.` }];
- } catch (error) {
- return [{ type: 'text', text: 'Error creating document: ' + (error as Error).message }];
- }
- }
-}
diff --git a/src/client/views/nodes/chatbot/tools/CreateDocumentTool.ts b/src/client/views/nodes/chatbot/tools/CreateDocumentTool.ts
deleted file mode 100644
index 284879a4a..000000000
--- a/src/client/views/nodes/chatbot/tools/CreateDocumentTool.ts
+++ /dev/null
@@ -1,497 +0,0 @@
-import { BaseTool } from './BaseTool';
-import { Observation } from '../types/types';
-import { Parameter, ParametersType, ToolInfo } from '../types/tool_types';
-import { parsedDoc } from '../chatboxcomponents/ChatBox';
-import { CollectionViewType } from '../../../../documents/DocumentTypes';
-
-/**
- * List of supported document types that can be created via text LLM.
- */
-export enum supportedDocTypes {
- flashcard = 'flashcard',
- text = 'text',
- html = 'html',
- equation = 'equation',
- functionplot = 'functionplot',
- dataviz = 'dataviz',
- notetaking = 'notetaking',
- audio = 'audio',
- video = 'video',
- pdf = 'pdf',
- rtf = 'rtf',
- message = 'message',
- collection = 'collection',
- image = 'image',
- deck = 'deck',
- web = 'web',
- comparison = 'comparison',
- diagram = 'diagram',
- script = 'script',
-}
-/**
- * Tthe CreateDocTool class is responsible for creating
- * documents of various types (e.g., text, flashcards, collections) and organizing them in a
- * structured manner. The tool supports creating dashboards with diverse document types and
- * ensures proper placement of documents without overlap.
- */
-
-// Example document structure for various document types
-const example = [
- {
- doc_type: supportedDocTypes.equation,
- title: 'quadratic',
- data: 'x^2 + y^2 = 3',
- _width: 300,
- _height: 300,
- x: 0,
- y: 0,
- },
- {
- doc_type: supportedDocTypes.collection,
- title: 'Advanced Biology',
- data: [
- {
- doc_type: supportedDocTypes.text,
- title: 'Cell Structure',
- data: 'Cells are the basic building blocks of all living organisms.',
- _width: 300,
- _height: 300,
- x: 500,
- y: 0,
- },
- ],
- backgroundColor: '#00ff00',
- _width: 600,
- _height: 600,
- x: 600,
- y: 0,
- type_collection: 'tree',
- },
- {
- doc_type: supportedDocTypes.image,
- title: 'experiment',
- data: 'https://plus.unsplash.com/premium_photo-1694819488591-a43907d1c5cc?q=80&w=2628&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D',
- _width: 300,
- _height: 300,
- x: 600,
- y: 300,
- },
- {
- doc_type: supportedDocTypes.deck,
- title: 'Chemistry',
- data: [
- {
- doc_type: supportedDocTypes.flashcard,
- title: 'Photosynthesis',
- data: [
- {
- doc_type: supportedDocTypes.text,
- title: 'front_Photosynthesis',
- data: 'What is photosynthesis?',
- _width: 300,
- _height: 300,
- x: 100,
- y: 600,
- },
- {
- doc_type: supportedDocTypes.text,
- title: 'back_photosynthesis',
- data: 'The process by which plants make food.',
- _width: 300,
- _height: 300,
- x: 100,
- y: 700,
- },
- ],
- backgroundColor: '#00ff00',
- _width: 300,
- _height: 300,
- x: 300,
- y: 1000,
- },
- {
- doc_type: supportedDocTypes.flashcard,
- title: 'Photosynthesis',
- data: [
- {
- doc_type: supportedDocTypes.text,
- title: 'front_Photosynthesis',
- data: 'What is photosynthesis?',
- _width: 300,
- _height: 300,
- x: 200,
- y: 800,
- },
- {
- doc_type: supportedDocTypes.text,
- title: 'back_photosynthesis',
- data: 'The process by which plants make food.',
- _width: 300,
- _height: 300,
- x: 100,
- y: -100,
- },
- ],
- backgroundColor: '#00ff00',
- _width: 300,
- _height: 300,
- x: 10,
- y: 70,
- },
- ],
- backgroundColor: '#00ff00',
- _width: 600,
- _height: 600,
- x: 200,
- y: 800,
- },
- {
- doc_type: supportedDocTypes.web,
- title: 'Brown University Wikipedia',
- data: 'https://en.wikipedia.org/wiki/Brown_University',
- _width: 300,
- _height: 300,
- x: 1000,
- y: 2000,
- },
- {
- doc_type: supportedDocTypes.comparison,
- title: 'WWI vs. WWII',
- data: [
- {
- doc_type: supportedDocTypes.text,
- title: 'WWI',
- data: 'From 1914 to 1918, fighting took place across several continents, at sea and, for the first time, in the air.',
- _width: 300,
- _height: 300,
- x: 100,
- y: 100,
- },
- {
- doc_type: supportedDocTypes.text,
- title: 'WWII',
- data: 'A devastating global conflict spanning from 1939 to 1945, saw the Allied powers fight against the Axis powers.',
- _width: 300,
- _height: 300,
- x: 100,
- y: 100,
- },
- ],
- _width: 300,
- _height: 300,
- x: 100,
- y: 100,
- },
- {
- doc_type: supportedDocTypes.collection,
- title: 'Science Collection',
- data: [
- {
- doc_type: supportedDocTypes.flashcard,
- title: 'Photosynthesis',
- data: [
- {
- doc_type: supportedDocTypes.text,
- title: 'front_Photosynthesis',
- data: 'What is photosynthesis?',
- _width: 300,
- _height: 300,
- },
- {
- doc_type: supportedDocTypes.text,
- title: 'back_photosynthesis',
- data: 'The process by which plants make food.',
- _width: 300,
- _height: 300,
- },
- ],
- backgroundColor: '#00ff00',
- _width: 300,
- _height: 300,
- },
- {
- doc_type: supportedDocTypes.web,
- title: 'Brown University Wikipedia',
- data: 'https://en.wikipedia.org/wiki/Brown_University',
- _width: 300,
- _height: 300,
- x: 1100,
- y: 1100,
- },
- {
- doc_type: supportedDocTypes.text,
- title: 'Water Cycle',
- data: 'The continuous movement of water on, above, and below the Earth’s surface.',
- _width: 300,
- _height: 300,
- x: 1500,
- y: 500,
- },
- {
- doc_type: supportedDocTypes.collection,
- title: 'Advanced Biology',
- data: [
- {
- doc_type: 'text',
- title: 'Cell Structure',
- data: 'Cells are the basic building blocks of all living organisms.',
- _width: 300,
- _height: 300,
- },
- ],
- backgroundColor: '#00ff00',
- _width: 600,
- _height: 600,
- x: 1100,
- y: 500,
- type_collection: 'stacking',
- },
- ],
- _width: 600,
- _height: 600,
- x: 500,
- y: 500,
- type_collection: 'carousel',
- },
-];
-
-// Stringify the entire structure for transmission if needed
-const finalJsonString = JSON.stringify(example);
-
-const standardOptions = ['title', 'backgroundColor'];
-/**
- * Description of document options and data field for each type.
- */
-const documentTypesInfo: { [key in supportedDocTypes]: { options: string[]; dataDescription: string } } = {
- comparison: {
- options: [...standardOptions, 'fontColor', 'text_align'],
- dataDescription: 'an array of two documents of any kind that can be compared.',
- },
- deck: {
- options: [...standardOptions, 'fontColor', 'text_align'],
- dataDescription: 'an array of flashcard docs',
- },
- flashcard: {
- options: [...standardOptions, 'fontColor', 'text_align'],
- dataDescription: 'an array of two strings. the first string contains a question, and the second string contains an answer',
- },
- text: {
- options: [...standardOptions, 'fontColor', 'text_align'],
- dataDescription: 'The text content of the document.',
- },
- web: {
- options: [],
- dataDescription: 'A URL to a webpage. Example: https://en.wikipedia.org/wiki/Brown_University',
- },
- html: {
- options: [],
- dataDescription: 'The HTML-formatted text content of the document.',
- },
- equation: {
- options: [...standardOptions, 'fontColor'],
- dataDescription: 'The equation content represented as a MathML string.',
- },
- functionplot: {
- options: [...standardOptions, 'function_definition'],
- dataDescription: 'The function definition(s) for plotting. Provide as a string or array of function definitions.',
- },
- dataviz: {
- options: [...standardOptions, 'chartType'],
- dataDescription: 'A string of comma-separated values representing the CSV data.',
- },
- notetaking: {
- options: standardOptions,
- dataDescription: 'An array of related text documents with small amounts of text.',
- },
- rtf: {
- options: standardOptions,
- dataDescription: 'The rich text content in RTF format.',
- },
- image: {
- options: standardOptions,
- dataDescription: `A url string that must end with '.png', '.jpeg', '.gif', or '.jpg'`,
- },
- pdf: {
- options: standardOptions,
- dataDescription: 'the pdf content as a PDF file url.',
- },
- audio: {
- options: standardOptions,
- dataDescription: 'The audio content as a file url.',
- },
- video: {
- options: standardOptions,
- dataDescription: 'The video content as a file url.',
- },
- message: {
- options: standardOptions,
- dataDescription: 'The message content of the document.',
- },
- diagram: {
- options: standardOptions,
- dataDescription: 'diagram content as a text string in Mermaid format.',
- },
- script: {
- options: standardOptions,
- dataDescription: 'The compilable JavaScript code. Use this for creating scripts.',
- },
- collection: {
- options: [...standardOptions, 'type_collection'],
- dataDescription: 'A collection of Docs represented as an array.',
- },
-};
-
-// Parameters for creating individual documents
-const createDocToolParams: { name: string; type: 'string' | 'number' | 'boolean' | 'string[]' | 'number[]'; description: string; required: boolean }[] = [
- {
- name: 'data',
- type: 'string', // Accepts either string or array, supporting individual and nested data
- description:
- 'the data that describes the Document contents. For collections this is an' +
- `Array of documents in stringified JSON format. Each item in the array should be an individual stringified JSON object. ` +
- `Creates any type of document with the provided options and data. Supported document types are: ${Object.keys(documentTypesInfo).join(', ')}.
- dataviz is a csv table tool, so for CSVs, use dataviz. Here are the options for each type:
- <supported_document_types>` +
- Object.entries(documentTypesInfo)
- .map(
- ([doc_type, info]) =>
- `<document_type name="${doc_type}">
- <data_description>${info.dataDescription}</data_description>
- <options>` +
- info.options.map(option => `<option>${option}</option>`).join('\n') +
- `
- </options>
- </document_type>`
- )
- .join('\n') +
- `</supported_document_types> An example of the structure of a collection is:` +
- finalJsonString, // prettier-ignore,
- required: true,
- },
- {
- name: 'doc_type',
- type: 'string',
- description: `The type of the document. Options: ${Object.keys(documentTypesInfo).join(',')}.`,
- required: true,
- },
- {
- name: 'title',
- type: 'string',
- description: 'The title of the document.',
- required: true,
- },
- {
- name: 'x',
- type: 'number',
- description: 'The x location of the document; 0 <= x.',
- required: true,
- },
- {
- name: 'y',
- type: 'number',
- description: 'The y location of the document; 0 <= y.',
- required: true,
- },
- {
- name: 'backgroundColor',
- type: 'string',
- description: 'The background color of the document as a hex string.',
- required: false,
- },
- {
- name: 'fontColor',
- type: 'string',
- description: 'The font color of the document as a hex string.',
- required: false,
- },
- {
- name: '_width',
- type: 'number',
- description: 'The width of the document in pixels.',
- required: true,
- },
- {
- name: '_height',
- type: 'number',
- description: 'The height of the document in pixels.',
- required: true,
- },
- {
- name: 'type_collection',
- type: 'string',
- description: `the visual style for a collection doc. Options include: ${Object.values(CollectionViewType).join(',')}.`,
- required: false,
- },
-] as const;
-
-type CreateDocToolParamsType = typeof createDocToolParams;
-
-const createDocToolInfo: ToolInfo<CreateDocToolParamsType> = {
- name: 'createDoc',
- description: `Creates one or more documents that best fit the user’s request.
- If the user requests a "dashboard," first call the search tool and then generate a variety of document types individually, with absolutely a minimum of 20 documents
- with two stacks of flashcards that are small and it should have a couple nested freeform collections of things, each with different content and color schemes.
- For example, create multiple individual documents, including ${Object.keys(documentTypesInfo)
- .map(t => '"' + t + '"')
- .join(',')}
- If the "doc_type" parameter is missing, set it to an empty string ("").
- Use Decks instead of Flashcards for dashboards. Decks should have at least three flashcards.
- Really think about what documents are useful to the user. If they ask for a dashboard about the skeletal system, include flashcards, as they would be helpful.
- Arrange the documents in a grid layout, ensuring that the x and y coordinates are calculated so no documents overlap but they should be directly next to each other with 20 padding in between.
- Take into account the width and height of each document, spacing them appropriately to prevent collisions.
- Use a systematic approach, such as placing each document in a grid cell based on its order, where cell dimensions match the document dimensions plus a fixed margin for spacing.
- Do not nest all documents within a single collection unless explicitly requested by the user.
- Instead, create a set of independent documents with diverse document types. Each type should appear separately unless specified otherwise.
- Use the "data" parameter for document content and include title, color, and document dimensions.
- Ensure web documents use URLs from the search tool if relevant. Each document in a dashboard should be unique and well-differentiated in type and content,
- without repetition of similar types in any single collection.
- When creating a dashboard, ensure that it consists of a broad range of document types.
- Include a variety of documents, such as text, web, deck, comparison, image, and equation documents,
- each with distinct titles and colors, following the user’s preferences.
- Do not overuse collections or nest all document types within a single collection; instead, represent document types individually. Use this example for reference:
- ${finalJsonString} .
- Which documents are created should be random with different numbers of each document type and different for each dashboard.
- Must use search tool before creating a dashboard.`,
- parameterRules: createDocToolParams,
- citationRules: 'No citation needed.',
-};
-
-// Tool class for creating documents
-export class CreateDocTool extends BaseTool<
- {
- name: string;
- type: 'string' | 'number' | 'boolean' | 'string[]' | 'number[]';
- description: string;
- required: boolean;
- }[]
-> {
- private _addLinkedDoc: (doc: parsedDoc) => void;
-
- constructor(addLinkedDoc: (doc: parsedDoc) => void) {
- super(createDocToolInfo);
- this._addLinkedDoc = addLinkedDoc;
- }
-
- override inputValidator(inputParam: ParametersType<readonly Parameter[]>) {
- return !!inputParam.data;
- }
- // Executes the tool logic for creating documents
- async execute(
- args: ParametersType<
- {
- name: 'string';
- type: 'string' | 'number' | 'boolean' | 'string[]' | 'number[]';
- description: 'string';
- required: boolean;
- }[]
- >
- ): Promise<Observation[]> {
- try {
- const parsedDocs = args instanceof Array ? args : Object.keys(args).length === 1 && 'data' in args ? JSON.parse(args.data as string) : [args];
- parsedDocs.forEach((pdoc: parsedDoc) => this._addLinkedDoc({ ...pdoc, _layout_fitWidth: false, _layout_autoHeight: true }));
- return [{ type: 'text', text: 'Created document.' }];
- } catch (error) {
- return [{ type: 'text', text: 'Error creating text document, ' + error }];
- }
- }
-}
diff --git a/src/client/views/nodes/chatbot/tools/CreateLinksTool.ts b/src/client/views/nodes/chatbot/tools/CreateLinksTool.ts
new file mode 100644
index 000000000..c2850a8ce
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/CreateLinksTool.ts
@@ -0,0 +1,68 @@
+import { Observation } from '../types/types';
+import { ParametersType, ToolInfo } from '../types/tool_types';
+import { BaseTool } from './BaseTool';
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
+
+const createLinksToolParams = [
+ {
+ name: 'document_ids',
+ type: 'string[]',
+ description: 'List of document IDs to create links between. All documents will be linked to each other.',
+ required: true,
+ },
+] as const;
+
+type CreateLinksToolParamsType = typeof createLinksToolParams;
+
+const createLinksToolInfo: ToolInfo<CreateLinksToolParamsType> = {
+ name: 'createLinks',
+ description: 'Creates visual links between multiple documents in the dashboard. This allows related documents to be connected visually with lines that users can see.',
+ citationRules: 'No citation needed.',
+ parameterRules: createLinksToolParams,
+};
+
+export class CreateLinksTool extends BaseTool<CreateLinksToolParamsType> {
+ private _documentManager: AgentDocumentManager;
+
+ constructor(documentManager: AgentDocumentManager) {
+ super(createLinksToolInfo);
+ this._documentManager = documentManager;
+ }
+
+ async execute(args: ParametersType<CreateLinksToolParamsType>): Promise<Observation[]> {
+ try {
+ // Validate that we have at least 2 documents to link
+ if (args.document_ids.length < 2) {
+ return [{ type: 'text', text: 'Error: At least 2 document IDs are required to create links.' }];
+ }
+
+ // Validate that all documents exist
+ const missingDocIds = args.document_ids.filter(id => !this._documentManager.has(id));
+ if (missingDocIds.length > 0) {
+ return [
+ {
+ type: 'text',
+ text: `Error: The following document IDs were not found: ${missingDocIds.join(', ')}`,
+ },
+ ];
+ }
+
+ // Create links between all documents with the specified relationship
+ const createdLinks = this._documentManager.addLinks(args.document_ids);
+
+ return [
+ {
+ type: 'text',
+ text: `Successfully created ${createdLinks.length} visual links between ${args.document_ids.length} documents.`,
+ },
+ ];
+ } catch (error) {
+ return [
+ {
+ type: 'text',
+ text: `Error creating links: ${error instanceof Error ? error.message : String(error)}`,
+ },
+ ];
+ }
+ }
+}
diff --git a/src/client/views/nodes/chatbot/tools/CreateNewTool.ts b/src/client/views/nodes/chatbot/tools/CreateNewTool.ts
new file mode 100644
index 000000000..1cc50a803
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/CreateNewTool.ts
@@ -0,0 +1,599 @@
+import { Observation } from '../types/types';
+import { Parameter, ParametersType, ToolInfo } from '../types/tool_types';
+import { BaseTool } from './BaseTool';
+import * as ts from 'typescript';
+import { v4 as uuidv4 } from 'uuid';
+import { Networking } from '../../../../Network';
+
+// Forward declaration to avoid circular import
+interface AgentLike {
+ registerDynamicTool(toolName: string, toolInstance: BaseTool<ReadonlyArray<Parameter>>): void;
+ notifyToolCreated(toolName: string, completeToolCode: string): void;
+}
+
+const createNewToolParams = [
+ {
+ name: 'toolName',
+ type: 'string',
+ description: 'The name of the new tool class (PascalCase) and filename. This will also be converted to lowercase for the action name.',
+ required: true,
+ },
+ {
+ name: 'toolCode',
+ type: 'string',
+ description:
+ 'The complete TypeScript code for the new tool class. IMPORTANT: Provide this as a single string without any XML formatting. Do not break it into multiple lines or add any XML tags. The tool must extend BaseTool, implement an async execute method, and have proper parameter definitions. Use CDATA format if needed: <![CDATA[your code here]]>',
+ required: true,
+ },
+ {
+ name: 'description',
+ type: 'string',
+ description: 'A brief description of what the tool does.',
+ required: true,
+ },
+] as const;
+
+type CreateNewToolParamsType = typeof createNewToolParams;
+
+const createNewToolInfo: ToolInfo<CreateNewToolParamsType> = {
+ name: 'createNewTool',
+ description: `Creates a new tool for the agent to use based on research done with the codebase search, file content, and filenames tools. The new tool will be instantly available for use in the current session and saved as a proper TypeScript file.
+
+IMPORTANT TOOL CREATION RULES:
+1. Your tool will be created with proper imports adjusted for the dynamic subfolder location
+2. Your tool MUST extend BaseTool with proper parameter type definition
+3. Your tool MUST implement an async execute method that returns Promise<Observation[]>
+4. Your tool MUST call super() with the proper tool info configuration object
+5. CRITICAL: The toolInfo.name property MUST be lowercase and should match the action name you want to use
+6. Follow this EXACT pattern (imports will be added automatically):
+
+\`\`\`typescript
+const yourToolParams = [
+ {
+ name: 'inputParam',
+ type: 'string',
+ description: 'Your parameter description',
+ required: true
+ }
+] as const;
+
+type YourToolParamsType = typeof yourToolParams;
+
+const yourToolInfo: ToolInfo<YourToolParamsType> = {
+ name: 'yourtoolname',
+ description: 'Your tool description',
+ citationRules: 'No citation needed.',
+ parameterRules: yourToolParams
+};
+
+export class YourToolName extends BaseTool<YourToolParamsType> {
+ constructor() {
+ super(yourToolInfo);
+ }
+
+ async execute(args: ParametersType<YourToolParamsType>): Promise<Observation[]> {
+ const { inputParam } = args;
+ // Your implementation here
+ return [{ type: 'text', text: 'Your result' }];
+ }
+}
+\`\`\`
+
+EXAMPLE - Character Count Tool:
+
+\`\`\`typescript
+const characterCountParams = [
+ {
+ name: 'text',
+ type: 'string',
+ description: 'The text to count characters in',
+ required: true
+ }
+] as const;
+
+type CharacterCountParamsType = typeof characterCountParams;
+
+const characterCountInfo: ToolInfo<CharacterCountParamsType> = {
+ name: 'charactercount',
+ description: 'Counts characters in text, excluding spaces',
+ citationRules: 'No citation needed.',
+ parameterRules: characterCountParams
+};
+
+export class CharacterCountTool extends BaseTool<CharacterCountParamsType> {
+ constructor() {
+ super(characterCountInfo);
+ }
+
+ async execute(args: ParametersType<CharacterCountParamsType>): Promise<Observation[]> {
+ const { text } = args;
+ const count = text ? text.replace(/\\s/g, '').length : 0;
+ return [{ type: 'text', text: \`Character count (excluding spaces): \${count}\` }];
+ }
+}
+\`\`\``,
+ citationRules: `No citation needed.`,
+ parameterRules: createNewToolParams,
+};
+
+/**
+ * This tool allows the agent to create new custom tools after researching the codebase.
+ * It validates the provided code, dynamically compiles it, and registers it with the
+ * Agent for immediate use.
+ */
+export class CreateNewTool extends BaseTool<CreateNewToolParamsType> {
+ // Reference to the dynamic tool registry in the Agent class
+ private dynamicToolRegistry: Map<string, BaseTool<ReadonlyArray<Parameter>>>;
+ private existingTools: Record<string, BaseTool<ReadonlyArray<Parameter>>>;
+ private agent?: AgentLike;
+
+ constructor(toolRegistry: Map<string, BaseTool<ReadonlyArray<Parameter>>>, existingTools: Record<string, BaseTool<ReadonlyArray<Parameter>>> = {}, agent?: AgentLike) {
+ super(createNewToolInfo);
+ this.dynamicToolRegistry = toolRegistry;
+ this.existingTools = existingTools;
+ this.agent = agent;
+ }
+
+ /**
+ * Validates TypeScript code for basic safety and correctness
+ * @param code The TypeScript code to validate
+ * @returns An object with validation result and any error messages
+ */
+ private validateToolCode(code: string, toolName: string): { valid: boolean; errors: string[] } {
+ const errors: string[] = [];
+
+ // Check for fundamental structure
+ if (!code.includes('extends BaseTool')) {
+ errors.push('Tool must extend BaseTool class');
+ }
+
+ if (!code.includes(`class ${toolName} extends`)) {
+ errors.push(`Tool class name must match the provided toolName: ${toolName}`);
+ }
+
+ if (!code.includes('async execute(')) {
+ errors.push('Tool must implement an async execute method');
+ }
+
+ if (!code.includes('super(')) {
+ errors.push('Tool must call super() in constructor');
+ }
+
+ // Check if the tool exports the class correctly (should use export class)
+ if (!code.includes(`export class ${toolName}`)) {
+ errors.push(`Tools must export the class using: export class ${toolName}`);
+ }
+
+ // Check if tool info has name property in lowercase
+ const nameMatch = code.match(/name\s*:\s*['"]([^'"]+)['"]/);
+ if (nameMatch && nameMatch[1]) {
+ const toolInfoName = nameMatch[1];
+ if (toolInfoName !== toolInfoName.toLowerCase()) {
+ errors.push(`Tool info name property must be lowercase. Found: "${toolInfoName}", should be "${toolInfoName.toLowerCase()}"`);
+ }
+ } else {
+ errors.push('Tool info must have a name property');
+ }
+
+ // Check for type definition - make this more flexible
+ const hasTypeDefinition = code.includes(`type ${toolName}ParamsType`) || code.includes(`type ${toolName.toLowerCase()}ParamsType`) || code.includes('ParamsType = typeof');
+ if (!hasTypeDefinition) {
+ errors.push(`Tool must define a type for parameters like: type ${toolName}ParamsType = typeof ${toolName.toLowerCase()}Params`);
+ }
+
+ // Check for ToolInfo type annotation - make this more flexible
+ const hasToolInfoType = code.includes(`ToolInfo<${toolName}ParamsType>`) || code.includes(`ToolInfo<${toolName.toLowerCase()}ParamsType>`) || code.includes('ToolInfo<');
+ if (!hasToolInfoType) {
+ errors.push(`Tool info must be typed as ToolInfo<YourParamsType>`);
+ }
+
+ // Check for proper execute method typing - make this more flexible
+ if (!code.includes(`ParametersType<${toolName}ParamsType>`) && !code.includes('args: ParametersType<')) {
+ errors.push(`Execute method must have typed parameters: args: ParametersType<${toolName}ParamsType>`);
+ }
+
+ // Check for unsafe code patterns
+ const unsafePatterns = [
+ { pattern: /eval\s*\(/, message: 'eval() is not allowed' },
+ { pattern: /Function\s*\(/, message: 'Function constructor is not allowed' },
+ { pattern: /require\s*\(\s*['"]child_process['"]/, message: 'child_process module is not allowed' },
+ { pattern: /require\s*\(\s*['"]fs['"]/, message: 'Direct fs module import is not allowed' },
+ { pattern: /require\s*\(\s*['"]path['"]/, message: 'Direct path module import is not allowed' },
+ { pattern: /process\.env/, message: 'Accessing process.env is not allowed' },
+ { pattern: /import\s+.*['"]child_process['"]/, message: 'child_process module is not allowed' },
+ { pattern: /import\s+.*['"]fs['"]/, message: 'Direct fs module import is not allowed' },
+ { pattern: /import\s+.*['"]path['"]/, message: 'Direct path module import is not allowed' },
+ { pattern: /\bnew\s+Function\b/, message: 'Function constructor is not allowed' },
+ { pattern: /\bwindow\b/, message: 'Direct window access is not allowed' },
+ { pattern: /\bdocument\b/, message: 'Direct document access is not allowed' },
+ { pattern: /\blocation\b/, message: 'Direct location access is not allowed' },
+ { pattern: /\bsessionStorage\b/, message: 'Direct sessionStorage access is not allowed' },
+ { pattern: /\blocalStorage\b/, message: 'Direct localStorage access is not allowed' },
+ { pattern: /fetch\s*\(/, message: 'Direct fetch calls are not allowed' },
+ { pattern: /XMLHttpRequest/, message: 'Direct XMLHttpRequest use is not allowed' },
+ ];
+
+ for (const { pattern, message } of unsafePatterns) {
+ if (pattern.test(code)) {
+ errors.push(message);
+ }
+ }
+
+ // Check if the tool name is already used by an existing tool
+ const toolNameLower = toolName.toLowerCase();
+ if (Object.keys(this.existingTools).some(key => key.toLowerCase() === toolNameLower) || Array.from(this.dynamicToolRegistry.keys()).some(key => key.toLowerCase() === toolNameLower)) {
+ errors.push(`A tool with the name "${toolNameLower}" already exists. Please choose a different name.`);
+ }
+
+ // Use TypeScript compiler API to check for syntax errors
+ try {
+ const sourceFile = ts.createSourceFile(`${toolName}.ts`, code, ts.ScriptTarget.Latest, true);
+
+ // Create a TypeScript program to check for type errors
+ const options: ts.CompilerOptions = {
+ target: ts.ScriptTarget.ES2020,
+ module: ts.ModuleKind.ESNext,
+ strict: true,
+ esModuleInterop: true,
+ skipLibCheck: true,
+ forceConsistentCasingInFileNames: true,
+ };
+
+ // Perform additional static analysis on the AST
+ const visitor = (node: ts.Node) => {
+ // Check for potentially unsafe constructs
+ if (ts.isCallExpression(node)) {
+ const expression = node.expression;
+ if (ts.isIdentifier(expression)) {
+ const name = expression.text;
+ if (name === 'eval' || name === 'Function') {
+ errors.push(`Use of ${name} is not allowed`);
+ }
+ }
+ }
+
+ // Recursively visit all child nodes
+ ts.forEachChild(node, visitor);
+ };
+
+ visitor(sourceFile);
+ } catch (error) {
+ errors.push(`TypeScript syntax error: ${error}`);
+ }
+
+ return {
+ valid: errors.length === 0,
+ errors,
+ };
+ }
+
+ /**
+ * Extracts tool info name from the tool code
+ * @param code The tool TypeScript code
+ * @returns The tool info name or null if not found
+ */
+ private extractToolInfoName(code: string): string | null {
+ const nameMatch = code.match(/name\s*:\s*['"]([^'"]+)['"]/);
+ return nameMatch && nameMatch[1] ? nameMatch[1] : null;
+ }
+
+ /**
+ * Extracts and parses parameter info from the tool code
+ * @param code The tool TypeScript code
+ * @returns An array of parameter objects
+ */
+ private extractToolParameters(code: string): Array<{ name: string; type: string; description: string; required: boolean }> {
+ // Basic regex-based extraction - in a production environment, this should use the TypeScript AST
+ const paramsMatch = code.match(/const\s+\w+Params\s*=\s*\[([\s\S]*?)\]\s*as\s*const/);
+ if (!paramsMatch || !paramsMatch[1]) {
+ return [];
+ }
+
+ const paramsText = paramsMatch[1];
+
+ // Parse individual parameters
+ const paramRegex = /{\s*name\s*:\s*['"]([^'"]+)['"]\s*,\s*type\s*:\s*['"]([^'"]+)['"]\s*,\s*description\s*:\s*['"]([^'"]+)['"]\s*,\s*required\s*:\s*(true|false)/g;
+ const params = [];
+ let match;
+
+ while ((match = paramRegex.exec(paramsText)) !== null) {
+ params.push({
+ name: match[1],
+ type: match[2],
+ description: match[3],
+ required: match[4] === 'true',
+ });
+ }
+
+ return params;
+ }
+
+ /**
+ * Generates the complete tool file content with proper imports for the dynamic subfolder
+ * @param toolCode The user-provided tool code
+ * @param toolName The name of the tool class
+ * @returns The complete TypeScript file content
+ */
+ private generateCompleteToolFile(toolCode: string, toolName: string): string {
+ // Add proper imports for the dynamic subfolder (one level deeper than regular tools)
+ const imports = `import { Observation } from '../../types/types';
+import { ParametersType, ToolInfo } from '../../types/tool_types';
+import { BaseTool } from '../BaseTool';
+
+`;
+
+ // Clean the user code - remove any existing imports they might have added
+ const cleanedCode = toolCode
+ .replace(/import\s+[^;]+;?\s*/g, '') // Remove any import statements
+ .trim();
+
+ return imports + cleanedCode;
+ }
+
+ /**
+ * Transpiles TypeScript code to JavaScript
+ * @param code The TypeScript code to compile
+ * @param filename The name of the file (for error reporting)
+ * @returns The compiled JavaScript code
+ */
+ private transpileTypeScript(code: string, filename: string): { jsCode: string; errors: string[] } {
+ try {
+ const transpileOptions: ts.TranspileOptions = {
+ compilerOptions: {
+ module: ts.ModuleKind.CommonJS, // Use CommonJS for dynamic imports
+ target: ts.ScriptTarget.ES2020,
+ moduleResolution: ts.ModuleResolutionKind.NodeJs,
+ esModuleInterop: true,
+ sourceMap: false,
+ strict: false, // Relax strict mode for dynamic compilation
+ noImplicitAny: false, // Allow implicit any types
+ },
+ reportDiagnostics: true,
+ fileName: `${filename}.ts`,
+ };
+
+ const output = ts.transpileModule(code, transpileOptions);
+
+ // Check for compilation errors
+ const errors: string[] = [];
+ if (output.diagnostics && output.diagnostics.length > 0) {
+ for (const diagnostic of output.diagnostics) {
+ if (diagnostic.file && diagnostic.start !== undefined) {
+ const { line, character } = diagnostic.file.getLineAndCharacterOfPosition(diagnostic.start);
+ const message = ts.flattenDiagnosticMessageText(diagnostic.messageText, '\n');
+ errors.push(`Line ${line + 1}, Column ${character + 1}: ${message}`);
+ } else {
+ errors.push(ts.flattenDiagnosticMessageText(diagnostic.messageText, '\n'));
+ }
+ }
+
+ if (errors.length > 0) {
+ return { jsCode: '', errors };
+ }
+ }
+
+ return { jsCode: output.outputText, errors: [] };
+ } catch (error) {
+ return {
+ jsCode: '',
+ errors: [`Transpilation failed: ${error instanceof Error ? error.message : String(error)}`],
+ };
+ }
+ }
+
+ /**
+ * Dynamically evaluates and instantiates a tool from JavaScript code
+ * @param jsCode The JavaScript code
+ * @param toolName The name of the tool class
+ * @returns An instance of the tool or null if instantiation failed
+ */
+ private async createDynamicTool(jsCode: string, toolName: string): Promise<BaseTool<ReadonlyArray<Parameter>> | null> {
+ try {
+ // Create a safe evaluation context with necessary globals
+ const globalContext = {
+ BaseTool: BaseTool, // Actual class reference
+ exports: {},
+ module: { exports: {} },
+ require: (id: string) => {
+ // Mock require for the imports we know about
+ if (id.includes('types/types')) {
+ return { Observation: null };
+ }
+ if (id.includes('tool_types')) {
+ return { ParametersType: null, ToolInfo: null };
+ }
+ if (id.includes('BaseTool')) {
+ return { BaseTool: BaseTool };
+ }
+ return {};
+ },
+ console: console,
+ // Add any other commonly needed globals
+ JSON: JSON,
+ Array: Array,
+ Object: Object,
+ String: String,
+ Number: Number,
+ Boolean: Boolean,
+ Math: Math,
+ Date: Date,
+ };
+
+ // Create function to evaluate in the proper context
+ const evaluationFunction = new Function(
+ ...Object.keys(globalContext),
+ `"use strict";
+ try {
+ ${jsCode}
+ // Get the exported class from the module
+ const ToolClass = exports.${toolName} || module.exports.${toolName} || module.exports;
+ if (ToolClass && typeof ToolClass === 'function') {
+ return new ToolClass();
+ } else {
+ console.error('Tool class not found in exports:', Object.keys(exports), Object.keys(module.exports));
+ return null;
+ }
+ } catch (error) {
+ console.error('Error during tool evaluation:', error);
+ return null;
+ }`
+ );
+
+ // Execute with our controlled globals
+ const toolInstance = evaluationFunction(...Object.values(globalContext));
+
+ if (!toolInstance) {
+ console.error(`Failed to instantiate ${toolName} - no instance returned`);
+ return null;
+ }
+
+ // Verify it's a proper BaseTool instance
+ if (!(toolInstance instanceof BaseTool)) {
+ console.error(`${toolName} is not a proper instance of BaseTool`);
+ return null;
+ }
+
+ console.log(`Successfully created dynamic tool instance: ${toolName}`);
+ return toolInstance;
+ } catch (error) {
+ console.error('Error creating dynamic tool:', error);
+ return null;
+ }
+ }
+
+ /**
+ * Save the tool code to the server so it's available for future sessions
+ * @param toolName The name of the tool
+ * @param completeToolCode The complete TypeScript code for the tool with imports
+ */
+ private async saveToolToServer(toolName: string, completeToolCode: string): Promise<boolean> {
+ try {
+ // Create a server endpoint to save the tool
+ const response = await Networking.PostToServer('/saveDynamicTool', {
+ toolName: toolName,
+ toolCode: completeToolCode,
+ });
+
+ // Type check the response to avoid property access errors
+ return typeof response === 'object' && response !== null && 'success' in response && (response as { success: boolean }).success === true;
+ } catch (error) {
+ console.error('Failed to save tool to server:', error);
+ return false;
+ }
+ }
+
+ async execute(args: ParametersType<CreateNewToolParamsType>): Promise<Observation[]> {
+ const { toolName, toolCode, description } = args;
+
+ console.log(`Creating new tool: ${toolName}`);
+
+ // Remove any markdown backticks that might be in the code
+ const cleanedCode = (toolCode as string).replace(/```typescript|```/g, '').trim();
+
+ if (!cleanedCode) {
+ return [
+ {
+ type: 'text',
+ text: 'Failed to extract tool code from the provided input. Please ensure the tool code is provided as valid TypeScript code.',
+ },
+ ];
+ }
+
+ // Validate the provided code
+ const validation = this.validateToolCode(cleanedCode, toolName);
+ if (!validation.valid) {
+ return [
+ {
+ type: 'text',
+ text: `Failed to create tool: Code validation failed with the following errors:\n- ${validation.errors.join('\n- ')}`,
+ },
+ ];
+ }
+
+ try {
+ // Generate the complete tool file with proper imports
+ const completeToolCode = this.generateCompleteToolFile(cleanedCode, toolName);
+
+ // Extract tool info name from the code
+ const toolInfoName = this.extractToolInfoName(cleanedCode);
+ if (!toolInfoName) {
+ return [
+ {
+ type: 'text',
+ text: 'Failed to extract tool info name from the code. Make sure the tool has a name property.',
+ },
+ ];
+ }
+
+ // Extract parameters from the tool code
+ const parameters = this.extractToolParameters(cleanedCode);
+
+ // Transpile the TypeScript to JavaScript
+ const { jsCode, errors } = this.transpileTypeScript(completeToolCode, toolName);
+ if (errors.length > 0) {
+ return [
+ {
+ type: 'text',
+ text: `Failed to transpile tool code with the following errors:\n- ${errors.join('\n- ')}`,
+ },
+ ];
+ }
+
+ // Create a dynamic tool instance
+ const toolInstance = await this.createDynamicTool(jsCode, toolName);
+ if (!toolInstance) {
+ return [
+ {
+ type: 'text',
+ text: 'Failed to instantiate the tool. Make sure it follows all the required patterns and properly extends BaseTool.',
+ },
+ ];
+ }
+
+ // Register the tool in the dynamic registry
+ // Use the name property from the tool info as the registry key
+ this.dynamicToolRegistry.set(toolInfoName, toolInstance);
+
+ // If we have a reference to the agent, tell it to register dynamic tool
+ // This ensures the tool is properly loaded from the filesystem for the prompt system
+ if (this.agent) {
+ this.agent.registerDynamicTool(toolInfoName, toolInstance);
+ }
+
+ // Create the success message
+ const successMessage = `Successfully created and registered new tool: ${toolName}\n\nThe tool is now available for use in the current session. You can call it using the action "${toolInfoName}".\n\nDescription: ${description}\n\nParameters: ${
+ parameters.length > 0 ? parameters.map(p => `\n- ${p.name} (${p.type}${p.required ? ', required' : ''}): ${p.description}`).join('') : '\nNo parameters'
+ }\n\nThe tool will be saved permanently after you confirm the page reload.`;
+
+ // Notify the agent that a tool was created with the complete code for deferred saving
+ // This will trigger the modal but NOT save to disk yet
+ if (this.agent) {
+ this.agent.notifyToolCreated(toolName, completeToolCode);
+ }
+
+ return [
+ {
+ type: 'text',
+ text: successMessage,
+ },
+ ];
+ } catch (error) {
+ console.error(`Error creating new tool:`, error);
+ return [
+ {
+ type: 'text',
+ text: `Failed to create tool: ${(error as Error).message || 'Unknown error'}`,
+ },
+ ];
+ }
+ }
+
+ /**
+ * Public method to save tool to server (called by agent after user confirmation)
+ * @param toolName The name of the tool
+ * @param completeToolCode The complete TypeScript code for the tool with imports
+ */
+ public async saveToolToServerDeferred(toolName: string, completeToolCode: string): Promise<boolean> {
+ return this.saveToolToServer(toolName, completeToolCode);
+ }
+}
diff --git a/src/client/views/nodes/chatbot/tools/CreateTextDocumentTool.ts b/src/client/views/nodes/chatbot/tools/CreateTextDocumentTool.ts
deleted file mode 100644
index 16dc938bb..000000000
--- a/src/client/views/nodes/chatbot/tools/CreateTextDocumentTool.ts
+++ /dev/null
@@ -1,57 +0,0 @@
-import { parsedDoc } from '../chatboxcomponents/ChatBox';
-import { ParametersType, ToolInfo } from '../types/tool_types';
-import { Observation } from '../types/types';
-import { BaseTool } from './BaseTool';
-const createTextDocToolParams = [
- {
- name: 'text_content',
- type: 'string',
- description: 'The text content that the document will display',
- required: true,
- },
- {
- name: 'title',
- type: 'string',
- description: 'The title of the document',
- required: true,
- },
- // {
- // name: 'background_color',
- // type: 'string',
- // description: 'The background color of the document as a hex string',
- // required: false,
- // },
- // {
- // name: 'font_color',
- // type: 'string',
- // description: 'The font color of the document as a hex string',
- // required: false,
- // },
-] as const;
-
-type CreateTextDocToolParamsType = typeof createTextDocToolParams;
-
-const createTextDocToolInfo: ToolInfo<CreateTextDocToolParamsType> = {
- name: 'createTextDoc',
- description: 'Creates a text document with the provided content and title. Use if the user wants to create a textbox or text document of some sort. Can use after a search or other tool to save information.',
- citationRules: 'No citation needed.',
- parameterRules: createTextDocToolParams,
-};
-
-export class CreateTextDocTool extends BaseTool<CreateTextDocToolParamsType> {
- private _addLinkedDoc: (doc: parsedDoc) => void;
-
- constructor(addLinkedDoc: (doc: parsedDoc) => void) {
- super(createTextDocToolInfo);
- this._addLinkedDoc = addLinkedDoc;
- }
-
- async execute(args: ParametersType<CreateTextDocToolParamsType>): Promise<Observation[]> {
- try {
- this._addLinkedDoc({ doc_type: 'text', data: args.text_content, title: args.title });
- return [{ type: 'text', text: 'Created text document.' }];
- } catch (error) {
- return [{ type: 'text', text: 'Error creating text document, ' + error }];
- }
- }
-}
diff --git a/src/client/views/nodes/chatbot/tools/DocumentMetadataTool.ts b/src/client/views/nodes/chatbot/tools/DocumentMetadataTool.ts
new file mode 100644
index 000000000..a55f901e1
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/DocumentMetadataTool.ts
@@ -0,0 +1,853 @@
+import { Doc, FieldType } from '../../../../../fields/Doc';
+import { DocData } from '../../../../../fields/DocSymbols';
+import { Observation } from '../types/types';
+import { ParametersType, ToolInfo, Parameter } from '../types/tool_types';
+import { BaseTool } from './BaseTool';
+import { DocumentOptions } from '../../../../documents/Documents';
+import { CollectionFreeFormDocumentView } from '../../../nodes/CollectionFreeFormDocumentView';
+import { v4 as uuidv4 } from 'uuid';
+import { LinkManager } from '../../../../util/LinkManager';
+import { DocCast, StrCast } from '../../../../../fields/Types';
+import { supportedDocTypes } from '../types/tool_types';
+import { parsedDoc } from '../chatboxcomponents/ChatBox';
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
+
+// Define the parameters for the DocumentMetadataTool
+const parameterDefinitions: ReadonlyArray<Parameter> = [
+ {
+ name: 'action',
+ type: 'string',
+ required: true,
+ description: 'The action to perform: "get" to retrieve metadata, "edit" to modify metadata, "getFieldOptions" to retrieve all available field options, or "create" to create a new document',
+ },
+ {
+ name: 'documentId',
+ type: 'string',
+ required: false,
+ description: 'The ID of the document to get or edit metadata for. Required for "edit", optional for "get", and ignored for "getFieldOptions" and "create"',
+ },
+ {
+ name: 'fieldEdits',
+ type: 'string',
+ required: false,
+ description:
+ 'JSON array of field edits for editing fields. Each item should have fieldName and fieldValue. For single field edits, use an array with one item. Example: [{"fieldName":"layout_autoHeight","fieldValue":false},{"fieldName":"height","fieldValue":300}]',
+ },
+ {
+ name: 'title',
+ type: 'string',
+ required: false,
+ description: 'The title of the document to create. Required for "create" action',
+ },
+ {
+ name: 'data',
+ type: 'string',
+ required: false,
+ description: 'The data content for the document to create. Required for "create" action',
+ },
+ {
+ name: 'doc_type',
+ type: 'string',
+ required: false,
+ description: `The type of document to create. Required for "create" action. Options: ${Object.keys(supportedDocTypes).join(',')}`,
+ },
+] as const;
+
+type DocumentMetadataToolParamsType = typeof parameterDefinitions;
+
+// Detailed description with usage guidelines for the DocumentMetadataTool
+const toolDescription = `Extracts and modifies metadata from documents in the same Freeform view as the ChatBox, and can create new documents.
+This tool helps you work with document properties, understand available fields, edit document metadata, and create new documents.
+
+The Dash document system organizes fields in two locations:
+1. Layout documents: contain visual properties like position, dimensions, and appearance
+2. Data documents: contain the actual content and document-specific data
+
+This tool provides the following capabilities:
+- Get metadata from all documents in the current Freeform view
+- Get metadata from a specific document
+- Edit metadata fields on documents (in either layout or data documents)
+- Edit multiple fields at once (useful for updating dependent fields together)
+- Retrieve all available field options with metadata (IMPORTANT: always call this before editing)
+- Understand which fields are stored where (layout vs data document)
+- Get detailed information about all available document fields
+- Support for all value types: strings, numbers, and booleans
+- Create new documents with basic properties
+
+DOCUMENT CREATION:
+- Use action="create" to create new documents with a simplified approach
+- Required parameters: title, data, and doc_type
+- The tool will create the document with sensible defaults and link it to the current view
+- After creation, you can use the edit action to update its properties
+
+IMPORTANT: Before editing any document metadata, first call 'getFieldOptions' to understand:
+- Which fields are available
+- The data type of each field
+- Special dependencies between fields (like layout_autoHeight and height)
+- Proper naming conventions (with or without underscores)
+
+IMPORTANT: Some fields have dependencies that must be handled for edits to work correctly:
+- When editing "height", first set "layout_autoHeight" to false (as a boolean value, not a string)
+- When editing "width", first set "layout_autoWidth" to false (as a boolean value, not a string)
+- Check document metadata to identify other similar dependencies
+- All edits are done using the fieldEdits parameter which accepts an array of fields to modify
+
+Example: To change document height, disable auto-height and set height in a single operation:
+{... inputs: { action: "edit", documentId: "doc123", fieldEdits: [
+ { fieldName: "layout_autoHeight", fieldValue: false },
+ { fieldName: "height", fieldValue: 300 }
+]}}
+
+
+Here is a detailed example:
+
+<interaction description="Using DocumentMetadataTool to create and align a diagram with its notes">
+ <stage number="1" role="user">
+ <query>Create a flow-chart of neurotransmitter release at a synapse, and place a notes box directly underneath that explains each step.</query>
+ </stage>
+
+ <!-- CREATE THE DIAGRAM -->
+ <stage number="2" role="assistant">
+ <thought>
+ I will create a DiagramBox containing a Mermaid flow-chart. After that, I'll make a notes document, read the diagram’s metadata, and then move the notes so it lines up beneath the chart.
+ </thought>
+ <action>documentMetadata</action>
+ </stage>
+
+ <stage number="3" role="user">
+ <action_rules>***Action rules omitted***</action_rules>
+ </stage>
+
+ <stage number="4" role="assistant">
+ <action_input>
+ <action_input_description>Creating a DiagramBox with a Mermaid chart of neurotransmitter release.</action_input_description>
+ <inputs>
+ <action>create</action>
+ <title>Synaptic Release Flowchart</title>
+ <data>graph TD; A(Vesicle docks)-->B(Ca²⁺ influx); B-->C(Vesicle fuses); C-->D(Neurotransmitter released)</data>
+ <doc_type>diagram</doc_type>
+ </inputs>
+ </action_input>
+ </stage>
+
+ <stage number="5" role="user">
+ <observation>
+ <chunk chunk_id="9f2c43c8-0a16-4c12-987b-b813d2f7b1ee" chunk_type="text">
+ Document created successfully.
+ Document ID: 9f2c43c8-0a16-4c12-987b-b813d2f7b1ee
+ Type: diagram
+ Title: "Synaptic Release Flowchart"
+
+ The document has been created with default dimensions and positioning.
+ …
+ Full metadata for the created document:
+ { /* huge JSON omitted for brevity */ }
+ </chunk>
+ </observation>
+ </stage>
+
+ <!-- CREATE THE NOTES DOC -->
+ <stage number="6" role="assistant">
+ <thought>
+ I will create a plain-text notes document that summarises the diagram’s steps.
+ </thought>
+ <action>documentMetadata</action>
+ </stage>
+
+ <stage number="7" role="user">
+ <action_rules>***Action rules omitted***</action_rules>
+ </stage>
+
+ <stage number="8" role="assistant">
+ <action_input>
+ <action_input_description>Creating a text document for explanatory notes.</action_input_description>
+ <inputs>
+ <action>create</action>
+ <title>Synaptic Release – Notes</title>
+ <data>This diagram shows vesicle docking, calcium-triggered fusion, and neurotransmitter release into the synaptic cleft.</data>
+ <doc_type>note</doc_type>
+ </inputs>
+ </action_input>
+ </stage>
+
+ <stage number="9" role="user">
+ <observation>
+ <chunk chunk_id="c8e3f4b9-7d2e-462a-a444-61e8e9ff0c7f" chunk_type="text">
+ Document created successfully.
+ Document ID: c8e3f4b9-7d2e-462a-a444-61e8e9ff0c7f
+ Type: note
+ Title: "Synaptic Release – Notes"
+ …
+ Full metadata for the created document:
+ { /* huge JSON omitted for brevity */ }
+ </chunk>
+ </observation>
+ </stage>
+
+ <!-- READ DIAGRAM METADATA -->
+ <stage number="10" role="assistant">
+ <thought>
+ I'll fetch the diagram’s width, height, and position so I can align the notes box underneath it.
+ </thought>
+ <action>documentMetadata</action>
+ </stage>
+
+ <stage number="11" role="user">
+ <action_rules>***Action rules omitted***</action_rules>
+ </stage>
+
+ <stage number="12" role="assistant">
+ <action_input>
+ <action_input_description>Getting metadata for the diagram document.</action_input_description>
+ <inputs>
+ <action>get</action>
+ <documentId>9f2c43c8-0a16-4c12-987b-b813d2f7b1ee</documentId>
+ </inputs>
+ </action_input>
+ </stage>
+
+ <stage number="13" role="user">
+ <observation>
+ <chunk chunk_id="9f2c43c8-0a16-4c12-987b-b813d2f7b1ee" chunk_type="text">
+ {
+ "x": 100,
+ "y": 100,
+ "width": 600,
+ "height": 300,
+ "layout_autoWidth": false,
+ "layout_autoHeight": false
+ }
+ </chunk>
+ </observation>
+ </stage>
+
+ <!-- MOVE & SIZE THE NOTES DOC -->
+ <stage number="14" role="assistant">
+ <thought>
+ The diagram ends at y = 100 + 300 = 400.
+ I’ll move the notes to x = 100, y = 420 (a 20-px gap) and expand its width to 600 px.
+ </thought>
+ <action>documentMetadata</action>
+ </stage>
+
+ <stage number="15" role="user">
+ <action_rules>***Action rules omitted***</action_rules>
+ </stage>
+
+ <stage number="16" role="assistant">
+ <action_input>
+ <action_input_description>Editing the notes document’s position and size.</action_input_description>
+ <inputs>
+ <action>edit</action>
+ <documentId>c8e3f4b9-7d2e-462a-a444-61e8e9ff0c7f</documentId>
+ <fieldEdits>[
+ {"fieldName":"layout_autoWidth","fieldValue":false},
+ {"fieldName":"width","fieldValue":600},
+ {"fieldName":"x","fieldValue":100},
+ {"fieldName":"y","fieldValue":420}
+ ]</fieldEdits>
+ </inputs>
+ </action_input>
+ </stage>
+
+ <stage number="17" role="user">
+ <observation>
+ <chunk chunk_id="c8e3f4b9-7d2e-462a-a444-61e8e9ff0c7f" chunk_type="text">
+ Successfully edited 4 fields on document c8e3f4b9-7d2e-462a-a444-61e8e9ff0c7f.
+ New position: (x:100, y:420) Width:600
+ </chunk>
+ </observation>
+ </stage>
+
+ <!-- FINAL ANSWER -->
+ <stage number="18" role="assistant">
+ <thought>
+ Both documents are now created and perfectly aligned; I can summarise the result.
+ </thought>
+ <answer>
+ <grounded_text citation_index="1">
+ A **DiagramBox** depicting neurotransmitter release was created at **(100 px, 100 px)**, sized **600 × 300 px**.
+ </grounded_text>
+ <grounded_text citation_index="2">
+ A **notes document** was then resized to **600 px** wide and moved to **(100 px, 420 px)**—20 px below the diagram—to keep the explanation directly beneath the visualization.
+ </grounded_text>
+ <normal_text>
+ This layout ensures viewers can read the synopsis while referring to the flow-chart above.
+ </normal_text>
+ <citations>
+ <citation index="1" chunk_id="9f2c43c8-0a16-4c12-987b-b813d2f7b1ee" type="text"></citation>
+ <citation index="2" chunk_id="c8e3f4b9-7d2e-462a-a444-61e8e9ff0c7f" type="text"></citation>
+ </citations>
+ <follow_up_questions>
+ <question>Would you like to tweak the diagram’s styling (e.g., colours or fonts)?</question>
+ <question>Should I link external references or papers in the notes?</question>
+ <question>Do you want similar diagrams for other neural processes?</question>
+ </follow_up_questions>
+ <loop_summary>
+ The assistant used **DocumentMetadataTool** four times:
+ 1) **create** DiagramBox → 2) **create** notes document → 3) **get** diagram metadata → 4) **edit** notes position/size.
+ This demonstrates creating, inspecting, and aligning documents within a Freeform view.
+ </loop_summary>
+ </answer>
+ </stage>
+</interaction>
+
+<MermaidMindmapGuide>
+ <Overview>
+ <Description>
+ Mermaid mindmaps are hierarchical diagrams used to visually organize ideas. Nodes are created using indentation to show parent-child relationships.
+ </Description>
+ <Note>This is an experimental feature in Mermaid and may change in future versions.</Note>
+ </Overview>
+
+ <BasicSyntax>
+ <CodeExample language="mermaid">
+ <![CDATA[
+ mindmap
+ Root
+ Branch A
+ Leaf A1
+ Leaf A2
+ Branch B
+ Leaf B1
+ ]]>
+ </CodeExample>
+ <Explanation>
+ <Point><code>mindmap</code> declares the diagram.</Point>
+ <Point>Indentation determines the hierarchy.</Point>
+ <Point>Each level must be indented more than its parent.</Point>
+ </Explanation>
+ </BasicSyntax>
+
+ <NodeShapes>
+ <Description>Nodes can be styled with various shapes similar to flowchart syntax.</Description>
+ <Shapes>
+ <Shape name="Square"><Code>id[Square Text]</Code></Shape>
+ <Shape name="Rounded Square"><Code>id(Rounded Square)</Code></Shape>
+ <Shape name="Circle"><Code>id((Circle))</Code></Shape>
+ <Shape name="Bang"><Code>id))Bang((</Code></Shape>
+ <Shape name="Cloud"><Code>id)Cloud(</Code></Shape>
+ <Shape name="Hexagon"><Code>id{{Hexagon}}</Code></Shape>
+ <Shape name="Default"><Code>Default shape without any brackets</Code></Shape>
+ </Shapes>
+ </NodeShapes>
+
+ <Icons>
+ <Description>Nodes can include icons using the <code>::icon(class)</code> syntax.</Description>
+ <CodeExample>
+ <![CDATA[
+ mindmap
+ Root
+ Node A
+ ::icon(fa fa-book)
+ Node B
+ ::icon(mdi mdi-lightbulb)
+ ]]>
+ </CodeExample>
+ <Note>Icon fonts must be included by the site administrator for proper rendering.</Note>
+ </Icons>
+
+ <CSSClasses>
+ <Description>Add custom styling classes using <code>:::</code>.</Description>
+ <CodeExample>
+ <![CDATA[
+ mindmap
+ Root
+ Important Node
+ :::urgent large
+ Regular Node
+ ]]>
+ </CodeExample>
+ <Note>Classes must be defined in your website or application CSS.</Note>
+ </CSSClasses>
+
+ <MarkdownSupport>
+ <Description>Supports markdown-style strings for rich text, line breaks, and auto-wrapping.</Description>
+ <CodeExample>
+ <![CDATA[
+ mindmap
+ id1["**Bold Root** with new line"]
+ id2["*Italicized* and long text that wraps"]
+ id3[Plain label]
+ ]]>
+ </CodeExample>
+ </MarkdownSupport>
+
+ <RenderingNote>
+ <Note>Indentation is relative, not absolute — Mermaid will infer hierarchy based on surrounding context even with inconsistent spacing.</Note>
+ </RenderingNote>
+
+ <Integration>
+ <Description>
+ From Mermaid v11, mindmaps are included natively. For older versions, use external imports with lazy loading.
+ </Description>
+ <CodeExample>
+ <![CDATA[
+ <script type="module">
+ import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@11/dist/mermaid.esm.min.mjs';
+ </script>
+ ]]>
+ </CodeExample>
+ </Integration>
+</MermaidMindmapGuide>
+
+`;
+
+// Extensive usage guidelines for the tool
+const citationRules = `USAGE GUIDELINES:
+To GET document metadata:
+- Use action="get" with optional documentId to return metadata for one or all documents
+- Returns field values, field definitions, and location information (layout vs data document)
+
+To GET ALL FIELD OPTIONS (call this first):
+- Use action="getFieldOptions" to retrieve metadata about all available document fields
+- No additional parameters are required
+- Returns structured metadata with field names, types, descriptions, and dependencies
+- ALWAYS call this before attempting to edit document metadata
+- Use this information to understand which fields need special handling
+
+To CREATE a new document:
+- Use action="create" with the following required parameters:
+ - title: The title of the document to create
+ - data: The content data for the document (text content, URL, etc.)
+ - doc_type: The type of document to create (text, web, image, etc.)
+- Example: {...inputs: { action: "create", title: "My Notes", data: "This is the content", doc_type: "text" }}
+- After creation, you can edit the document with more specific properties
+
+To EDIT document metadata:
+- Use action="edit" with required parameters:
+ - documentId: The ID of the document to edit
+ - fieldEdits: JSON array of fields to edit, each with fieldName and fieldValue
+- The tool will determine the correct document location automatically
+- Field names can be provided with or without leading underscores (e.g., both "width" and "_width" work)
+- Common fields like "width" and "height" are automatically mapped to "_width" and "_height"
+- All value types are supported: strings, numbers, and booleans
+- The tool will apply the edit to the correct document (layout or data) based on existing fields
+
+SPECIAL FIELD HANDLING:
+- Text fields: When editing the 'text' field, provide simple plain text
+ Example: {...inputs: { action: "edit", documentId: "doc123", fieldEdits: [{ fieldName: "text", fieldValue: "Hello world" }] }}
+ The tool will automatically convert your text to the proper RichTextField format
+- Width/Height: Set layout_autoHeight/layout_autoWidth to false before editing
+
+RECOMMENDED WORKFLOW:
+0. Understand the currently available documents that were provided as <available_documents> in the prompt
+1. Call action="getFieldOptions" to understand available fields
+2. Get document metadata with action="get" to see current values
+3. Edit fields with action="edit" using proper dependencies
+OR
+0. Understand the state of the currently available documents and their metadata using action="get" (this includes spatial positioning).
+1. Create a new document with action="create"
+2. Get its ID from the response
+3. Edit the document's properties with action="edit"
+
+HANDLING DEPENDENT FIELDS:
+- When editing some fields, you may need to update related dependent fields
+- For example, when changing "height", you should also set "layout_autoHeight" to false
+- Use the fieldEdits parameter to update dependent fields in a single operation:
+ {...inputs: { action: "edit", documentId: "doc123", fieldEdits: [
+ { fieldName: "layout_autoHeight", fieldValue: false },
+ { fieldName: "height", fieldValue: 300 }
+]}}
+- Always check for dependent fields that might affect your edits, such as:
+ - height → layout_autoHeight (set to false to allow manual height)
+ - width → layout_autoWidth (set to false to allow manual width)
+ - Other auto-sizing related properties
+
+Editing fields follows these rules:
+1. First checks if the field exists on the layout document using Doc.Get
+2. If it exists on the layout document, it's updated there
+3. If it has an underscore prefix (_), it's created/updated on the layout document
+4. Otherwise, the field is created/updated on the data document
+5. Fields with leading underscores are automatically handled correctly
+
+Examples:
+- To get field options: { action: "getFieldOptions" }
+- To get all document metadata: { action: "get" }
+- To get metadata for a specific document: { action: "get", documentId: "doc123" }
+- To edit a single field: { action: "edit", documentId: "doc123", fieldEdits: [{ fieldName: "backgroundColor", fieldValue: "#ff0000" }] }
+- To edit a width property: { action: "edit", documentId: "doc123", fieldEdits: [{ fieldName: "width", fieldValue: 300 }] }
+- To edit text content: { action: "edit", documentId: "doc123", fieldEdits: [{ fieldName: "text", fieldValue: "Simple plain text goes here" }] }
+- To disable auto-height: { action: "edit", documentId: "doc123", fieldEdits: [{ fieldName: "layout_autoHeight", fieldValue: false }] }
+- To create a text document: { action: "create", title: "My Notes", data: "This is my note content", doc_type: "text" }
+- To create a web document: { action: "create", title: "Google", data: "https://www.google.com", doc_type: "web" }
+- To edit height with its dependent field together:
+ { action: "edit", documentId: "doc123", fieldEdits: [
+ { fieldName: "layout_autoHeight", fieldValue: false },
+ { fieldName: "height", fieldValue: 200 }
+ ]}
+- IMPORTANT: MULTI-STEP WORKFLOWS ARE NOT ONLY ALLOWED BUT ENCOURAGED. TAKE THINGS ONE STEP AT A TIME.
+- IMPORTANT: WHEN CITING A DOCUMENT, MAKE THE CHUNK ID THE DOCUMENT ID. WHENEVER YOU CITE A DOCUMENT, ALWAYS MAKE THE CITATION TYPE "text", THE "direct_text" FIELD BLANK, AND THE "chunk_id" FIELD THE DOCUMENT ID.`;
+const documentMetadataToolInfo: ToolInfo<DocumentMetadataToolParamsType> = {
+ name: 'documentMetadata',
+ description: toolDescription,
+ parameterRules: parameterDefinitions,
+ citationRules: citationRules,
+};
+
+/**
+ * A tool for extracting and modifying metadata from documents in a Freeform view.
+ * This tool collects metadata from both layout and data documents in a Freeform view
+ * and allows for editing document fields in the correct location.
+ */
+export class DocumentMetadataTool extends BaseTool<DocumentMetadataToolParamsType> {
+ private _docManager: AgentDocumentManager;
+
+ constructor(docManager: AgentDocumentManager) {
+ super(documentMetadataToolInfo);
+ this._docManager = docManager;
+ this._docManager.initializeFindDocsFreeform();
+ }
+
+ /**
+ * Executes the document metadata tool
+ * @param args The arguments for the tool
+ * @returns An observation with the results of the tool execution
+ */
+ async execute(args: ParametersType<DocumentMetadataToolParamsType>): Promise<Observation[]> {
+ console.log('DocumentMetadataTool: Executing with args:', args);
+
+ // Find all documents in the Freeform view
+ this._docManager.initializeFindDocsFreeform();
+
+ try {
+ // Validate required input parameters based on action
+ if (!this.inputValidator(args)) {
+ return [
+ {
+ type: 'text',
+ text: `Error: Invalid or missing parameters for action "${args.action}". ${this.getParameterRequirementsByAction(String(args.action))}`,
+ },
+ ];
+ }
+
+ // Ensure the action is valid and convert to string
+ const action = String(args.action);
+ if (!['get', 'edit', 'getFieldOptions', 'create'].includes(action)) {
+ return [
+ {
+ type: 'text',
+ text: 'Error: Invalid action. Valid actions are "get", "edit", "getFieldOptions", or "create".',
+ },
+ ];
+ }
+
+ // Safely convert documentId to string or undefined
+ const documentId = args.documentId ? String(args.documentId) : undefined;
+
+ // Perform the specified action
+ switch (action) {
+ case 'get': {
+ // Get metadata for a specific document or all documents
+ const result = this._docManager.getDocumentMetadata(documentId);
+ console.log('DocumentMetadataTool: Get metadata result:', result);
+ return [
+ {
+ type: 'text',
+ text: `Document metadata ${documentId ? 'for document ' + documentId : ''} retrieved successfully:\n${JSON.stringify(result, null, 2)}`,
+ },
+ ];
+ }
+
+ case 'edit': {
+ // Edit a specific field on a document
+ if (!documentId) {
+ return [
+ {
+ type: 'text',
+ text: 'Error: Document ID is required for edit actions.',
+ },
+ ];
+ }
+
+ // Ensure document exists
+ if (!this._docManager.has(documentId)) {
+ return [
+ {
+ type: 'text',
+ text: `Error: Document with ID ${documentId} not found.`,
+ },
+ ];
+ }
+
+ // Check for fieldEdits parameter
+ if (!args.fieldEdits) {
+ return [
+ {
+ type: 'text',
+ text: 'Error: fieldEdits is required for edit actions. Please provide a JSON array of field edits.',
+ },
+ ];
+ }
+
+ try {
+ // Parse fieldEdits array
+ const edits = JSON.parse(String(args.fieldEdits));
+ if (!Array.isArray(edits) || edits.length === 0) {
+ return [
+ {
+ type: 'text',
+ text: 'Error: fieldEdits must be a non-empty array of field edits.',
+ },
+ ];
+ }
+
+ // Track results for all edits
+ const results: {
+ success: boolean;
+ message: string;
+ fieldName?: string;
+ originalFieldName?: string;
+ newValue?: any;
+ warning?: string;
+ }[] = [];
+
+ let allSuccessful = true;
+
+ // Process each edit
+ for (const edit of edits) {
+ // Get fieldValue in its original form
+ let fieldValue = edit.fieldValue;
+
+ // Only convert to string if it's neither boolean nor number
+ if (typeof fieldValue !== 'boolean' && typeof fieldValue !== 'number') {
+ fieldValue = String(fieldValue);
+ }
+
+ const fieldName = String(edit.fieldName);
+
+ // Edit the field
+ const result = this._docManager.editDocumentField(documentId, fieldName, fieldValue);
+
+ console.log(`DocumentMetadataTool: Edit field result for ${fieldName}:`, result);
+
+ // Add to results
+ results.push(result);
+
+ // Update success status
+ if (!result.success) {
+ allSuccessful = false;
+ }
+ }
+
+ // Format response based on results
+ let responseText = '';
+ if (allSuccessful) {
+ responseText = `Successfully edited ${results.length} fields on document ${documentId}:\n`;
+ results.forEach(result => {
+ responseText += `- Field '${result.originalFieldName}': updated to ${JSON.stringify(result.newValue)}\n`;
+
+ // Add any warnings
+ if (result.warning) {
+ responseText += ` Warning: ${result.warning}\n`;
+ }
+ });
+ } else {
+ responseText = `Errors occurred while editing fields on document ${documentId}:\n`;
+ results.forEach(result => {
+ if (result.success) {
+ responseText += `- Field '${result.originalFieldName}': updated to ${JSON.stringify(result.newValue)}\n`;
+
+ // Add any warnings
+ if (result.warning) {
+ responseText += ` Warning: ${result.warning}\n`;
+ }
+ } else {
+ responseText += `- Error editing '${result.originalFieldName}': ${result.message}\n`;
+ }
+ });
+ }
+
+ // Get the updated metadata to return
+ const updatedMetadata = this._docManager.getDocumentMetadata(documentId);
+
+ return [
+ {
+ type: 'text',
+ text: `${responseText}\nUpdated metadata:\n${JSON.stringify(updatedMetadata, null, 2)}`,
+ },
+ ];
+ } catch (error) {
+ return [
+ {
+ type: 'text',
+ text: `Error processing fieldEdits: ${error instanceof Error ? error.message : String(error)}`,
+ },
+ ];
+ }
+ }
+
+ case 'getFieldOptions': {
+ // Get all available field options with metadata
+ const fieldOptions = this._docManager.getAllFieldMetadata();
+
+ return [
+ {
+ type: 'text',
+ text: `Document field options retrieved successfully.\nThis information should be consulted before editing document fields to understand available options and dependencies:\n${JSON.stringify(fieldOptions, null, 2)}`,
+ },
+ ];
+ }
+
+ case 'create': {
+ // Create a new document
+ if (!args.title || !args.data || !args.doc_type) {
+ return [
+ {
+ type: 'text',
+ text: 'Error: Title, data, and doc_type are required for create action.',
+ },
+ ];
+ }
+
+ const docType = String(args.doc_type);
+ const title = String(args.title);
+ const data = String(args.data);
+
+ const id = await this._docManager.createDocInDash(docType, data, { title: title });
+
+ if (!id) {
+ return [
+ {
+ type: 'text',
+ text: 'Error: Failed to create document.',
+ },
+ ];
+ }
+ // Get the created document's metadata
+ const createdMetadata = this._docManager.extractDocumentMetadata(id);
+
+ return [
+ {
+ type: 'text',
+ text: `Document created successfully.
+Document ID: ${id}
+Type: ${docType}
+Title: "${title}"
+
+The document has been created with default dimensions and positioning.
+You can now use the "edit" action to modify additional properties of this document.
+
+Next steps:
+1. Use the "getFieldOptions" action to understand available editable/addable fields/properties and their dependencies.
+2. To modify this document, use: { action: "edit", documentId: "${id}", fieldEdits: [{"fieldName":"property","fieldValue":"value"}] }
+3. To add styling, consider setting backgroundColor, fontColor, or other properties
+4. For text documents, you can edit the content with: { action: "edit", documentId: "${id}", fieldEdits: [{"fieldName":"text","fieldValue":"New content"}] }
+
+Full metadata for the created document:
+${JSON.stringify(createdMetadata, null, 2)}`,
+ },
+ ];
+ }
+
+ default:
+ return [
+ {
+ type: 'text',
+ text: 'Error: Unknown action. Valid actions are "get", "edit", "getFieldOptions", or "create".',
+ },
+ ];
+ }
+ } catch (error) {
+ console.error('DocumentMetadataTool execution error:', error);
+ return [
+ {
+ type: 'text',
+ text: `Error executing DocumentMetadataTool: ${error instanceof Error ? error.message : String(error)}`,
+ },
+ ];
+ }
+ }
+
+ /**
+ * Validates the input parameters for the DocumentMetadataTool
+ * This custom validator allows numbers and booleans to be passed for fieldValue
+ * while maintaining compatibility with the standard validation
+ *
+ * @param params The parameters to validate
+ * @returns True if the parameters are valid, false otherwise
+ */
+ inputValidator(params: ParametersType<DocumentMetadataToolParamsType>): boolean {
+ // Default validation for required fields
+ if (params.action === undefined) {
+ return false;
+ }
+
+ // For create action, validate required parameters
+ if (params.action === 'create') {
+ return !!(params.title && params.data && params.doc_type);
+ }
+
+ // For edit action, validate fieldEdits is provided
+ if (params.action === 'edit') {
+ if (!params.documentId || !params.fieldEdits) {
+ return false;
+ }
+
+ try {
+ // Parse fieldEdits and validate its structure
+ const edits = JSON.parse(String(params.fieldEdits));
+
+ // Ensure it's an array
+ if (!Array.isArray(edits)) {
+ console.log('fieldEdits is not an array');
+ return false;
+ }
+
+ // Ensure each item has fieldName and fieldValue
+ for (const edit of edits) {
+ if (!edit.fieldName) {
+ console.log('An edit is missing fieldName');
+ return false;
+ }
+ if (edit.fieldValue === undefined) {
+ console.log('An edit is missing fieldValue');
+ return false;
+ }
+ }
+
+ // Everything looks good with fieldEdits
+ return true;
+ } catch (error) {
+ console.log('Error parsing fieldEdits:', error);
+ return false;
+ }
+ }
+
+ // For the "get" action documentId is optional, but an explicitly empty string is rejected
+ if (params.action === 'get' && params.documentId === '') {
+ return false;
+ }
+
+ // getFieldOptions action doesn't require any additional parameters
+ if (params.action === 'getFieldOptions') {
+ return true;
+ }
+
+ return true;
+ }
+
+ /**
+ * Returns the parameter requirements for a specific action
+ * @param action The action to get requirements for
+ * @returns A string describing the required parameters
+ */
+ private getParameterRequirementsByAction(action?: string): string {
+ if (!action) {
+ return 'Please specify an action: "get", "edit", "getFieldOptions", or "create".';
+ }
+
+ switch (action.toLowerCase()) {
+ case 'get':
+ return 'The "get" action accepts an optional documentId parameter.';
+ case 'edit':
+ return 'The "edit" action requires documentId and fieldEdits parameters. fieldEdits must be a JSON array of field edits.';
+ case 'getFieldOptions':
+ return 'The "getFieldOptions" action does not require any additional parameters. It returns metadata about all available document fields.';
+ case 'create':
+ return 'The "create" action requires title, data, and doc_type parameters.';
+ default:
+ return `Unknown action "${action}". Valid actions are "get", "edit", "getFieldOptions", or "create".`;
+ }
+ }
+}
diff --git a/src/client/views/nodes/chatbot/tools/FileContentTool.ts b/src/client/views/nodes/chatbot/tools/FileContentTool.ts
new file mode 100644
index 000000000..f994aab67
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/FileContentTool.ts
@@ -0,0 +1,78 @@
+import { Observation } from '../types/types';
+import { ParametersType, ToolInfo } from '../types/tool_types';
+import { Vectorstore } from '../vectorstore/Vectorstore';
+import { BaseTool } from './BaseTool';
+
+const fileContentToolParams = [
+ {
+ name: 'filepaths',
+ type: 'string[]',
+ description: 'Array of file paths to retrieve content for. Limited to a maximum of 3 files.',
+ required: true,
+ },
+] as const;
+
+type FileContentToolParamsType = typeof fileContentToolParams;
+
+const fileContentToolInfo: ToolInfo<FileContentToolParamsType> = {
+ name: 'fileContent',
+ description: 'Retrieves the complete content of up to 3 specified files from the Dash codebase to help understand implementation details.',
+ citationRules: `When using the FileContentTool:
+1. Present file content clearly with proper code formatting
+2. Include the file path at the beginning of each file's content
+3. Use this tool after identifying relevant files with the CodebaseSummarySearchTool
+4. Maximum of 3 files can be retrieved at once`,
+ parameterRules: fileContentToolParams,
+};
+
+export class FileContentTool extends BaseTool<FileContentToolParamsType> {
+ constructor(private vectorstore: Vectorstore) {
+ super(fileContentToolInfo);
+ }
+
+ async execute(args: ParametersType<FileContentToolParamsType>): Promise<Observation[]> {
+ console.log(`Executing file content retrieval for: ${args.filepaths.join(', ')}`);
+
+ // Enforce the limit of 3 files
+ const filepaths = args.filepaths.slice(0, 3);
+ if (args.filepaths.length > 3) {
+ console.warn(`FileContentTool: Request for ${args.filepaths.length} files was limited to 3`);
+ }
+
+ const observations: Observation[] = [];
+
+ if (filepaths.length === 0) {
+ return [
+ {
+ type: 'text',
+ text: 'No filepaths provided. Please specify at least one file path to retrieve content.',
+ },
+ ];
+ }
+
+ // Add initial message
+ observations.push({
+ type: 'text',
+ text: `Retrieving content for ${filepaths.length} file(s):\n\n`,
+ });
+
+ // Fetch content for each file
+ for (const filepath of filepaths) {
+ const content = await this.vectorstore.getFileContent(filepath);
+
+ if (content) {
+ observations.push({
+ type: 'text',
+ text: `File: ${filepath}\n\n\`\`\`\n${content}\n\`\`\`\n\n`,
+ });
+ } else {
+ observations.push({
+ type: 'text',
+ text: `Error: Could not retrieve content for file "${filepath}"\n\n`,
+ });
+ }
+ }
+
+ return observations;
+ }
+}
diff --git a/src/client/views/nodes/chatbot/tools/FileNamesTool.ts b/src/client/views/nodes/chatbot/tools/FileNamesTool.ts
new file mode 100644
index 000000000..b69874afa
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/FileNamesTool.ts
@@ -0,0 +1,34 @@
+import { Observation } from '../types/types';
+import { ParametersType, ToolInfo } from '../types/tool_types';
+import { Vectorstore } from '../vectorstore/Vectorstore';
+import { BaseTool } from './BaseTool';
+
+const fileNamesToolParams = [] as const;
+
+type FileNamesToolParamsType = typeof fileNamesToolParams;
+
+const fileNamesToolInfo: ToolInfo<FileNamesToolParamsType> = {
+ name: 'fileNames',
+ description: 'Retrieves the names of all files in the Dash codebase to help understand the codebase structure.',
+ citationRules: `No citation needed.`,
+ parameterRules: fileNamesToolParams,
+};
+
+export class FileNamesTool extends BaseTool<FileNamesToolParamsType> {
+ constructor(private vectorstore: Vectorstore) {
+ super(fileNamesToolInfo);
+ }
+
+ async execute(args: ParametersType<FileNamesToolParamsType>): Promise<Observation[]> {
+ console.log(`Executing file names retrieval`);
+
+ const filepaths = await this.vectorstore.getFileNames();
+
+ return [
+ {
+ type: 'text',
+ text: `The file names in the codebase are: ${filepaths}`,
+ },
+ ];
+ }
+}
diff --git a/src/client/views/nodes/chatbot/tools/RAGTool.ts b/src/client/views/nodes/chatbot/tools/RAGTool.ts
index ef374ed22..af44de520 100644
--- a/src/client/views/nodes/chatbot/tools/RAGTool.ts
+++ b/src/client/views/nodes/chatbot/tools/RAGTool.ts
@@ -3,6 +3,7 @@ import { Observation, RAGChunk } from '../types/types';
import { ParametersType, ToolInfo } from '../types/tool_types';
import { Vectorstore } from '../vectorstore/Vectorstore';
import { BaseTool } from './BaseTool';
+import { DocumentMetadataTool } from './DocumentMetadataTool';
const ragToolParams = [
{
@@ -11,13 +12,19 @@ const ragToolParams = [
description: "A detailed prompt representing an ideal chunk to embed and compare against document vectors to retrieve the most relevant content for answering the user's query.",
required: true,
},
+ {
+ name: 'doc_ids',
+ type: 'string[]',
+ description: 'An optional array of document IDs to retrieve chunks from. If you want to retrieve chunks from all documents, leave this as an empty array: [] (DO NOT LEAVE THIS EMPTY).',
+ required: false,
+ },
] as const;
type RAGToolParamsType = typeof ragToolParams;
const ragToolInfo: ToolInfo<RAGToolParamsType> = {
name: 'rag',
- description: 'Performs a RAG (Retrieval-Augmented Generation) search on user documents and returns a set of document chunks (text or images) to provide a grounded response based on user documents.',
+ description: `Performs a RAG (Retrieval-Augmented Generation) search on user documents (only PDF, audio, and video are supported—for information about other document types, use the ${DocumentMetadataTool.name} tool) and returns a set of document chunks (text or images) to provide a grounded response based on user documents.`,
citationRules: `When using the RAG tool, the structure must adhere to the format described in the ReAct prompt. Below are additional guidelines specifically for RAG-based responses:
1. **Grounded Text Guidelines**:
@@ -68,14 +75,14 @@ export class RAGTool extends BaseTool<RAGToolParamsType> {
}
async execute(args: ParametersType<RAGToolParamsType>): Promise<Observation[]> {
- const relevantChunks = await this.vectorstore.retrieve(args.hypothetical_document_chunk);
+ const relevantChunks = await this.vectorstore.retrieve(args.hypothetical_document_chunk, undefined, args.doc_ids ?? undefined);
const formattedChunks = await this.getFormattedChunks(relevantChunks);
return formattedChunks;
}
async getFormattedChunks(relevantChunks: RAGChunk[]): Promise<Observation[]> {
try {
- const { formattedChunks } = await Networking.PostToServer('/formatChunks', { relevantChunks }) as { formattedChunks: Observation[]}
+ const { formattedChunks } = (await Networking.PostToServer('/formatChunks', { relevantChunks })) as { formattedChunks: Observation[] };
if (!formattedChunks) {
throw new Error('Failed to format chunks');
diff --git a/src/client/views/nodes/chatbot/tools/SearchTool.ts b/src/client/views/nodes/chatbot/tools/SearchTool.ts
index 6a11407a5..8e6edce8c 100644
--- a/src/client/views/nodes/chatbot/tools/SearchTool.ts
+++ b/src/client/views/nodes/chatbot/tools/SearchTool.ts
@@ -3,6 +3,9 @@ import { Networking } from '../../../../Network';
import { BaseTool } from './BaseTool';
import { Observation } from '../types/types';
import { ParametersType, ToolInfo } from '../types/tool_types';
+import { Agent } from 'http';
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
+import { StrCast } from '../../../../../fields/Types';
const searchToolParams = [
{
@@ -19,18 +22,18 @@ type SearchToolParamsType = typeof searchToolParams;
const searchToolInfo: ToolInfo<SearchToolParamsType> = {
name: 'searchTool',
- citationRules: 'No citation needed. Cannot cite search results for a response. Use web scraping tools to cite specific information.',
+ citationRules: 'Always cite the search results for a response, if the search results are relevant to the response. Use the chunk_id to cite the search results. If the search results are not relevant to the response, do not cite them. ',
parameterRules: searchToolParams,
description: 'Search the web to find a wide range of websites related to a query or multiple queries. Returns a list of websites and their overviews based on the search queries.',
};
export class SearchTool extends BaseTool<SearchToolParamsType> {
- private _addLinkedUrlDoc: (url: string, id: string) => void;
+ private _docManager: AgentDocumentManager;
private _max_results: number;
- constructor(addLinkedUrlDoc: (url: string, id: string) => void, max_results: number = 4) {
+ constructor(docManager: AgentDocumentManager, max_results: number = 3) {
super(searchToolInfo);
- this._addLinkedUrlDoc = addLinkedUrlDoc;
+ this._docManager = docManager;
this._max_results = max_results;
}
@@ -45,14 +48,21 @@ export class SearchTool extends BaseTool<SearchToolParamsType> {
query,
max_results: this._max_results,
})) as { results: { url: string; snippet: string }[] };
- const data = results.map((result: { url: string; snippet: string }) => {
- const id = uuidv4();
- this._addLinkedUrlDoc(result.url, id);
- return {
- type: 'text' as const,
- text: `<chunk chunk_id="${id}" chunk_type="url"><url>${result.url}</url><overview>${result.snippet}</overview></chunk>`,
- };
- });
+ const data = await Promise.all(
+ results.map(async (result: { url: string; snippet: string }) => {
+ // Create a web document with the URL
+ const id = await this._docManager.createDocInDash('web', result.url, {
+ title: `Search Result: ${result.url}`,
+ text_html: result.snippet,
+ data_useCors: true,
+ });
+
+ return {
+ type: 'text' as const,
+ text: `<chunk chunk_id="${id}" chunk_type="url"><url>${result.url}</url><overview>${result.snippet}</overview></chunk>`,
+ };
+ })
+ );
return data;
} catch (error) {
console.log(error);
diff --git a/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts b/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts
index 19ccd0b36..727d35e2c 100644
--- a/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts
+++ b/src/client/views/nodes/chatbot/tools/WebsiteInfoScraperTool.ts
@@ -3,12 +3,14 @@ import { Networking } from '../../../../Network';
import { BaseTool } from './BaseTool';
import { Observation } from '../types/types';
import { ParametersType, ToolInfo } from '../types/tool_types';
-
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
+import { Doc } from '../../../../../fields/Doc';
+import { StrCast, WebCast } from '../../../../../fields/Types';
const websiteInfoScraperToolParams = [
{
- name: 'urls',
+ name: 'chunk_ids',
type: 'string[]',
- description: 'The URLs of the websites to scrape',
+ description: 'The chunk_ids of the urls to scrape from the SearchTool.',
required: true,
max_inputs: 3,
},
@@ -20,6 +22,7 @@ const websiteInfoScraperToolInfo: ToolInfo<WebsiteInfoScraperToolParamsType> = {
name: 'websiteInfoScraper',
description: 'Scrape detailed information from specific websites relevant to the user query. Returns the text content of the webpages for further analysis and grounding.',
citationRules: `
+ !IMPORTANT! THESE CHUNKS REPLACE THE CHUNKS THAT ARE RETURNED FROM THE SEARCHTOOL.
Your task is to provide a comprehensive response to the user's prompt using the content scraped from relevant websites. Ensure you follow these guidelines for structuring your response:
1. Grounded Text Tag Structure:
@@ -66,38 +69,121 @@ const websiteInfoScraperToolInfo: ToolInfo<WebsiteInfoScraperToolParamsType> = {
};
export class WebsiteInfoScraperTool extends BaseTool<WebsiteInfoScraperToolParamsType> {
- private _addLinkedUrlDoc: (url: string, id: string) => void;
+ private _docManager: AgentDocumentManager;
- constructor(addLinkedUrlDoc: (url: string, id: string) => void) {
+ constructor(docManager: AgentDocumentManager) {
super(websiteInfoScraperToolInfo);
- this._addLinkedUrlDoc = addLinkedUrlDoc;
+ this._docManager = docManager;
}
- async execute(args: ParametersType<WebsiteInfoScraperToolParamsType>): Promise<Observation[]> {
- const urls = args.urls;
-
- // Create an array of promises, each one handling a website scrape for a URL
- const scrapingPromises = urls.map(async url => {
+ /**
+ * Attempts to scrape a website with retry logic
+ * @param url URL to scrape
+ * @param maxRetries Maximum number of retry attempts
+ * @returns The scraped content or error message
+ */
+ private async scrapeWithRetry(chunkDoc: Doc, maxRetries = 2): Promise<Observation> {
+ let lastError = '';
+ let retryCount = 0;
+ const url = WebCast(chunkDoc.data!)!.url.href;
+ console.log(url);
+ console.log(chunkDoc);
+ console.log(chunkDoc.data);
+ const id = chunkDoc.id;
+ // Validate URL format
+ try {
+ new URL(url); // This will throw if URL is invalid
+ } catch (e) {
+ return {
+ type: 'text',
+ text: `Invalid URL format: ${url}. Please provide a valid URL including http:// or https://`,
+ } as Observation;
+ }
+
+ while (retryCount <= maxRetries) {
try {
- const { website_plain_text } = await Networking.PostToServer('/scrapeWebsite', { url });
- const id = uuidv4();
- this._addLinkedUrlDoc(url, id);
+ // Add a slight delay between retries
+ if (retryCount > 0) {
+ console.log(`Retry attempt ${retryCount} for ${url}`);
+ await new Promise(resolve => setTimeout(resolve, retryCount * 2000)); // Increasing delay for each retry
+ }
+
+ const response = await Networking.PostToServer('/scrapeWebsite', { url });
+
+ if (!response || typeof response !== 'object') {
+ lastError = 'Empty or invalid response from server';
+ retryCount++;
+ continue;
+ }
+
+ const { website_plain_text } = response as { website_plain_text: string };
+
+ // Validate content quality
+ if (!website_plain_text) {
+ lastError = 'Retrieved content was empty';
+ retryCount++;
+ continue;
+ }
+
+ if (website_plain_text.length < 100) {
+ console.warn(`Warning: Content from ${url} is very short (${website_plain_text.length} chars)`);
+
+ // Still return it if this is our last try
+ if (retryCount === maxRetries) {
+ return {
+ type: 'text',
+ text: `<chunk chunk_id="${id}" chunk_type="url">\n${website_plain_text}\nNote: Limited content was retrieved from this URL.\n</chunk>`,
+ } as Observation;
+ }
+
+ lastError = 'Retrieved content was too short, trying again';
+ retryCount++;
+ continue;
+ }
+
+ // Process and return content if it looks good
return {
type: 'text',
text: `<chunk chunk_id="${id}" chunk_type="url">\n${website_plain_text}\n</chunk>`,
} as Observation;
} catch (error) {
- console.log(error);
- return {
- type: 'text',
- text: `An error occurred while scraping the website: ${url}`,
- } as Observation;
+ lastError = error instanceof Error ? error.message : 'Unknown error';
+ console.log(`Error scraping ${url} (attempt ${retryCount + 1}):`, error);
}
- });
+
+ retryCount++;
+ }
+
+ // All attempts failed
+ return {
+ type: 'text',
+ text: `Unable to scrape website: ${url}. Error: ${lastError}`,
+ } as Observation;
+ }
+
+ async execute(args: ParametersType<WebsiteInfoScraperToolParamsType>): Promise<Observation[]> {
+ const chunk_ids = args.chunk_ids;
+
+ // Create an array of promises, each one handling a website scrape for a URL
+ const scrapingPromises = chunk_ids.map(chunk_id => this.scrapeWithRetry(this._docManager.getDocument(chunk_id)!));
// Wait for all scraping promises to resolve
const results = await Promise.all(scrapingPromises);
+ // Check if we got any successful results
+ const successfulResults = results.filter(result => {
+ if (result.type !== 'text') return false;
+ return (result as { type: 'text'; text: string }).text.includes('chunk_id') && !(result as { type: 'text'; text: string }).text.includes('Unable to scrape');
+ });
+
+ // If all scrapes failed, provide a more helpful error message
+ if (successfulResults.length === 0 && results.length > 0) {
+ results.push({
+ type: 'text',
+ text: `Note: All website scraping attempts failed. Please try with different URLs or try again later.`,
+ } as Observation);
+ }
+
return results;
}
}
diff --git a/src/client/views/nodes/chatbot/tools/WikipediaTool.ts b/src/client/views/nodes/chatbot/tools/WikipediaTool.ts
index ee815532a..ec5d83e52 100644
--- a/src/client/views/nodes/chatbot/tools/WikipediaTool.ts
+++ b/src/client/views/nodes/chatbot/tools/WikipediaTool.ts
@@ -32,7 +32,7 @@ export class WikipediaTool extends BaseTool<WikipediaToolParamsType> {
async execute(args: ParametersType<WikipediaToolParamsType>): Promise<Observation[]> {
try {
- const { text } = await Networking.PostToServer('/getWikipediaSummary', { title: args.title });
+ const { text } = (await Networking.PostToServer('/getWikipediaSummary', { title: args.title })) as { text: string };
const id = uuidv4();
const url = `https://en.wikipedia.org/wiki/${args.title.replace(/ /g, '_')}`;
this._addLinkedUrlDoc(url, id);
diff --git a/src/client/views/nodes/chatbot/tools/dynamic/InspirationalQuotesTool.ts b/src/client/views/nodes/chatbot/tools/dynamic/InspirationalQuotesTool.ts
new file mode 100644
index 000000000..23bbe1d76
--- /dev/null
+++ b/src/client/views/nodes/chatbot/tools/dynamic/InspirationalQuotesTool.ts
@@ -0,0 +1,39 @@
+import { Observation } from '../../types/types';
+import { ParametersType, ToolInfo } from '../../types/tool_types';
+import { BaseTool } from '../BaseTool';
+
+const inspirationalQuotesParams = [
+ {
+ name: 'category',
+ type: 'string',
+ description: 'The category of inspirational quotes to retrieve',
+ required: false
+ }
+ ] as const;
+
+ type InspirationalQuotesParamsType = typeof inspirationalQuotesParams;
+
+ const inspirationalQuotesInfo: ToolInfo<InspirationalQuotesParamsType> = {
+ name: 'inspirationalquotestool',
+ description: 'Provides a random inspirational quote from a predefined list.',
+ citationRules: 'No citation needed.',
+ parameterRules: inspirationalQuotesParams
+ };
+
+ export class InspirationalQuotesTool extends BaseTool<InspirationalQuotesParamsType> {
+ constructor() {
+ super(inspirationalQuotesInfo);
+ }
+
+ async execute(args: ParametersType<InspirationalQuotesParamsType>): Promise<Observation[]> {
+ const quotes = [
+ "The only way to do great work is to love what you do. - Steve Jobs",
+ "The best time to plant a tree was 20 years ago. The second best time is now. - Chinese Proverb",
+ "Your time is limited, so don’t waste it living someone else’s life. - Steve Jobs",
+ "Not everything that is faced can be changed, but nothing can be changed until it is faced. - James Baldwin",
+ "The purpose of our lives is to be happy. - Dalai Lama"
+ ];
+ const randomQuote = quotes[Math.floor(Math.random() * quotes.length)];
+ return [{ type: 'text', text: randomQuote }];
+ }
+ } \ No newline at end of file
diff --git a/src/client/views/nodes/chatbot/types/tool_types.ts b/src/client/views/nodes/chatbot/types/tool_types.ts
index 6ae48992d..6a0b5e708 100644
--- a/src/client/views/nodes/chatbot/types/tool_types.ts
+++ b/src/client/views/nodes/chatbot/types/tool_types.ts
@@ -50,3 +50,29 @@ export type ParamType<P extends Parameter> = P['type'] extends keyof TypeMap ? T
export type ParametersType<P extends ReadonlyArray<Parameter>> = {
[K in P[number] as K['name']]: ParamType<K>;
};
+
+
/**
 * List of supported document types that can be created via text LLM.
 *
 * Each member's string value is identical to its name, so a member can be
 * used directly wherever a doc_type string is expected (e.g. tool parameters
 * validated against this enum).
 */
export enum supportedDocTypes {
    flashcard = 'flashcard',
    note = 'note',
    html = 'html',
    equation = 'equation',
    functionplot = 'functionplot',
    dataviz = 'dataviz',
    notetaking = 'notetaking',
    audio = 'audio',
    video = 'video',
    pdf = 'pdf',
    rtf = 'rtf',
    message = 'message',
    collection = 'collection',
    image = 'image',
    deck = 'deck',
    web = 'web',
    comparison = 'comparison',
    diagram = 'diagram',
    script = 'script',
}
diff --git a/src/client/views/nodes/chatbot/types/types.ts b/src/client/views/nodes/chatbot/types/types.ts
index 882e74ebb..0d1804b2d 100644
--- a/src/client/views/nodes/chatbot/types/types.ts
+++ b/src/client/views/nodes/chatbot/types/types.ts
@@ -15,8 +15,9 @@ export enum CHUNK_TYPE {
TABLE = 'table',
URL = 'url',
CSV = 'CSV',
- MEDIA = 'media',
+ //MEDIA = 'media',
VIDEO = 'video',
+ AUDIO = 'audio',
}
export enum PROCESSING_TYPE {
@@ -100,6 +101,7 @@ export interface RAGChunk {
export interface SimplifiedChunk {
chunkId: string;
+ doc_id: string;
startPage?: number;
endPage?: number;
location?: string;
@@ -108,6 +110,7 @@ export interface SimplifiedChunk {
start_time?: number;
end_time?: number;
indexes?: string[];
+ text?: string;
}
export interface AI_Document {
diff --git a/src/client/views/nodes/chatbot/utils/AgentDocumentManager.ts b/src/client/views/nodes/chatbot/utils/AgentDocumentManager.ts
new file mode 100644
index 000000000..3c8b49f33
--- /dev/null
+++ b/src/client/views/nodes/chatbot/utils/AgentDocumentManager.ts
@@ -0,0 +1,1113 @@
+import { action, computed, makeObservable, observable, ObservableMap, reaction, runInAction } from 'mobx';
+import { observer } from 'mobx-react';
+import { v4 as uuidv4 } from 'uuid';
+import { Doc, StrListCast } from '../../../../../fields/Doc';
+import { DocData } from '../../../../../fields/DocSymbols';
+import { Id } from '../../../../../fields/FieldSymbols';
+import { List } from '../../../../../fields/List';
+import { DocCast, StrCast } from '../../../../../fields/Types';
+import { DocServer } from '../../../../DocServer';
+import { Docs, DocumentOptions } from '../../../../documents/Documents';
+import { DocumentManager } from '../../../../util/DocumentManager';
+import { LinkManager, UPDATE_SERVER_CACHE } from '../../../../util/LinkManager';
+import { DocumentView } from '../../DocumentView';
+import { ChatBox, parsedDoc } from '../chatboxcomponents/ChatBox';
+import { supportedDocTypes } from '../types/tool_types';
+import { CHUNK_TYPE, RAGChunk, SimplifiedChunk } from '../types/types';
+
+/**
+ * Interface representing a document in the freeform view
+ */
+interface AgentDocument {
+ layoutDoc: Doc;
+ dataDoc: Doc;
+}
+
+/**
+ * Class to manage documents in a freeform view
+ */
+export class AgentDocumentManager {
+ @observable private documentsById: ObservableMap<string, AgentDocument>;
+ private chatBox: ChatBox;
+ private chatBoxDocument: Doc | null = null;
+ private fieldMetadata: Record<string, any> = {};
+ @observable private simplifiedChunks: ObservableMap<string, SimplifiedChunk>;
+
+ /**
+ * Creates a new DocumentManager
+ * @param templateDocument The document that serves as a template for new documents
+ */
+ constructor(chatBox: ChatBox) {
+ makeObservable(this);
+ const agentDoc = DocCast(chatBox.Document.agentDocument) ?? new Doc();
+ const chunk_simpl = DocCast(agentDoc.chunk_simpl) ?? new Doc();
+
+ agentDoc.title = chatBox.Document.title + '_agentDocument';
+ chunk_simpl.title = '_chunk_simpl';
+ chatBox.Document.agentDocument = agentDoc;
+ DocCast(chatBox.Document.agentDocument)!.chunk_simpl = chunk_simpl;
+
+ this.simplifiedChunks = StrListCast(chunk_simpl.mapping).reduce((mapping, chunks) => {
+ StrListCast(chunks).forEach(chunk => {
+ const parsed = JSON.parse(StrCast(chunk));
+ mapping.set(parsed.chunkId, parsed);
+ });
+ return mapping;
+ }, new ObservableMap<string, SimplifiedChunk>());
+
+ this.documentsById = StrListCast(agentDoc.mapping).reduce((mapping, content) => {
+ const [id, layoutId, docId] = content.split(':');
+ const layoutDoc = DocServer.GetCachedRefField(layoutId);
+ const dataDoc = DocServer.GetCachedRefField(docId);
+ if (!layoutDoc || !dataDoc) {
+ console.warn(`Document with ID ${id} not found in mapping`);
+ } else {
+ mapping.set(id, { layoutDoc, dataDoc });
+ }
+ return mapping;
+ }, new ObservableMap<string, AgentDocument>());
+ console.log(`AgentDocumentManager initialized with ${this.documentsById.size} documents`);
+ this.chatBox = chatBox;
+ this.chatBoxDocument = chatBox.Document;
+
+ reaction(
+ () => this.documentsById.values(),
+ () => {
+ if (this.chatBoxDocument && DocCast(this.chatBoxDocument.agentDocument)) {
+ DocCast(this.chatBoxDocument.agentDocument)!.mapping = new List<string>(Array.from(this.documentsById.entries()).map(([id, agent]) => `${id}:${agent.dataDoc[Id]}:${agent.layoutDoc[Id]}`));
+ }
+ }
+ //{ fireImmediately: true }
+ );
+ reaction(
+ () => this.simplifiedChunks.values(),
+ () => {
+ if (this.chatBoxDocument && DocCast(this.chatBoxDocument.agentDocument)) {
+ DocCast(DocCast(this.chatBoxDocument.agentDocument)!.chunk_simpl)!.mapping = new List<string>(Array.from(this.simplifiedChunks.values()).map(chunk => JSON.stringify(chunk)));
+ }
+ }
+ //{ fireImmediately: true }
+ );
+ this.processDocument(this.chatBoxDocument);
+ this.initializeFieldMetadata();
+ }
+
+ /**
+ * Extracts field metadata from DocumentOptions class
+ */
+ private initializeFieldMetadata() {
+ // Parse DocumentOptions to extract field definitions
+ const documentOptionsInstance = new DocumentOptions();
+ const documentOptionsEntries = Object.entries(documentOptionsInstance);
+
+ for (const [fieldName, fieldInfo] of documentOptionsEntries) {
+ // Extract field information
+ const fieldData: Record<string, any> = {
+ name: fieldName,
+ withoutUnderscore: fieldName.startsWith('_') ? fieldName.substring(1) : fieldName,
+ description: '',
+ type: 'unknown',
+ required: false,
+ defaultValue: undefined,
+ possibleValues: [],
+ };
+
+ // Check if fieldInfo has description property (it's likely a FInfo instance)
+ if (fieldInfo && typeof fieldInfo === 'object' && 'description' in fieldInfo) {
+ fieldData.description = fieldInfo.description;
+
+ // Extract field type if available
+ if ('fieldType' in fieldInfo) {
+ fieldData.type = fieldInfo.fieldType;
+ }
+
+ // Extract possible values if available
+ if ('values' in fieldInfo && Array.isArray(fieldInfo.values)) {
+ fieldData.possibleValues = fieldInfo.values;
+ }
+ }
+
+ this.fieldMetadata[fieldName] = fieldData;
+ }
+ }
+
+ /**
+ * Gets all documents in the same Freeform view as the ChatBox
+ * Uses the LinkManager to get all linked documents, similar to how ChatBox does it
+ */
+ public initializeFindDocsFreeform() {
+ // Reset collections
+ //this.documentsById.clear();
+
+ try {
+ // Use the LinkManager approach which is proven to work in ChatBox
+ if (this.chatBoxDocument) {
+ console.log('Finding documents linked to ChatBox document with ID:', this.chatBoxDocument[Id]);
+
+ // Get directly linked documents via LinkManager
+ const linkedDocs = LinkManager.Instance.getAllRelatedLinks(this.chatBoxDocument)
+ .map(d => DocCast(LinkManager.getOppositeAnchor(d, this.chatBoxDocument!)))
+ .map(d => DocCast(d?.annotationOn, d))
+ .filter(d => d);
+
+ console.log(`Found ${linkedDocs.length} linked documents via LinkManager`);
+
+ // Process the linked documents
+ linkedDocs.forEach(async (doc: Doc | undefined) => {
+ if (doc) {
+ await this.processDocument(doc);
+ console.log('Processed linked document:', doc[Id], doc.title, doc.type);
+ }
+ });
+ }
+ } catch (error) {
+ console.error('Error finding documents in Freeform view:', error);
+ }
+ }
+
+ /**
+ * Process a document by ensuring it has an ID and adding it to the appropriate collections
+ * @param doc The document to process
+ */
+ @action
+ public async processDocument(doc: Doc): Promise<string> {
+ // Ensure document has a persistent ID
+ const docId = this.ensureDocumentId(doc);
+ if (doc.chunk_simplified) {
+ const newChunks: SimplifiedChunk[] = [];
+ for (const chunk of JSON.parse(StrCast(doc.chunk_simplified))) {
+ console.log('chunk', chunk);
+ newChunks.push(chunk as SimplifiedChunk);
+ }
+ console.log('Added simplified chunks to simplifiedChunks:', docId, newChunks);
+ this.addSimplifiedChunks(newChunks);
+ //DocCast(DocCast(this.chatBoxDocument!.agentDocument)!.chunk_simpl)!.mapping = new List<string>(Array.from(this.simplifiedChunks.values()).map(chunk => JSON.stringify(chunk)));
+ }
+ // Only add if we haven't already processed this document
+ if (!this.documentsById.has(docId)) {
+ this.documentsById.set(docId, { layoutDoc: doc, dataDoc: doc[DocData] });
+ console.log('Added document to documentsById:', doc[Id], docId, doc[Id], doc[DocData][Id]);
+ }
+ return docId;
+ }
+
+ /**
+ * Ensures a document has a persistent ID stored in its metadata
+ * @param doc The document to ensure has an ID
+ * @returns The document's ID
+ */
+ private ensureDocumentId(doc: Doc): string {
+ let docId: string | undefined;
+
+ // 1. Try the direct id property if it exists
+ if (doc[Id]) {
+ console.log('Found document ID (normal):', doc[Id]);
+ docId = doc[Id];
+ } else {
+ throw new Error('No document ID found');
+ }
+
+ return docId;
+ }
+
    /**
     * Extracts metadata from a specific document.
     *
     * Collects the values of every field known to this.fieldMetadata from both
     * the layout and data documents, recording for each field which document it
     * was found on. Layout takes precedence in fieldLocationMap: a field seen
     * on the layout doc is mapped to 'layout', and 'data' is only recorded when
     * no layout entry exists.
     * @param id The ID of the document (key into documentsById) to extract metadata from
     * @returns An object containing the document's metadata, or null when the
     *          id is empty or unknown
     */
    public extractDocumentMetadata(id: string) {
        if (!id) return null;
        const agentDoc = this.documentsById.get(id);
        if (!agentDoc) return null;
        const layoutDoc = agentDoc.layoutDoc;
        const dataDoc = agentDoc.dataDoc;

        // fields.layout / fields.data hold formatted values keyed by field name;
        // fieldLocationMap maps underscore-stripped names to 'layout' or 'data'.
        const metadata: Record<string, any> = {
            id: layoutDoc[Id] || dataDoc[Id] || '',
            title: layoutDoc.title || '',
            type: layoutDoc.type || '',
            fields: {
                layout: {},
                data: {},
            },
            fieldLocationMap: {},
        };

        // Process all known field definitions
        Object.keys(this.fieldMetadata).forEach(fieldName => {
            // NOTE(review): fieldDef is looked up but never used below —
            // candidate for removal.
            const fieldDef = this.fieldMetadata[fieldName];
            const strippedName = fieldName.startsWith('_') ? fieldName.substring(1) : fieldName;

            // Check if field exists on layout document
            let layoutValue = undefined;
            if (layoutDoc) {
                layoutValue = layoutDoc[fieldName];
                if (layoutValue !== undefined) {
                    // Field exists on layout document
                    metadata.fields.layout[fieldName] = this.formatFieldValue(layoutValue);
                    metadata.fieldLocationMap[strippedName] = 'layout';
                }
            }

            // Check if field exists on data document
            let dataValue = undefined;
            if (dataDoc) {
                dataValue = dataDoc[fieldName];
                if (dataValue !== undefined) {
                    // Field exists on data document; layout wins in the
                    // location map when both documents carry the field.
                    metadata.fields.data[fieldName] = this.formatFieldValue(dataValue);
                    if (!metadata.fieldLocationMap[strippedName]) {
                        metadata.fieldLocationMap[strippedName] = 'data';
                    }
                }
            }

            // For fields with stripped names (without leading underscore),
            // also check if they exist on documents without the underscore
            if (fieldName.startsWith('_')) {
                const nonUnderscoreFieldName = fieldName.substring(1);

                if (layoutDoc) {
                    const nonUnderscoreLayoutValue = layoutDoc[nonUnderscoreFieldName];
                    if (nonUnderscoreLayoutValue !== undefined) {
                        metadata.fields.layout[nonUnderscoreFieldName] = this.formatFieldValue(nonUnderscoreLayoutValue);
                        metadata.fieldLocationMap[nonUnderscoreFieldName] = 'layout';
                    }
                }

                if (dataDoc) {
                    const nonUnderscoreDataValue = dataDoc[nonUnderscoreFieldName];
                    if (nonUnderscoreDataValue !== undefined) {
                        metadata.fields.data[nonUnderscoreFieldName] = this.formatFieldValue(nonUnderscoreDataValue);
                        if (!metadata.fieldLocationMap[nonUnderscoreFieldName]) {
                            metadata.fieldLocationMap[nonUnderscoreFieldName] = 'data';
                        }
                    }
                }
            }
        });

        // Add common field aliases for easier discovery
        // This helps users understand both width and _width refer to the same property
        if (metadata.fields.layout._width !== undefined && metadata.fields.layout.width === undefined) {
            metadata.fields.layout.width = metadata.fields.layout._width;
            metadata.fieldLocationMap.width = 'layout';
        }

        if (metadata.fields.layout._height !== undefined && metadata.fields.layout.height === undefined) {
            metadata.fields.layout.height = metadata.fields.layout._height;
            metadata.fieldLocationMap.height = 'layout';
        }

        return metadata;
    }
+
    /**
     * Formats a field value for JSON output.
     *
     * Doc references become small summary objects, rich-text JSON is reduced
     * to its plain text, other objects are stringified, and primitives pass
     * through unchanged.
     * @param value The field value to format
     * @returns A JSON-friendly representation of the field value
     */
    private formatFieldValue(value: any): any {
        // Normalize both null and undefined to null.
        if (value === undefined || value === null) {
            return null;
        }

        // Handle Doc objects: summarize instead of serializing the whole doc.
        if (value instanceof Doc) {
            return {
                type: 'Doc',
                id: value[Id] || this.ensureDocumentId(value),
                title: value.title || '',
                docType: value.type || '',
            };
        }

        // Handle RichTextField (try to extract plain text). The substring
        // checks are a heuristic for ProseMirror-style JSON documents.
        if (typeof value === 'string' && value.includes('"type":"doc"') && value.includes('"content":')) {
            try {
                const rtfObj = JSON.parse(value);
                // If this looks like a rich text field structure
                if (rtfObj.doc && rtfObj.doc.content) {
                    // Recursively extract text from the content
                    let plainText = '';
                    const extractText = (node: any) => {
                        if (node.text) {
                            plainText += node.text;
                        }
                        if (node.content && Array.isArray(node.content)) {
                            node.content.forEach((child: any) => extractText(child));
                        }
                    };

                    extractText(rtfObj.doc);

                    // If we successfully extracted text, show it, but also preserve the original value
                    if (plainText) {
                        return {
                            type: 'RichText',
                            text: plainText,
                            length: plainText.length,
                            // Don't include the full value as it can be very large
                        };
                    }
                }
            } catch (e) {
                // If parsing fails, just treat as a regular string
            }
        }

        // Handle arrays and complex objects
        if (typeof value === 'object') {
            // If the object has a toString method, use it
            // (i.e. a toString overridden somewhere below Object.prototype).
            if (value.toString && value.toString !== Object.prototype.toString) {
                return value.toString();
            }

            try {
                // Try to convert to JSON string
                return JSON.stringify(value);
            } catch (e) {
                // e.g. circular structures cannot be stringified.
                return '[Complex Object]';
            }
        }

        // Return primitive values (string/number/boolean/etc.) as is
        return value;
    }
+
+ /**
+ * Converts a string field value to the appropriate type based on field metadata.
+ * Booleans and numbers pass through; "true"/"false" strings become booleans;
+ * the 'text' field is wrapped into RichTextField JSON; other fields are
+ * converted per their metadata type (boolean/number/date/list/json), falling
+ * back to the raw string when conversion fails.
+ * @param fieldName The name of the field
+ * @param fieldValue The string value to convert
+ * @returns The converted value with the appropriate type
+ */
+ private convertFieldValue(fieldName: string, fieldValue: any): any {
+ // If fieldValue is already a number or boolean, we don't need to convert it from string
+ if (typeof fieldValue === 'number' || typeof fieldValue === 'boolean') {
+ return fieldValue;
+ }
+
+ // If fieldValue is a string "true" or "false", convert to boolean.
+ // NOTE(review): this runs before the metadata lookup, so even string-typed
+ // fields can never store the literal words "true"/"false" — confirm intended.
+ if (typeof fieldValue === 'string') {
+ if (fieldValue.toLowerCase() === 'true') {
+ return true;
+ }
+ if (fieldValue.toLowerCase() === 'false') {
+ return false;
+ }
+ }
+
+ // If fieldValue is not a string (and not a number or boolean), convert it to string
+ if (typeof fieldValue !== 'string') {
+ fieldValue = String(fieldValue);
+ }
+
+ // Special handling for text field - convert to proper RichTextField format
+ if (fieldName === 'text') {
+ // Only pass the value through when it already parses to a rich-text object
+ // (has a 'doc' property). The previous check returned ANY parseable JSON
+ // unchanged, so inputs like "123" or "true" were stored as invalid
+ // RichTextField payloads instead of being wrapped as plain text.
+ try {
+ const parsed = JSON.parse(fieldValue);
+ if (parsed && typeof parsed === 'object' && 'doc' in parsed) {
+ return fieldValue;
+ }
+ } catch (e) {
+ // Not JSON at all: plain text, wrapped below.
+ }
+ // It's a plain text string, so convert it to RichTextField format
+ const rtf = {
+ doc: {
+ type: 'doc',
+ content: [
+ {
+ type: 'paragraph',
+ content: [
+ {
+ type: 'text',
+ text: fieldValue,
+ },
+ ],
+ },
+ ],
+ },
+ };
+ return JSON.stringify(rtf);
+ }
+
+ // Look up field metadata under both the '_'-prefixed and bare names.
+ const normalizedFieldName = fieldName.startsWith('_') ? fieldName : `_${fieldName}`;
+ const strippedFieldName = fieldName.startsWith('_') ? fieldName.substring(1) : fieldName;
+ const fieldMeta = this.fieldMetadata[normalizedFieldName] || this.fieldMetadata[strippedFieldName];
+
+ // Special handling for width and height without metadata
+ if (!fieldMeta && (fieldName === '_width' || fieldName === '_height' || fieldName === 'width' || fieldName === 'height')) {
+ const num = Number(fieldValue);
+ return isNaN(num) ? fieldValue : num;
+ }
+
+ if (!fieldMeta) {
+ // If no metadata found, just return the string value
+ return fieldValue;
+ }
+
+ // Convert based on field type. Coerce to string defensively: the previous code
+ // called fieldType.includes() and would throw if the metadata type was undefined.
+ const fieldType = String(fieldMeta.type ?? '');
+
+ if (fieldType === 'boolean') {
+ return fieldValue.toLowerCase() === 'true';
+ }
+ if (fieldType === 'number') {
+ const num = Number(fieldValue);
+ return isNaN(num) ? fieldValue : num;
+ }
+ if (fieldType === 'date') {
+ // Dates are stored as numeric timestamps.
+ try {
+ return new Date(fieldValue).getTime();
+ } catch (e) {
+ return fieldValue;
+ }
+ }
+ if (fieldType.includes('list') || fieldType.includes('array') || fieldType === 'json' || fieldType === 'object') {
+ // Try to parse as a JSON array/object.
+ try {
+ return JSON.parse(fieldValue);
+ } catch (e) {
+ return fieldValue;
+ }
+ }
+
+ // Default to string
+ return fieldValue;
+ }
+
+ /**
+ * Extracts all field metadata from DocumentOptions.
+ * Builds a catalog of every known field: per-field info, primitive type
+ * buckets, bare-name to underscore-name mappings, purpose-based groupings,
+ * and notes on special fields (auto-sizing, rich text).
+ * @returns A structured object containing metadata about all available document fields
+ */
+ public getAllFieldMetadata() {
+ // Start with our already populated fieldMetadata from the DocumentOptions class
+ const result: Record<string, any> = {
+ fieldCount: Object.keys(this.fieldMetadata).length,
+ fields: {},
+ // Only the primitive buckets are surfaced today; doc/list/date/enumeration
+ // buckets were previously declared but never populated.
+ fieldsByType: {
+ string: [],
+ number: [],
+ boolean: [],
+ },
+ fieldNameMappings: {},
+ commonFields: {
+ appearance: [],
+ position: [],
+ size: [],
+ content: [],
+ behavior: [],
+ layout: [],
+ },
+ };
+
+ // Process each field in the metadata
+ Object.entries(this.fieldMetadata).forEach(([fieldName, fieldInfo]) => {
+ const strippedName = fieldName.startsWith('_') ? fieldName.substring(1) : fieldName;
+
+ // Map the bare display name to its '_'-prefixed layout variant.
+ if (fieldName.startsWith('_')) {
+ result.fieldNameMappings[strippedName] = fieldName;
+ }
+
+ // Create structured field metadata
+ result.fields[fieldName] = {
+ name: fieldName,
+ displayName: strippedName,
+ description: fieldInfo.description || '',
+ type: fieldInfo.fieldType || 'unknown',
+ possibleValues: fieldInfo.values || [],
+ };
+
+ // Categorize by field type
+ const type = fieldInfo.fieldType?.toLowerCase() || 'unknown';
+ if (type === 'string') {
+ result.fieldsByType.string.push(fieldName);
+ } else if (type === 'number') {
+ result.fieldsByType.number.push(fieldName);
+ } else if (type === 'boolean') {
+ result.fieldsByType.boolean.push(fieldName);
+ }
+
+ // Categorize by field purpose. The content test runs before the position
+ // test, and position requires an EXACT 'x'/'y' name: the previous bare
+ // substring test ("name contains 'x'") misfiled every field whose name
+ // merely contained the letter x — e.g. 'text' landed in position.
+ const lowerName = strippedName.toLowerCase();
+ if (lowerName.includes('width') || lowerName.includes('height') || lowerName.includes('size')) {
+ result.commonFields.size.push(fieldName);
+ } else if (lowerName.includes('color') || lowerName.includes('background') || lowerName.includes('border')) {
+ result.commonFields.appearance.push(fieldName);
+ } else if (lowerName.includes('text') || lowerName.includes('title') || lowerName.includes('data')) {
+ result.commonFields.content.push(fieldName);
+ } else if (lowerName === 'x' || lowerName === 'y' || lowerName.includes('position') || lowerName.includes('pan')) {
+ result.commonFields.position.push(fieldName);
+ } else if (lowerName.includes('action') || lowerName.includes('click') || lowerName.includes('event')) {
+ result.commonFields.behavior.push(fieldName);
+ } else if (lowerName.includes('layout')) {
+ result.commonFields.layout.push(fieldName);
+ }
+ });
+
+ // Add special section for auto-sizing related fields
+ result.autoSizingFields = {
+ height: {
+ autoHeightField: '_layout_autoHeight',
+ heightField: '_height',
+ displayName: 'height',
+ usage: 'To manually set height, first set layout_autoHeight to false',
+ },
+ width: {
+ autoWidthField: '_layout_autoWidth',
+ widthField: '_width',
+ displayName: 'width',
+ usage: 'To manually set width, first set layout_autoWidth to false',
+ },
+ };
+
+ // Add special section for text field format
+ result.specialFields = {
+ text: {
+ name: 'text',
+ description: 'Document text content',
+ format: 'RichTextField',
+ note: 'When setting text, provide plain text - it will be automatically converted to the correct format',
+ example: 'For setting: "Hello world" (plain text); For getting: Will be converted to plaintext for display',
+ },
+ };
+
+ return result;
+ }
+
+ /**
+ * Edits a specific field on a document.
+ *
+ * Target-document resolution order:
+ * 1. a field already present on the layout doc is updated in place;
+ * 2. otherwise '_'-prefixed names are treated as layout properties;
+ * 3. otherwise the data doc is used when available;
+ * 4. otherwise fall back to whichever doc exists.
+ * @param docId The ID of the document to edit
+ * @param fieldName The name of the field to edit ('width'/'height' alias to '_width'/'_height')
+ * @param fieldValue The new value for the field (string, number, or boolean)
+ * @returns Object with success status, message, and additional information
+ */
+ public editDocumentField(
+ docId: string,
+ fieldName: string,
+ fieldValue: string | number | boolean
+ ): {
+ success: boolean;
+ message: string;
+ fieldName?: string;
+ originalFieldName?: string;
+ newValue?: any;
+ warning?: string;
+ } {
+ // Normalize the field name. (The previous ternary here returned fieldName in
+ // both branches — a no-op; only the alias mapping below has any effect.)
+ let normalizedFieldName = fieldName;
+
+ // Handle common field name aliases (width -> _width, height -> _height);
+ // layout properties conventionally carry a '_' prefix.
+ if (fieldName === 'width') {
+ normalizedFieldName = '_width';
+ } else if (fieldName === 'height') {
+ normalizedFieldName = '_height';
+ }
+
+ // Fetch the document record once (the previous code performed the same map
+ // lookup twice).
+ const agentDoc = this.documentsById.get(docId);
+ if (!agentDoc) {
+ return { success: false, message: `Document with ID ${docId} not found` };
+ }
+ const { layoutDoc, dataDoc } = agentDoc;
+
+ if (!layoutDoc && !dataDoc) {
+ return { success: false, message: `Could not find layout or data document for document with ID ${docId}` };
+ }
+
+ try {
+ // Convert the field value to the appropriate type based on field metadata
+ const convertedValue = this.convertFieldValue(normalizedFieldName, fieldValue);
+
+ let targetDoc: Doc;
+ let targetLocation: string;
+
+ if (layoutDoc) {
+ // Check whether the field already exists on the layout document.
+ const fieldExistsOnLayout = Doc.Get(layoutDoc, normalizedFieldName, true) !== undefined;
+ // Existing layout fields, '_'-prefixed names, and the no-data-doc case
+ // all resolve to the layout document; everything else goes to data.
+ if (fieldExistsOnLayout || normalizedFieldName.startsWith('_') || !dataDoc) {
+ targetDoc = layoutDoc;
+ targetLocation = 'layout';
+ } else {
+ targetDoc = dataDoc;
+ targetLocation = 'data';
+ }
+ } else if (dataDoc) {
+ targetDoc = dataDoc;
+ targetLocation = 'data';
+ } else {
+ return { success: false, message: `No valid document found for editing` };
+ }
+
+ // Set the field value on the target document
+ targetDoc[normalizedFieldName] = convertedValue;
+
+ return {
+ success: true,
+ message: `Successfully updated field '${normalizedFieldName}' on ${targetLocation} document (ID: ${docId})`,
+ fieldName: normalizedFieldName,
+ originalFieldName: fieldName,
+ newValue: convertedValue,
+ };
+ } catch (error) {
+ console.error('Error editing document field:', error);
+ return {
+ success: false,
+ message: `Error updating field: ${error instanceof Error ? error.message : String(error)}`,
+ };
+ }
+ }
+ /**
+ * Gets metadata for a specific document or all documents.
+ * @param documentId Optional ID of a specific document to get metadata for
+ * @returns Metadata for that document, or { documentCount, documents } for all
+ */
+ public getDocumentMetadata(documentId?: string): any {
+ if (documentId) {
+ // Extract once and reuse — previously the metadata was computed twice
+ // (once for the log, once for the return value).
+ const metadata = this.extractDocumentMetadata(documentId);
+ console.log(`Returning document metadata for docID, ${documentId}:`, metadata);
+ return metadata;
+ }
+ // Get metadata for all documents.
+ const documentsMetadata: Record<string, Record<string, any>> = {};
+ for (const id of this.documentsById.keys()) {
+ const metadata = this.extractDocumentMetadata(id);
+ if (metadata) {
+ documentsMetadata[id] = metadata;
+ } else {
+ console.warn(`No metadata found for document with ID: ${id}`);
+ }
+ }
+ return {
+ documentCount: this.documentsById.size,
+ documents: documentsMetadata,
+ };
+ }
+
+ /**
+ * Creates link documents between every distinct pair of the given documents.
+ * Pairs are deduplicated with an order-independent key, so each pair is
+ * linked at most once per call; a failure on one pair does not abort the rest.
+ * @param docIds Array of document IDs to link
+ * @returns Array of created link documents
+ */
+ public addLinks(docIds: string[]): Doc[] {
+ const createdLinks: Doc[] = [];
+ // Use string keys for Set instead of arrays which don't work as expected as keys
+ const alreadyLinked = new Set<string>();
+
+ // Iterate over the document IDs and add links
+ docIds.forEach(docId1 => {
+ const doc1 = this.documentsById.get(docId1);
+ docIds.forEach(docId2 => {
+ if (docId1 === docId2) return; // Skip self-linking
+
+ // Create a consistent key regardless of document order
+ const linkKey = [docId1, docId2].sort().join('_');
+ if (alreadyLinked.has(linkKey)) return;
+
+ const doc2 = this.documentsById.get(docId2);
+ if (doc1?.layoutDoc && doc2?.layoutDoc) {
+ try {
+ // Create a link document between doc1 and doc2
+ const linkDoc = Docs.Create.LinkDocument(doc1.layoutDoc, doc2.layoutDoc);
+
+ // Set a default color if the link doesn't already specify one
+ if (!linkDoc.color) {
+ linkDoc.color = 'lightBlue'; // Default blue color
+ }
+
+ // Ensure link is visible by setting essential properties
+ linkDoc.link_visible = true;
+ linkDoc.link_enabled = true;
+ linkDoc.link_autoMove = true;
+ linkDoc.link_showDirected = true;
+
+ // Set the embedContainer to ensure visibility
+ // This is shown in the image as a key difference between visible/non-visible links
+ // NOTE(review): falls back to the literal tab title 'Untitled Tab 1' when no
+ // parent title can be found — confirm this default matches the dashboard.
+ if (this.chatBoxDocument && this.chatBoxDocument.parent && typeof this.chatBoxDocument.parent === 'object' && 'title' in this.chatBoxDocument.parent) {
+ linkDoc.embedContainer = String(this.chatBoxDocument.parent.title);
+ } else if (doc1.layoutDoc.parent && typeof doc1.layoutDoc.parent === 'object' && 'title' in doc1.layoutDoc.parent) {
+ linkDoc.embedContainer = String(doc1.layoutDoc.parent.title);
+ } else {
+ // Default to a tab name if we can't find one
+ linkDoc.embedContainer = 'Untitled Tab 1';
+ }
+
+ // Add the link to the document system
+ LinkManager.Instance.addLink(linkDoc);
+
+ // Attach the link to the deepest view containing both endpoints.
+ const ancestor = DocumentView.linkCommonAncestor(linkDoc);
+ ancestor?.ComponentView?.addDocument?.(linkDoc);
+ // Add to user document list to make it visible in the UI
+ Doc.AddDocToList(Doc.UserDoc(), 'links', linkDoc);
+
+ // Create a visual link for display
+ if (this.chatBoxDocument) {
+ // Make sure the docs are visible in the UI
+ this.chatBox._props.addDocument?.(doc1.layoutDoc);
+ this.chatBox._props.addDocument?.(doc2.layoutDoc);
+
+ // Use DocumentManager to ensure documents are visible
+ DocumentManager.Instance.showDocument(doc1.layoutDoc, { willZoomCentered: false });
+ DocumentManager.Instance.showDocument(doc2.layoutDoc, { willZoomCentered: false });
+ }
+
+ createdLinks.push(linkDoc);
+ alreadyLinked.add(linkKey);
+ } catch (error) {
+ console.error('Error creating link between documents:', error);
+ }
+ }
+ });
+ });
+
+ // Force update of the UI to show new links (deferred so link creation settles first)
+ setTimeout(() => {
+ try {
+ // Update server cache to ensure links are persisted; the guard tolerates
+ // UPDATE_SERVER_CACHE being undefined in some builds.
+ UPDATE_SERVER_CACHE && typeof UPDATE_SERVER_CACHE === 'function' && UPDATE_SERVER_CACHE();
+ } catch (e) {
+ console.warn('Could not update server cache after creating links:', e);
+ }
+ }, 100);
+
+ return createdLinks;
+ }
+ /**
+ * Checks whether a document type string names one of the supported types.
+ * @param docType The document type to validate
+ * @returns True when docType matches a supportedDocTypes value, false otherwise
+ */
+ private isValidDocType(docType: string): boolean {
+ const supported = Object.values(supportedDocTypes);
+ return supported.includes(docType as supportedDocTypes);
+ }
+ /**
+ * Creates a document in the dashboard and returns its ID.
+ * This is a public API used by tools like SearchTool.
+ *
+ * @param docType The type of document to create (must be a supportedDocTypes value)
+ * @param data The data for the document
+ * @param options Optional configuration options (title, x, y)
+ * @returns The ID of the created document
+ * @throws When the doc type is invalid, the ChatBox is unavailable, or creation fails
+ */
+
+ public async createDocInDash(docType: string, data: string, options?: any): Promise<string> {
+ // Validate doc_type
+ if (!this.isValidDocType(docType)) {
+ throw new Error(`Invalid document type: ${docType}`);
+ }
+
+ try {
+ // Create simple document with just title and data.
+ // Size defaults to 300x300 with auto-height enabled.
+ const simpleDoc: parsedDoc = {
+ doc_type: docType,
+ title: options?.title ?? `Untitled Document ${this.documentsById.size + 1}`,
+ data: data,
+ x: options?.x ?? 0,
+ y: options?.y ?? 0,
+ _width: 300,
+ _height: 300,
+ _layout_fitWidth: false,
+ _layout_autoHeight: true,
+ };
+
+ // Additional handling for web documents
+ if (docType === 'web') {
+ // For web documents, don't sanitize the URL here
+ // Instead, set properties to handle content safely when loaded
+ simpleDoc._disable_resource_loading = true;
+ simpleDoc._sandbox_iframe = true;
+ simpleDoc.data_useCors = true;
+
+ // Specify a more permissive sandbox to allow content to render properly
+ // but still maintain security
+ simpleDoc._iframe_sandbox = 'allow-same-origin allow-scripts allow-popups allow-forms';
+ }
+
+ // Use the chatBox's createDocInDash method to create the document
+ if (!this.chatBox) {
+ throw new Error('ChatBox instance not available for creating document');
+ }
+
+ const doc = this.chatBox.whichDoc(simpleDoc, false);
+ if (doc) {
+ // Use MobX runInAction to properly modify observable state
+ runInAction(() => {
+ if (this.chatBoxDocument && doc) {
+ // Create link and add it to the document system
+ const linkDoc = Docs.Create.LinkDocument(this.chatBoxDocument, doc);
+ LinkManager.Instance.addLink(linkDoc);
+ if (doc.type !== 'web') {
+ // Add document to view
+ this.chatBox._props.addDocument?.(doc);
+
+ // Show document - defer actual display to prevent immediate resource loading
+ setTimeout(() => {
+ DocumentManager.Instance.showDocument(doc, { willZoomCentered: true }, () => {});
+ }, 100);
+ }
+ }
+ });
+
+ // Register the new doc with this manager; the generated ID is the return value.
+ const id = await this.processDocument(doc);
+ return id;
+ } else {
+ throw new Error(`Error creating document. Created document not found.`);
+ }
+ } catch (error) {
+ throw new Error(`Error creating document: ${error}`);
+ }
+ }
+
+ /**
+ * Sanitizes web content to prevent errors with external resources.
+ * Strips preload links and source-map references, sandboxes iframes, lazy-loads
+ * images, disables scripts via a non-executable type attribute, neuters
+ * root-relative hrefs, and downgrades stylesheets to prefetch; the result is
+ * wrapped in a style-overriding container. On failure the raw content is
+ * returned HTML-escaped inside a <pre>.
+ * @param content The web content to sanitize
+ * @returns Sanitized content
+ */
+ private sanitizeWebContent(content: string): string {
+ if (!content) return content;
+
+ try {
+ // Replace problematic resource references that might cause errors
+ const sanitized = content
+ // Remove preload links that might cause errors
+ .replace(/<link[^>]*rel=["']preload["'][^>]*>/gi, '')
+ // Remove map file references
+ .replace(/\/\/# sourceMappingURL=.*\.map/gi, '')
+ // Remove external CSS map files references
+ .replace(/\/\*# sourceMappingURL=.*\.css\.map.*\*\//gi, '')
+ // Add sandbox to iframes
+ .replace(/<iframe/gi, '<iframe sandbox="allow-same-origin" loading="lazy"')
+ // Prevent automatic resource loading for images
+ .replace(/<img/gi, '<img loading="lazy"')
+ // Prevent script execution: browsers ignore scripts with an unknown type
+ .replace(/<script/gi, '<script type="text/disabled"')
+ // Disable root-relative links.
+ // NOTE(review): the capture group already requires a leading '/', so the
+ // startsWith('/') test below is always true — every match is disabled,
+ // not "converted to absolute" as the old comment claimed.
+ .replace(/href=["'](\/[^"']+)["']/gi, (match, p1) => {
+ // Only handle relative URLs starting with /
+ if (p1.startsWith('/')) {
+ return `href="#disabled-link"`;
+ }
+ return match;
+ })
+ // Prevent automatic loading of CSS
+ .replace(/<link[^>]*rel=["']stylesheet["'][^>]*href=["']([^"']+)["']/gi, (match, href) => `<link rel="prefetch" data-original-href="${href}" />`);
+
+ // Wrap the content in a sandboxed container
+ return `
+ <div class="sandboxed-web-content">
+ <style>
+ /* Override styles to prevent external resource loading */
+ @font-face { font-family: 'disabled'; src: local('Arial'); }
+ * { font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif !important; }
+ img, iframe, frame, embed, object { max-width: 100%; }
+ </style>
+ ${sanitized}
+ </div>`;
+ } catch (e) {
+ console.warn('Error sanitizing web content:', e);
+ // Fall back to a safe container with the content as text
+ return `
+ <div class="sandboxed-web-content">
+ <p>Content could not be safely displayed. Raw content:</p>
+ <pre>${content.replace(/</g, '&lt;').replace(/>/g, '&gt;')}</pre>
+ </div>`;
+ }
+ }
+
+ /** Reports whether a document with the given ID is tracked by this manager. */
+ public has(docId: string) {
+ const tracked = this.documentsById.has(docId);
+ return tracked;
+ }
+
+ /**
+ * An XML listing of every tracked document: one <document> element per doc
+ * carrying its id, title, type, and summary (values escaped via escapeXml).
+ * @returns Newline-joined <document> XML fragments — a string, not an ID array
+ */
+ @computed
+ public get listDocs(): string {
+ const xmlDocs = Array.from(this.documentsById.entries()).map(([id, agentDoc]) => {
+ return `<document>
+ <id>${id}</id>
+ <title>${this.escapeXml(StrCast(agentDoc.layoutDoc.title))}</title>
+ <type>${this.escapeXml(StrCast(agentDoc.layoutDoc.type))}</type>
+ <summary>${this.escapeXml(StrCast(agentDoc.layoutDoc.summary))}</summary>
+</document>`;
+ });
+
+ return xmlDocs.join('\n');
+ }
+
+ /** Escapes the five XML special characters so the string is safe inside markup. */
+ private escapeXml(str: string): string {
+ const entities: Record<string, string> = { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&apos;' };
+ // A single pass with a character class is equivalent to the old chained
+ // replaces (ampersand-first ordering) and cannot double-escape.
+ return str.replace(/[&<>"']/g, ch => entities[ch]);
+ }
+
+ /** All tracked document IDs, in map insertion order. */
+ @computed
+ public get docIds(): string[] {
+ return [...this.documentsById.keys()];
+ }
+
+ /**
+ * Gets a document's layout Doc by its ID.
+ * @param docId The ID of the document to retrieve
+ * @returns The layout document if found, undefined otherwise
+ */
+ public getDocument(docId: string): Doc | undefined {
+ return this.documentsById.get(docId)?.layoutDoc;
+ }
+
+ /** Gets a document's data Doc by its ID, or undefined when the ID is unknown. */
+ public getDataDocument(docId: string): Doc | undefined {
+ return this.documentsById.get(docId)?.dataDoc;
+ }
+ /**
+ * Registers simplified chunks in the manager's chunk map, keyed by chunkId,
+ * so citation handling can later resolve a chunk ID back to its metadata.
+ * (The previous doc comment described doc/chunks/docType parameters that this
+ * method does not take.)
+ * @param simplifiedChunks Array of simplified chunks to register
+ */
+ @action
+ public addSimplifiedChunks(simplifiedChunks: SimplifiedChunk[]) {
+ simplifiedChunks.forEach(chunk => {
+ this.simplifiedChunks.set(chunk.chunkId, chunk);
+ });
+ }
+
+ /**
+ * Builds simplified chunks (for citation handling) from full RAG chunks.
+ * All chunk types share chunkId/doc_id/chunkType; media, pdf, and csv chunks
+ * additionally carry their type-specific location metadata.
+ * @param chunks Array of full RAG chunks to simplify
+ * @param docType The type of document (e.g. 'pdf', 'video', 'audio', 'csv')
+ * @returns Array of simplified chunks
+ */
+ public getSimplifiedChunks(chunks: RAGChunk[], docType: string): SimplifiedChunk[] {
+ // (Removed a leftover console.log that dumped every chunk on each call.)
+ const simplifiedChunks: SimplifiedChunk[] = [];
+ for (const chunk of chunks) {
+ // Common properties across all chunk types
+ const baseChunk: SimplifiedChunk = {
+ chunkId: chunk.id,
+ doc_id: chunk.metadata.doc_id,
+ chunkType: chunk.metadata.type || CHUNK_TYPE.TEXT,
+ };
+
+ // Add type-specific properties
+ if (docType === 'video' || docType === 'audio') {
+ // Media chunks carry a time range plus segment indexes.
+ simplifiedChunks.push({
+ ...baseChunk,
+ start_time: chunk.metadata.start_time,
+ end_time: chunk.metadata.end_time,
+ indexes: chunk.metadata.indexes,
+ chunkType: docType === 'video' ? CHUNK_TYPE.VIDEO : CHUNK_TYPE.AUDIO,
+ } as SimplifiedChunk);
+ } else if (docType === 'pdf') {
+ simplifiedChunks.push({
+ ...baseChunk,
+ startPage: chunk.metadata.start_page,
+ endPage: chunk.metadata.end_page,
+ location: chunk.metadata.location,
+ } as SimplifiedChunk);
+ } else if (docType === 'csv') {
+ simplifiedChunks.push({
+ ...baseChunk,
+ rowStart: (chunk.metadata as any).row_start,
+ rowEnd: (chunk.metadata as any).row_end,
+ colStart: (chunk.metadata as any).col_start,
+ colEnd: (chunk.metadata as any).col_end,
+ } as SimplifiedChunk);
+ } else {
+ // Default for other document types: just the shared properties.
+ simplifiedChunks.push(baseChunk);
+ }
+ }
+ return simplifiedChunks;
+ }
+
+ /**
+ * Looks up a simplified chunk by ID and resolves its owning documents.
+ * @param chunkId The ID of the chunk to retrieve
+ * @returns { foundChunk, doc, dataDoc } — foundChunk may be undefined; when the
+ * chunk is unknown, chunkId itself is tried as a document ID.
+ */
+ @action
+ public getSimplifiedChunkById(chunkId: string): any | undefined {
+ // Look the chunk up once instead of re-querying the map for each property.
+ const foundChunk = this.simplifiedChunks.get(chunkId);
+ const docId = foundChunk?.doc_id || chunkId;
+ return { foundChunk, doc: this.getDocument(docId), dataDoc: this.getDataDocument(docId) };
+ }
+
+ /**
+ * Maps each document ID to the ID of the first simplified chunk that belongs
+ * to it, skipping documents with no registered chunks.
+ * @param docIds Document IDs to resolve
+ * @returns Chunk IDs, at most one per input document, in input order
+ */
+ public getChunkIdsFromDocIds(docIds: string[]): string[] {
+ const chunkIds: string[] = [];
+ for (const docId of docIds) {
+ const match = Array.from(this.simplifiedChunks.values()).find(chunk => chunk.doc_id === docId);
+ if (match !== undefined) {
+ chunkIds.push(match.chunkId);
+ }
+ }
+ return chunkIds;
+ }
+
+ /**
+ * Gets the original segments from a media document.
+ * @param doc The document whose 'original_segments' field holds serialized segments
+ * @returns Parsed segment array, or [] when the field is absent or unparseable
+ */
+ public getOriginalSegments(doc: Doc): any[] {
+ // Missing document or empty field: nothing to parse.
+ if (!doc?.original_segments) {
+ return [];
+ }
+ try {
+ const parsed = JSON.parse(StrCast(doc.original_segments));
+ return parsed || [];
+ } catch (e) {
+ console.error('Error parsing original segments:', e);
+ return [];
+ }
+ }
+}
diff --git a/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts b/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts
index 6d524e40f..72060973b 100644
--- a/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts
+++ b/src/client/views/nodes/chatbot/vectorstore/Vectorstore.ts
@@ -15,6 +15,8 @@ import { Networking } from '../../../../Network';
import { AI_Document, CHUNK_TYPE, RAGChunk } from '../types/types';
import OpenAI from 'openai';
import { Embedding } from 'openai/resources';
+import { AgentDocumentManager } from '../utils/AgentDocumentManager';
+import { Id } from '../../../../../fields/FieldSymbols';
dotenv.config();
@@ -23,23 +25,28 @@ dotenv.config();
* and OpenAI text-embedding-3-large for text embedding. It handles AI document management, uploads, and query-based retrieval.
*/
export class Vectorstore {
- private pinecone: Pinecone; // Pinecone client for managing the vector index.
+ private pinecone!: Pinecone; // Pinecone client for managing the vector index.
private index!: Index; // The specific Pinecone index used for document chunks.
- private openai: OpenAI; // OpenAI client for generating embeddings.
+ private summaryIndex!: Index; // The Pinecone index used for file summaries.
+ private openai!: OpenAI; // OpenAI client for generating embeddings.
private indexName: string = 'pdf-chatbot'; // Default name for the index.
- private _id: string; // Unique ID for the Vectorstore instance.
- private _doc_ids: () => string[]; // List of document IDs handled by this instance.
-
+ private summaryIndexName: string = 'file-summaries'; // Name for the summaries index.
+ private _id!: string; // Unique ID for the Vectorstore instance.
+ private docManager!: AgentDocumentManager; // Document manager for handling documents
+ private summaryCacheCount: number = 0; // Cache for the number of summaries
documents: AI_Document[] = []; // Store the documents indexed in the vectorstore.
+ private debug: boolean = true; // Enable debugging
+ private initialized: boolean = false;
/**
* Initializes the Pinecone and OpenAI clients, sets up the document ID list,
* and initializes the Pinecone index.
* @param id The unique identifier for the vectorstore instance.
- * @param doc_ids A function that returns a list of document IDs.
+ * @param docManager An instance of AgentDocumentManager to handle document management.
*/
- constructor(id: string, doc_ids: () => string[]) {
- const pineconeApiKey = process.env.PINECONE_API_KEY;
+ constructor(id: string, docManager: AgentDocumentManager) {
+ if (this.debug) console.log(`[DEBUG] Initializing Vectorstore with ID: ${id}`);
+ // SECURITY: read the Pinecone key from the environment. A literal key was
+ // committed here, leaking the credential in source control — it must be
+ // removed and the exposed key rotated.
+ const pineconeApiKey = process.env.PINECONE_API_KEY;
 if (!pineconeApiKey) {
 console.log('PINECONE_API_KEY is not defined - Vectorstore will be unavailable');
 return;
@@ -49,8 +56,39 @@ export class Vectorstore {
 this.pinecone = new Pinecone({ apiKey: pineconeApiKey });
 this.openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY, dangerouslyAllowBrowser: true });
 this._id = id;
- this._doc_ids = doc_ids;
- this.initializeIndex();
+ this.docManager = docManager;
+
+ // Proper async initialization sequence (constructors cannot await;
+ // initializeAsync catches and logs its own failures).
+ this.initializeAsync(id);
+ }
+
+ /**
+ * Handles async initialization of all components.
+ * Sets up the main document index, then the summary index, marks the store
+ * initialized, and runs one smoke-test query. Failures are caught and logged
+ * rather than rethrown, so callers cannot await readiness — check the
+ * 'initialized' flag instead.
+ * @param id The vectorstore instance ID (used here for debug logging only)
+ */
+ private async initializeAsync(id: string) {
+ try {
+ if (this.debug) console.log(`[DEBUG] Starting async initialization sequence for Vectorstore ID: ${id}`);
+
+ // Initialize the main document index
+ await this.initializeIndex();
+
+ // Initialize the summary index
+ await this.initializeSummaryIndex();
+
+ this.initialized = true;
+ if (this.debug) console.log(`[DEBUG] ✅ Vectorstore initialization complete, running test query...`);
+
+ // Run a single test query instead of multiple
+ await this.runSingleTestQuery();
+ } catch (error) {
+ console.error('[ERROR] Failed to initialize Vectorstore:', error);
+ }
+ }
+
+ /** Fetches the server's known file paths and returns them parsed from JSON. */
+ async getFileNames() {
+ const raw = await Networking.FetchFromServer('/getFileNames');
+ return JSON.parse(raw);
+ }
/**
@@ -58,10 +96,13 @@ export class Vectorstore {
* Sets the index to use cosine similarity for vector similarity calculations.
*/
private async initializeIndex() {
+ if (this.debug) console.log(`[DEBUG] Initializing main document index: ${this.indexName}`);
const indexList: IndexList = await this.pinecone.listIndexes();
+ if (this.debug) console.log(`[DEBUG] Available Pinecone indexes: ${indexList.indexes?.map(i => i.name).join(', ') || 'none'}`);
// Check if the index already exists, otherwise create it.
if (!indexList.indexes?.some(index => index.name === this.indexName)) {
+ if (this.debug) console.log(`[DEBUG] Creating new index: ${this.indexName}`);
await this.pinecone.createIndex({
name: this.indexName,
dimension: 3072,
@@ -73,6 +114,9 @@ export class Vectorstore {
},
},
});
+ if (this.debug) console.log(`[DEBUG] ✅ Index ${this.indexName} created successfully`);
+ } else {
+ if (this.debug) console.log(`[DEBUG] ✅ Using existing index: ${this.indexName}`);
}
// Set the index for future use.
@@ -80,6 +124,453 @@ export class Vectorstore {
}
/**
+ * Initializes the Pinecone index for file summaries.
+ * Checks if it exists and creates it if necessary.
+ */
+ private async initializeSummaryIndex() {
+ if (this.debug) console.log(`[DEBUG] Initializing file summaries index: ${this.summaryIndexName}`);
+ const indexList: IndexList = await this.pinecone.listIndexes();
+
+ // Check if the index already exists, otherwise create it.
+ if (!indexList.indexes?.some(index => index.name === this.summaryIndexName)) {
+ if (this.debug) console.log(`[DEBUG] Creating new summary index: ${this.summaryIndexName}`);
+ await this.pinecone.createIndex({
+ name: this.summaryIndexName,
+ dimension: 3072,
+ metric: 'cosine',
+ spec: {
+ serverless: {
+ cloud: 'aws',
+ region: 'us-east-1',
+ },
+ },
+ });
+ if (this.debug) console.log(`[DEBUG] ✅ Summary index ${this.summaryIndexName} created successfully`);
+ } else {
+ if (this.debug) console.log(`[DEBUG] ✅ Using existing summary index: ${this.summaryIndexName}`);
+ }
+
+ // Set the summaries index for future use.
+ this.summaryIndex = this.pinecone.Index(this.summaryIndexName);
+
+ // Check if we need to index the file summaries
+ await this.processFileSummaries();
+ }
+
+ /**
+ * Processes file summaries from the JSON file if needed.
+ * Checks if the index contains the correct number of summaries before embedding.
+ */
+ private async processFileSummaries() {
+ if (this.debug) console.log(`[DEBUG] Starting file summaries processing`);
+ try {
+ // Get file summaries from the server
+ if (this.debug) console.log(`[DEBUG] Fetching file summaries from server...`);
+ const response = await Networking.FetchFromServer('/getFileSummaries');
+
+ if (!response) {
+ console.error('[ERROR] Failed to fetch file summaries');
+ return;
+ }
+ if (this.debug) console.log(`[DEBUG] File summaries response received (${response.length} bytes)`);
+
+ const summaries = JSON.parse(response);
+ const filepaths = Object.keys(summaries);
+ const summaryCount = filepaths.length;
+ this.summaryCacheCount = summaryCount;
+
+ if (this.debug) {
+ console.log(`[DEBUG] File summaries parsed: ${summaryCount} files`);
+ console.log(`[DEBUG] Sample filepaths: ${filepaths.slice(0, 3).join(', ')}...`);
+ console.log(`[DEBUG] Sample summary: "${summaries[filepaths[0]].substring(0, 100)}..."`);
+ }
+
+ // Check if index already has the correct number of summaries
+ try {
+ if (this.debug) console.log(`[DEBUG] Checking summary index stats...`);
+ const indexStats = await this.summaryIndex.describeIndexStats();
+ const vectorCount = indexStats.totalRecordCount;
+
+ if (this.debug) console.log(`[DEBUG] Summary index has ${vectorCount} records, expecting ${summaryCount}`);
+
+ if (vectorCount === summaryCount) {
+ console.log(`[DEBUG] ✅ Summary index already contains ${vectorCount} entries, skipping embedding.`);
+ return;
+ }
+
+ if (this.debug) console.log(`[DEBUG] ⚠️ Summary index contains ${vectorCount} entries, but there are ${summaryCount} summaries. Re-indexing.`);
+ } catch (error) {
+ console.error('[ERROR] Error checking summary index stats:', error);
+ }
+
+ // If we get here, we need to embed the summaries
+ await this.embedAndIndexFileSummaries(summaries);
+ } catch (error) {
+ console.error('[ERROR] Error processing file summaries:', error);
+ }
+ }
+
+ /**
+ * Embeds and indexes file summaries into the summary index.
+ * @param summaries Object mapping filepaths to summaries
+ */
+ private async embedAndIndexFileSummaries(summaries: Record<string, string>) {
+ if (this.debug) console.log(`[DEBUG] Starting embedding and indexing of file summaries...`);
+
+ const filepaths = Object.keys(summaries);
+ const summaryTexts = Object.values(summaries);
+
+ // Split into batches of 100 to avoid exceeding API limits
+ const batchSize = 100;
+ const totalBatches = Math.ceil(filepaths.length / batchSize);
+
+ if (this.debug) console.log(`[DEBUG] Processing ${filepaths.length} files in ${totalBatches} batches of size ${batchSize}`);
+
+ for (let i = 0; i < filepaths.length; i += batchSize) {
+ const batchFilepaths = filepaths.slice(i, i + batchSize);
+ const batchTexts = summaryTexts.slice(i, i + batchSize);
+
+ if (this.debug) {
+ console.log(`[DEBUG] Processing batch ${Math.floor(i / batchSize) + 1}/${totalBatches}`);
+ console.log(`[DEBUG] First file in batch: ${batchFilepaths[0]}`);
+ console.log(`[DEBUG] First summary in batch: "${batchTexts[0].substring(0, 50)}..."`);
+ }
+
+ try {
+ // Generate embeddings for this batch
+ if (this.debug) console.log(`[DEBUG] Generating embeddings for batch of ${batchTexts.length} summaries...`);
+ const startTime = Date.now();
+ const embeddingResponse = await this.openai.embeddings.create({
+ model: 'text-embedding-3-large',
+ input: batchTexts,
+ encoding_format: 'float',
+ });
+ const duration = Date.now() - startTime;
+ if (this.debug) console.log(`[DEBUG] ✅ Embeddings generated in ${duration}ms`);
+
+ // Prepare Pinecone records
+ if (this.debug) console.log(`[DEBUG] Preparing Pinecone records...`);
+ const pineconeRecords: PineconeRecord[] = batchTexts.map((text, index) => {
+ const embedding = (embeddingResponse.data as Embedding[])[index].embedding;
+ if (this.debug && index === 0) console.log(`[DEBUG] Sample embedding dimensions: ${embedding.length}, first few values: [${embedding.slice(0, 5).join(', ')}...]`);
+
+ return {
+ id: uuidv4(), // Generate a unique ID for each summary
+ values: embedding,
+ metadata: {
+ filepath: batchFilepaths[index],
+ summary: text,
+ } as RecordMetadata,
+ };
+ });
+
+ // Upload to Pinecone
+ if (this.debug) console.log(`[DEBUG] Upserting ${pineconeRecords.length} records to Pinecone...`);
+ const upsertStart = Date.now();
+ try {
+ await this.summaryIndex.upsert(pineconeRecords);
+ const upsertDuration = Date.now() - upsertStart;
+ if (this.debug) console.log(`[DEBUG] ✅ Batch ${Math.floor(i / batchSize) + 1}/${totalBatches} indexed in ${upsertDuration}ms`);
+ } catch (upsertError) {
+ console.error(`[ERROR] Failed to upsert batch ${Math.floor(i / batchSize) + 1}/${totalBatches} to Pinecone:`, upsertError);
+ // Try again with smaller batch
+ if (batchTexts.length > 20) {
+ console.log(`[DEBUG] 🔄 Retrying with smaller batch size...`);
+ // Split the batch in half and retry recursively
+ const midpoint = Math.floor(batchTexts.length / 2);
+ const firstHalf = {
+ filepaths: batchFilepaths.slice(0, midpoint),
+ texts: batchTexts.slice(0, midpoint),
+ };
+ const secondHalf = {
+ filepaths: batchFilepaths.slice(midpoint),
+ texts: batchTexts.slice(midpoint),
+ };
+
+ // Create a helper function to retry smaller batches
+ const retryBatch = async (paths: string[], texts: string[], batchNum: string) => {
+ try {
+ if (this.debug) console.log(`[DEBUG] Generating embeddings for sub-batch ${batchNum}...`);
+ const embRes = await this.openai.embeddings.create({
+ model: 'text-embedding-3-large',
+ input: texts,
+ encoding_format: 'float',
+ });
+
+ const records = texts.map((t, idx) => ({
+ id: uuidv4(),
+ values: (embRes.data as Embedding[])[idx].embedding,
+ metadata: {
+ filepath: paths[idx],
+ summary: t,
+ } as RecordMetadata,
+ }));
+
+ if (this.debug) console.log(`[DEBUG] Upserting sub-batch ${batchNum} (${records.length} records)...`);
+ await this.summaryIndex.upsert(records);
+ if (this.debug) console.log(`[DEBUG] ✅ Sub-batch ${batchNum} upserted successfully`);
+ } catch (retryError) {
+ console.error(`[ERROR] Failed to upsert sub-batch ${batchNum}:`, retryError);
+ }
+ };
+
+ await retryBatch(firstHalf.filepaths, firstHalf.texts, `${Math.floor(i / batchSize) + 1}.1`);
+ await retryBatch(secondHalf.filepaths, secondHalf.texts, `${Math.floor(i / batchSize) + 1}.2`);
+ }
+ }
+ } catch (error) {
+ console.error('[ERROR] Error processing batch:', error);
+ }
+ }
+
+ if (this.debug) console.log(`[DEBUG] ✅ File summary indexing complete for all ${filepaths.length} files`);
+
+ // Verify the index was populated correctly
+ try {
+ const indexStats = await this.summaryIndex.describeIndexStats();
+ const vectorCount = indexStats.totalRecordCount;
+ if (this.debug) console.log(`[DEBUG] 🔍 Final index verification: ${vectorCount} records in Pinecone index (expected ${filepaths.length})`);
+ } catch (error) {
+ console.error('[ERROR] Failed to verify index stats:', error);
+ }
+ }
+
+ /**
+ * Searches for file summaries similar to the given query.
+ * @param query The search query
+ * @param topK Number of results to return (default: 5)
+ * @returns Array of filepath and summary pairs with relevance scores
+ */
+ async searchFileSummaries(query: string, topK: number = 5): Promise<Array<{ filepath: string; summary: string; score?: number }>> {
+ if (!this.initialized) {
+ console.error('[ERROR] Cannot search - Vectorstore not fully initialized');
+ return [];
+ }
+
+ if (this.debug) console.log(`[DEBUG] Searching file summaries for query: "${query}" (topK=${topK})`);
+ try {
+ // Generate embedding for the query
+ if (this.debug) console.log(`[DEBUG] Generating embedding for query...`);
+ const startTime = Date.now();
+ const queryEmbeddingResponse = await this.openai.embeddings.create({
+ model: 'text-embedding-3-large',
+ input: query,
+ encoding_format: 'float',
+ });
+ const duration = Date.now() - startTime;
+
+ const queryEmbedding = queryEmbeddingResponse.data[0].embedding;
+ if (this.debug) {
+ console.log(`[DEBUG] ✅ Query embedding generated in ${duration}ms`);
+ console.log(`[DEBUG] Query embedding dimensions: ${queryEmbedding.length}`);
+ }
+
+ // Check if summary index is ready
+ try {
+ const indexStats = await this.summaryIndex.describeIndexStats();
+ const vectorCount = indexStats.totalRecordCount;
+ if (this.debug) console.log(`[DEBUG] Summary index contains ${vectorCount} records`);
+
+ if (vectorCount === 0) {
+ console.error('[ERROR] Summary index is empty, cannot perform search');
+ return [];
+ }
+ } catch (statsError) {
+ console.error('[ERROR] Failed to check summary index stats:', statsError);
+ console.error('[ERROR] Stats error details:', JSON.stringify(statsError));
+ }
+
+ // Test direct API access to Pinecone
+ if (this.debug) console.log(`[DEBUG] Testing Pinecone connection...`);
+ try {
+ const indexes = await this.pinecone.listIndexes();
+ console.log(`[DEBUG] Available Pinecone indexes: ${indexes.indexes?.map(idx => idx.name).join(', ')}`);
+ } catch (connectionError) {
+ console.error('[ERROR] Could not connect to Pinecone:', connectionError);
+ }
+
+ // Query the summaries index
+ if (this.debug) console.log(`[DEBUG] Querying Pinecone summary index (${this.summaryIndexName})...`);
+ const queryStart = Date.now();
+
+ let queryResponse;
+ try {
+ // First, make sure we can access the index
+ const indexInfo = await this.summaryIndex.describeIndexStats();
+ if (this.debug) console.log(`[DEBUG] Index stats:`, indexInfo);
+
+ queryResponse = await this.summaryIndex.query({
+ vector: queryEmbedding,
+ topK,
+ includeMetadata: true,
+ });
+
+ const queryDuration = Date.now() - queryStart;
+
+ if (this.debug) {
+ console.log(`[DEBUG] ✅ Pinecone query completed in ${queryDuration}ms`);
+ console.log(`[DEBUG] Raw Pinecone response:`, JSON.stringify(queryResponse, null, 2));
+ if (queryResponse.matches) {
+ console.log(`[DEBUG] Found ${queryResponse.matches.length} matching summaries`);
+ console.log(`[DEBUG] Match scores: ${queryResponse.matches.map(m => m.score?.toFixed(4)).join(', ')}`);
+ } else {
+ console.log(`[DEBUG] No matches in response`);
+ }
+ }
+ } catch (queryError) {
+ console.error('[ERROR] Pinecone query failed:', queryError);
+ if (typeof queryError === 'object' && queryError !== null) {
+ console.error('[ERROR] Query error details:', JSON.stringify(queryError, null, 2));
+ }
+ return [];
+ }
+
+ if (!queryResponse || !queryResponse.matches || queryResponse.matches.length === 0) {
+ console.log('[DEBUG] ⚠️ No matches found in Pinecone for query');
+ return [];
+ }
+
+ // Format results
+ const results = queryResponse.matches.map(match => {
+ if (!match.metadata) {
+ console.error('[ERROR] Match is missing metadata:', match);
+ return { filepath: 'unknown', summary: 'No summary available' };
+ }
+
+ return {
+ filepath: (match.metadata as { filepath: string }).filepath || 'unknown',
+ summary: (match.metadata as { summary: string }).summary || 'No summary available',
+ score: match.score,
+ };
+ });
+
+ if (this.debug) {
+ if (results.length > 0) {
+ console.log(`[DEBUG] Top result filepath: ${results[0]?.filepath}`);
+ console.log(`[DEBUG] Top result score: ${results[0]?.score}`);
+ console.log(`[DEBUG] Top result summary excerpt: "${results[0]?.summary?.substring(0, 100)}..."`);
+ } else {
+ console.log(`[DEBUG] No results returned after processing`);
+ }
+ }
+
+ return results;
+ } catch (error) {
+ console.error('[ERROR] Error searching file summaries:', error);
+ if (typeof error === 'object' && error !== null) {
+ console.error('[ERROR] Full error details:', JSON.stringify(error, null, 2));
+ }
+ return [];
+ }
+ }
+
+ /**
+ * Runs a single test query after setup to validate the file summary search functionality.
+ */
+ private async runSingleTestQuery() {
+ console.log(`\n[TEST] Running single test query to validate file summary search functionality...`);
+
+ // Verify the index is accessible
+ try {
+ const indexStats = await this.summaryIndex.describeIndexStats();
+ console.log(`[TEST] Pinecone index stats:`, JSON.stringify(indexStats, null, 2));
+ console.log(`[TEST] Summary index contains ${indexStats.totalRecordCount} indexed summaries`);
+ } catch (error) {
+ console.error('[TEST] ❌ Failed to access Pinecone index:', error);
+ return;
+ }
+
+ // Add a brief delay to ensure Pinecone has finished processing
+ console.log('[TEST] Waiting 2 seconds for Pinecone indexing to complete...');
+ await new Promise(resolve => setTimeout(resolve, 2000));
+
+ // Run a single test query
+ const query = 'React components for the UI';
+ console.log(`\n[TEST] Executing query: "${query}"`);
+
+ try {
+ const results = await this.searchFileSummaries(query);
+ console.log(`[TEST] Search returned ${results.length} results:`);
+
+ results.forEach((result, i) => {
+ console.log(`\n[TEST] Result ${i + 1}:`);
+ console.log(`[TEST] File: ${result.filepath}`);
+ console.log(`[TEST] Score: ${result.score}`);
+ console.log(`[TEST] Summary: "${result.summary?.substring(0, 150)}..."`);
+ });
+
+ // If we have results, fetch the content for the first one
+ if (results.length > 0) {
+ const topFilepath = results[0].filepath;
+ console.log(`\n[TEST] Fetching full content for top result: ${topFilepath}`);
+ const content = await this.getFileContent(topFilepath);
+
+ if (content) {
+ console.log(`[TEST] ✅ Content retrieved successfully (${content.length} chars)`);
+ console.log(`[TEST] Content excerpt:\n---\n${content.substring(0, 300)}...\n---`);
+ } else {
+ console.log(`[TEST] ❌ Failed to retrieve content for ${topFilepath}`);
+ }
+ } else {
+ console.log(`\n[TEST] ⚠️ No results to fetch content for`);
+ }
+
+ console.log(`\n[TEST] ✅ Test query completed`);
+ } catch (testError) {
+ console.error(`[TEST] ❌ Test query failed:`, testError);
+ if (typeof testError === 'object' && testError !== null) {
+ console.error('[TEST] Full error details:', JSON.stringify(testError, null, 2));
+ }
+ }
+ }
+
+ /**
+ * Gets the full content of a file by its filepath.
+ * @param filepath The filepath to look up
+ * @returns The file content or null if not found
+ */
+ async getFileContent(filepath: string): Promise<string | null> {
+ if (this.debug) console.log(`[DEBUG] Getting file content for: ${filepath}`);
+ try {
+ const startTime = Date.now();
+
+ // Use the Networking utility for consistent API access
+ // But convert the response to text manually to avoid JSON parsing
+ const rawResponse = await fetch('/getRawFileContent', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify({ filepath }),
+ });
+
+ if (!rawResponse.ok) {
+ const errorText = await rawResponse.text();
+ console.error(`[ERROR] Server returned error ${rawResponse.status}: ${errorText}`);
+ return null;
+ }
+
+ // Get the raw text content without JSON parsing
+ const content = await rawResponse.text();
+ const duration = Date.now() - startTime;
+
+ if (this.debug) {
+ console.log(`[DEBUG] ✅ File content retrieved in ${duration}ms`);
+ console.log(`[DEBUG] Content length: ${content.length} chars`);
+ console.log(`[DEBUG] Content excerpt: "${content.substring(0, 100)}..."`);
+ }
+
+ return content;
+ } catch (error) {
+ console.error('[ERROR] Error getting file content:', error);
+ if (typeof error === 'object' && error !== null) {
+ console.error('[ERROR] Full error details:', JSON.stringify(error, null, 2));
+ }
+ return null;
+ }
+ }
+
+ /**
* Adds an AI document to the vectorstore. Handles media file processing for audio/video,
* and text embedding for all document types. Updates document metadata during processing.
* @param doc The document to add.
@@ -103,21 +594,35 @@ export class Vectorstore {
const local_file_path = CsvCast(doc.data)?.url?.pathname ?? PDFCast(doc.data)?.url?.pathname ?? VideoCast(doc.data)?.url?.pathname ?? AudioCast(doc.data)?.url?.pathname;
if (!local_file_path) {
- console.log('Invalid file path.');
+ console.log('Not adding to vectorstore. Invalid file path for vectorstore addition.');
return;
}
const isAudioOrVideo = local_file_path.endsWith('.mp3') || local_file_path.endsWith('.mp4');
let result: AI_Document & { doc_id: string };
+
if (isAudioOrVideo) {
console.log('Processing media file...');
- const response = (await Networking.PostToServer('/processMediaFile', { fileName: path.basename(local_file_path) })) as { [key: string]: unknown };
- const segmentedTranscript = response.condensed;
+ progressCallback(10, 'Preparing media file for transcription...');
+
+ // Post to processMediaFile endpoint to get the transcript
+ const response = await Networking.PostToServer('/processMediaFile', { fileName: path.basename(local_file_path) });
+ progressCallback(60, 'Transcription completed. Processing transcript...');
+
+ // Type assertion to handle the response properties
+ const typedResponse = response as {
+ condensed: Array<{ text: string; indexes: string[]; start: number; end: number }>;
+ full: Array<unknown>;
+ summary: string;
+ };
+
+ const segmentedTranscript = typedResponse.condensed;
console.log(segmentedTranscript);
- const summary = response.summary as string;
+ const summary = typedResponse.summary;
doc.summary = summary;
+
// Generate embeddings for each chunk
- const texts = (segmentedTranscript as { text: string }[])?.map(chunk => chunk.text);
+ const texts = segmentedTranscript.map(chunk => chunk.text);
try {
const embeddingsResponse = await this.openai.embeddings.create({
@@ -125,54 +630,57 @@ export class Vectorstore {
input: texts,
encoding_format: 'float',
});
+ progressCallback(85, 'Embeddings generated. Finalizing document...');
- doc.original_segments = JSON.stringify(response.full);
- doc.ai_type = local_file_path.endsWith('.mp3') ? 'audio' : 'video';
- const doc_id = uuidv4();
+ doc.original_segments = JSON.stringify(typedResponse.full);
+ const doc_id = doc[Id];
+ console.log('doc_id in vectorstore', doc_id);
+ // Generate chunk IDs upfront so we can register them
+ const chunkIds = segmentedTranscript.map(() => uuidv4());
// Add transcript and embeddings to metadata
result = {
doc_id,
purpose: '',
file_name: local_file_path,
num_pages: 0,
- summary: '',
- chunks: (segmentedTranscript as { text: string; start: number; end: number; indexes: string[] }[]).map((chunk, index) => ({
- id: uuidv4(),
+ summary: summary,
+ chunks: segmentedTranscript.map((chunk, index) => ({
+ id: chunkIds[index], // Use pre-generated chunk ID
values: (embeddingsResponse.data as Embedding[])[index].embedding, // Assign embedding
metadata: {
indexes: chunk.indexes,
original_document: local_file_path,
- doc_id: doc_id,
+ doc_id: doc_id, // Ensure doc_id is consistent
file_path: local_file_path,
start_time: chunk.start,
end_time: chunk.end,
text: chunk.text,
- type: CHUNK_TYPE.VIDEO,
+ type: local_file_path.endsWith('.mp3') ? CHUNK_TYPE.AUDIO : CHUNK_TYPE.VIDEO,
},
})),
type: 'media',
};
+ progressCallback(95, 'Adding document to vectorstore...');
} catch (error) {
console.error('Error generating embeddings:', error);
+ doc.ai_document_status = 'ERROR';
throw new Error('Embedding generation failed');
}
doc.segmented_transcript = JSON.stringify(segmentedTranscript);
- // Simplify chunks for storage
- const simplifiedChunks = result.chunks.map(chunk => ({
- chunkId: chunk.id,
- start_time: chunk.metadata.start_time,
- end_time: chunk.metadata.end_time,
- indexes: chunk.metadata.indexes,
- chunkType: CHUNK_TYPE.VIDEO,
- text: chunk.metadata.text,
- }));
- doc.chunk_simpl = JSON.stringify({ chunks: simplifiedChunks });
+ // Use doc manager to add simplified chunks
+ const docType = local_file_path.endsWith('.mp3') ? 'audio' : 'video';
+ const simplifiedChunks = this.docManager.getSimplifiedChunks(result.chunks, docType);
+ doc.chunk_simplified = JSON.stringify(simplifiedChunks);
+ this.docManager.addSimplifiedChunks(simplifiedChunks);
} else {
- // Existing document processing logic remains unchanged
+ // Process regular document
console.log('Processing regular document...');
- const { jobId } = (await Networking.PostToServer('/createDocument', { file_path: local_file_path })) as { jobId: string };
+ const createDocumentResponse = await Networking.PostToServer('/createDocument', { file_path: local_file_path, doc_id: doc[Id] });
+
+ // Type assertion for the response
+ const { jobId } = createDocumentResponse as { jobId: string };
while (true) {
await new Promise(resolve => setTimeout(resolve, 2000));
@@ -188,29 +696,28 @@ export class Vectorstore {
progressCallback(progressResponseJson.progress, progressResponseJson.step);
}
}
- if (!doc.chunk_simpl) {
- doc.chunk_simpl = JSON.stringify({ chunks: [] });
+
+ // Collect all chunk IDs
+ const chunkIds = result.chunks.map(chunk => chunk.id);
+
+ if (result.doc_id !== doc[Id]) {
+ console.log('doc_id in vectorstore', result.doc_id, 'does not match doc_id in doc', doc[Id]);
}
+
+ // Use doc manager to add simplified chunks - determine document type from file extension
+ const fileExt = path.extname(local_file_path).toLowerCase();
+ const docType = fileExt === '.pdf' ? 'pdf' : fileExt === '.csv' ? 'csv' : 'text';
+ const simplifiedChunks = this.docManager.getSimplifiedChunks(result.chunks, docType);
+ doc.chunk_simplified = JSON.stringify(simplifiedChunks);
+ this.docManager.addSimplifiedChunks(simplifiedChunks);
+
doc.summary = result.summary;
doc.ai_purpose = result.purpose;
-
- result.chunks.forEach((chunk: RAGChunk) => {
- const chunkToAdd = {
- chunkId: chunk.id,
- startPage: chunk.metadata.start_page,
- endPage: chunk.metadata.end_page,
- location: chunk.metadata.location,
- chunkType: chunk.metadata.type as CHUNK_TYPE,
- text: chunk.metadata.text,
- };
- const new_chunk_simpl = JSON.parse(StrCast(doc.chunk_simpl));
- new_chunk_simpl.chunks = new_chunk_simpl.chunks.concat(chunkToAdd);
- doc.chunk_simpl = JSON.stringify(new_chunk_simpl);
- });
}
// Index the document
await this.indexDocument(result);
+ progressCallback(100, 'Document added successfully!');
// Preserve existing metadata updates
if (!doc.vectorstore_id) {
@@ -286,7 +793,7 @@ export class Vectorstore {
* @param topK The number of top results to return (default is 10).
* @returns A list of document chunks that match the query.
*/
- async retrieve(query: string, topK: number = 10): Promise<RAGChunk[]> {
+ async retrieve(query: string, topK: number = 10, docIds?: string[]): Promise<RAGChunk[]> {
console.log(`Retrieving chunks for query: ${query}`);
try {
// Generate an embedding for the query using OpenAI.
@@ -297,40 +804,45 @@ export class Vectorstore {
});
const queryEmbedding = queryEmbeddingResponse.data[0].embedding;
+ const _docIds = docIds?.length === 0 || !docIds ? this.docManager.docIds : docIds;
- // Extract the embedding from the response.
+ console.log('Using document IDs for retrieval:', _docIds);
- console.log(this._doc_ids());
// Query the Pinecone index using the embedding and filter by document IDs.
+ // We'll query based on document IDs that are registered in the document manager
const queryResponse: QueryResponse = await this.index.query({
vector: queryEmbedding,
filter: {
- doc_id: { $in: this._doc_ids() },
+ doc_id: { $in: _docIds },
},
topK,
includeValues: true,
includeMetadata: true,
});
- console.log(queryResponse);
-
- // Map the results into RAGChunks and return them.
- return queryResponse.matches.map(
- match =>
- ({
- id: match.id,
- values: match.values as number[],
- metadata: match.metadata as {
- text: string;
- type: string;
- original_document: string;
- file_path: string;
- doc_id: string;
- location: string;
- start_page: number;
- end_page: number;
- },
- }) as RAGChunk
- );
+ console.log(`Found ${queryResponse.matches.length} matching chunks`);
+
+ // For each retrieved chunk, ensure its document ID is registered in the document manager
+ // This maintains compatibility with existing code while ensuring consistency
+ const processedMatches = queryResponse.matches.map(match => {
+ const chunk = {
+ id: match.id,
+ values: match.values as number[],
+ metadata: match.metadata as {
+ text: string;
+ type: string;
+ original_document: string;
+ file_path: string;
+ doc_id: string;
+ location: string;
+ start_page: number;
+ end_page: number;
+ },
+ } as RAGChunk;
+
+ return chunk;
+ });
+
+ return processedMatches;
} catch (error) {
console.error(`Error retrieving chunks: ${error}`);
return [];
diff --git a/src/client/views/pdf/PDFViewer.tsx b/src/client/views/pdf/PDFViewer.tsx
index fc2567fbc..a88d8b282 100644
--- a/src/client/views/pdf/PDFViewer.tsx
+++ b/src/client/views/pdf/PDFViewer.tsx
@@ -50,6 +50,15 @@ interface IViewerProps extends FieldViewProps {
crop: (region: Doc | undefined, addCrop?: boolean) => Doc | undefined;
}
+// A single fuzzy-search hit within the PDF: its page, match position, matched text, and optional relevance score.
+interface FuzzySearchResult {
+ pageIndex: number;
+ matchIndex: number;
+ text: string;
+ score?: number;
+ isParagraph?: boolean;
+}
+
/**
* Handles rendering and virtualization of the pdf
*/
@@ -68,6 +77,9 @@ export class PDFViewer extends ObservableReactComponent<IViewerProps> {
@observable _showWaiting = true;
@observable Index: number = -1;
@observable private _loading = false;
+ @observable private _fuzzySearchEnabled = true;
+ @observable private _fuzzySearchResults: FuzzySearchResult[] = [];
+ @observable private _currentFuzzyMatchIndex = 0;
private _pdfViewer!: PDFJSViewer.PDFViewer;
private _styleRule: number | undefined; // stylesheet rule for making hyperlinks clickable
@@ -334,27 +346,557 @@ export class PDFViewer extends ObservableReactComponent<IViewerProps> {
return index;
};
+    // Normalize text for fuzzy comparison: lowercase, strip punctuation, collapse
+    // whitespace, and trim. Punctuation is replaced with spaces FIRST so the spaces
+    // it introduces are collapsed by the whitespace pass (the previous order could
+    // leave runs of spaces, e.g. "a - b" -> "a   b", which broke exact-equality
+    // checks and produced empty tokens when callers split on ' ').
+    private normalizeText(text: string): string {
+        return text
+            .toLowerCase()
+            .replace(/[^\w\s]/g, ' ')
+            .replace(/\s+/g, ' ')
+            .trim();
+    }
+
+ // Compute similarity between two strings (0-1 where 1 is exact match).
+ // Strategy, in order: exact equality after normalization; chunk-containment and
+ // word-overlap heuristics when either string exceeds 50 chars; substring
+ // containment; Levenshtein distance when both strings are under 100 chars.
+ // NOTE(review): two dissimilar 100+ char strings that miss every heuristic fall
+ // through to the final `return 0` — confirm that is the intended floor.
+ private computeSimilarity(str1: string, str2: string): number {
+ const s1 = this.normalizeText(str1);
+ const s2 = this.normalizeText(str2);
+
+ if (s1 === s2) return 1;
+ if (s1.length === 0 || s2.length === 0) return 0;
+
+ // For very long texts, check if one contains chunks of the other
+ if (s1.length > 50 || s2.length > 50) {
+ // For long texts, check if significant chunks overlap
+ const longerText = s1.length > s2.length ? s1 : s2;
+ const shorterText = s1.length > s2.length ? s2 : s1;
+
+ // Break the shorter text into chunks
+ const words = shorterText.split(' ');
+ const chunkSize = Math.min(5, Math.floor(words.length / 2));
+
+ if (chunkSize > 0) {
+ let maxChunkMatch = 0;
+
+ // Check different chunks of the shorter text against the longer text
+ for (let i = 0; i <= words.length - chunkSize; i++) {
+ const chunk = words.slice(i, i + chunkSize).join(' ');
+ if (longerText.includes(chunk)) {
+ maxChunkMatch = Math.max(maxChunkMatch, chunk.length / shorterText.length);
+ }
+ }
+
+ if (maxChunkMatch > 0.2) {
+ return Math.min(0.9, maxChunkMatch + 0.3); // Boost the score, max 0.9
+ }
+ }
+
+ // Check for substantial overlap in content
+ const words1 = new Set(s1.split(' '));
+ const words2 = new Set(s2.split(' '));
+
+ let commonWords = 0;
+ for (const word of words1) {
+ if (word.length > 2 && words2.has(word)) {
+ // Only count meaningful words (length > 2)
+ commonWords++;
+ }
+ }
+
+ // Calculate ratio of common words
+ const overlapRatio = commonWords / Math.min(words1.size, words2.size);
+
+ // For long text, a lower match can still be significant
+ if (overlapRatio > 0.4) {
+ return Math.min(0.9, overlapRatio);
+ }
+ }
+
+ // Simple contains check for shorter texts
+ if (s1.includes(s2) || s2.includes(s1)) {
+ return (0.8 * Math.min(s1.length, s2.length)) / Math.max(s1.length, s2.length);
+ }
+
+ // For shorter texts, use Levenshtein for more precision
+ // NOTE: O(len1 * len2) time and memory, bounded here by the < 100 length guard.
+ if (s1.length < 100 && s2.length < 100) {
+ // Calculate Levenshtein distance
+ const dp: number[][] = Array(s1.length + 1)
+ .fill(0)
+ .map(() => Array(s2.length + 1).fill(0));
+
+ for (let i = 0; i <= s1.length; i++) dp[i][0] = i;
+ for (let j = 0; j <= s2.length; j++) dp[0][j] = j;
+
+ for (let i = 1; i <= s1.length; i++) {
+ for (let j = 1; j <= s2.length; j++) {
+ const cost = s1[i - 1] === s2[j - 1] ? 0 : 1;
+ dp[i][j] = Math.min(
+ dp[i - 1][j] + 1, // deletion
+ dp[i][j - 1] + 1, // insertion
+ dp[i - 1][j - 1] + cost // substitution
+ );
+ }
+ }
+
+ const distance = dp[s1.length][s2.length];
+ return 1 - distance / Math.max(s1.length, s2.length);
+ }
+
+ return 0;
+ }
+
+    /**
+     * Fuzzy-search the PDF's text for `searchString`, populating `_fuzzySearchResults`
+     * (sorted by descending similarity) and navigating to the first (or last, when
+     * `bwd`) match. Long queries use paragraph reconstruction and key-phrase
+     * matching; when nothing clears the threshold it falls back to the standard
+     * PDF.js find with distinctive terms.
+     *
+     * @param searchString text to locate; must be non-blank
+     * @param bwd when true, start navigation from the last match
+     * @param thresholdOverride internal — fixed similarity threshold used by the
+     *        single lowered-threshold retry; external callers omit it
+     * @returns true when a match or a fallback search was dispatched
+     */
+    private async performFuzzySearch(searchString: string, bwd?: boolean, thresholdOverride?: number): Promise<boolean> {
+        if (!this._pdfViewer || !searchString.trim()) return false;
+
+        const normalizedSearch = this.normalizeText(searchString);
+        // NOTE(review): observables are mutated from an async method outside an
+        // @action — confirm MobX strict-mode settings tolerate this.
+        this._fuzzySearchResults = [];
+
+        // Adjust threshold based on text length - more lenient for longer text
+        let similarityThreshold = 0.6;
+        if (searchString.length > 100) similarityThreshold = 0.35;
+        else if (searchString.length > 50) similarityThreshold = 0.45;
+        // Honor an explicit retry threshold. The previous code lowered a LOCAL
+        // threshold and recursed without passing it, so the recursion recomputed the
+        // original length-based threshold and could recurse forever on queries whose
+        // best match sat just below it.
+        if (thresholdOverride !== undefined) similarityThreshold = thresholdOverride;
+
+        console.log(`Using similarity threshold: ${similarityThreshold} for query length: ${searchString.length}`);
+
+        // For longer queries, also look for partial matches
+        const searchWords = normalizedSearch.split(' ').filter(w => w.length > 3);
+        const isLongQuery = searchWords.length > 5;
+
+        // Track best match for debugging
+        let bestMatchScore = 0;
+        let bestMatchText = '';
+
+        // Fallback strategy: extract key phrases for very long search queries
+        const keyPhrases: string[] = [];
+        if (searchString.length > 200) {
+            // Extract key phrases (chunks of 3-6 words) from the search string
+            const words = normalizedSearch.split(' ');
+            for (let i = 0; i < words.length - 2; i += 2) {
+                const phraseLength = Math.min(5, words.length - i);
+                if (phraseLength >= 3) {
+                    keyPhrases.push(words.slice(i, i + phraseLength).join(' '));
+                }
+            }
+            console.log(`Using ${keyPhrases.length} key phrases for long search text`);
+        }
+
+        // Process PDF in batches to avoid memory issues
+        const totalPages = this._pageSizes.length;
+        const BATCH_SIZE = 10; // Process 10 pages at a time
+
+        console.log(`Searching all ${totalPages} pages in batches of ${BATCH_SIZE}`);
+
+        // Process PDF in batches
+        for (let batchStart = 0; batchStart < totalPages; batchStart += BATCH_SIZE) {
+            const batchEnd = Math.min(batchStart + BATCH_SIZE, totalPages);
+            console.log(`Processing pages ${batchStart + 1} to ${batchEnd} of ${totalPages}`);
+
+            // Process each page in current batch
+            for (let pageIndex = batchStart; pageIndex < batchEnd; pageIndex++) {
+                try {
+                    const page = await this._props.pdf.getPage(pageIndex + 1);
+                    const textContent = await page.getTextContent();
+
+                    // For long text, try to reconstruct paragraphs first
+                    let paragraphs: string[] = [];
+
+                    try {
+                        if (isLongQuery) {
+                            // Group text items into paragraphs based on positions
+                            let currentY: number | null = null;
+                            let currentParagraph = '';
+
+                            // Sort by Y position first, then X
+                            const sortedItems = [...textContent.items].sort((a: any, b: any) => {
+                                const aTransform = (a as any).transform || [];
+                                const bTransform = (b as any).transform || [];
+                                if (Math.abs(aTransform[5] - bTransform[5]) < 5) {
+                                    return (aTransform[4] || 0) - (bTransform[4] || 0);
+                                }
+                                return (aTransform[5] || 0) - (bTransform[5] || 0);
+                            });
+
+                            // Limit paragraph size to avoid overflows
+                            const MAX_PARAGRAPH_LENGTH = 1000;
+
+                            for (const item of sortedItems) {
+                                const text = (item as any).str || '';
+                                const transform = (item as any).transform || [];
+                                const y = transform[5];
+
+                                // If this is a new line or first item
+                                if (currentY === null || Math.abs(y - currentY) > 5 || currentParagraph.length + text.length > MAX_PARAGRAPH_LENGTH) {
+                                    if (currentParagraph) {
+                                        paragraphs.push(currentParagraph.trim());
+                                    }
+                                    currentParagraph = text;
+                                    currentY = y;
+                                } else {
+                                    // Continue the current paragraph
+                                    currentParagraph += ' ' + text;
+                                }
+                            }
+
+                            // Add the last paragraph
+                            if (currentParagraph) {
+                                paragraphs.push(currentParagraph.trim());
+                            }
+
+                            // Limit the number of paragraph combinations to avoid exponential growth
+                            const MAX_COMBINED_PARAGRAPHS = 5;
+
+                            // Also create overlapping larger paragraphs for better context, but limit size
+                            if (paragraphs.length > 1) {
+                                const combinedCount = Math.min(paragraphs.length - 1, MAX_COMBINED_PARAGRAPHS);
+                                for (let i = 0; i < combinedCount; i++) {
+                                    if (paragraphs[i].length + paragraphs[i + 1].length < MAX_PARAGRAPH_LENGTH) {
+                                        paragraphs.push(paragraphs[i] + ' ' + paragraphs[i + 1]);
+                                    }
+                                }
+                            }
+                        }
+                    } catch (paragraphError) {
+                        console.warn('Error during paragraph reconstruction:', paragraphError);
+                        // Continue with individual items if paragraph reconstruction fails
+                    }
+
+                    // For extremely long search texts, use our key phrases approach
+                    if (keyPhrases.length > 0) {
+                        // Check each paragraph for key phrases
+                        for (const [paragraphIdx, paragraph] of paragraphs.entries()) {
+                            let matchingPhrases = 0;
+                            let bestPhraseScore = 0;
+
+                            for (const phrase of keyPhrases) {
+                                const similarity = this.computeSimilarity(paragraph, phrase);
+                                if (similarity > 0.7) matchingPhrases++;
+                                bestPhraseScore = Math.max(bestPhraseScore, similarity);
+                            }
+
+                            // If multiple key phrases match, this is likely a good result
+                            if (matchingPhrases > 1 || bestPhraseScore > 0.8) {
+                                this._fuzzySearchResults.push({
+                                    pageIndex,
+                                    matchIndex: paragraphIdx, // loop index (was paragraphs.indexOf(paragraph): O(n^2))
+                                    text: paragraph,
+                                    score: 0.7 + matchingPhrases * 0.05,
+                                    isParagraph: true,
+                                });
+                            }
+                        }
+
+                        // Also check each item directly
+                        for (const [itemIdx, item] of textContent.items.entries()) {
+                            const text = (item as any).str || '';
+                            if (!text.trim()) continue;
+
+                            for (const phrase of keyPhrases) {
+                                const similarity = this.computeSimilarity(text, phrase);
+                                if (similarity > 0.7) {
+                                    this._fuzzySearchResults.push({
+                                        pageIndex,
+                                        matchIndex: itemIdx, // loop index (was items.indexOf(item): O(n^2))
+                                        text: text,
+                                        score: similarity,
+                                        isParagraph: false,
+                                    });
+                                    break; // One matching phrase is enough for direct items
+                                }
+                            }
+                        }
+
+                        continue; // Skip normal processing for this page, we've used the key phrases approach
+                    }
+
+                    // Ensure paragraphs aren't too large before checking
+                    paragraphs = paragraphs.filter(p => p.length < 5000);
+
+                    // Check both individual items and reconstructed paragraphs
+                    try {
+                        const itemsToCheck = [
+                            ...textContent.items.map((item: any, itemIdx: number) => ({
+                                idx: itemIdx, // map index (was items.indexOf(item): O(n^2))
+                                text: (item as any).str || '',
+                                isParagraph: false,
+                            })),
+                            ...paragraphs.map((p, i) => ({
+                                idx: i,
+                                text: p,
+                                isParagraph: true,
+                            })),
+                        ];
+
+                        for (const item of itemsToCheck) {
+                            if (!item.text.trim() || item.text.length > 5000) continue;
+
+                            const similarity = this.computeSimilarity(item.text, normalizedSearch);
+
+                            // Track best match for debugging
+                            if (similarity > bestMatchScore) {
+                                bestMatchScore = similarity;
+                                bestMatchText = item.text.substring(0, 100);
+                            }
+
+                            if (similarity > similarityThreshold) {
+                                this._fuzzySearchResults.push({
+                                    pageIndex,
+                                    matchIndex: item.idx,
+                                    text: item.text,
+                                    score: similarity,
+                                    isParagraph: item.isParagraph,
+                                });
+                            }
+                        }
+                    } catch (itemCheckError) {
+                        console.warn('Error checking items on page:', itemCheckError);
+                    }
+                } catch (error) {
+                    console.error(`Error extracting text from page ${pageIndex + 1}:`, error);
+                    // Continue with other pages even if one fails
+                }
+            }
+
+            // Check if we already have good matches after each batch
+            // This allows us to stop early if we've found excellent matches
+            if (this._fuzzySearchResults.length > 0) {
+                // Sort results by similarity (descending)
+                this._fuzzySearchResults.sort((a, b) => (b.score || 0) - (a.score || 0));
+
+                // If we have an excellent match (score > 0.8), stop searching
+                if (this._fuzzySearchResults[0]?.score && this._fuzzySearchResults[0].score > 0.8) {
+                    console.log(`Found excellent match (score: ${this._fuzzySearchResults[0].score?.toFixed(2)}) - stopping early`);
+                    break;
+                }
+
+                // If we have several good matches (score > 0.6), stop searching
+                if (this._fuzzySearchResults.length >= 3 && this._fuzzySearchResults.every(r => r.score && r.score > 0.6)) {
+                    console.log(`Found ${this._fuzzySearchResults.length} good matches - stopping early`);
+                    break;
+                }
+            }
+
+            // Perform cleanup between batches to avoid memory buildup
+            if (batchEnd < totalPages) {
+                // Give the browser a moment to breathe and release memory
+                await new Promise(resolve => setTimeout(resolve, 1));
+            }
+        }
+
+        // If no results with advanced search, try standard search with key terms
+        if (this._fuzzySearchResults.length === 0 && searchWords.length > 3) {
+            // Find the most distinctive words (longer words are often more specific)
+            const distinctiveWords = searchWords
+                .filter(w => w.length > 4)
+                .sort((a, b) => b.length - a.length)
+                .slice(0, 3);
+
+            if (distinctiveWords.length > 0) {
+                console.log(`Falling back to standard search with distinctive term: ${distinctiveWords[0]}`);
+                this._pdfViewer.eventBus.dispatch('find', {
+                    query: distinctiveWords[0],
+                    phraseSearch: false,
+                    highlightAll: true,
+                    findPrevious: false,
+                });
+                return true;
+            }
+        }
+
+        console.log(`Best match (${bestMatchScore.toFixed(2)}): "${bestMatchText}"`);
+        console.log(`Found ${this._fuzzySearchResults.length} matches above threshold ${similarityThreshold}`);
+
+        // Sort results by similarity (descending)
+        this._fuzzySearchResults.sort((a, b) => (b.score || 0) - (a.score || 0));
+
+        // Navigate to the first/last result based on direction
+        if (this._fuzzySearchResults.length > 0) {
+            this._currentFuzzyMatchIndex = bwd ? this._fuzzySearchResults.length - 1 : 0;
+            this.navigateToFuzzyMatch(this._currentFuzzyMatchIndex);
+            return true;
+        } else if (bestMatchScore > 0 && thresholdOverride === undefined) {
+            // Found something below threshold: retry ONCE with an explicit lowered
+            // threshold. The thresholdOverride guard prevents the retry from
+            // recursing again (the old code could loop indefinitely here).
+            if (bestMatchScore > similarityThreshold * 0.7) {
+                console.log(`Lowering threshold to ${bestMatchScore * 0.9} and retrying search`);
+                return this.performFuzzySearch(searchString, bwd, bestMatchScore * 0.9);
+            }
+        }
+
+        // Ultimate fallback: Use standard PDF.js search with the most common words
+        if (this._fuzzySearchResults.length === 0) {
+            // Extract a few words from the middle of the search string
+            const words = normalizedSearch.split(' ');
+            const middleIndex = Math.floor(words.length / 2);
+            const searchPhrase = words.slice(Math.max(0, middleIndex - 1), Math.min(words.length, middleIndex + 2)).join(' ');
+
+            console.log(`Falling back to standard search with phrase: ${searchPhrase}`);
+            this._pdfViewer.eventBus.dispatch('find', {
+                query: searchPhrase,
+                phraseSearch: true,
+                highlightAll: true,
+                findPrevious: false,
+            });
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
+     * Scroll the viewer to the fuzzy match at `index` in `_fuzzySearchResults`
+     * and highlight it via PDF.js 'find' events. Paragraph matches are highlighted
+     * as overlapping 5-word chunks (staggered to avoid conflicting dispatches);
+     * long item matches by a phrase taken from their middle; short matches
+     * verbatim. Out-of-range indices are ignored.
+     */
+    private navigateToFuzzyMatch(index: number): void {
+        if (index >= 0 && index < this._fuzzySearchResults.length) {
+            const match = this._fuzzySearchResults[index];
+            console.log(`Navigating to match: ${match.text.substring(0, 50)}... (score: ${match.score?.toFixed(2) || 'unknown'})`);
+
+            // Scroll to the page containing the match
+            this._pdfViewer.scrollPageIntoView({
+                pageNumber: match.pageIndex + 1,
+            });
+
+            // For paragraph matches, use a more specific approach
+            if (match.isParagraph) {
+                // Break the text into smaller chunks to improve highlighting.
+                // (An unused normalizeText(match.text) result was previously computed here; removed.)
+                const words = match.text.split(/\s+/);
+
+                // Try to highlight with shorter chunks to get better visual feedback
+                if (words.length > 5) {
+                    // Create 5-word overlapping chunks
+                    const chunks = [];
+                    for (let i = 0; i < words.length - 4; i += 3) {
+                        chunks.push(words.slice(i, i + 5).join(' '));
+                    }
+
+                    // Highlight each chunk
+                    if (chunks.length > 0) {
+                        // Highlight the first chunk immediately
+                        this._pdfViewer.eventBus.dispatch('find', {
+                            query: chunks[0],
+                            phraseSearch: true,
+                            highlightAll: true,
+                            findPrevious: false,
+                        });
+
+                        // Highlight the rest with small delays to avoid conflicts
+                        chunks.slice(1).forEach((chunk, i) => {
+                            setTimeout(
+                                () => {
+                                    this._pdfViewer.eventBus.dispatch('find', {
+                                        query: chunk,
+                                        phraseSearch: true,
+                                        highlightAll: true,
+                                        findPrevious: false,
+                                    });
+                                },
+                                (i + 1) * 100
+                            );
+                        });
+                        return;
+                    }
+                }
+            }
+
+            // Standard highlighting for non-paragraph matches or short text
+            if (this._pdfViewer.findController) {
+                // For longer text, try to find the most unique phrases to highlight
+                if (match.text.length > 50) {
+                    const words = match.text.split(/\s+/);
+                    // Look for 3-5 word phrases that are likely to be unique
+                    let phraseToHighlight = match.text;
+
+                    if (words.length >= 5) {
+                        // Take a phrase from the middle of the text
+                        const middleIndex = Math.floor(words.length / 2);
+                        phraseToHighlight = words.slice(middleIndex - 2, middleIndex + 3).join(' ');
+                    }
+
+                    console.log(`Highlighting phrase: "${phraseToHighlight}"`);
+
+                    this._pdfViewer.eventBus.dispatch('find', {
+                        query: phraseToHighlight,
+                        phraseSearch: true,
+                        highlightAll: true,
+                        findPrevious: false,
+                    });
+                } else {
+                    // For shorter text, use the entire match
+                    this._pdfViewer.eventBus.dispatch('find', {
+                        query: match.text,
+                        phraseSearch: true,
+                        highlightAll: true,
+                        findPrevious: false,
+                    });
+                }
+            }
+        }
+    }
+
+ // Navigate to next fuzzy match: advance `_currentFuzzyMatchIndex` with
+ // wrap-around and highlight it. Returns false when there are no results.
+ private nextFuzzyMatch(): boolean {
+ if (this._fuzzySearchResults.length === 0) return false;
+
+ this._currentFuzzyMatchIndex = (this._currentFuzzyMatchIndex + 1) % this._fuzzySearchResults.length;
+ this.navigateToFuzzyMatch(this._currentFuzzyMatchIndex);
+ return true;
+ }
+
+ // Navigate to previous fuzzy match: step `_currentFuzzyMatchIndex` back with
+ // wrap-around and highlight it. Returns false when there are no results.
+ private prevFuzzyMatch(): boolean {
+ if (this._fuzzySearchResults.length === 0) return false;
+
+ this._currentFuzzyMatchIndex = (this._currentFuzzyMatchIndex - 1 + this._fuzzySearchResults.length) % this._fuzzySearchResults.length;
+ this.navigateToFuzzyMatch(this._currentFuzzyMatchIndex);
+ return true;
+ }
+
@action
search = (searchString: string, bwd?: boolean, clear: boolean = false) => {
- const findOpts = {
- caseSensitive: false,
- findPrevious: bwd,
- highlightAll: true,
- phraseSearch: true,
- query: searchString,
- };
if (clear) {
+ // Reset fuzzy results so the next search starts fresh
+ this._fuzzySearchResults = [];
this._pdfViewer?.eventBus.dispatch('findbarclose', {});
- } else if (!searchString) {
+ return true;
+ }
+
+ // Empty query: step through annotations instead of text matches
+ if (!searchString) {
bwd ? this.prevAnnotation() : this.nextAnnotation();
- } else if (this._pdfViewer?.pageViewsReady) {
- this._pdfViewer?.eventBus.dispatch('find', { ...findOpts, type: 'again' });
- } else if (this._mainCont.current) {
- const executeFind = () => this._pdfViewer?.eventBus.dispatch('find', findOpts);
- this._mainCont.current.addEventListener('pagesloaded', executeFind);
- this._mainCont.current.addEventListener('pagerendered', executeFind);
+ return true;
}
- return true;
+
+ // If we already have fuzzy search results, navigate through them
+ // NOTE(review): results are keyed only by their existence, not by the query —
+ // calling search() with a DIFFERENT string while old results exist steps
+ // through the stale results until `clear` is passed. TODO confirm callers
+ // always clear between distinct queries.
+ if (this._fuzzySearchEnabled && this._fuzzySearchResults.length > 0) {
+ return bwd ? this.prevFuzzyMatch() : this.nextFuzzyMatch();
+ }
+
+ // For new search, decide between fuzzy and standard search
+ if (this._fuzzySearchEnabled) {
+ // Start fuzzy search (async, fire-and-forget: navigation happens inside it)
+ this.performFuzzySearch(searchString, bwd);
+ return true;
+ } else {
+ // Use original PDF.js search
+ const findOpts = {
+ caseSensitive: false,
+ findPrevious: bwd,
+ highlightAll: true,
+ phraseSearch: true,
+ query: searchString,
+ };
+
+ if (this._pdfViewer?.pageViewsReady) {
+ this._pdfViewer?.eventBus.dispatch('find', { ...findOpts, type: 'again' });
+ } else if (this._mainCont.current) {
+ // Viewer not ready yet: defer the find until pages have rendered
+ const executeFind = () => this._pdfViewer?.eventBus.dispatch('find', findOpts);
+ this._mainCont.current.addEventListener('pagesloaded', executeFind);
+ this._mainCont.current.addEventListener('pagerendered', executeFind);
+ }
+ return true;
+ }
+ };
+
+ // Toggle fuzzy search mode on/off for subsequent search() calls.
+ // @returns the new enabled state
+ @action
+ toggleFuzzySearch = (): boolean => {
+ this._fuzzySearchEnabled = !this._fuzzySearchEnabled;
+ return this._fuzzySearchEnabled;
+ };
@action
diff --git a/src/server/ApiManagers/AssistantManager.ts b/src/server/ApiManagers/AssistantManager.ts
index af25722a4..c7c347c71 100644
--- a/src/server/ApiManagers/AssistantManager.ts
+++ b/src/server/ApiManagers/AssistantManager.ts
@@ -39,6 +39,7 @@ export enum Directory {
csv = 'csv',
chunk_images = 'chunk_images',
scrape_images = 'scrape_images',
+ vectorstore = 'vectorstore',
}
// In-memory job tracking
@@ -92,6 +93,132 @@ export default class AssistantManager extends ApiManager {
const customsearch = google.customsearch('v1');
const openai = new OpenAI({ apiKey: env.OPENAI_API_KEY });
+        // Register an endpoint to retrieve file summaries from the json file.
+        // Responds with the raw JSON text of file_summaries.json, 404 when the
+        // summaries file has not been generated yet, 500 on read failure.
+        // (Unused `req` binding removed for consistency with /getFileNames.)
+        register({
+            method: Method.GET,
+            subscription: '/getFileSummaries',
+            secureHandler: async ({ res }) => {
+                try {
+                    // Read the file summaries JSON file
+                    const filePath = path.join(filesDirectory, Directory.vectorstore, 'file_summaries.json');
+
+                    if (!fs.existsSync(filePath)) {
+                        res.status(404).send({ error: 'File summaries not found' });
+                        return;
+                    }
+
+                    const data = fs.readFileSync(filePath, 'utf8');
+                    res.send(data);
+                } catch (error) {
+                    console.error('Error retrieving file summaries:', error);
+                    res.status(500).send({
+                        error: 'Failed to retrieve file summaries',
+                    });
+                }
+            },
+        });
+
+        // Register an endpoint to retrieve file names from the file_summaries.json file.
+        // Responds with the array of file keys, 404 when the summaries file is missing,
+        // 500 on read/parse failure. (Previously a missing or malformed file threw an
+        // unhandled synchronous error, and the JSON was parsed twice.)
+        register({
+            method: Method.GET,
+            subscription: '/getFileNames',
+            secureHandler: async ({ res }) => {
+                try {
+                    const filePath = path.join(filesDirectory, Directory.vectorstore, 'file_summaries.json');
+                    if (!fs.existsSync(filePath)) {
+                        res.status(404).send({ error: 'File summaries not found' });
+                        return;
+                    }
+                    const data = fs.readFileSync(filePath, 'utf8');
+                    // Parse once and reuse for both the debug log and the response.
+                    const fileNames = Object.keys(JSON.parse(data));
+                    console.log(fileNames);
+
+                    res.send(fileNames);
+                } catch (error) {
+                    console.error('Error retrieving file names:', error);
+                    res.status(500).send({ error: 'Failed to retrieve file names' });
+                }
+            },
+        });
+
+ // Register an endpoint to retrieve file content from the content json file.
+ // Body: { filepath }. Looks the path up as a key in file_content.json and
+ // returns the stored content; 400 when filepath is missing, 404 when the
+ // database or the key is absent, 500 on read/parse failure.
+ // NOTE(review): near-duplicate of '/getRawFileContent' below — only the
+ // response content type and error payload shapes differ; consider a shared helper.
+ register({
+ method: Method.POST,
+ subscription: '/getFileContent',
+ secureHandler: async ({ req, res }) => {
+ const { filepath } = req.body;
+
+ if (!filepath) {
+ res.status(400).send({ error: 'Filepath is required' });
+ return;
+ }
+
+ try {
+ // Read the file content JSON file
+ const filePath = path.join(filesDirectory, Directory.vectorstore, 'file_content.json');
+
+ if (!fs.existsSync(filePath)) {
+ res.status(404).send({ error: 'File content database not found' });
+ return;
+ }
+
+ console.log(`[DEBUG] Retrieving content for: ${filepath}`);
+
+ // Read the JSON file in chunks to handle large files
+ // NOTE(review): the chunks are accumulated into one string before JSON.parse,
+ // so peak memory is still the whole file; streaming only avoids blocking reads.
+ const readStream = fs.createReadStream(filePath, { encoding: 'utf8' });
+ let jsonData = '';
+
+ readStream.on('data', chunk => {
+ jsonData += chunk;
+ });
+
+ readStream.on('end', () => {
+ try {
+ // Parse the JSON
+ const contentMap = JSON.parse(jsonData);
+
+ // Check if the filepath exists in the map
+ if (!contentMap[filepath]) {
+ console.log(`[DEBUG] Content not found for: ${filepath}`);
+ res.status(404).send({ error: `Content not found for filepath: ${filepath}` });
+ return;
+ }
+
+ // Return the file content as is, not as JSON
+ console.log(`[DEBUG] Found content for: ${filepath} (${contentMap[filepath].length} chars)`);
+ res.send(contentMap[filepath]);
+ } catch (parseError) {
+ console.error('Error parsing file_content.json:', parseError);
+ res.status(500).send({
+ error: 'Failed to parse file content database',
+ });
+ }
+ });
+
+ readStream.on('error', streamError => {
+ console.error('Error reading file_content.json:', streamError);
+ res.status(500).send({
+ error: 'Failed to read file content database',
+ });
+ });
+ } catch (error) {
+ console.error('Error retrieving file content:', error);
+ res.status(500).send({
+ error: 'Failed to retrieve file content',
+ });
+ }
+ },
+ });
+
+ // Register an endpoint to search file summaries.
+ // Placeholder: validates the query and responds with a redirect message — the
+ // real search lives in the client-side Vectorstore class.
+ // NOTE(review): `topK` is destructured but never used; remove or implement.
+ register({
+ method: Method.POST,
+ subscription: '/searchFileSummaries',
+ secureHandler: async ({ req, res }) => {
+ const { query, topK } = req.body;
+
+ if (!query) {
+ res.status(400).send({ error: 'Search query is required' });
+ return;
+ }
+
+ // This endpoint will be called by the client-side Vectorstore to perform the search
+ // The actual search is implemented in the Vectorstore class
+
+ res.send({ message: 'This endpoint should be called through the Vectorstore class' });
+ },
+ });
+
// Register Wikipedia summary API route
register({
method: Method.POST,
@@ -485,36 +612,76 @@ export default class AssistantManager extends ApiManager {
subscription: '/scrapeWebsite',
secureHandler: async ({ req, res }) => {
const { url } = req.body;
+ let browser = null;
try {
+ // Set a longer timeout for slow-loading pages
+ const navigationTimeout = 60000; // 60 seconds
+
// Launch Puppeteer browser to navigate to the webpage
- const browser = await puppeteer.launch({
- args: ['--no-sandbox', '--disable-setuid-sandbox'],
+ browser = await puppeteer.launch({
+ args: ['--no-sandbox', '--disable-setuid-sandbox', '--disable-dev-shm-usage'],
});
const page = await browser.newPage();
await page.setUserAgent('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36');
- await page.goto(url, { waitUntil: 'networkidle2' });
+
+ // Set timeout for navigation
+ page.setDefaultNavigationTimeout(navigationTimeout);
+
+ // Navigate with timeout and wait for content to load
+ await page.goto(url, {
+ waitUntil: 'networkidle2',
+ timeout: navigationTimeout,
+ });
+
+ // Wait a bit longer to ensure dynamic content loads
+ await new Promise(resolve => setTimeout(resolve, 2000));
// Extract HTML content
const htmlContent = await page.content();
await browser.close();
+ browser = null;
- // Parse HTML content using JSDOM
- const dom = new JSDOM(htmlContent, { url });
+ let extractedText = '';
- // Extract readable content using Mozilla's Readability API
- const reader = new Readability(dom.window.document);
- const article = reader.parse();
+ // First try with Readability
+ try {
+ // Parse HTML content using JSDOM
+ const dom = new JSDOM(htmlContent, { url });
+
+ // Extract readable content using Mozilla's Readability API
+ const reader = new Readability(dom.window.document, {
+ // Readability configuration to focus on text content
+ charThreshold: 100,
+ keepClasses: false,
+ });
+ const article = reader.parse();
- if (article) {
- const plainText = article.textContent;
- res.send({ website_plain_text: plainText });
- } else {
- res.status(500).send({ error: 'Failed to extract readable content' });
+ if (article && article.textContent) {
+ extractedText = article.textContent;
+ } else {
+ // If Readability doesn't return useful content, try alternate method
+ extractedText = await extractEnhancedContent(htmlContent);
+ }
+ } catch (parsingError) {
+ console.error('Error parsing website content with Readability:', parsingError);
+ // Fallback to enhanced content extraction
+ extractedText = await extractEnhancedContent(htmlContent);
}
+
+ // Clean up the extracted text
+ extractedText = cleanupText(extractedText);
+
+ res.send({ website_plain_text: extractedText });
} catch (error) {
console.error('Error scraping website:', error);
+
+ // Clean up browser if still open
+ if (browser) {
+ await browser.close().catch(e => console.error('Error closing browser:', e));
+ }
+
res.status(500).send({
- error: 'Failed to scrape website',
+ error: 'Failed to scrape website: ' + ((error as Error).message || 'Unknown error'),
});
}
},
@@ -526,7 +693,7 @@ export default class AssistantManager extends ApiManager {
method: Method.POST,
subscription: '/createDocument',
secureHandler: async ({ req, res }) => {
- const { file_path } = req.body;
+ const { file_path, doc_id } = req.body;
const public_path = path.join(publicDirectory, file_path); // Resolve the file path in the public directory
const file_name = path.basename(file_path); // Extract the file name from the path
@@ -539,7 +706,7 @@ export default class AssistantManager extends ApiManager {
// Spawn the Python process and track its progress/output
// eslint-disable-next-line no-use-before-define
- spawnPythonProcess(jobId, public_path);
+ spawnPythonProcess(jobId, public_path, doc_id);
// Send the job ID back to the client for tracking
res.send({ jobId });
@@ -687,6 +854,193 @@ export default class AssistantManager extends ApiManager {
}
},
});
+
+        // Register an API route to capture a screenshot of a webpage using Puppeteer
+        // and return the image URL for display in the WebBox component.
+        // Body: { url, width?, height?, fullPage? }. Responds with
+        // { screenshotUrl, fullHeight } or a 400/500 error payload.
+        register({
+            method: Method.POST,
+            subscription: '/captureWebScreenshot',
+            secureHandler: async ({ req, res }) => {
+                const { url, width, height, fullPage } = req.body;
+
+                if (!url) {
+                    res.status(400).send({ error: 'URL is required' });
+                    return;
+                }
+
+                let browser = null;
+                try {
+                    // Increase timeout for websites that load slowly
+                    const navigationTimeout = 60000; // 60 seconds
+
+                    // Launch a headless browser with additional options to improve stability
+                    browser = await puppeteer.launch({
+                        headless: true, // Use headless mode
+                        args: [
+                            '--no-sandbox',
+                            '--disable-setuid-sandbox',
+                            '--disable-dev-shm-usage',
+                            '--disable-accelerated-2d-canvas',
+                            '--disable-gpu',
+                            '--window-size=1200,800',
+                            '--disable-web-security', // Helps with cross-origin issues
+                            '--disable-features=IsolateOrigins,site-per-process', // Helps with frames
+                        ],
+                        timeout: navigationTimeout,
+                    });
+
+                    const page = await browser.newPage();
+
+                    // Set a larger viewport to capture more content
+                    await page.setViewport({
+                        width: Number(width) || 1200,
+                        height: Number(height) || 800,
+                        deviceScaleFactor: 1,
+                    });
+
+                    // Enable request interception to speed up page loading
+                    await page.setRequestInterception(true);
+                    page.on('request', request => {
+                        // Skip unnecessary resources to speed up loading
+                        const resourceType = request.resourceType();
+                        if (resourceType === 'font' || resourceType === 'media' || resourceType === 'websocket' || request.url().includes('analytics') || request.url().includes('tracker')) {
+                            request.abort();
+                        } else {
+                            request.continue();
+                        }
+                    });
+
+                    // Set navigation and timeout options
+                    console.log(`Navigating to URL: ${url}`);
+
+                    // Navigate to the URL and wait for the page to load
+                    await page.goto(url, {
+                        waitUntil: ['networkidle2'],
+                        timeout: navigationTimeout,
+                    });
+
+                    // Wait for a short delay after navigation to allow content to render
+                    await new Promise(resolve => setTimeout(resolve, 2000));
+
+                    // Take a screenshot. Compute the file name ONCE so the saved file and
+                    // the URL returned to the client agree — the previous code called
+                    // Date.now() a second time when building the URL, handing the client
+                    // a link to a file that was never written.
+                    console.log('Taking screenshot...');
+                    const screenshotFileName = `webpage_${Date.now()}.png`;
+                    const screenshotPath = `./src/server/public/files/images/${screenshotFileName}`;
+                    const screenshotOptions = {
+                        path: screenshotPath,
+                        fullPage: fullPage === true,
+                        omitBackground: false,
+                        type: 'png' as 'png',
+                        clip:
+                            fullPage !== true
+                                ? {
+                                      x: 0,
+                                      y: 0,
+                                      width: Number(width) || 1200,
+                                      height: Number(height) || 800,
+                                  }
+                                : undefined,
+                    };
+
+                    await page.screenshot(screenshotOptions);
+
+                    // Get the full height of the page
+                    const fullHeight = await page.evaluate(() => {
+                        return Math.max(document.body.scrollHeight, document.documentElement.scrollHeight, document.body.offsetHeight, document.documentElement.offsetHeight, document.body.clientHeight, document.documentElement.clientHeight);
+                    });
+
+                    console.log(`Screenshot captured successfully with height: ${fullHeight}px`);
+
+                    // Return the URL to the screenshot (same file name as written above)
+                    const screenshotUrl = `/files/images/${screenshotFileName}`;
+                    res.json({
+                        screenshotUrl,
+                        fullHeight,
+                    });
+                } catch (error: any) {
+                    console.error('Error capturing screenshot:', error);
+                    res.status(500).send({
+                        error: `Failed to capture screenshot: ${error.message}`,
+                        details: error.stack,
+                    });
+                } finally {
+                    // Ensure browser is closed to free resources
+                    if (browser) {
+                        try {
+                            await browser.close();
+                            console.log('Browser closed successfully');
+                        } catch (error) {
+                            console.error('Error closing browser:', error);
+                        }
+                    }
+                }
+            },
+        });
+
+ // Register an endpoint to retrieve raw file content as plain text (no JSON parsing).
+ // Body: { filepath }. Same lookup as '/getFileContent' but responds with
+ // Content-Type: text/plain and string error bodies instead of JSON objects.
+ // NOTE(review): near-duplicate of '/getFileContent' above — consider a shared helper.
+ register({
+ method: Method.POST,
+ subscription: '/getRawFileContent',
+ secureHandler: async ({ req, res }) => {
+ const { filepath } = req.body;
+
+ if (!filepath) {
+ res.status(400).send('Filepath is required');
+ return;
+ }
+
+ try {
+ // Read the file content JSON file
+ const filePath = path.join(filesDirectory, Directory.vectorstore, 'file_content.json');
+
+ if (!fs.existsSync(filePath)) {
+ res.status(404).send('File content database not found');
+ return;
+ }
+
+ console.log(`[DEBUG] Retrieving raw content for: ${filepath}`);
+
+ // Read the JSON file
+ // NOTE(review): the stream is accumulated into one string before JSON.parse,
+ // so peak memory is still the whole file; streaming only avoids blocking reads.
+ const readStream = fs.createReadStream(filePath, { encoding: 'utf8' });
+ let jsonData = '';
+
+ readStream.on('data', chunk => {
+ jsonData += chunk;
+ });
+
+ readStream.on('end', () => {
+ try {
+ // Parse the JSON
+ const contentMap = JSON.parse(jsonData);
+
+ // Check if the filepath exists in the map
+ if (!contentMap[filepath]) {
+ console.log(`[DEBUG] Content not found for: ${filepath}`);
+ res.status(404).send(`Content not found for filepath: ${filepath}`);
+ return;
+ }
+
+ // Set content type to plain text to avoid JSON parsing
+ res.setHeader('Content-Type', 'text/plain');
+
+ // Return the file content as plain text
+ console.log(`[DEBUG] Found content for: ${filepath} (${contentMap[filepath].length} chars)`);
+ res.send(contentMap[filepath]);
+ } catch (parseError) {
+ console.error('Error parsing file_content.json:', parseError);
+ res.status(500).send('Failed to parse file content database');
+ }
+ });
+
+ readStream.on('error', streamError => {
+ console.error('Error reading file_content.json:', streamError);
+ res.status(500).send('Failed to read file content database');
+ });
+ } catch (error) {
+ console.error('Error retrieving file content:', error);
+ res.status(500).send('Failed to retrieve file content');
+ }
+ },
+ });
}
}
@@ -696,7 +1050,7 @@ export default class AssistantManager extends ApiManager {
* @param file_name The name of the file to process.
* @param file_path The filepath of the file to process.
*/
-function spawnPythonProcess(jobId: string, file_path: string) {
+function spawnPythonProcess(jobId: string, file_path: string, doc_id: string) {
const venvPath = path.join(__dirname, '../chunker/venv');
const requirementsPath = path.join(__dirname, '../chunker/requirements.txt');
const pythonScriptPath = path.join(__dirname, '../chunker/pdf_chunker.py');
@@ -706,7 +1060,7 @@ function spawnPythonProcess(jobId: string, file_path: string) {
function runPythonScript() {
const pythonPath = process.platform === 'win32' ? path.join(venvPath, 'Scripts', 'python') : path.join(venvPath, 'bin', 'python3');
- const pythonProcess = spawn(pythonPath, [pythonScriptPath, jobId, file_path, outputDirectory]);
+ const pythonProcess = spawn(pythonPath, [pythonScriptPath, jobId, file_path, outputDirectory, doc_id]);
let pythonOutput = '';
let stderrOutput = '';
@@ -829,3 +1183,121 @@ function spawnPythonProcess(jobId: string, file_path: string) {
runPythonScript();
}
}
+
+/**
+ * Enhanced content extraction that focuses on meaningful text content.
+ * Strips scripts, page chrome, form controls, media, and common ad/popup
+ * containers, then collects text from content-bearing elements.
+ * @param html The HTML content to process
+ * @returns Extracted and cleaned text content (or a fallback message on error)
+ */
+async function extractEnhancedContent(html: string): Promise<string> {
+    try {
+        // Create DOM to extract content; scripts in the page are not executed.
+        const dom = new JSDOM(html, { runScripts: 'outside-only' });
+        const document = dom.window.document;
+
+        // Selectors for elements that never contribute readable content.
+        const elementsToRemove = [
+            'script',
+            'style',
+            'iframe',
+            'noscript',
+            'svg',
+            'canvas',
+            'header',
+            'footer',
+            'nav',
+            'aside',
+            'form',
+            'button',
+            'input',
+            'select',
+            'textarea',
+            'meta',
+            'link',
+            'img',
+            'video',
+            'audio',
+            '.ad',
+            '.ads',
+            '.advertisement',
+            '.banner',
+            '.cookie',
+            '.popup',
+            '.modal',
+            '.newsletter',
+            '[role="banner"]',
+            '[role="navigation"]',
+            '[role="complementary"]',
+        ];
+
+        elementsToRemove.forEach(selector => {
+            document.querySelectorAll(selector).forEach(el => el.remove());
+        });
+
+        // Content-bearing tags, grouped so the output keeps the original
+        // tag-priority order: all paragraphs first, then headings, lists,
+        // table cells, and finally generic containers.
+        const contentTags = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'td', 'article', 'section', 'div:not([class]):not([id])'];
+
+        // Collect unique, substantial text snippets. A Set gives O(1) dedup
+        // instead of the O(n^2) Array.includes scan on every candidate.
+        const seen = new Set<string>();
+        const contentParts: string[] = [];
+        for (const tag of contentTags) {
+            for (const el of Array.from(document.querySelectorAll(tag))) {
+                const text = el.textContent?.trim();
+                // Only include elements with substantial text (more than just a few characters)
+                if (text && text.length > 10 && !seen.has(text)) {
+                    seen.add(text);
+                    contentParts.push(text);
+                }
+            }
+        }
+
+        // If the selective approach found almost nothing, fall back to the whole body text.
+        if (contentParts.length < 3) {
+            return document.body.textContent || '';
+        }
+
+        return contentParts.join('\n\n');
+    } catch (error) {
+        console.error('Error extracting enhanced content:', error);
+        return 'Failed to extract content from the webpage.';
+    }
+}
+
+/**
+ * Cleans up extracted text to improve readability and focus on useful content.
+ * @param text The raw extracted text
+ * @returns Cleaned and formatted text (paragraph breaks preserved)
+ */
+function cleanupText(text: string): string {
+    if (!text) return '';
+
+    return (
+        text
+            // Collapse runs of spaces/tabs but KEEP newlines. The previous order
+            // (/\s+/g first) destroyed every line break, which made the
+            // blank-line-normalization step below dead code.
+            .replace(/[^\S\n]+/g, ' ')
+            // Normalize three or more consecutive line breaks to one blank line.
+            .replace(/\n\s*\n\s*\n+/g, '\n\n')
+            // Remove common boilerplate phrases
+            .replace(/cookie policy|privacy policy|terms of service|all rights reserved|copyright ©/gi, '')
+            // Remove email addresses
+            .replace(/[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g, '')
+            // Remove URLs
+            .replace(/https?:\/\/[^\s]+/g, '')
+            // Remove social media handles (runs after the email rule, so only bare @handles remain)
+            .replace(/@[a-zA-Z0-9_]+/g, '')
+            // Clean up any remaining HTML tags that might have been missed
+            .replace(/<[^>]*>/g, '')
+            // Fix spacing issues introduced by the removals above
+            .replace(/ +/g, ' ')
+            .trim()
+    );
+}
diff --git a/src/server/api/dynamicTools.ts b/src/server/api/dynamicTools.ts
new file mode 100644
index 000000000..a7b7e1478
--- /dev/null
+++ b/src/server/api/dynamicTools.ts
@@ -0,0 +1,130 @@
+import * as express from 'express';
+import * as fs from 'fs';
+import * as path from 'path';
+
+// Define handler types to match project patterns
+type RouteHandler = (req: express.Request, res: express.Response) => any;
+
+/**
+ * Handles API endpoints for dynamic tools created by the agent.
+ * Registers three routes: save a tool, list all tools, fetch one tool's code.
+ */
+export function setupDynamicToolsAPI(app: express.Express): void {
+    // Directory where dynamic tools will be stored
+    const dynamicToolsDir = path.join(process.cwd(), 'src', 'client', 'views', 'nodes', 'chatbot', 'tools', 'dynamic');
+
+    // Tool names must be PascalCase. This doubles as a path-traversal guard:
+    // a matching name can never contain '/', '\' or '..', so joining it onto
+    // dynamicToolsDir cannot escape the directory.
+    const TOOL_NAME_PATTERN = /^[A-Z][a-zA-Z0-9]*$/;
+
+    console.log(`Dynamic tools directory path: ${dynamicToolsDir}`);
+
+    // Ensure directory exists
+    if (!fs.existsSync(dynamicToolsDir)) {
+        try {
+            fs.mkdirSync(dynamicToolsDir, { recursive: true });
+            console.log(`Created dynamic tools directory at ${dynamicToolsDir}`);
+        } catch (error) {
+            console.error(`Failed to create dynamic tools directory: ${error}`);
+        }
+    }
+
+    /**
+     * Save a dynamic tool to the server.
+     * NOTE(review): this persists model-generated TypeScript into the source
+     * tree without sandboxing or vetting — confirm only trusted callers can
+     * reach this route.
+     */
+    const saveDynamicTool: RouteHandler = (req, res) => {
+        try {
+            const { toolName, toolCode } = req.body;
+
+            if (!toolName || !toolCode) {
+                return res.status(400).json({
+                    success: false,
+                    error: 'Missing toolName or toolCode in request body',
+                });
+            }
+
+            // Validate the tool name (PascalCase + traversal guard, see above)
+            if (!TOOL_NAME_PATTERN.test(toolName)) {
+                return res.status(400).json({
+                    success: false,
+                    error: 'Tool name must be in PascalCase format',
+                });
+            }
+
+            // Create the file path
+            const filePath = path.join(dynamicToolsDir, `${toolName}.ts`);
+
+            // Check if file already exists and is different
+            let existingCode = '';
+            if (fs.existsSync(filePath)) {
+                existingCode = fs.readFileSync(filePath, 'utf8');
+            }
+
+            // Only write if the file doesn't exist or the content is different
+            if (existingCode !== toolCode) {
+                fs.writeFileSync(filePath, toolCode, 'utf8');
+                console.log(`Saved dynamic tool: ${toolName}`);
+            } else {
+                console.log(`Dynamic tool ${toolName} already exists with the same content`);
+            }
+
+            return res.json({ success: true, toolName });
+        } catch (error) {
+            console.error('Error saving dynamic tool:', error);
+            return res.status(500).json({
+                success: false,
+                error: error instanceof Error ? error.message : 'Unknown error',
+            });
+        }
+    };
+
+    /**
+     * Get a list of all available dynamic tools (name + relative path per tool).
+     */
+    const getDynamicTools: RouteHandler = (req, res) => {
+        try {
+            // Get all TypeScript files in the dynamic tools directory
+            const files = fs
+                .readdirSync(dynamicToolsDir)
+                .filter(file => file.endsWith('.ts'))
+                .map(file => ({
+                    name: path.basename(file, '.ts'),
+                    path: path.join('dynamic', file),
+                }));
+
+            return res.json({ success: true, tools: files });
+        } catch (error) {
+            console.error('Error getting dynamic tools:', error);
+            return res.status(500).json({
+                success: false,
+                error: error instanceof Error ? error.message : 'Unknown error',
+            });
+        }
+    };
+
+    /**
+     * Get the code for a specific dynamic tool.
+     */
+    const getDynamicTool: RouteHandler = (req, res) => {
+        try {
+            const { toolName } = req.params;
+
+            // Reject names that fail the PascalCase rule BEFORE touching the
+            // filesystem — otherwise an encoded param like "..%2F..%2Fsecret"
+            // would escape dynamicToolsDir via path.join (directory traversal).
+            if (!TOOL_NAME_PATTERN.test(toolName)) {
+                return res.status(400).json({
+                    success: false,
+                    error: 'Tool name must be in PascalCase format',
+                });
+            }
+
+            const filePath = path.join(dynamicToolsDir, `${toolName}.ts`);
+
+            if (!fs.existsSync(filePath)) {
+                return res.status(404).json({
+                    success: false,
+                    error: `Tool ${toolName} not found`,
+                });
+            }
+
+            const toolCode = fs.readFileSync(filePath, 'utf8');
+            return res.json({ success: true, toolName, toolCode });
+        } catch (error) {
+            console.error('Error getting dynamic tool:', error);
+            return res.status(500).json({
+                success: false,
+                error: error instanceof Error ? error.message : 'Unknown error',
+            });
+        }
+    };
+
+    // Register routes
+    app.post('/saveDynamicTool', saveDynamicTool);
+    app.get('/getDynamicTools', getDynamicTools);
+    app.get('/getDynamicTool/:toolName', getDynamicTool);
+}
diff --git a/src/server/chunker/pdf_chunker.py b/src/server/chunker/pdf_chunker.py
index 697550f2e..e34753176 100644
--- a/src/server/chunker/pdf_chunker.py
+++ b/src/server/chunker/pdf_chunker.py
@@ -153,7 +153,7 @@ class ElementExtractor:
xref = img_info[0] # XREF of the image in the PDF
base_image = page.parent.extract_image(xref) # Extract the image by its XREF
image_bytes = base_image["image"]
- image = Image.open(io.BytesIO(image_bytes)) # Convert bytes to PIL image
+ image = Image.open(io.BytesIO(image_bytes)).convert("RGB") # Ensure it's RGB before saving as PNG
width_ratio = img.width / page.rect.width # Scale factor for width
height_ratio = img.height / page.rect.height # Scale factor for height
@@ -276,12 +276,13 @@ class PDFChunker:
:param output_folder: Folder to store the output files (extracted tables/images).
:param image_batch_size: The batch size for processing visual elements.
"""
- self.client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY")) # Initialize the Anthropic API client
+ self.client = OpenAI() # ← replaces Anthropic()
self.output_folder = output_folder
self.image_batch_size = image_batch_size # Batch size for image processing
self.doc_id = doc_id # Add doc_id
self.element_extractor = ElementExtractor(output_folder, doc_id)
+
async def chunk_pdf(self, file_data: bytes, file_name: str, doc_id: str, job_id: str) -> List[Dict[str, Any]]:
"""
Processes a PDF file, extracting text and visual elements, and returning structured chunks.
@@ -518,124 +519,77 @@ class PDFChunker:
def batch_summarize_images(self, images: Dict[int, str]) -> Dict[int, str]:
"""
- Summarize images or tables by generating descriptive text.
-
- :param images: A dictionary mapping image numbers to base64-encoded image data.
- :return: A dictionary mapping image numbers to their generated summaries.
- """
- # Prompt for the AI model to summarize images and tables
- prompt = f"""<instruction>
- <task>
- You are tasked with summarizing a series of {len(images)} images and tables for use in a RAG (Retrieval-Augmented Generation) system.
- Your goal is to create concise, informative summaries that capture the essential content of each image or table.
- These summaries will be used for embedding, so they should be descriptive and relevant. The image or table will be outlined in red on an image of the full page that it is on. Where necessary, use the context of the full page to heklp with the summary but don't summarize other content on the page.
- </task>
-
- <steps>
- <step>Identify whether it's an image or a table.</step>
- <step>Examine its content carefully.</step>
- <step>
- Write a detailed summary that captures the main points or visual elements:
- <details>
- <table>After summarizing what the table is about, include the column headers, a detailed summary of the data, and any notable data trends.</table>
- <image>Describe the main subjects, actions, or notable features.</image>
- </details>
- </step>
- <step>Focus on writing summaries that would make it easy to retrieve the content if compared to a user query using vector similarity search.</step>
- <step>Keep summaries concise and include important words that may help with retrieval (but do not include numbers and numerical data).</step>
- </steps>
-
- <important_notes>
- <note>Avoid using special characters like &amp;, &lt;, &gt;, &quot;, &apos;, $, %, etc. Instead, use their word equivalents:</note>
- <note>Use "and" instead of &amp;.</note>
- <note>Use "dollars" instead of $.</note>
- <note>Use "percent" instead of %.</note>
- <note>Refrain from using quotation marks &quot; or apostrophes &apos; unless absolutely necessary.</note>
- <note>Ensure your output is in valid XML format.</note>
- </important_notes>
-
- <formatting>
- <note>Enclose all summaries within a root element called &lt;summaries&gt;.</note>
- <note>Use &lt;summary&gt; tags to enclose each individual summary.</note>
- <note>Include an attribute 'number' in each &lt;summary&gt; tag to indicate the sequence, matching the provided image numbers.</note>
- <note>Start each summary by indicating whether it's an image or a table (e.g., "This image shows..." or "The table presents...").</note>
- <note>If an image is completely blank, leave the summary blank (e.g., &lt;summary number="3"&gt;&lt;/summary&gt;).</note>
- </formatting>
-
- <example>
- <note>Do not replicate the example below—stay grounded to the content of the table or image and describe it completely and accurately.</note>
- <output>
- &lt;summaries&gt;
- &lt;summary number="1"&gt;
- The image shows two men shaking hands on stage at a formal event. The man on the left, in a dark suit and glasses, has a professional appearance, possibly an academic or business figure. The man on the right, Tim Cook, CEO of Apple, is recognizable by his silver hair and dark blue blazer. Cook holds a document titled "Tsinghua SEM EMBA," suggesting a link to Tsinghua University’s Executive MBA program. The backdrop displays English and Chinese text about business management and education, with the event dated October 23, 2014.
- &lt;/summary&gt;
- &lt;summary number="2"&gt;
- The table compares the company's assets between December 30, 2023, and September 30, 2023. Key changes include an increase in cash and cash equivalents, while marketable securities had a slight rise. Accounts receivable and vendor non-trade receivables decreased. Inventories and other current assets saw minor fluctuations. Non-current assets like marketable securities slightly declined, while property, plant, and equipment remained stable. Total assets showed minimal change, holding steady at around three hundred fifty-three billion dollars.
- &lt;/summary&gt;
- &lt;summary number="3"&gt;
- The table outlines the company's shareholders' equity as of December 30, 2023, versus September 30, 2023. Common stock and additional paid-in capital increased, and retained earnings shifted from a deficit to a positive figure. Accumulated other comprehensive loss decreased. Overall, total shareholders' equity rose significantly, while total liabilities and equity remained nearly unchanged at about three hundred fifty-three billion dollars.
- &lt;/summary&gt;
- &lt;summary number="4"&gt;
- The table details the company's liabilities as of December 30, 2023, compared to September 30, 2023. Current liabilities decreased due to lower accounts payable and other current liabilities, while deferred revenue slightly increased. Commercial paper significantly decreased, and term debt rose modestly. Non-current liabilities were stable, with minimal changes in term debt and other non-current liabilities. Total liabilities dropped from two hundred ninety billion dollars to two hundred seventy-nine billion dollars.
- &lt;/summary&gt;
- &lt;summary number="5"&gt;
- &lt;/summary&gt;
- &lt;/summaries&gt;
- </output>
- </example>
-
- <final_notes>
- <note>Process each image or table in the order provided.</note>
- <note>Maintain consistent formatting throughout your response.</note>
- <note>Ensure the output is in full, valid XML format with the root &lt;summaries&gt; element and each summary being within a &lt;summary&gt; element with the summary number specified as well.</note>
- </final_notes>
-</instruction>
- """
- content = []
- for number, img in images.items():
- content.append({"type": "text", "text": f"\nImage {number}:\n"})
- content.append({"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": img}})
+ Summarise a batch of images/tables with GPT‑4o using Structured Outputs.
+ :param images: {image_number: base64_png}
+ :return: {image_number: summary_text}
+ """
+ # -------- 1. Build the prompt -----------
+ content: list[dict] = []
+ for n, b64 in images.items():
+ content.append({"type": "text",
+ "text": f"\nImage {n} (outlined in red on the page):"})
+ content.append({"type": "image_url",
+ "image_url": {"url": f"data:image/png;base64,{b64}"}})
messages = [
- {"role": "user", "content": content}
+ {
+ "role": "system",
+ "content": (
+ "You are generating retrieval‑ready summaries for each highlighted "
+ "image or table. Start by identifying whether the element is an "
+ "image or a table, then write one informative sentence that a vector "
+ "search would find useful. Provide detail but limit to a couple of paragraphs per image."
+ ),
+ },
+ {"role": "user", "content": content},
]
+ schema = {
+ "type": "object",
+ "properties": {
+ "summaries": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "number": {"type": "integer"},
+ "type": {"type": "string", "enum": ["image", "table"]},
+ "summary": {"type": "string"}
+ },
+ "required": ["number", "type", "summary"],
+ "additionalProperties": False
+ }
+ }
+ },
+ "required": ["summaries"],
+ "additionalProperties": False
+ }
+
+ # ---------- OpenAI call -----------------------------------------------------
try:
- response = self.client.messages.create(
- model='claude-3-5-sonnet-20240620',
- system=prompt,
- max_tokens=400 * len(images), # Increased token limit for more detailed summaries
+ resp = self.client.chat.completions.create(
+ model="gpt-4o",
messages=messages,
+ max_tokens=400 * len(images),
temperature=0,
- extra_headers={"anthropic-beta": "max-tokens-3-5-sonnet-2024-07-15"}
+ response_format={
+ "type": "json_schema",
+ "json_schema": {
+ "name": "image_batch_summaries", # ← REQUIRED
+ "schema": schema, # ← REQUIRED
+ "strict": True # ← strongly recommended
+ },
+ },
)
- # Parse the response
- text = response.content[0].text
- #print(text)
- # Attempt to parse and fix the XML if necessary
- parser = etree.XMLParser(recover=True)
- root = etree.fromstring(text, parser=parser)
- # Check if there were errors corrected
- # if parser.error_log:
- # #print("XML Parsing Errors:")
- # for error in parser.error_log:
- # #print(error)
- # Extract summaries
- summaries = {}
- for summary in root.findall('summary'):
- number = int(summary.get('number'))
- content = summary.text.strip() if summary.text else ""
- if content: # Only include non-empty summaries
- summaries[number] = content
-
- return summaries
+ parsed = json.loads(resp.choices[0].message.content) # schema‑safe
+ return {item["number"]: item["summary"]
+ for item in parsed["summaries"]}
except Exception as e:
- # Print errors to stderr so they don't interfere with JSON output
- print(json.dumps({"error": str(e)}), file=sys.stderr)
- sys.stderr.flush()
-
+ # Log and fall back gracefully
+ print(json.dumps({"error": str(e)}), file=sys.stderr, flush=True)
+ return {}
class DocumentType(Enum):
"""
@@ -668,7 +622,7 @@ class Document:
Represents a document being processed, such as a PDF, handling chunking, embedding, and summarization.
"""
- def __init__(self, file_path: str, file_name: str, job_id: str, output_folder: str):
+ def __init__(self, file_path: str, file_name: str, job_id: str, output_folder: str, doc_id: str):
"""
Initialize the Document with file data, file name, and job ID.
@@ -681,7 +635,7 @@ class Document:
self.file_path = file_path
self.job_id = job_id
self.type = self._get_document_type(file_name) # Determine the document type (PDF, CSV, etc.)
- self.doc_id = job_id # Use the job ID as the document ID
+        self.doc_id = doc_id  # Use the caller-supplied doc_id as the document ID (no longer the job ID)
self.chunks = [] # List to hold text and visual chunks
self.num_pages = 0 # Number of pages in the document (if applicable)
self.summary = "" # The generated summary for the document
@@ -767,7 +721,7 @@ class Document:
client = OpenAI() # Initialize OpenAI client for text generation
completion = client.chat.completions.create(
- model="gpt-3.5-turbo", # Specify the language model
+ model="gpt-4o", # Specify the language model
messages=[
{"role": "system",
"content": "You are an AI assistant tasked with summarizing a document. You are provided with important chunks from the document and provide a summary, as best you can, of what the document will contain overall. Be concise and brief with your response."},
@@ -801,7 +755,7 @@ class Document:
"doc_id": self.doc_id
}, indent=2) # Convert the document's attributes to JSON format
-def process_document(file_path, job_id, output_folder):
+def process_document(file_path, job_id, output_folder, doc_id):
"""
Top-level function to process a document and return the JSON output.
@@ -809,26 +763,27 @@ def process_document(file_path, job_id, output_folder):
:param job_id: The job ID for this document processing task.
:return: The processed document's data in JSON format.
"""
- new_document = Document(file_path, file_path, job_id, output_folder)
+ new_document = Document(file_path, file_path, job_id, output_folder, doc_id)
return new_document.to_json()
def main():
"""
Main entry point for the script, called with arguments from Node.js.
"""
- if len(sys.argv) != 4:
+ if len(sys.argv) != 5:
print(json.dumps({"error": "Invalid arguments"}), file=sys.stderr)
return
job_id = sys.argv[1]
file_path = sys.argv[2]
output_folder = sys.argv[3] # Get the output folder from arguments
+ doc_id = sys.argv[4]
try:
os.makedirs(output_folder, exist_ok=True)
# Process the document
- document_result = process_document(file_path, job_id, output_folder) # Pass output_folder
+ document_result = process_document(file_path, job_id, output_folder,doc_id) # Pass output_folder
# Output the final result as JSON to stdout
print(document_result)
diff --git a/src/server/index.ts b/src/server/index.ts
index 3b77359ec..887974ed8 100644
--- a/src/server/index.ts
+++ b/src/server/index.ts
@@ -2,6 +2,7 @@ import { yellow } from 'colors';
import * as dotenv from 'dotenv';
import * as mobileDetect from 'mobile-detect';
import * as path from 'path';
+import * as express from 'express';
import { logExecution } from './ActionUtilities';
import AssistantManager from './ApiManagers/AssistantManager';
import FlashcardManager from './ApiManagers/FlashcardManager';
diff --git a/src/server/server_Initialization.ts b/src/server/server_Initialization.ts
index 514e2ce1e..80cf977ee 100644
--- a/src/server/server_Initialization.ts
+++ b/src/server/server_Initialization.ts
@@ -21,6 +21,7 @@ import { Database } from './database';
import { WebSocket } from './websocket';
import axios from 'axios';
import { JSDOM } from 'jsdom';
+import { setupDynamicToolsAPI } from './api/dynamicTools';
/* RouteSetter is a wrapper around the server that prevents the server
from being exposed. */
@@ -210,6 +211,10 @@ export default async function InitializeServer(routeSetter: RouteSetter) {
// app.use(cors({ origin: (_origin: any, callback: any) => callback(null, true) }));
registerAuthenticationRoutes(app); // this adds routes to authenticate a user (login, etc)
registerCorsProxy(app); // this adds a /corsproxy/ route to allow clients to get to urls that would otherwise be blocked by cors policies
+
+ // Set up the dynamic tools API
+ setupDynamicToolsAPI(app);
+
isRelease && !SSL.Loaded && SSL.exit();
routeSetter(new RouteManager(app, isRelease)); // this sets up all the regular supervised routes (things like /home, download/upload api's, pdf, search, session, etc)
isRelease && process.env.serverPort && (resolvedPorts.server = Number(process.env.serverPort));
diff --git a/summarize_dash_ts.py b/summarize_dash_ts.py
new file mode 100644
index 000000000..69f80fde5
--- /dev/null
+++ b/summarize_dash_ts.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+"""
+summarize_dash_ts.py – v4 (periodic-save edition)
+
+• Dumps every .ts/.tsx file (skipping node_modules, etc.)
+• Calls GPT-4o with Structured Outputs (JSON-schema “const” on filename)
+• Prints each raw JSON reply (unless --quiet)
+• Flushes the growing summary file to disk every N files (default 10)
+
+pip install openai tqdm rich
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import os
+import pathlib
+import sys
+from textwrap import dedent
+from typing import Dict, Iterable, List
+
+import openai
+from rich.console import Console
+from rich.tree import Tree
+from tqdm import tqdm
+
+PERIODIC_SAVE_EVERY = 10 # ← change here if you want finer or coarser saves
+
+
+# ───────────────────────── CLI ──────────────────────────
+def parse_args() -> argparse.Namespace:
+    """Parse command-line options for the summarizer (root dir, model, limits)."""
+    p = argparse.ArgumentParser(prog="summarize_dash_ts.py")
+    p.add_argument("-r", "--root", type=pathlib.Path, default=".", help="Repo root")
+    p.add_argument("--model", default="gpt-4o-2024-08-06")
+    # API key can also come from the OPENAI_API_KEY environment variable (see main()).
+    p.add_argument("--api-key", help="OpenAI API key (else env var)")
+    p.add_argument("--max-tokens", type=int, default=512)
+    p.add_argument(
+        "--skip-dirs",
+        nargs="*",
+        default=["node_modules", ".git", "dist", "build", ".next"],
+    )
+    p.add_argument(
+        "--preview", type=int, default=5, help="How many summaries to echo at the end"
+    )
+    p.add_argument(
+        "--quiet",
+        action="store_true",
+        help="Suppress the per-file raw JSON spam once you trust the run",
+    )
+    return p.parse_args()
+
+
+# ────────────────── helpers ──────────────────
+def iter_ts(root: pathlib.Path, skip: List[str]) -> Iterable[pathlib.Path]:
+ for dpath, dnames, fnames in os.walk(root):
+ dnames[:] = [d for d in dnames if d not in skip]
+ for fn in fnames:
+ if fn.endswith((".ts", ".tsx")):
+ yield pathlib.Path(dpath) / fn
+
+
+def safe_open(p: pathlib.Path):
+ try:
+ return p.open(encoding="utf-8")
+ except UnicodeDecodeError:
+ return p.open(encoding="utf-8", errors="replace")
+
+
+def make_tree(paths: list[pathlib.Path], root: pathlib.Path) -> Tree:
+    """Build a rich Tree mirroring the directory structure of *paths* under *root*."""
+    t = Tree(str(root))
+    # Cache of path -> Tree node so each directory node is created exactly once.
+    nodes: dict[pathlib.Path, Tree] = {root: t}
+    for p in sorted(paths):
+        cur = root
+        for part in p.relative_to(root).parts:
+            cur = cur / part
+            # Attach a new child under its parent node the first time a path is seen.
+            if cur not in nodes:
+                nodes[cur] = nodes[cur.parent].add(part)
+    return t
+
+
+def write_tree_with_summaries(*, tree: Tree, summaries: dict[pathlib.Path, str],
+                              root: pathlib.Path, out_path: pathlib.Path) -> None:
+    """Render *tree* as ASCII art with each file's summary appended, atomically.
+
+    Writes to a .tmp sibling first, then replace()s it over *out_path*, so a
+    crash mid-write never leaves a truncated summary file behind.
+    """
+    tmp = out_path.with_suffix(".tmp")
+    with tmp.open("w", encoding="utf-8") as f:
+
+        def walk(node: Tree, rel_path: pathlib.Path = pathlib.Path("."), indent: str = ""):
+            # Recursively emit one line per node, with box-drawing connectors.
+            last = node.children[-1] if node.children else None
+            for child in node.children:
+                marker = "└── " if child is last else "├── "
+                new_indent = indent + ("    " if child is last else "│   ")
+                # NOTE(review): assumes child.label is the plain path-component
+                # string passed to Tree.add() in make_tree — true here, but rich
+                # labels can be markup objects in general; confirm if that changes.
+                child_rel = rel_path / child.label
+
+                # absolute path is the dict key used during the summarization loop
+                abs_path = root / child_rel
+                if abs_path in summaries:
+                    f.write(f"{indent}{marker}{child.label} – {summaries[abs_path]}\n")
+                else:
+                    f.write(f"{indent}{marker}{child.label}\n")
+
+                walk(child, child_rel, new_indent)
+
+        walk(tree)
+    tmp.replace(out_path)
+
+
+# ────────────────── prompt bits ──────────────────
+SYSTEM = """
+You are an expert TypeScript code summarizer for the Dash hypermedia code-base.
+
+You will be given ONE complete file and its **exact** relative path.
+
+Return ONLY JSON matching this shape:
+
+{
+ "filename": "<EXACT path you were given>",
+ "summary": "<3–5 sentences, <80 words>"
+}
+
+No markdown, no extra keys.
+""".strip()
+
+OVERVIEW = dedent(
+ """
+ Dash is a browser-based hypermedia system from Brown University that lets users
+ mix PDFs, web pages, audio, video, ink and rich-text on a free-form canvas,
+ create Vannevar-Bush-style “trails”, and tag/spatially arrange docs for
+ nonlinear workflows. 99 % of the code-base is TypeScript/React.
+ """
+).strip()
+
+SCHEMA_BASE = {
+ "type": "object",
+ "properties": {
+ "filename": {"type": "string"},
+ "summary": {"type": "string"},
+ },
+ "required": ["filename", "summary"],
+ "additionalProperties": False,
+}
+
+
+def ask_llm(
+    client: openai.OpenAI,
+    model: str,
+    rel_path: str,
+    code: str,
+    max_tokens: int,
+    verbose: bool = True,
+) -> str:
+    """Summarize one source file via a structured-output chat completion.
+
+    The JSON schema pins "filename" to *rel_path* with a const, so the model
+    cannot return a different path; a mismatch is still checked and corrected
+    defensively below.
+    """
+    # Specialize SCHEMA_BASE: "filename" must equal this exact relative path.
+    schema = {
+        "name": "dash_file_summary",
+        "strict": True,
+        "schema": dict(
+            SCHEMA_BASE,
+            properties=dict(
+                SCHEMA_BASE["properties"], filename={"type": "string", "const": rel_path}
+            ),
+        ),
+    }
+
+    messages = [
+        {"role": "system", "content": SYSTEM},
+        {
+            "role": "user",
+            "content": f"{OVERVIEW}\n\n(PATH = {rel_path})\n\n===== BEGIN FILE =====\n{code}\n===== END FILE =====",
+        },
+    ]
+
+    comp = client.chat.completions.create(
+        model=model,
+        messages=messages,
+        response_format={"type": "json_schema", "json_schema": schema},
+        max_tokens=max_tokens,
+    )
+
+    raw = comp.choices[0].message.content
+    if verbose:
+        print(f"\n📝 Raw JSON for {rel_path}:\n{raw}\n")
+
+    data = json.loads(raw)
+    # Belt-and-braces: warn and overwrite if the model somehow dodged the const.
+    if data["filename"] != rel_path:
+        Console().print(
+            f"[red]⚠︎ Filename mismatch – model said {data['filename']!r}[/red]"
+        )
+        data["filename"] = rel_path
+    return data["summary"].strip()
+
+
+# ────────────────── main ──────────────────
+def main() -> None:
+ args = parse_args()
+ openai.api_key = args.api_key or os.getenv("OPENAI_API_KEY") or sys.exit(
+ "Need OPENAI_API_KEY"
+ )
+
+ root = args.root.resolve()
+ con = Console()
+ con.print(f":mag: [bold]Scanning[/bold] {root}")
+
+ files = list(iter_ts(root, args.skip_dirs))
+ if not files:
+ con.print("[yellow]No TS/TSX files found[/yellow]")
+ return
+
+ # 1. full dump of file contents (unchanged)
+ tree = make_tree(files, root)
+ (root / "ts_files_with_content.txt").write_text(
+ Console(record=True, width=120).print(tree, end="") or ""
+ )
+ with (root / "ts_files_with_content.txt").open("a", encoding="utf-8") as fp:
+ for p in tqdm(files, desc="Dumping source"):
+ fp.write(f"{p.relative_to(root)}\n{'-'*80}\n")
+ fp.write(safe_open(p).read())
+ fp.write(f"\n{'='*80}\n\n")
+
+ # 2. summaries (periodic save)
+ client = openai.OpenAI()
+ summaries: Dict[pathlib.Path, str] = {}
+ out_file = root / "ts_files_with_summaries.txt"
+
+ for idx, p in enumerate(tqdm(files, desc="GPT-4o summarizing"), 1):
+ summaries[p] = ask_llm(
+ client,
+ args.model,
+ str(p.relative_to(root)),
+ safe_open(p).read(),
+ args.max_tokens,
+ verbose=not args.quiet,
+ )
+
+ if idx % PERIODIC_SAVE_EVERY == 0:
+ write_tree_with_summaries(tree=tree, summaries=summaries, root=root, out_path=out_file)
+ con.print(f"[green]✔ Flushed after {idx} files[/green]")
+
+ # final flush
+ write_tree_with_summaries(tree=tree, summaries=summaries, root=root, out_path=out_file)
+
+ # preview
+ con.print("\n[cyan]Sample summaries:[/cyan]")
+ for i, (p, s) in enumerate(list(summaries.items())[: args.preview], 1):
+ con.print(f"{i}. {p.relative_to(root)} → {s}")
+
+ con.print(f":sparkles: Done – wrote [bold]{out_file}[/bold]")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/test_dynamic_tools.js b/test_dynamic_tools.js
new file mode 100644
index 000000000..b0d6844f3
--- /dev/null
+++ b/test_dynamic_tools.js
@@ -0,0 +1,44 @@
+// Quick test script to verify dynamic tool loading.
+// Checks that the dynamic tools directory exists, then for each .ts tool file
+// verifies the structural conventions the loader expects (exported class,
+// BaseTool inheritance, a *Info object with a name field).
+const fs = require('fs');
+const path = require('path');
+
+console.log('=== Testing Dynamic Tool Loading ===');
+
+// Check if the dynamic tools directory exists
+const dynamicToolsPath = path.join(__dirname, 'src/client/views/nodes/chatbot/tools/dynamic');
+console.log('Dynamic tools directory:', dynamicToolsPath);
+console.log('Directory exists:', fs.existsSync(dynamicToolsPath));
+
+if (fs.existsSync(dynamicToolsPath)) {
+    const files = fs.readdirSync(dynamicToolsPath);
+    // Ignore hidden files (e.g. editor swap files) alongside the tools.
+    const toolFiles = files.filter(file => file.endsWith('.ts') && !file.startsWith('.'));
+
+    console.log('Found tool files:', toolFiles);
+
+    for (const toolFile of toolFiles) {
+        const toolPath = path.join(dynamicToolsPath, toolFile);
+        const toolName = path.basename(toolFile, '.ts');
+
+        console.log(`\nTesting ${toolFile}:`);
+        console.log(' - Tool name:', toolName);
+        console.log(' - File size:', fs.statSync(toolPath).size, 'bytes');
+
+        // Try to read and check the file content
+        try {
+            const content = fs.readFileSync(toolPath, 'utf8');
+
+            // Check for required patterns
+            const hasExport = content.includes(`export class ${toolName}`);
+            // NOTE(review): this regex assumes the tool-info object literal has no
+            // nested braces before its "name" property — fine for generated tools,
+            // but brittle against hand-written ones; confirm against real files.
+            const toolInfoMatch = content.match(/const\s+\w+Info.*?=\s*{[^}]*name\s*:\s*['"]([^'"]+)['"]/s);
+            const hasExtends = content.includes('extends BaseTool');
+
+            console.log(' - Has export class:', hasExport);
+            console.log(' - Extends BaseTool:', hasExtends);
+            console.log(' - Tool info name:', toolInfoMatch ? toolInfoMatch[1] : 'NOT FOUND');
+        } catch (error) {
+            console.log(' - Error reading file:', error.message);
+        }
+    }
+}
+
+console.log('\n=== Test Complete ===');